diff --git a/testbed/openvinotoolkit__datumaro/.bandit b/testbed/openvinotoolkit__datumaro/.bandit new file mode 100644 index 0000000000000000000000000000000000000000..17c7fbe6c78af71e36c04c7068b42362eecbc2e1 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/.bandit @@ -0,0 +1,8 @@ +[bandit] +# B101 : assert_used +# B102 : exec_used +# B320 : xml_bad_etree +# B404 : import_subprocess +# B406 : import_xml_sax +# B410 : import_lxml +skips: B101,B102,B320,B404,B406,B410 diff --git a/testbed/openvinotoolkit__datumaro/.coveragerc b/testbed/openvinotoolkit__datumaro/.coveragerc new file mode 100644 index 0000000000000000000000000000000000000000..d0f21a64a625adc77ec15e5d15c843e894857333 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/.coveragerc @@ -0,0 +1,34 @@ +[run] +branch = true +# relative_files = true # does not work? + +source = + datumaro/ + +omit = + datumaro/__main__.py + datumaro/version.py + tests/* + +[report] +# Regexes for lines to exclude from consideration +exclude_lines = + # Have to re-enable the standard pragma + pragma: no cover + + # Don't complain about missing debug-only code: + def __repr__ + if\s+[\w\.()]+\.isEnabledFor\(log\.DEBUG\): + + # Don't complain if tests don't hit defensive assertion code: + raise AssertionError + raise NotImplementedError + + # Don't complain if non-runnable code isn't run: + if 0: + if __name__ == .__main__.: + +# don't fail on the code that can be found +ignore_errors = true + +skip_empty = true diff --git a/testbed/openvinotoolkit__datumaro/.gitattributes b/testbed/openvinotoolkit__datumaro/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..7c0b591b072c8747ea938679ce41b8ea6dac2cec --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/.gitattributes @@ -0,0 +1,29 @@ +* text=auto whitespace=trailing-space,space-before-tab,-indent-with-non-tab,tab-in-indent,tabwidth=4 + +.git* text export-ignore + +*.txt text +*.htm text +*.html text +*.js text +*.py text +*.css text +*.md 
text +*.yml text +Dockerfile text +LICENSE text +*.conf text +*.mimetypes text +*.sh text eol=lf + +*.avi binary +*.bmp binary +*.exr binary +*.ico binary +*.jpeg binary +*.jpg binary +*.png binary +*.gif binary +*.ttf binary +*.pdf binary + diff --git a/testbed/openvinotoolkit__datumaro/.gitignore b/testbed/openvinotoolkit__datumaro/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..79362022a996f9ae315e6b87851a5a2b2c626e0d --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/.gitignore @@ -0,0 +1,57 @@ +.DS_Store + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Sphinx documentation +docs/_build/ \ No newline at end of file diff --git a/testbed/openvinotoolkit__datumaro/.pylintrc b/testbed/openvinotoolkit__datumaro/.pylintrc new file mode 100644 index 0000000000000000000000000000000000000000..09864236e69021cc8a8413202f7f0dfe7bcafa6d --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/.pylintrc @@ -0,0 +1,420 @@ +[MASTER] + +# Specify a configuration file. +#rcfile= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Add files or directories to the blacklist. They should be base names, not +# paths. +ignore=CVS + +# Add files or directories matching the regex patterns to the blacklist. The +# regex matches against base names, not paths. +ignore-patterns= + +# Pickle collected data for later comparisons. 
+persistent=yes + +# List of plugins (as comma separated values of python modules names) to load, +# usually to register additional checkers. +load-plugins= + +# Use multiple processes to speed up Pylint. +jobs=1 + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code +extension-pkg-whitelist= + +# Allow optimization of some AST trees. This will activate a peephole AST +# optimizer, which will apply various small optimizations. For instance, it can +# be used to obtain the result of joining multiple strings with the addition +# operator. Joining a lot of strings can lead to a maximum recursion error in +# Pylint and this flag can prevent that. It has one side effect, the resulting +# AST will be different than the one from reality. This option is deprecated +# and it will be removed in Pylint 2.0. +optimize-ast=no + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED +confidence= + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. 
+disable=all +enable= E0001,E0100,E0101,E0102,E0103,E0104,E0105,E0106,E0107,E0110, + E0113,E0114,E0115,E0116,E0117,E0108,E0202,E0203,E0211,E0236, + E0238,E0239,E0240,E0241,E0301,E0302,E0601,E0603,E0604,E0701, + E0702,E0703,E0704,E0710,E0711,E0712,E1003,E1102,E1111,E0112, + E1120,E1121,E1123,E1124,E1125,E1126,E1127,E1132,E1200,E1201, + E1205,E1206,E1300,E1301,E1302,E1303,E1304,E1305,E1306, + C0123,C0200,C0303,C1001, + W0101,W0102,W0104,W0105,W0106,W0107,W0108,W0109,W0110,W0120, + W0122,W0124,W0150,W0199,W0221,W0222,W0233,W0404,W0410,W0601, + W0602,W0604,W0611,W0612,W0622,W0623,W0702,W0705,W0711,W1300, + W1301,W1302,W1303,W1305,W1306,W1307, + R0102,R0202,R0203 + + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then reenable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". 
If you want to run only the classes checker, but have +# no Warning level messages displayed, use"--disable=all --enable=classes +# --disable=W" +#disable=old-octal-literal,basestring-builtin,no-absolute-import,old-division,coerce-method,long-suffix,reload-builtin,unichr-builtin,indexing-exception,raising-string,dict-iter-method,metaclass-assignment,filter-builtin-not-iterating,import-star-module-level,next-method-called,cmp-method,raw_input-builtin,old-raise-syntax,cmp-builtin,apply-builtin,getslice-method,input-builtin,backtick,coerce-builtin,range-builtin-not-iterating,xrange-builtin,using-cmp-argument,buffer-builtin,hex-method,execfile-builtin,unpacking-in-except,standarderror-builtin,round-builtin,nonzero-method,unicode-builtin,reduce-builtin,file-builtin,dict-view-method,old-ne-operator,print-statement,suppressed-message,oct-method,useless-suppression,delslice-method,long-builtin,setslice-method,zip-builtin-not-iterating,map-builtin-not-iterating,intern-builtin,parameter-unpacking + + +[REPORTS] + +# Set the output format. Available formats are text, parseable, colorized, msvs +# (visual studio) and html. You can also give a reporter class, eg +# mypackage.mymodule.MyReporterClass. +output-format=text + +# Put messages in a separate file for each module / package specified on the +# command line instead of printing them on stdout. Reports (if any) will be +# written in a file name "pylint_global.[txt|html]". This option is deprecated +# and it will be removed in Pylint 2.0. +files-output=no + +# Tells whether to display a full report or only the messages +reports=yes + +# Python expression which should return a note less than 10 (10 is the highest +# note). You have access to the variables errors warning, statement which +# respectively contain the number of errors / warnings messages and the total +# number of statements analyzed. This is used by the global evaluation report +# (RP0004). 
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details +#msg-template= + + +[BASIC] + +# Good variable names which should always be accepted, separated by a comma +good-names=i,j,k,ex,Run,_ + +# Bad variable names which should always be refused, separated by a comma +bad-names=foo,bar,baz,toto,tutu,tata + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Include a hint for the correct naming format with invalid-name +include-naming-hint=no + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +property-classes=abc.abstractproperty + +# Regular expression matching correct constant names +const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ + +# Naming hint for constant names +const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ + +# Regular expression matching correct class names +class-rgx=[A-Z_][a-zA-Z0-9]+$ + +# Naming hint for class names +class-name-hint=[A-Z_][a-zA-Z0-9]+$ + +# Regular expression matching correct argument names +argument-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for argument names +argument-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct variable names +variable-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for variable names +variable-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct class attribute names +class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ + +# Naming hint for class attribute names +class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ + +# Regular expression matching correct method names +method-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for method names +method-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# 
Regular expression matching correct module names +module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ + +# Naming hint for module names +module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ + +# Regular expression matching correct function names +function-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for function names +function-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct attribute names +attr-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Naming hint for attribute names +attr-name-hint=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression matching correct inline iteration names +inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ + +# Naming hint for inline iteration names +inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + + +[ELIF] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + + +[FORMAT] + +# Maximum number of characters on a single line. +max-line-length=80 + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + +# List of optional constructs for which whitespace checking is disabled. `dict- +# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. +# `trailing-comma` allows a space between comma and closing bracket: (a, ). +# `empty-line` allows space-only lines. +no-space-check=trailing-comma,dict-separator + +# Maximum number of lines in a module +max-module-lines=1000 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Number of spaces of indent required inside a hanging or continued line. 
+indent-after-paren=4 + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + + +[LOGGING] + +# Logging modules to check that the string format arguments are in logging +# function parameter format +logging-modules=logging + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME,XXX,TODO + + +[SIMILARITIES] + +# Minimum lines number of a similarity. +min-similarity-lines=4 + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + +# Ignore imports when computing similarities. +ignore-imports=no + + +[SPELLING] + +# Spelling dictionary name. Available dictionaries: none. To make it working +# install python-enchant package. +spelling-dict= + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to indicated private dictionary in +# --spelling-private-dict-file option instead of raising a message. +spelling-store-unknown-words=no + + +[TYPECHECK] + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis. It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules= + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. 
+ignored-classes=optparse.Values,thread._local,_thread._local + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + + +[VARIABLES] + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# A regular expression matching the name of dummy variables (i.e. expectedly +# not used). +dummy-variables-rgx=(_+[a-zA-Z0-9]*?$)|dummy + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid to define new builtins when possible. +additional-builtins= + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_,_cb + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,future.builtins + + +[CLASSES] + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__,__new__,setUp + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make + + +[DESIGN] + +# Maximum number of arguments for function / method +max-args=5 + +# Argument names that match this expression will be ignored. 
Default to name +# with leading underscore +ignored-argument-names=_.* + +# Maximum number of locals for function / method body +max-locals=15 + +# Maximum number of return / yield for function / method body +max-returns=6 + +# Maximum number of branch for function / method body +max-branches=12 + +# Maximum number of statements in function / method body +max-statements=50 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + +# Maximum number of boolean expressions in a if statement +max-bool-expr=5 + + +[IMPORTS] + +# Deprecated modules which should not be used, separated by a comma +deprecated-modules=optparse + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled) +import-graph= + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled) +ext-import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled) +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when being caught. 
Defaults to +# "Exception" +overgeneral-exceptions=Exception diff --git a/testbed/openvinotoolkit__datumaro/.travis.yml b/testbed/openvinotoolkit__datumaro/.travis.yml new file mode 100644 index 0000000000000000000000000000000000000000..24843e8ee68add58922e88ce1eb8b5a375edf21d --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/.travis.yml @@ -0,0 +1,37 @@ +language: python + +cache: pip + +python: + - '3.6' + - '3.7' + - '3.8' + +matrix: + include: + - dist: xenial + + # measure coverage here + - dist: bionic + python: '3.6' + before_install: + - pip install coverage + script: + - coverage run -m unittest discover -v + - coverage run -a datum.py -h + after_success: + - coverage xml + - bash <(curl -Ls https://coverage.codacy.com/get.sh) report -r coverage.xml + + - dist: bionic + python: '3.7' + - dist: bionic + python: '3.8' + +install: + - pip install -e ./ + - pip install tensorflow + +script: + - python -m unittest discover -v + - datum -h \ No newline at end of file diff --git a/testbed/openvinotoolkit__datumaro/CHANGELOG.md b/testbed/openvinotoolkit__datumaro/CHANGELOG.md new file mode 100644 index 0000000000000000000000000000000000000000..21c110c5c70adba0a88e65517d96bb515b12f17d --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/CHANGELOG.md @@ -0,0 +1,145 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+ + +## [Unreleased] +### Added +- Task-specific Splitter () +- `WiderFace` dataset format () +- Function to transform annotations to labels () +- `VGGFace2` dataset format () + +### Changed +- + +### Deprecated +- + +### Removed +- + +### Fixed +- + +### Security +- + +## 12/10/2020 - Release v0.1.4 +### Added +- `CamVid` dataset format () +- Ability to install `opencv-python-headless` dependency with `DATUMARO_HEADLESS=1` + environment variable instead of `opencv-python` () + +### Changed +- Allow empty supercategory in COCO () +- Allow Pascal VOC to search in subdirectories () + +### Deprecated +- + +### Removed +- + +### Fixed +- + +### Security +- + +## 10/28/2020 - Release v0.1.3 +### Added +- `ImageNet` and `ImageNetTxt` dataset formats () + +### Changed +- + +### Deprecated +- + +### Removed +- + +### Fixed +- Default `label-map` parameter value for VOC converter () +- Randomness of random split transform () +- `Transform.subsets()` method () +- Supported unknown image formats in TF Detection API converter () +- Supported empty attribute values in CVAT extractor () + +### Security +- + + +## 10/05/2020 - Release v0.1.2 +### Added +- `ByteImage` class to represent encoded images in memory and avoid recoding on save () + +### Changed +- Implementation of format plugins simplified () +- `default` is now a default subset name, instead of `None`. The values are interchangeable. 
() +- Improved performance of transforms () + +### Deprecated +- + +### Removed +- `image/depth` value from VOC export () + +### Fixed +- Zero division errors in dataset statistics () + +### Security +- + + +## 09/24/2020 - Release v0.1.1 +### Added +- `reindex` option in COCO and CVAT converters () +- Support for relative paths in LabelMe format () +- MOTS png mask format support () + +### Changed +- + +### Deprecated +- + +### Removed +- + +### Fixed +- + +### Security +- + + +## 09/10/2020 - Release v0.1.0 +### Added +- Initial release + +## Template +``` +## [Unreleased] +### Added +- + +### Changed +- + +### Deprecated +- + +### Removed +- + +### Fixed +- + +### Security +- +``` diff --git a/testbed/openvinotoolkit__datumaro/CONTRIBUTING.md b/testbed/openvinotoolkit__datumaro/CONTRIBUTING.md new file mode 100644 index 0000000000000000000000000000000000000000..4822c000757b4429eb5f3a1437a454c59745e608 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/CONTRIBUTING.md @@ -0,0 +1,99 @@ +## Table of Contents + +- [Installation](#installation) +- [Usage](#usage) +- [Testing](#testing) +- [Design](#design-and-code-structure) + +## Installation + +### Prerequisites + +- Python (3.5+) + +``` bash +git clone https://github.com/openvinotoolkit/datumaro +``` + +Optionally, install a virtual environment (recommended): + +``` bash +python -m pip install virtualenv +python -m virtualenv venv +. venv/bin/activate +``` + +Then install all dependencies: + +``` bash +while read -r p; do pip install $p; done < requirements.txt +``` + +If you're working inside of a CVAT environment: +``` bash +. 
.env/bin/activate +while read -r p; do pip install $p; done < datumaro/requirements.txt +``` + +Install Datumaro: +``` bash +pip install -e /path/to/the/cloned/repo/ +``` + +**Optional dependencies** + +These components are only required for plugins and not installed by default: + +- OpenVINO +- Accuracy Checker +- TensorFlow +- PyTorch +- MxNet +- Caffe + +## Usage + +``` bash +datum --help +python -m datumaro --help +python datumaro/ --help +python datum.py --help +``` + +``` python +import datumaro +``` + +## Testing + +It is expected that all Datumaro functionality is covered and checked by +unit tests. Tests are placed in `tests/` directory. + +To run tests use: + +``` bash +python -m unittest discover -s tests +``` + +If you're working inside of a CVAT environment, you can also use: + +``` bash +python manage.py test datumaro/ +``` + +## Design and code structure + +- [Design document](docs/design.md) +- [Developer guide](docs/developer_guide.md) + +## Code style + +Try to be readable and consistent with the existing codebase. +The project mostly follows PEP8 with little differences. +Continuation lines have a standard indentation step by default, +or any other, if it improves readability. For long conditionals use 2 steps. +No trailing whitespaces, 80 characters per line. + +## Environment + +The recommended editor is VS Code with the Python plugin. 
\ No newline at end of file diff --git a/testbed/openvinotoolkit__datumaro/LICENSE b/testbed/openvinotoolkit__datumaro/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..ae9cf7104ec37fa7e9019e30cf295e262e1c3e40 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/LICENSE @@ -0,0 +1,22 @@ +MIT License + +Copyright (C) 2019-2020 Intel Corporation +  +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom +the Software is furnished to do so, subject to the following conditions: +  +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. +  +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL +THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES +OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE +OR OTHER DEALINGS IN THE SOFTWARE. 
+  diff --git a/testbed/openvinotoolkit__datumaro/README.md b/testbed/openvinotoolkit__datumaro/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b124fd6dc63a9df8ff614da0582da708199b1428 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/README.md @@ -0,0 +1,230 @@ +# Dataset Management Framework (Datumaro) + +[![Build Status](https://travis-ci.org/openvinotoolkit/datumaro.svg?branch=develop)](https://travis-ci.org/openvinotoolkit/datumaro) +[![Codacy Badge](https://api.codacy.com/project/badge/Grade/759d2d873b59495aa3d3f8c51b786246)](https://app.codacy.com/gh/openvinotoolkit/datumaro?utm_source=github.com&utm_medium=referral&utm_content=openvinotoolkit/datumaro&utm_campaign=Badge_Grade_Dashboard) +[![Codacy Badge](https://app.codacy.com/project/badge/Coverage/9511b691ff134e739ea6fc524f7cc760)](https://www.codacy.com/gh/openvinotoolkit/datumaro?utm_source=github.com&utm_medium=referral&utm_content=openvinotoolkit/datumaro&utm_campaign=Badge_Coverage) + +A framework and CLI tool to build, transform, and analyze datasets. + + +``` +VOC dataset ---> Annotation tool + + / +COCO dataset -----> Datumaro ---> dataset ------> Model training + + \ +CVAT annotations ---> Publication, statistics etc. 
+``` + + +# Table of Contents + +- [Examples](#examples) +- [Features](#features) +- [Installation](#installation) +- [Usage](#usage) +- [User manual](docs/user_manual.md) +- [Contributing](#contributing) + +## Examples + +[(Back to top)](#table-of-contents) + + + + +- Convert PASCAL VOC dataset to COCO format, keep only images with `cat` class presented: + ```bash + # Download VOC dataset: + # http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar + datum convert --input-format voc --input-path \ + --output-format coco \ + --filter '/item[annotation/label="cat"]' + ``` + +- Convert only non-`occluded` annotations from a [CVAT](https://github.com/opencv/cvat) project to TFrecord: + ```bash + # export Datumaro dataset in CVAT UI, extract somewhere, go to the project dir + datum project filter -e '/item/annotation[occluded="False"]' \ + --mode items+anno --output-dir not_occluded + datum project export --project not_occluded \ + --format tf_detection_api -- --save-images + ``` + +- Annotate MS COCO dataset, extract image subset, re-annotate it in [CVAT](https://github.com/opencv/cvat), update old dataset: + ```bash + # Download COCO dataset http://cocodataset.org/#download + # Put images to coco/images/ and annotations to coco/annotations/ + datum project import --format coco --input-path + datum project export --filter '/image[images_I_dont_like]' --format cvat \ + --output-dir reannotation + # import dataset and images to CVAT, re-annotate + # export Datumaro project, extract to 'reannotation-upd' + datum project merge reannotation-upd + datum project export --format coco + ``` + +- Annotate instance polygons in [CVAT](https://github.com/opencv/cvat), export as masks in COCO: + ```bash + datum convert --input-format cvat --input-path \ + --output-format coco -- --segmentation-mode masks + ``` + +- Apply an OpenVINO detection model to some COCO-like dataset, + then compare annotations with ground truth and visualize in TensorBoard: + 
```bash + datum project import --format coco --input-path + # create model results interpretation script + datum model add mymodel openvino \ + --weights model.bin --description model.xml \ + --interpretation-script parse_results.py + datum model run --model mymodel --output-dir mymodel_inference/ + datum project diff mymodel_inference/ --format tensorboard --output-dir diff + ``` + +- Change colors in PASCAL VOC-like `.png` masks: + ```bash + datum project import --format voc --input-path + + # Create a color map file with desired colors: + # + # label : color_rgb : parts : actions + # cat:0,0,255:: + # dog:255,0,0:: + # + # Save as mycolormap.txt + + datum project export --format voc_segmentation -- --label-map mycolormap.txt + # add "--apply-colormap=0" to save grayscale (indexed) masks + # check "--help" option for more info + # use "datum --loglevel debug" for extra conversion info + ``` + + + + +## Features + +[(Back to top)](#table-of-contents) + +- Dataset reading, writing, conversion in any direction. 
Supported formats: + - [COCO](http://cocodataset.org/#format-data) (`image_info`, `instances`, `person_keypoints`, `captions`, `labels`*) + - [PASCAL VOC](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/htmldoc/index.html) (`classification`, `detection`, `segmentation`, `action_classification`, `person_layout`) + - [YOLO](https://github.com/AlexeyAB/darknet#how-to-train-pascal-voc-data) (`bboxes`) + - [TF Detection API](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/using_your_own_dataset.md) (`bboxes`, `masks`) + - [WIDER Face](http://shuoyang1213.me/WIDERFACE/) (`bboxes`) + - [VGGFace2](https://github.com/ox-vgg/vgg_face2) (`landmarks`, `bboxes`) + - [MOT sequences](https://arxiv.org/pdf/1906.04567.pdf) + - [MOTS PNG](https://www.vision.rwth-aachen.de/page/mots) + - [ImageNet](http://image-net.org/) + - [CamVid](http://mi.eng.cam.ac.uk/research/projects/VideoRec/CamVid/) + - [CVAT](https://github.com/opencv/cvat/blob/develop/cvat/apps/documentation/xml_format.md) + - [LabelMe](http://labelme.csail.mit.edu/Release3.0) +- Dataset building + - Merging multiple datasets into one + - Dataset filtering by custom criteria: + - remove polygons of a certain class + - remove images without annotations of a specific class + - remove `occluded` annotations from images + - keep only vertically-oriented images + - remove small area bounding boxes from annotations + - Annotation conversions, for instance: + - polygons to instance masks and vice versa + - apply a custom colormap for mask annotations + - rename or remove dataset labels +- Dataset quality checking + - Simple checking for errors + - Comparison with model inference + - Merging and comparison of multiple datasets +- Dataset comparison +- Dataset statistics (image mean and std, annotation statistics) +- Model integration + - Inference (OpenVINO, Caffe, PyTorch, TensorFlow, MxNet, etc.) 
+    - Explainable AI ([RISE algorithm](https://arxiv.org/abs/1806.07421)) + +> Check [the design document](docs/design.md) for a full list of features. +> Check [the user manual](docs/user_manual.md) for usage instructions. + +## Installation + +[(Back to top)](#table-of-contents) + +### Dependencies + +- Python (3.6+) +- Optional: OpenVINO, TensorFlow, PyTorch, MxNet, Caffe, Accuracy Checker + +Optionally, create a virtual environment: + +``` bash +python -m pip install virtualenv +python -m virtualenv venv +. venv/bin/activate +``` + +Install Datumaro package: + +``` bash +pip install 'git+https://github.com/openvinotoolkit/datumaro' +``` + +## Usage + +[(Back to top)](#table-of-contents) + +There are several options available: +- [A standalone command-line tool](#standalone-tool) +- [A python module](#python-module) + +### Standalone tool + +Datumaro as a standalone tool allows you to do various dataset operations from +the command line interface: + +``` bash +datum --help +python -m datumaro --help +``` + +### Python module + +Datumaro can be used in custom scripts as a Python module. 
Used this way, it +allows to use its features from an existing codebase, enabling dataset +reading, exporting and iteration capabilities, simplifying integration of custom +formats and providing high performance operations: + +``` python +from datumaro.components.project import Project # project-related things +import datumaro.components.extractor # annotations and high-level interfaces + +# load a Datumaro project +project = Project.load('directory') + +# create a dataset +dataset = project.make_dataset() + +# keep only annotated images +dataset = dataset.select(lambda item: len(item.annotations) != 0) + +# change dataset labels +dataset = dataset.transform(project.env.transforms.get('remap_labels'), + {'cat': 'dog', # rename cat to dog + 'truck': 'car', # rename truck to car + 'person': '', # remove this label + }, default='delete') + +for item in dataset: + print(item.id, item.annotations) + +# export the resulting dataset in COCO format +project.env.converters.get('coco').convert(dataset, save_dir='dst/dir') +``` + +> Check our [developer guide](docs/developer_guide.md) for additional information. + +## Contributing + +[(Back to top)](#table-of-contents) + +Feel free to [open an Issue](https://github.com/openvinotoolkit/datumaro/issues/new), if you +think something needs to be changed. You are welcome to participate in development, +instructions are available in our [contribution guide](CONTRIBUTING.md). 
diff --git a/testbed/openvinotoolkit__datumaro/datum.py b/testbed/openvinotoolkit__datumaro/datum.py new file mode 100644 index 0000000000000000000000000000000000000000..12c150bd167e38da85d15e07e722a7e9bcaaa8a7 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/datum.py @@ -0,0 +1,8 @@ +#!/usr/bin/env python +import sys + +from datumaro.cli.__main__ import main + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/testbed/openvinotoolkit__datumaro/datumaro/__init__.py b/testbed/openvinotoolkit__datumaro/datumaro/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..eb864e52b5104beeded2c3a7c2318d93cdf768b6 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/datumaro/__init__.py @@ -0,0 +1,4 @@ + +# Copyright (C) 2019-2020 Intel Corporation +# +# SPDX-License-Identifier: MIT diff --git a/testbed/openvinotoolkit__datumaro/datumaro/__main__.py b/testbed/openvinotoolkit__datumaro/datumaro/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..be1cb092981b18f1d1f90a89be868aca51ee6023 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/datumaro/__main__.py @@ -0,0 +1,12 @@ + +# Copyright (C) 2019-2020 Intel Corporation +# +# SPDX-License-Identifier: MIT + +import sys + +from datumaro.cli.__main__ import main + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/testbed/openvinotoolkit__datumaro/datumaro/cli/commands/merge.py b/testbed/openvinotoolkit__datumaro/datumaro/cli/commands/merge.py new file mode 100644 index 0000000000000000000000000000000000000000..2583cd8641bb613717aae948fa548a8e0d54e8cc --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/datumaro/cli/commands/merge.py @@ -0,0 +1,124 @@ + +# Copyright (C) 2020 Intel Corporation +# +# SPDX-License-Identifier: MIT + +import argparse +import json +import logging as log +import os.path as osp +from collections import OrderedDict + +from datumaro.components.project import Project +from datumaro.components.operations import 
(IntersectMerge, + QualityError, MergeError) + +from ..util import at_least, MultilineFormatter, CliException +from ..util.project import generate_next_file_name, load_project + + +def build_parser(parser_ctor=argparse.ArgumentParser): + parser = parser_ctor(help="Merge few projects", + description=""" + Merges multiple datasets into one. This can be useful if you + have few annotations and wish to merge them, + taking into consideration potential overlaps and conflicts. + This command can try to find a common ground by voting or + return a list of conflicts.|n + |n + Examples:|n + - Merge annotations from 3 (or more) annotators:|n + |s|smerge project1/ project2/ project3/|n + - Check groups of the merged dataset for consistence:|n + |s|s|slook for groups consising of 'person', 'hand' 'head', 'foot'|n + |s|smerge project1/ project2/ -g 'person,hand?,head,foot?' + """, + formatter_class=MultilineFormatter) + + def _group(s): + return s.split(',') + + parser.add_argument('project', nargs='+', action=at_least(2), + help="Path to a project (repeatable)") + parser.add_argument('-iou', '--iou-thresh', default=0.25, type=float, + help="IoU match threshold for segments (default: %(default)s)") + parser.add_argument('-oconf', '--output-conf-thresh', + default=0.0, type=float, + help="Confidence threshold for output " + "annotations (default: %(default)s)") + parser.add_argument('--quorum', default=0, type=int, + help="Minimum count for a label and attribute voting " + "results to be counted (default: %(default)s)") + parser.add_argument('-g', '--groups', action='append', type=_group, + default=[], + help="A comma-separated list of labels in " + "annotation groups to check. '?' 
postfix can be added to a label to" + "make it optional in the group (repeatable)") + parser.add_argument('-o', '--output-dir', dest='dst_dir', default=None, + help="Output directory (default: current project's dir)") + parser.add_argument('--overwrite', action='store_true', + help="Overwrite existing files in the save directory") + parser.set_defaults(command=merge_command) + + return parser + +def merge_command(args): + source_projects = [load_project(p) for p in args.project] + + dst_dir = args.dst_dir + if dst_dir: + if not args.overwrite and osp.isdir(dst_dir) and os.listdir(dst_dir): + raise CliException("Directory '%s' already exists " + "(pass --overwrite to overwrite)" % dst_dir) + else: + dst_dir = generate_next_file_name('merged') + + source_datasets = [] + for p in source_projects: + log.debug("Loading project '%s' dataset", p.config.project_name) + source_datasets.append(p.make_dataset()) + + merger = IntersectMerge(conf=IntersectMerge.Conf( + pairwise_dist=args.iou_thresh, groups=args.groups, + output_conf_thresh=args.output_conf_thresh, quorum=args.quorum + )) + merged_dataset = merger(source_datasets) + + merged_project = Project() + output_dataset = merged_project.make_dataset() + output_dataset.define_categories(merged_dataset.categories()) + merged_dataset = output_dataset.update(merged_dataset) + merged_dataset.save(save_dir=dst_dir) + + report_path = osp.join(dst_dir, 'merge_report.json') + save_merge_report(merger, report_path) + + dst_dir = osp.abspath(dst_dir) + log.info("Merge results have been saved to '%s'" % dst_dir) + log.info("Report has been saved to '%s'" % report_path) + + return 0 + +def save_merge_report(merger, path): + item_errors = OrderedDict() + source_errors = OrderedDict() + all_errors = [] + + for e in merger.errors: + if isinstance(e, QualityError): + item_errors[str(e.item_id)] = item_errors.get(str(e.item_id), 0) + 1 + elif isinstance(e, MergeError): + for s in e.sources: + source_errors[s] = source_errors.get(s, 0) + 1 
+ item_errors[str(e.item_id)] = item_errors.get(str(e.item_id), 0) + 1 + + all_errors.append(str(e)) + + errors = OrderedDict([ + ('Item errors', item_errors), + ('Source errors', source_errors), + ('All errors', all_errors), + ]) + + with open(path, 'w') as f: + json.dump(errors, f, indent=4) \ No newline at end of file diff --git a/testbed/openvinotoolkit__datumaro/datumaro/plugins/__init__.py b/testbed/openvinotoolkit__datumaro/datumaro/plugins/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/openvinotoolkit__datumaro/datumaro/plugins/accuracy_checker_plugin/launcher.py b/testbed/openvinotoolkit__datumaro/datumaro/plugins/accuracy_checker_plugin/launcher.py new file mode 100644 index 0000000000000000000000000000000000000000..1525110830118f1e78a547140c050f0ef638bace --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/datumaro/plugins/accuracy_checker_plugin/launcher.py @@ -0,0 +1,37 @@ + +# Copyright (C) 2020 Intel Corporation +# +# SPDX-License-Identifier: MIT + +import os.path as osp +import yaml + +from datumaro.components.cli_plugin import CliPlugin +from datumaro.components.launcher import Launcher + +from .details.ac import GenericAcLauncher as _GenericAcLauncher + + +class AcLauncher(Launcher, CliPlugin): + """ + Generic model launcher with Accuracy Checker backend. 
+ """ + + @classmethod + def build_cmdline_parser(cls, **kwargs): + parser = super().build_cmdline_parser(**kwargs) + parser.add_argument('-c', '--config', type=osp.abspath, required=True, + help="Path to the launcher configuration file (.yml)") + return parser + + def __init__(self, config, model_dir=None): + model_dir = model_dir or '' + with open(osp.join(model_dir, config), 'r') as f: + config = yaml.safe_load(f) + self._launcher = _GenericAcLauncher.from_config(config) + + def launch(self, inputs): + return self._launcher.launch(inputs) + + def categories(self): + return self._launcher.categories() diff --git a/testbed/openvinotoolkit__datumaro/datumaro/plugins/camvid_format.py b/testbed/openvinotoolkit__datumaro/datumaro/plugins/camvid_format.py new file mode 100644 index 0000000000000000000000000000000000000000..6049ce6af4aa7aa613250d3ec74966fe332b2ea2 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/datumaro/plugins/camvid_format.py @@ -0,0 +1,344 @@ + +# Copyright (C) 2020 Intel Corporation +# +# SPDX-License-Identifier: MIT + +import os +import os.path as osp +from collections import OrderedDict +from enum import Enum +from glob import glob + +import numpy as np +from datumaro.components.converter import Converter +from datumaro.components.extractor import (AnnotationType, CompiledMask, + DatasetItem, Importer, LabelCategories, Mask, + MaskCategories, SourceExtractor) +from datumaro.util import find, str_to_bool +from datumaro.util.image import save_image +from datumaro.util.mask_tools import lazy_mask, paint_mask, generate_colormap + + +CamvidLabelMap = OrderedDict([ + ('Void', (0, 0, 0)), + ('Animal', (64, 128, 64)), + ('Archway', (192, 0, 128)), + ('Bicyclist', (0, 128, 192)), + ('Bridge', (0, 128, 64)), + ('Building', (128, 0, 0)), + ('Car', (64, 0, 128)), + ('CartLuggagePram', (64, 0, 192)), + ('Child', (192, 128, 64)), + ('Column_Pole', (192, 192, 128)), + ('Fence', (64, 64, 128)), + ('LaneMkgsDriv', (128, 0, 192)), + ('LaneMkgsNonDriv', (192, 
0, 64)), + ('Misc_Text', (128, 128, 64)), + ('MotorcycycleScooter', (192, 0, 192)), + ('OtherMoving', (128, 64, 64)), + ('ParkingBlock', (64, 192, 128)), + ('Pedestrian', (64, 64, 0)), + ('Road', (128, 64, 128)), + ('RoadShoulder', (128, 128, 192)), + ('Sidewalk', (0, 0, 192)), + ('SignSymbol', (192, 128, 128)), + ('Sky', (128, 128, 128)), + ('SUVPickupTruck', (64, 128, 192)), + ('TrafficCone', (0, 0, 64)), + ('TrafficLight', (0, 64, 64)), + ('Train', (192, 64, 128)), + ('Tree', (128, 128, 0)), + ('Truck_Bus', (192, 128, 192)), + ('Tunnel', (64, 0, 64)), + ('VegetationMisc', (192, 192, 0)), + ('Wall', (64, 192, 0)) +]) + +class CamvidPath: + LABELMAP_FILE = 'label_colors.txt' + SEGM_DIR = "annot" + IMAGE_EXT = '.png' + + +def parse_label_map(path): + if not path: + return None + + label_map = OrderedDict() + with open(path, 'r') as f: + for line in f: + # skip empty and commented lines + line = line.strip() + if not line or line and line[0] == '#': + continue + + # color, name + label_desc = line.strip().split() + + if 2 < len(label_desc): + name = label_desc[3] + color = tuple([int(c) for c in label_desc[:-1]]) + else: + name = label_desc[0] + color = None + + if name in label_map: + raise ValueError("Label '%s' is already defined" % name) + + label_map[name] = color + return label_map + +def write_label_map(path, label_map): + with open(path, 'w') as f: + for label_name, label_desc in label_map.items(): + if label_desc: + color_rgb = ' '.join(str(c) for c in label_desc) + else: + color_rgb = '' + f.write('%s %s\n' % (color_rgb, label_name)) + +def make_camvid_categories(label_map=None): + if label_map is None: + label_map = CamvidLabelMap + + # There must always be a label with color (0, 0, 0) at index 0 + bg_label = find(label_map.items(), lambda x: x[1] == (0, 0, 0)) + if bg_label is not None: + bg_label = bg_label[0] + else: + bg_label = 'background' + if bg_label not in label_map: + has_colors = any(v is not None for v in label_map.values()) + color = (0, 0, 
0) if has_colors else None + label_map[bg_label] = color + label_map.move_to_end(bg_label, last=False) + + categories = {} + label_categories = LabelCategories() + for label, desc in label_map.items(): + label_categories.add(label) + categories[AnnotationType.label] = label_categories + + has_colors = any(v is not None for v in label_map.values()) + if not has_colors: # generate new colors + colormap = generate_colormap(len(label_map)) + else: # only copy defined colors + label_id = lambda label: label_categories.find(label)[0] + colormap = { label_id(name): (desc[0], desc[1], desc[2]) + for name, desc in label_map.items() } + mask_categories = MaskCategories(colormap) + mask_categories.inverse_colormap # pylint: disable=pointless-statement + categories[AnnotationType.mask] = mask_categories + return categories + + +class CamvidExtractor(SourceExtractor): + def __init__(self, path): + assert osp.isfile(path), path + self._path = path + self._dataset_dir = osp.dirname(path) + super().__init__(subset=osp.splitext(osp.basename(path))[0]) + + self._categories = self._load_categories(self._dataset_dir) + self._items = list(self._load_items(path).values()) + + def _load_categories(self, path): + label_map = None + label_map_path = osp.join(path, CamvidPath.LABELMAP_FILE) + if osp.isfile(label_map_path): + label_map = parse_label_map(label_map_path) + else: + label_map = CamvidLabelMap + self._labels = [label for label in label_map] + return make_camvid_categories(label_map) + + def _load_items(self, path): + items = {} + with open(path, encoding='utf-8') as f: + for line in f: + objects = line.split() + image = objects[0] + item_id = ('/'.join(image.split('/')[2:]))[:-len(CamvidPath.IMAGE_EXT)] + image_path = osp.join(self._dataset_dir, + (image, image[1:])[image[0] == '/']) + item_annotations = [] + if 1 < len(objects): + gt = objects[1] + gt_path = osp.join(self._dataset_dir, + (gt, gt[1:]) [gt[0] == '/']) + inverse_cls_colormap = \ + 
self._categories[AnnotationType.mask].inverse_colormap + mask = lazy_mask(gt_path, inverse_cls_colormap) + # loading mask through cache + mask = mask() + classes = np.unique(mask) + labels = self._categories[AnnotationType.label]._indices + labels = { labels[label_name]: label_name + for label_name in labels } + for label_id in classes: + if labels[label_id] in self._labels: + image = self._lazy_extract_mask(mask, label_id) + item_annotations.append(Mask(image=image, label=label_id)) + items[item_id] = DatasetItem(id=item_id, subset=self._subset, + image=image_path, annotations=item_annotations) + return items + + @staticmethod + def _lazy_extract_mask(mask, c): + return lambda: mask == c + + +class CamvidImporter(Importer): + @classmethod + def find_sources(cls, path): + subset_paths = [p for p in glob(osp.join(path, '**.txt'), recursive=True) + if osp.basename(p) != CamvidPath.LABELMAP_FILE] + sources = [] + for subset_path in subset_paths: + sources += cls._find_sources_recursive( + subset_path, '.txt', 'camvid') + return sources + + +LabelmapType = Enum('LabelmapType', ['camvid', 'source']) + +class CamvidConverter(Converter): + DEFAULT_IMAGE_EXT = '.png' + + @classmethod + def build_cmdline_parser(cls, **kwargs): + parser = super().build_cmdline_parser(**kwargs) + + parser.add_argument('--apply-colormap', type=str_to_bool, default=True, + help="Use colormap for class masks (default: %(default)s)") + parser.add_argument('--label-map', type=cls._get_labelmap, default=None, + help="Labelmap file path or one of %s" % \ + ', '.join(t.name for t in LabelmapType)) + + def __init__(self, extractor, save_dir, + apply_colormap=True, label_map=None, **kwargs): + super().__init__(extractor, save_dir, **kwargs) + + self._apply_colormap = apply_colormap + + if label_map is None: + label_map = LabelmapType.source.name + self._load_categories(label_map) + + def apply(self): + subset_dir = self._save_dir + os.makedirs(subset_dir, exist_ok=True) + + for subset_name, subset in 
self._extractor.subsets().items(): + segm_list = {} + for item in subset: + masks = [a for a in item.annotations + if a.type == AnnotationType.mask] + + if masks: + compiled_mask = CompiledMask.from_instance_masks(masks, + instance_labels=[self._label_id_mapping(m.label) + for m in masks]) + + self.save_segm(osp.join(subset_dir, + subset_name + CamvidPath.SEGM_DIR, + item.id + CamvidPath.IMAGE_EXT), + compiled_mask.class_mask) + segm_list[item.id] = True + else: + segm_list[item.id] = False + + if self._save_images: + self._save_image(item, osp.join(subset_dir, subset_name, + item.id + CamvidPath.IMAGE_EXT)) + + self.save_segm_lists(subset_name, segm_list) + self.save_label_map() + + def save_segm(self, path, mask, colormap=None): + if self._apply_colormap: + if colormap is None: + colormap = self._categories[AnnotationType.mask].colormap + mask = paint_mask(mask, colormap) + save_image(path, mask, create_dir=True) + + def save_segm_lists(self, subset_name, segm_list): + if not segm_list: + return + + ann_file = osp.join(self._save_dir, subset_name + '.txt') + with open(ann_file, 'w') as f: + for item in segm_list: + if segm_list[item]: + path_mask = '/%s/%s' % (subset_name + CamvidPath.SEGM_DIR, + item + CamvidPath.IMAGE_EXT) + else: + path_mask = '' + f.write('/%s/%s %s\n' % (subset_name, + item + CamvidPath.IMAGE_EXT, path_mask)) + + def save_label_map(self): + path = osp.join(self._save_dir, CamvidPath.LABELMAP_FILE) + labels = self._extractor.categories()[AnnotationType.label]._indices + if len(self._label_map) > len(labels): + self._label_map.pop('background') + write_label_map(path, self._label_map) + + def _load_categories(self, label_map_source): + if label_map_source == LabelmapType.camvid.name: + # use the default Camvid colormap + label_map = CamvidLabelMap + + elif label_map_source == LabelmapType.source.name and \ + AnnotationType.mask not in self._extractor.categories(): + # generate colormap for input labels + labels = self._extractor.categories() \ 
+ .get(AnnotationType.label, LabelCategories()) + label_map = OrderedDict((item.name, None) + for item in labels.items) + + elif label_map_source == LabelmapType.source.name and \ + AnnotationType.mask in self._extractor.categories(): + # use source colormap + labels = self._extractor.categories()[AnnotationType.label] + colors = self._extractor.categories()[AnnotationType.mask] + label_map = OrderedDict() + for idx, item in enumerate(labels.items): + color = colors.colormap.get(idx) + if color is not None: + label_map[item.name] = color + + elif isinstance(label_map_source, dict): + label_map = OrderedDict( + sorted(label_map_source.items(), key=lambda e: e[0])) + + elif isinstance(label_map_source, str) and osp.isfile(label_map_source): + label_map = parse_label_map(label_map_source) + + else: + raise Exception("Wrong labelmap specified, " + "expected one of %s or a file path" % \ + ', '.join(t.name for t in LabelmapType)) + + self._categories = make_camvid_categories(label_map) + self._label_map = label_map + self._label_id_mapping = self._make_label_id_map() + + def _make_label_id_map(self): + source_labels = { + id: label.name for id, label in + enumerate(self._extractor.categories().get( + AnnotationType.label, LabelCategories()).items) + } + target_labels = { + label.name: id for id, label in + enumerate(self._categories[AnnotationType.label].items) + } + id_mapping = { + src_id: target_labels.get(src_label, 0) + for src_id, src_label in source_labels.items() + } + + def map_id(src_id): + return id_mapping.get(src_id, 0) + return map_id diff --git a/testbed/openvinotoolkit__datumaro/datumaro/plugins/cvat_format/format.py b/testbed/openvinotoolkit__datumaro/datumaro/plugins/cvat_format/format.py new file mode 100644 index 0000000000000000000000000000000000000000..e5572a89bebaf183256394d49e0f5c90777b2378 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/datumaro/plugins/cvat_format/format.py @@ -0,0 +1,9 @@ + +# Copyright (C) 2019-2020 Intel Corporation +# 
+# SPDX-License-Identifier: MIT + +class CvatPath: + IMAGES_DIR = 'images' + + IMAGE_EXT = '.jpg' diff --git a/testbed/openvinotoolkit__datumaro/datumaro/plugins/datumaro_format/__init__.py b/testbed/openvinotoolkit__datumaro/datumaro/plugins/datumaro_format/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/openvinotoolkit__datumaro/datumaro/plugins/datumaro_format/converter.py b/testbed/openvinotoolkit__datumaro/datumaro/plugins/datumaro_format/converter.py new file mode 100644 index 0000000000000000000000000000000000000000..445dd269684b5f92adac14fb715e922949878bc7 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/datumaro/plugins/datumaro_format/converter.py @@ -0,0 +1,258 @@ + +# Copyright (C) 2019-2020 Intel Corporation +# +# SPDX-License-Identifier: MIT + +# pylint: disable=no-self-use + +import json +import numpy as np +import os +import os.path as osp + +from datumaro.components.converter import Converter +from datumaro.components.extractor import ( + DEFAULT_SUBSET_NAME, Annotation, _Shape, + Label, Mask, RleMask, Points, Polygon, PolyLine, Bbox, Caption, + LabelCategories, MaskCategories, PointsCategories +) +from datumaro.util import cast +import pycocotools.mask as mask_utils + +from .format import DatumaroPath + + +class _SubsetWriter: + def __init__(self, name, context): + self._name = name + self._context = context + + self._data = { + 'info': {}, + 'categories': {}, + 'items': [], + } + + @property + def categories(self): + return self._data['categories'] + + @property + def items(self): + return self._data['items'] + + def write_item(self, item): + annotations = [] + item_desc = { + 'id': item.id, + 'annotations': annotations, + } + if item.attributes: + item_desc['attr'] = item.attributes + if item.path: + item_desc['path'] = item.path + if item.has_image: + path = item.image.path + if self._context._save_images: + path = 
self._context._make_image_filename(item) + self._context._save_image(item, path) + + item_desc['image'] = { + 'size': item.image.size, + 'path': path, + } + self.items.append(item_desc) + + for ann in item.annotations: + if isinstance(ann, Label): + converted_ann = self._convert_label_object(ann) + elif isinstance(ann, Mask): + converted_ann = self._convert_mask_object(ann) + elif isinstance(ann, Points): + converted_ann = self._convert_points_object(ann) + elif isinstance(ann, PolyLine): + converted_ann = self._convert_polyline_object(ann) + elif isinstance(ann, Polygon): + converted_ann = self._convert_polygon_object(ann) + elif isinstance(ann, Bbox): + converted_ann = self._convert_bbox_object(ann) + elif isinstance(ann, Caption): + converted_ann = self._convert_caption_object(ann) + else: + raise NotImplementedError() + annotations.append(converted_ann) + + def write_categories(self, categories): + for ann_type, desc in categories.items(): + if isinstance(desc, LabelCategories): + converted_desc = self._convert_label_categories(desc) + elif isinstance(desc, MaskCategories): + converted_desc = self._convert_mask_categories(desc) + elif isinstance(desc, PointsCategories): + converted_desc = self._convert_points_categories(desc) + else: + raise NotImplementedError() + self.categories[ann_type.name] = converted_desc + + def write(self, save_dir): + with open(osp.join(save_dir, '%s.json' % (self._name)), 'w') as f: + json.dump(self._data, f) + + def _convert_annotation(self, obj): + assert isinstance(obj, Annotation) + + ann_json = { + 'id': cast(obj.id, int), + 'type': cast(obj.type.name, str), + 'attributes': obj.attributes, + 'group': cast(obj.group, int, 0), + } + return ann_json + + def _convert_label_object(self, obj): + converted = self._convert_annotation(obj) + + converted.update({ + 'label_id': cast(obj.label, int), + }) + return converted + + def _convert_mask_object(self, obj): + converted = self._convert_annotation(obj) + + if isinstance(obj, RleMask): 
+ rle = obj.rle + else: + rle = mask_utils.encode( + np.require(obj.image, dtype=np.uint8, requirements='F')) + + converted.update({ + 'label_id': cast(obj.label, int), + 'rle': { + # serialize as compressed COCO mask + 'counts': rle['counts'].decode('ascii'), + 'size': list(int(c) for c in rle['size']), + }, + 'z_order': obj.z_order, + }) + return converted + + def _convert_shape_object(self, obj): + assert isinstance(obj, _Shape) + converted = self._convert_annotation(obj) + + converted.update({ + 'label_id': cast(obj.label, int), + 'points': [float(p) for p in obj.points], + 'z_order': obj.z_order, + }) + return converted + + def _convert_polyline_object(self, obj): + return self._convert_shape_object(obj) + + def _convert_polygon_object(self, obj): + return self._convert_shape_object(obj) + + def _convert_bbox_object(self, obj): + converted = self._convert_shape_object(obj) + converted.pop('points', None) + converted['bbox'] = [float(p) for p in obj.get_bbox()] + return converted + + def _convert_points_object(self, obj): + converted = self._convert_shape_object(obj) + + converted.update({ + 'visibility': [int(v.value) for v in obj.visibility], + }) + return converted + + def _convert_caption_object(self, obj): + converted = self._convert_annotation(obj) + + converted.update({ + 'caption': cast(obj.caption, str), + }) + return converted + + def _convert_label_categories(self, obj): + converted = { + 'labels': [], + } + for label in obj.items: + converted['labels'].append({ + 'name': cast(label.name, str), + 'parent': cast(label.parent, str), + }) + return converted + + def _convert_mask_categories(self, obj): + converted = { + 'colormap': [], + } + for label_id, color in obj.colormap.items(): + converted['colormap'].append({ + 'label_id': int(label_id), + 'r': int(color[0]), + 'g': int(color[1]), + 'b': int(color[2]), + }) + return converted + + def _convert_points_categories(self, obj): + converted = { + 'items': [], + } + for label_id, item in 
obj.items.items(): + converted['items'].append({ + 'label_id': int(label_id), + 'labels': [cast(label, str) for label in item.labels], + 'joints': [list(map(int, j)) for j in item.joints], + }) + return converted + +class DatumaroConverter(Converter): + DEFAULT_IMAGE_EXT = DatumaroPath.IMAGE_EXT + + def apply(self): + os.makedirs(self._save_dir, exist_ok=True) + + images_dir = osp.join(self._save_dir, DatumaroPath.IMAGES_DIR) + os.makedirs(images_dir, exist_ok=True) + self._images_dir = images_dir + + annotations_dir = osp.join(self._save_dir, DatumaroPath.ANNOTATIONS_DIR) + os.makedirs(annotations_dir, exist_ok=True) + self._annotations_dir = annotations_dir + + subsets = {s: _SubsetWriter(s, self) for s in self._extractor.subsets()} + for subset, writer in subsets.items(): + writer.write_categories(self._extractor.categories()) + + for item in self._extractor: + subset = item.subset or DEFAULT_SUBSET_NAME + writer = subsets[subset] + + writer.write_item(item) + + for subset, writer in subsets.items(): + writer.write(annotations_dir) + + def _save_image(self, item, path=None): + super()._save_image(item, + osp.join(self._images_dir, self._make_image_filename(item))) + +class DatumaroProjectConverter(Converter): + @classmethod + def convert(cls, extractor, save_dir, **kwargs): + os.makedirs(save_dir, exist_ok=True) + + from datumaro.components.project import Project + project = Project.generate(save_dir, + config=kwargs.pop('project_config', None)) + + DatumaroConverter.convert(extractor, + save_dir=osp.join( + project.config.project_dir, project.config.dataset_dir), + **kwargs) \ No newline at end of file diff --git a/testbed/openvinotoolkit__datumaro/datumaro/plugins/datumaro_format/format.py b/testbed/openvinotoolkit__datumaro/datumaro/plugins/datumaro_format/format.py new file mode 100644 index 0000000000000000000000000000000000000000..501c100b0928839cf7b16a67f824721602a39d81 --- /dev/null +++ 
b/testbed/openvinotoolkit__datumaro/datumaro/plugins/datumaro_format/format.py @@ -0,0 +1,12 @@ + +# Copyright (C) 2019-2020 Intel Corporation +# +# SPDX-License-Identifier: MIT + +class DatumaroPath: + IMAGES_DIR = 'images' + ANNOTATIONS_DIR = 'annotations' + MASKS_DIR = 'masks' + + IMAGE_EXT = '.jpg' + MASK_EXT = '.png' diff --git a/testbed/openvinotoolkit__datumaro/datumaro/plugins/image_dir.py b/testbed/openvinotoolkit__datumaro/datumaro/plugins/image_dir.py new file mode 100644 index 0000000000000000000000000000000000000000..9be3092944f4a36f6607d057584730c001fb5a79 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/datumaro/plugins/image_dir.py @@ -0,0 +1,52 @@ + +# Copyright (C) 2019-2020 Intel Corporation +# +# SPDX-License-Identifier: MIT + +import logging as log +import os +import os.path as osp + +from datumaro.components.extractor import DatasetItem, SourceExtractor, Importer +from datumaro.components.converter import Converter +from datumaro.util.image import Image + + +class ImageDirImporter(Importer): + @classmethod + def find_sources(cls, path): + if not osp.isdir(path): + return [] + return [{ 'url': path, 'format': 'image_dir' }] + +class ImageDirExtractor(SourceExtractor): + def __init__(self, url): + super().__init__() + + assert osp.isdir(url), url + + for dirpath, _, filenames in os.walk(url): + for name in filenames: + path = osp.join(dirpath, name) + image = Image(path=path) + try: + # force loading + image.data # pylint: disable=pointless-statement + except Exception: + continue + + item_id = osp.relpath(osp.splitext(path)[0], url) + self._items.append(DatasetItem(id=item_id, image=image)) + +class ImageDirConverter(Converter): + DEFAULT_IMAGE_EXT = '.jpg' + + def apply(self): + os.makedirs(self._save_dir, exist_ok=True) + + for item in self._extractor: + if item.has_image: + self._save_image(item, + osp.join(self._save_dir, self._make_image_filename(item))) + else: + log.debug("Item '%s' has no image info", item.id) \ No newline at end 
of file diff --git a/testbed/openvinotoolkit__datumaro/datumaro/plugins/imagenet_format.py b/testbed/openvinotoolkit__datumaro/datumaro/plugins/imagenet_format.py new file mode 100644 index 0000000000000000000000000000000000000000..0e0669a9dbf35cd6d4faab6b59fa508b1ffaa3c5 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/datumaro/plugins/imagenet_format.py @@ -0,0 +1,90 @@ + +# Copyright (C) 2020 Intel Corporation +# +# SPDX-License-Identifier: MIT + +from glob import glob +import logging as log +import os +import os.path as osp + +from datumaro.components.extractor import (DatasetItem, Label, + LabelCategories, AnnotationType, SourceExtractor, Importer +) +from datumaro.components.converter import Converter + + +class ImagenetPath: + IMAGES_EXT = '.jpg' + IMAGES_DIR_NO_LABEL = 'no_label' + + +class ImagenetExtractor(SourceExtractor): + def __init__(self, path, subset=None): + assert osp.isdir(path), path + super().__init__(subset=subset) + + self._categories = self._load_categories(path) + self._items = list(self._load_items(path).values()) + + def _load_categories(self, path): + label_cat = LabelCategories() + for images_dir in sorted(os.listdir(path)): + if images_dir != ImagenetPath.IMAGES_DIR_NO_LABEL: + label_cat.add(images_dir) + return { AnnotationType.label: label_cat } + + def _load_items(self, path): + items = {} + for image_path in glob(osp.join(path, '*', '*')): + if osp.splitext(image_path)[1] != ImagenetPath.IMAGES_EXT: + continue + label = osp.basename(osp.dirname(image_path)) + image_name = osp.splitext(osp.basename(image_path))[0][len(label) + 1:] + item = items.get(image_name) + if item is None: + item = DatasetItem(id=image_name, subset=self._subset, + image=image_path) + annotations = item.annotations + if label != ImagenetPath.IMAGES_DIR_NO_LABEL: + label = self._categories[AnnotationType.label].find(label)[0] + annotations.append(Label(label=label)) + items[image_name] = item + return items + + +class ImagenetImporter(Importer): + 
@classmethod + def find_sources(cls, path): + if not osp.isdir(path): + return [] + return [{ 'url': path, 'format': 'imagenet' }] + + +class ImagenetConverter(Converter): + DEFAULT_IMAGE_EXT = ImagenetPath.IMAGES_EXT + + def apply(self): + if 1 < len(self._extractor.subsets()): + log.warning("ImageNet format supports exporting only a single " + "subset, subset information will not be used.") + + subset_dir = self._save_dir + extractor = self._extractor + labels = {} + for item in self._extractor: + image_name = item.id + labels[image_name] = [p.label for p in item.annotations + if p.type == AnnotationType.label] + for label in labels[image_name]: + label_name = extractor.categories()[AnnotationType.label][label].name + self._save_image(item, osp.join(subset_dir, label_name, + '%s_%s%s' % \ + (label_name, image_name, ImagenetPath.IMAGES_EXT) + )) + + if not labels[image_name]: + self._save_image(item, osp.join(subset_dir, + ImagenetPath.IMAGES_DIR_NO_LABEL, + ImagenetPath.IMAGES_DIR_NO_LABEL + '_' + + image_name + ImagenetPath.IMAGES_EXT)) diff --git a/testbed/openvinotoolkit__datumaro/datumaro/plugins/imagenet_txt_format.py b/testbed/openvinotoolkit__datumaro/datumaro/plugins/imagenet_txt_format.py new file mode 100644 index 0000000000000000000000000000000000000000..00ee4ae789f77f20c62d277888f05b8fc04bd780 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/datumaro/plugins/imagenet_txt_format.py @@ -0,0 +1,105 @@ + +# Copyright (C) 2020 Intel Corporation +# +# SPDX-License-Identifier: MIT + +from glob import glob +import os +import os.path as osp + +from datumaro.components.extractor import (DatasetItem, Label, + LabelCategories, AnnotationType, SourceExtractor, Importer +) +from datumaro.components.converter import Converter + + +class ImagenetTxtPath: + LABELS_FILE = 'synsets.txt' + IMAGE_DIR = 'images' + +class ImagenetTxtExtractor(SourceExtractor): + def __init__(self, path, labels=None, image_dir=None): + assert osp.isfile(path), path + 
super().__init__(subset=osp.splitext(osp.basename(path))[0]) + + if not image_dir: + image_dir = ImagenetTxtPath.IMAGE_DIR + self.image_dir = osp.join(osp.dirname(path), image_dir) + + if labels is None: + labels = osp.join(osp.dirname(path), ImagenetTxtPath.LABELS_FILE) + labels = self._parse_labels(labels) + else: + assert all(isinstance(e, str) for e in labels) + + self._categories = self._load_categories(labels) + self._items = list(self._load_items(path).values()) + + @staticmethod + def _parse_labels(path): + with open(path, encoding='utf-8') as labels_file: + return [s.strip() for s in labels_file] + + def _load_categories(self, labels): + return { AnnotationType.label: LabelCategories().from_iterable(labels) } + + def _load_items(self, path): + items = {} + with open(path, encoding='utf-8') as f: + for line in f: + item = line.split() + item_id = item[0] + label_ids = [int(id) for id in item[1:]] + anno = [] + for label in label_ids: + assert 0 <= label and \ + label < len(self._categories[AnnotationType.label]), \ + "Image '%s': unknown label id '%s'" % (item_id, label) + anno.append(Label(label)) + items[item_id] = DatasetItem(id=item_id, subset=self._subset, + image=osp.join(self.image_dir, item_id + '.jpg'), + annotations=anno) + return items + + +class ImagenetTxtImporter(Importer): + @classmethod + def find_sources(cls, path): + subset_paths = [p for p in glob(osp.join(path, '*.txt')) + if osp.basename(p) != ImagenetTxtPath.LABELS_FILE] + sources = [] + for subset_path in subset_paths: + sources += cls._find_sources_recursive( + subset_path, '.txt', 'imagenet_txt') + return sources + + +class ImagenetTxtConverter(Converter): + DEFAULT_IMAGE_EXT = '.jpg' + + def apply(self): + subset_dir = self._save_dir + os.makedirs(subset_dir, exist_ok=True) + + extractor = self._extractor + for subset_name, subset in self._extractor.subsets().items(): + annotation_file = osp.join(subset_dir, '%s.txt' % subset_name) + labels = {} + for item in subset: + 
labels[item.id] = [str(p.label) for p in item.annotations + if p.type == AnnotationType.label] + + if self._save_images and item.has_image: + self._save_image(item, + osp.join(self._save_dir, ImagenetTxtPath.IMAGE_DIR, + self._make_image_filename(item))) + + with open(annotation_file, 'w', encoding='utf-8') as f: + f.writelines(['%s %s\n' % (item_id, ' '.join(labels[item_id])) + for item_id in labels]) + + labels_file = osp.join(subset_dir, ImagenetTxtPath.LABELS_FILE) + with open(labels_file, 'w', encoding='utf-8') as f: + f.write('\n'.join(l.name + for l in extractor.categories()[AnnotationType.label]) + ) diff --git a/testbed/openvinotoolkit__datumaro/datumaro/plugins/labelme_format.py b/testbed/openvinotoolkit__datumaro/datumaro/plugins/labelme_format.py new file mode 100644 index 0000000000000000000000000000000000000000..a3a83147b6696d51f8986c16e7022a108381f6bf --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/datumaro/plugins/labelme_format.py @@ -0,0 +1,393 @@ +# Copyright (C) 2020 Intel Corporation +# +# SPDX-License-Identifier: MIT + +from collections import defaultdict +from defusedxml import ElementTree +import logging as log +import numpy as np +import os +import os.path as osp + +from datumaro.components.extractor import (SourceExtractor, Importer, + DatasetItem, AnnotationType, Mask, Bbox, Polygon, LabelCategories +) +from datumaro.components.converter import Converter +from datumaro.util.image import Image, save_image +from datumaro.util.mask_tools import load_mask, find_mask_bbox + + +class LabelMePath: + MASKS_DIR = 'Masks' + IMAGE_EXT = '.jpg' + +class LabelMeExtractor(SourceExtractor): + def __init__(self, path, subset_name=None): + assert osp.isdir(path), path + super().__init__(subset=subset_name) + + items, categories = self._parse(path) + self._categories = categories + self._items = items + + def _parse(self, path): + categories = { + AnnotationType.label: LabelCategories(attributes={ + 'occluded', 'username' + }) + } + + items = [] + 
for p in os.listdir(path): + if not p.endswith('.xml'): + continue + root = ElementTree.parse(osp.join(path, p)) + + item_id = osp.join(root.find('folder').text or '', + root.find('filename').text) + image_path = osp.join(path, item_id) + image_size = None + imagesize_elem = root.find('imagesize') + if imagesize_elem is not None: + width_elem = imagesize_elem.find('ncols') + height_elem = imagesize_elem.find('nrows') + image_size = (int(height_elem.text), int(width_elem.text)) + image = Image(path=image_path, size=image_size) + + annotations = self._parse_annotations(root, path, categories) + + items.append(DatasetItem(id=osp.splitext(item_id)[0], + subset=self._subset, image=image, annotations=annotations)) + return items, categories + + @classmethod + def _parse_annotations(cls, xml_root, dataset_root, categories): + def parse_attributes(attr_str): + parsed = [] + if not attr_str: + return parsed + + for attr in [a.strip() for a in attr_str.split(',') if a.strip()]: + if '=' in attr: + name, value = attr.split('=', maxsplit=1) + if value.lower() in {'true', 'false'}: + value = value.lower() == 'true' + else: + try: + value = float(value) + except ValueError: + pass + parsed.append((name, value)) + else: + parsed.append((attr, True)) + + return parsed + + label_cat = categories[AnnotationType.label] + def get_label_id(label): + if not label: + return None + idx, _ = label_cat.find(label) + if idx is None: + idx = label_cat.add(label) + return idx + + image_annotations = [] + + parsed_annotations = dict() + group_assignments = dict() + root_annotations = set() + for obj_elem in xml_root.iter('object'): + obj_id = int(obj_elem.find('id').text) + + ann_items = [] + + label = get_label_id(obj_elem.find('name').text) + + attributes = [] + attributes_elem = obj_elem.find('attributes') + if attributes_elem is not None and attributes_elem.text: + attributes = parse_attributes(attributes_elem.text) + + occluded = False + occluded_elem = obj_elem.find('occluded') + if 
occluded_elem is not None and occluded_elem.text: + occluded = (occluded_elem.text == 'yes') + attributes.append(('occluded', occluded)) + + deleted = False + deleted_elem = obj_elem.find('deleted') + if deleted_elem is not None and deleted_elem.text: + deleted = bool(int(deleted_elem.text)) + + user = '' + + poly_elem = obj_elem.find('polygon') + segm_elem = obj_elem.find('segm') + type_elem = obj_elem.find('type') # the only value is 'bounding_box' + if poly_elem is not None: + user_elem = poly_elem.find('username') + if user_elem is not None and user_elem.text: + user = user_elem.text + attributes.append(('username', user)) + + points = [] + for point_elem in poly_elem.iter('pt'): + x = float(point_elem.find('x').text) + y = float(point_elem.find('y').text) + points.append(x) + points.append(y) + + if type_elem is not None and type_elem.text == 'bounding_box': + xmin = min(points[::2]) + xmax = max(points[::2]) + ymin = min(points[1::2]) + ymax = max(points[1::2]) + ann_items.append(Bbox(xmin, ymin, xmax - xmin, ymax - ymin, + label=label, attributes=attributes, id=obj_id, + )) + else: + ann_items.append(Polygon(points, + label=label, attributes=attributes, id=obj_id, + )) + elif segm_elem is not None: + user_elem = segm_elem.find('username') + if user_elem is not None and user_elem.text: + user = user_elem.text + attributes.append(('username', user)) + + mask_path = osp.join(dataset_root, LabelMePath.MASKS_DIR, + segm_elem.find('mask').text) + if not osp.isfile(mask_path): + raise Exception("Can't find mask at '%s'" % mask_path) + mask = load_mask(mask_path) + mask = np.any(mask, axis=2) + ann_items.append(Mask(image=mask, label=label, id=obj_id, + attributes=attributes)) + + if not deleted: + parsed_annotations[obj_id] = ann_items + + # Find parents and children + parts_elem = obj_elem.find('parts') + if parts_elem is not None: + children_ids = [] + hasparts_elem = parts_elem.find('hasparts') + if hasparts_elem is not None and hasparts_elem.text: + 
children_ids = [int(c) for c in hasparts_elem.text.split(',')] + + parent_ids = [] + ispartof_elem = parts_elem.find('ispartof') + if ispartof_elem is not None and ispartof_elem.text: + parent_ids = [int(c) for c in ispartof_elem.text.split(',')] + + if children_ids and not parent_ids and hasparts_elem.text: + root_annotations.add(obj_id) + group_assignments[obj_id] = [None, children_ids] + + # assign single group to all grouped annotations + current_group_id = 0 + annotations_to_visit = list(root_annotations) + while annotations_to_visit: + ann_id = annotations_to_visit.pop() + ann_assignment = group_assignments[ann_id] + group_id, children_ids = ann_assignment + if group_id: + continue + + if ann_id in root_annotations: + current_group_id += 1 # start a new group + + group_id = current_group_id + ann_assignment[0] = group_id + + # continue with children + annotations_to_visit.extend(children_ids) + + assert current_group_id == len(root_annotations) + + for ann_id, ann_items in parsed_annotations.items(): + group_id = 0 + if ann_id in group_assignments: + ann_assignment = group_assignments[ann_id] + group_id = ann_assignment[0] + + for ann_item in ann_items: + if group_id: + ann_item.group = group_id + + image_annotations.append(ann_item) + + return image_annotations + + +class LabelMeImporter(Importer): + EXTRACTOR = 'label_me' + + @classmethod + def find_sources(cls, path): + subset_paths = [] + if not osp.isdir(path): + return [] + + path = osp.normpath(path) + + def has_annotations(d): + return len([p for p in os.listdir(d) if p.endswith('.xml')]) != 0 + + if has_annotations(path): + subset_paths.append({'url': path, 'format': cls.EXTRACTOR}) + else: + for d in os.listdir(path): + subset = d + d = osp.join(path, d) + if osp.isdir(d) and has_annotations(d): + subset_paths.append({'url': d, 'format': cls.EXTRACTOR, + 'options': {'subset_name': subset} + }) + return subset_paths + + +class LabelMeConverter(Converter): + DEFAULT_IMAGE_EXT = LabelMePath.IMAGE_EXT + 
+ def apply(self): + for subset_name, subset in self._extractor.subsets().items(): + subset_dir = osp.join(self._save_dir, subset_name) + os.makedirs(subset_dir, exist_ok=True) + os.makedirs(osp.join(subset_dir, LabelMePath.MASKS_DIR), + exist_ok=True) + + for index, item in enumerate(subset): + self._save_item(item, subset_dir, index) + + def _get_label(self, label_id): + if label_id is None: + return '' + return self._extractor.categories()[AnnotationType.label][label_id].name + + def _save_item(self, item, subset_dir, index): + from lxml import etree as ET + + log.debug("Converting item '%s'", item.id) + + image_filename = self._make_image_filename(item) + if self._save_images: + if item.has_image and item.image.has_data: + self._save_image(item, osp.join(subset_dir, image_filename)) + else: + log.debug("Item '%s' has no image", item.id) + + root_elem = ET.Element('annotation') + ET.SubElement(root_elem, 'filename').text = osp.basename(image_filename) + ET.SubElement(root_elem, 'folder').text = osp.dirname(image_filename) + + source_elem = ET.SubElement(root_elem, 'source') + ET.SubElement(source_elem, 'sourceImage').text = '' + ET.SubElement(source_elem, 'sourceAnnotation').text = 'Datumaro' + + if item.has_image: + image_elem = ET.SubElement(root_elem, 'imagesize') + image_size = item.image.size + ET.SubElement(image_elem, 'nrows').text = str(image_size[0]) + ET.SubElement(image_elem, 'ncols').text = str(image_size[1]) + + groups = defaultdict(list) + + obj_id = 0 + for ann in item.annotations: + if not ann.type in { AnnotationType.polygon, + AnnotationType.bbox, AnnotationType.mask }: + continue + + obj_elem = ET.SubElement(root_elem, 'object') + ET.SubElement(obj_elem, 'name').text = self._get_label(ann.label) + ET.SubElement(obj_elem, 'deleted').text = '0' + ET.SubElement(obj_elem, 'verified').text = '0' + ET.SubElement(obj_elem, 'occluded').text = \ + 'yes' if ann.attributes.pop('occluded', '') == True else 'no' + ET.SubElement(obj_elem, 'date').text = '' 
+ ET.SubElement(obj_elem, 'id').text = str(obj_id) + + parts_elem = ET.SubElement(obj_elem, 'parts') + if ann.group: + groups[ann.group].append((obj_id, parts_elem)) + else: + ET.SubElement(parts_elem, 'hasparts').text = '' + ET.SubElement(parts_elem, 'ispartof').text = '' + + if ann.type == AnnotationType.bbox: + ET.SubElement(obj_elem, 'type').text = 'bounding_box' + + poly_elem = ET.SubElement(obj_elem, 'polygon') + x0, y0, x1, y1 = ann.points + points = [ (x0, y0), (x1, y0), (x1, y1), (x0, y1) ] + for x, y in points: + point_elem = ET.SubElement(poly_elem, 'pt') + ET.SubElement(point_elem, 'x').text = '%.2f' % x + ET.SubElement(point_elem, 'y').text = '%.2f' % y + + ET.SubElement(poly_elem, 'username').text = \ + str(ann.attributes.pop('username', '')) + elif ann.type == AnnotationType.polygon: + poly_elem = ET.SubElement(obj_elem, 'polygon') + for x, y in zip(ann.points[::2], ann.points[1::2]): + point_elem = ET.SubElement(poly_elem, 'pt') + ET.SubElement(point_elem, 'x').text = '%.2f' % x + ET.SubElement(point_elem, 'y').text = '%.2f' % y + + ET.SubElement(poly_elem, 'username').text = \ + str(ann.attributes.pop('username', '')) + elif ann.type == AnnotationType.mask: + mask_filename = '%s_mask_%s.png' % \ + (item.id.replace('/', '_'), obj_id) + save_image(osp.join(subset_dir, LabelMePath.MASKS_DIR, + mask_filename), + self._paint_mask(ann.image)) + + segm_elem = ET.SubElement(obj_elem, 'segm') + ET.SubElement(segm_elem, 'mask').text = mask_filename + + bbox = find_mask_bbox(ann.image) + box_elem = ET.SubElement(segm_elem, 'box') + ET.SubElement(box_elem, 'xmin').text = '%.2f' % bbox[0] + ET.SubElement(box_elem, 'ymin').text = '%.2f' % bbox[1] + ET.SubElement(box_elem, 'xmax').text = \ + '%.2f' % (bbox[0] + bbox[2]) + ET.SubElement(box_elem, 'ymax').text = \ + '%.2f' % (bbox[1] + bbox[3]) + + ET.SubElement(segm_elem, 'username').text = \ + str(ann.attributes.pop('username', '')) + else: + raise NotImplementedError("Unknown shape type '%s'" % ann.type) + + 
attrs = [] + for k, v in ann.attributes.items(): + attrs.append('%s=%s' % (k, v)) + ET.SubElement(obj_elem, 'attributes').text = ', '.join(attrs) + + obj_id += 1 + + for _, group in groups.items(): + leader_id, leader_parts_elem = group[0] + leader_parts = [str(o_id) for o_id, _ in group[1:]] + ET.SubElement(leader_parts_elem, 'hasparts').text = \ + ','.join(leader_parts) + ET.SubElement(leader_parts_elem, 'ispartof').text = '' + + for obj_id, parts_elem in group[1:]: + ET.SubElement(parts_elem, 'hasparts').text = '' + ET.SubElement(parts_elem, 'ispartof').text = str(leader_id) + + xml_path = osp.join(subset_dir, 'item_%09d.xml' % index) + with open(xml_path, 'w', encoding='utf-8') as f: + xml_data = ET.tostring(root_elem, encoding='unicode', + pretty_print=True) + f.write(xml_data) + + @staticmethod + def _paint_mask(mask): + # TODO: check if mask colors are random + return np.array([[0, 0, 0, 0], [255, 203, 0, 153]], + dtype=np.uint8)[mask.astype(np.uint8)] \ No newline at end of file diff --git a/testbed/openvinotoolkit__datumaro/datumaro/plugins/mot_format.py b/testbed/openvinotoolkit__datumaro/datumaro/plugins/mot_format.py new file mode 100644 index 0000000000000000000000000000000000000000..ba8c33a5ac0f989add05f365cbade58a23b7e6ba --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/datumaro/plugins/mot_format.py @@ -0,0 +1,271 @@ +# Copyright (C) 2020 Intel Corporation +# +# SPDX-License-Identifier: MIT + +# The Multiple Object Tracking Benchmark challenge format support +# Format description: https://arxiv.org/pdf/1906.04567.pdf +# Another description: https://motchallenge.net/instructions + +from collections import OrderedDict +import csv +from enum import Enum +import logging as log +import os +import os.path as osp + +from datumaro.components.extractor import (SourceExtractor, Importer, + DatasetItem, AnnotationType, Bbox, LabelCategories +) +from datumaro.components.converter import Converter +from datumaro.util import cast +from datumaro.util.image 
import Image + + +MotLabel = Enum('MotLabel', [ + ('pedestrian', 1), + ('person on vehicle', 2), + ('car', 3), + ('bicycle', 4), + ('motorbike', 5), + ('non motorized vehicle', 6), + ('static person', 7), + ('distractor', 8), + ('occluder', 9), + ('occluder on the ground', 10), + ('occluder full', 11), + ('reflection', 12), +]) + +class MotPath: + IMAGE_DIR = 'img1' + SEQINFO_FILE = 'seqinfo.ini' + LABELS_FILE = 'labels.txt' + GT_FILENAME = 'gt.txt' + DET_FILENAME = 'det.txt' + + IMAGE_EXT = '.jpg' + + FIELDS = [ + 'frame_id', + 'track_id', + 'x', + 'y', + 'w', + 'h', + 'confidence', # or 'not ignored' flag for GT anns + 'class_id', + 'visibility' + ] + + +class MotSeqExtractor(SourceExtractor): + def __init__(self, path, labels=None, occlusion_threshold=0, is_gt=None): + super().__init__() + + assert osp.isfile(path) + seq_root = osp.dirname(osp.dirname(path)) + self._image_dir = '' + if osp.isdir(osp.join(seq_root, MotPath.IMAGE_DIR)): + self._image_dir = osp.join(seq_root, MotPath.IMAGE_DIR) + + seq_info = osp.join(seq_root, MotPath.SEQINFO_FILE) + if osp.isfile(seq_info): + seq_info = self._parse_seq_info(seq_info) + self._image_dir = osp.join(seq_root, seq_info['imdir']) + else: + seq_info = None + self._seq_info = seq_info + + self._occlusion_threshold = float(occlusion_threshold) + + assert is_gt in {None, True, False} + if is_gt is None: + if osp.basename(path) == MotPath.DET_FILENAME: + is_gt = False + else: + is_gt = True + self._is_gt = is_gt + + if labels is None: + labels = osp.join(osp.dirname(path), MotPath.LABELS_FILE) + if not osp.isfile(labels): + labels = [lbl.name for lbl in MotLabel] + if isinstance(labels, str): + labels = self._parse_labels(labels) + elif isinstance(labels, list): + assert all(isinstance(lbl, str) for lbl in labels), labels + else: + raise TypeError("Unexpected type of 'labels' argument: %s" % labels) + self._categories = self._load_categories(labels) + self._items = list(self._load_items(path).values()) + + @staticmethod + 
def _parse_labels(path): + with open(path, encoding='utf-8') as labels_file: + return [s.strip() for s in labels_file] + + def _load_categories(self, labels): + attributes = ['track_id'] + if self._is_gt: + attributes += ['occluded', 'visibility', 'ignored'] + else: + attributes += ['score'] + label_cat = LabelCategories(attributes=attributes) + for label in labels: + label_cat.add(label) + + return { AnnotationType.label: label_cat } + + def _load_items(self, path): + labels_count = len(self._categories[AnnotationType.label].items) + items = OrderedDict() + + if self._seq_info: + for frame_id in range(self._seq_info['seqlength']): + items[frame_id] = DatasetItem( + id=frame_id, + subset=self._subset, + image=Image( + path=osp.join(self._image_dir, + '%06d%s' % (frame_id, self._seq_info['imext'])), + size=(self._seq_info['imheight'], self._seq_info['imwidth']) + ) + ) + elif osp.isdir(self._image_dir): + for p in os.listdir(self._image_dir): + if p.endswith(MotPath.IMAGE_EXT): + frame_id = int(osp.splitext(p)[0]) + items[frame_id] = DatasetItem( + id=frame_id, + subset=self._subset, + image=osp.join(self._image_dir, p), + ) + + with open(path, newline='', encoding='utf-8') as csv_file: + # NOTE: Different MOT files have different count of fields + # (7, 9 or 10). 
This is handled by reader: + # - all extra fields go to a separate field + # - all unmet fields have None values + for row in csv.DictReader(csv_file, fieldnames=MotPath.FIELDS): + frame_id = int(row['frame_id']) + item = items.get(frame_id) + if item is None: + item = DatasetItem(id=frame_id, subset=self._subset) + annotations = item.annotations + + x, y = float(row['x']), float(row['y']) + w, h = float(row['w']), float(row['h']) + label_id = row.get('class_id') + if label_id and label_id != '-1': + label_id = int(label_id) - 1 + assert label_id < labels_count, label_id + else: + label_id = None + + attributes = {} + + # Annotations for detection task are not related to any track + track_id = int(row['track_id']) + if 0 < track_id: + attributes['track_id'] = track_id + + confidence = cast(row.get('confidence'), float, 1) + visibility = cast(row.get('visibility'), float, 1) + if self._is_gt: + attributes['visibility'] = visibility + attributes['occluded'] = \ + visibility <= self._occlusion_threshold + attributes['ignored'] = confidence == 0 + else: + attributes['score'] = float(confidence) + + annotations.append(Bbox(x, y, w, h, label=label_id, + attributes=attributes)) + + items[frame_id] = item + return items + + @classmethod + def _parse_seq_info(cls, path): + fields = {} + with open(path, encoding='utf-8') as f: + for line in f: + entry = line.lower().strip().split('=', maxsplit=1) + if len(entry) == 2: + fields[entry[0]] = entry[1] + cls._check_seq_info(fields) + for k in { 'framerate', 'seqlength', 'imwidth', 'imheight' }: + fields[k] = int(fields[k]) + return fields + + @staticmethod + def _check_seq_info(seq_info): + assert set(seq_info) == {'name', 'imdir', 'framerate', 'seqlength', 'imwidth', 'imheight', 'imext'}, seq_info + +class MotSeqImporter(Importer): + @classmethod + def find_sources(cls, path): + return cls._find_sources_recursive(path, '.txt', 'mot_seq', + filename=osp.join('gt', osp.splitext(MotPath.GT_FILENAME)[0])) + +class 
MotSeqGtConverter(Converter): + DEFAULT_IMAGE_EXT = MotPath.IMAGE_EXT + + def apply(self): + extractor = self._extractor + + images_dir = osp.join(self._save_dir, MotPath.IMAGE_DIR) + os.makedirs(images_dir, exist_ok=True) + self._images_dir = images_dir + + anno_dir = osp.join(self._save_dir, 'gt') + os.makedirs(anno_dir, exist_ok=True) + anno_file = osp.join(anno_dir, MotPath.GT_FILENAME) + with open(anno_file, 'w', encoding="utf-8") as csv_file: + writer = csv.DictWriter(csv_file, fieldnames=MotPath.FIELDS) + + track_id_mapping = {-1: -1} + for idx, item in enumerate(extractor): + log.debug("Converting item '%s'", item.id) + + frame_id = cast(item.id, int, 1 + idx) + + for anno in item.annotations: + if anno.type != AnnotationType.bbox: + continue + + track_id = int(anno.attributes.get('track_id', -1)) + if track_id not in track_id_mapping: + track_id_mapping[track_id] = len(track_id_mapping) + track_id = track_id_mapping[track_id] + + writer.writerow({ + 'frame_id': frame_id, + 'track_id': track_id, + 'x': anno.x, + 'y': anno.y, + 'w': anno.w, + 'h': anno.h, + 'confidence': int(anno.attributes.get('ignored') != True), + 'class_id': 1 + cast(anno.label, int, -2), + 'visibility': float( + anno.attributes.get('visibility', + 1 - float( + anno.attributes.get('occluded', False) + ) + ) + ) + }) + + if self._save_images: + if item.has_image and item.image.has_data: + self._save_image(item, osp.join(self._images_dir, + '%06d%s' % (frame_id, self._find_image_ext(item)))) + else: + log.debug("Item '%s' has no image", item.id) + + labels_file = osp.join(anno_dir, MotPath.LABELS_FILE) + with open(labels_file, 'w', encoding='utf-8') as f: + f.write('\n'.join(l.name + for l in extractor.categories()[AnnotationType.label]) + ) diff --git a/testbed/openvinotoolkit__datumaro/datumaro/plugins/mots_format.py b/testbed/openvinotoolkit__datumaro/datumaro/plugins/mots_format.py new file mode 100644 index 
0000000000000000000000000000000000000000..cd1fa3f60969dee75f54e7db6db89f382254b9e4 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/datumaro/plugins/mots_format.py @@ -0,0 +1,145 @@ +# Copyright (C) 2020 Intel Corporation +# +# SPDX-License-Identifier: MIT + +# Implements MOTS format https://www.vision.rwth-aachen.de/page/mots + +from enum import Enum +from glob import glob +import logging as log +import numpy as np +import os +import os.path as osp + +from datumaro.components.extractor import (SourceExtractor, Importer, + DatasetItem, AnnotationType, Mask, LabelCategories +) +from datumaro.components.converter import Converter +from datumaro.util.image import load_image, save_image +from datumaro.util.mask_tools import merge_masks + + +class MotsPath: + MASKS_DIR = 'instances' + IMAGE_DIR = 'images' + IMAGE_EXT = '.jpg' + LABELS_FILE = 'labels.txt' + MAX_INSTANCES = 1000 + +MotsLabels = Enum('MotsLabels', [ + ('background', 0), + ('car', 1), + ('pedestrian', 2), + ('ignored', 10), +]) + +class MotsPngExtractor(SourceExtractor): + @staticmethod + def detect_dataset(path): + if osp.isdir(osp.join(path, MotsPath.MASKS_DIR)): + return [{'url': path, 'format': 'mots_png'}] + return [] + + def __init__(self, path, subset_name=None): + assert osp.isdir(path), path + super().__init__(subset=subset_name) + self._images_dir = osp.join(path, 'images') + self._anno_dir = osp.join(path, MotsPath.MASKS_DIR) + self._categories = self._parse_categories( + osp.join(self._anno_dir, MotsPath.LABELS_FILE)) + self._items = self._parse_items() + + def _parse_categories(self, path): + if osp.isfile(path): + with open(path) as f: + labels = [l.strip() for l in f] + else: + labels = [l.name for l in MotsLabels] + return { AnnotationType.label: LabelCategories.from_iterable(labels) } + + def _parse_items(self): + items = [] + for p in sorted(p for p in + glob(self._anno_dir + '/**/*.png', recursive=True)): + item_id = osp.splitext(osp.relpath(p, self._anno_dir))[0] + 
items.append(DatasetItem(id=item_id, subset=self._subset, + image=osp.join(self._images_dir, item_id + MotsPath.IMAGE_EXT), + annotations=self._parse_annotations(p))) + return items + + @staticmethod + def _lazy_extract_mask(mask, v): + return lambda: mask == v + + def _parse_annotations(self, path): + combined_mask = load_image(path, dtype=np.uint16) + masks = [] + for obj_id in np.unique(combined_mask): + class_id, instance_id = divmod(obj_id, MotsPath.MAX_INSTANCES) + z_order = 0 + if class_id == 0: + continue # background + if class_id == 10 and \ + len(self._categories[AnnotationType.label]) < 10: + z_order = 1 + class_id = self._categories[AnnotationType.label].find( + MotsLabels.ignored.name)[0] + else: + class_id -= 1 + masks.append(Mask(self._lazy_extract_mask(combined_mask, obj_id), + label=class_id, z_order=z_order, + attributes={'track_id': instance_id})) + return masks + + +class MotsImporter(Importer): + @classmethod + def find_sources(cls, path): + subsets = MotsPngExtractor.detect_dataset(path) + if not subsets: + for p in os.listdir(path): + detected = MotsPngExtractor.detect_dataset(osp.join(path, p)) + for s in detected: + s.setdefault('options', {})['subset_name'] = p + subsets.extend(detected) + return subsets + + +class MotsPngConverter(Converter): + DEFAULT_IMAGE_EXT = MotsPath.IMAGE_EXT + + def apply(self): + for subset_name, subset in self._extractor.subsets().items(): + subset_dir = osp.join(self._save_dir, subset_name) + images_dir = osp.join(subset_dir, MotsPath.IMAGE_DIR) + anno_dir = osp.join(subset_dir, MotsPath.MASKS_DIR) + os.makedirs(anno_dir, exist_ok=True) + + for item in subset: + log.debug("Converting item '%s'", item.id) + + if self._save_images: + if item.has_image and item.image.has_data: + self._save_image(item, + osp.join(images_dir, self._make_image_filename(item))) + else: + log.debug("Item '%s' has no image", item.id) + + self._save_annotations(item, anno_dir) + + with open(osp.join(anno_dir, MotsPath.LABELS_FILE), 'w') 
as f: + f.write('\n'.join(l.name + for l in subset.categories()[AnnotationType.label].items)) + + def _save_annotations(self, item, anno_dir): + masks = [a for a in item.annotations if a.type == AnnotationType.mask] + if not masks: + return + + instance_ids = [int(a.attributes['track_id']) for a in masks] + masks = sorted(zip(masks, instance_ids), key=lambda e: e[0].z_order) + mask = merge_masks([ + m.image * (MotsPath.MAX_INSTANCES * (1 + m.label) + id) + for m, id in masks]) + save_image(osp.join(anno_dir, item.id + '.png'), mask, + create_dir=True, dtype=np.uint16) diff --git a/testbed/openvinotoolkit__datumaro/datumaro/plugins/openvino_launcher.py b/testbed/openvinotoolkit__datumaro/datumaro/plugins/openvino_launcher.py new file mode 100644 index 0000000000000000000000000000000000000000..abdaa0fcae2c2fa777932b86377ec890453736c0 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/datumaro/plugins/openvino_launcher.py @@ -0,0 +1,188 @@ + +# Copyright (C) 2019-2020 Intel Corporation +# +# SPDX-License-Identifier: MIT + +# pylint: disable=exec-used + +import cv2 +import logging as log +import numpy as np +import os.path as osp +import shutil + +from openvino.inference_engine import IECore + +from datumaro.components.cli_plugin import CliPlugin +from datumaro.components.launcher import Launcher + + +class OpenVinoImporter(CliPlugin): + @classmethod + def build_cmdline_parser(cls, **kwargs): + parser = super().build_cmdline_parser(**kwargs) + parser.add_argument('-d', '--description', required=True, + help="Path to the model description file (.xml)") + parser.add_argument('-w', '--weights', required=True, + help="Path to the model weights file (.bin)") + parser.add_argument('-i', '--interpreter', required=True, + help="Path to the network output interprter script (.py)") + parser.add_argument('--device', default='CPU', + help="Target device (default: %(default)s)") + return parser + + @staticmethod + def copy_model(model_dir, model): + 
shutil.copy(model['description'], + osp.join(model_dir, osp.basename(model['description']))) + model['description'] = osp.basename(model['description']) + + shutil.copy(model['weights'], + osp.join(model_dir, osp.basename(model['weights']))) + model['weights'] = osp.basename(model['weights']) + + shutil.copy(model['interpreter'], + osp.join(model_dir, osp.basename(model['interpreter']))) + model['interpreter'] = osp.basename(model['interpreter']) + + +class InterpreterScript: + def __init__(self, path): + with open(path, 'r') as f: + script = f.read() + + context = {} + exec(script, context, context) + + process_outputs = context.get('process_outputs') + if not callable(process_outputs): + raise Exception("Can't find 'process_outputs' function in " + "the interpreter script") + self.__dict__['process_outputs'] = process_outputs + + get_categories = context.get('get_categories') + assert get_categories is None or callable(get_categories) + if get_categories: + self.__dict__['get_categories'] = get_categories + + @staticmethod + def get_categories(): + return None + + @staticmethod + def process_outputs(inputs, outputs): + raise NotImplementedError( + "Function should be implemented in the interpreter script") + + +class OpenVinoLauncher(Launcher): + cli_plugin = OpenVinoImporter + + def __init__(self, description, weights, interpreter, + plugins_path=None, device=None, model_dir=None): + model_dir = model_dir or '' + if not osp.isfile(description): + description = osp.join(model_dir, description) + if not osp.isfile(description): + raise Exception('Failed to open model description file "%s"' % \ + (description)) + + if not osp.isfile(weights): + weights = osp.join(model_dir, weights) + if not osp.isfile(weights): + raise Exception('Failed to open model weights file "%s"' % \ + (weights)) + + if not osp.isfile(interpreter): + interpreter = osp.join(model_dir, interpreter) + if not osp.isfile(interpreter): + raise Exception('Failed to open model interpreter script 
    def infer(self, inputs):
        """Run the loaded network on a batch of images.

        Parameters
        ----------
        inputs : numpy.ndarray
            Batch of BGR images in (N, H, W, C) layout; images are resized
            to the network's input resolution when they do not match.

        Returns
        -------
        The single output blob when the network has exactly one output,
        otherwise the raw {output_name: blob} dict from Inference Engine.
        """
        assert len(inputs.shape) == 4, \
            "Expected an input image in (N, H, W, C) format, got %s" % \
            (inputs.shape)
        assert inputs.shape[3] == 3, "Expected BGR input, got %s" % inputs.shape

        n, c, h, w = self._input_layout
        if inputs.shape[1:3] != (h, w):
            # resize each image into a preallocated buffer
            # (third positional arg of cv2.resize is the dst buffer)
            resized_inputs = np.empty((n, h, w, c), dtype=inputs.dtype)
            for inp, resized_input in zip(inputs, resized_inputs):
                cv2.resize(inp, (w, h), resized_input)
            inputs = resized_inputs
        inputs = inputs.transpose((0, 3, 1, 2)) # NHWC to NCHW
        inputs = {self._input_blob_name: inputs}
        if self._require_image_info:
            # Some OpenVINO 2019 models take an extra 'image_info' input:
            # [height, width, scale]
            info = np.zeros([1, 3])
            info[0, 0] = h
            info[0, 1] = w
            info[0, 2] = 1.0 # scale
            inputs['image_info'] = info

        results = self._net.infer(inputs)
        if len(results) == 1:
            # single-output network: unwrap for caller convenience
            return results[self._output_blob_name]
        else:
            return results
[] + for subset in self._subsets: + self._parts.append((set(by_splits[subset]), subset)) + + @staticmethod + def _get_uniq_annotations(dataset): + annotations = [] + for item in dataset: + labels = [a for a in item.annotations + if a.type == AnnotationType.label] + if len(labels) != 1: + raise Exception("Item '%s' contains %s labels, " + "but exactly one is expected" % (item.id, len(labels))) + annotations.append(labels[0]) + return annotations + + @staticmethod + def _validate_splits(splits, valid=None): + snames = [] + ratios = [] + if valid is None: + valid = ["train", "val", "test"] + for subset, ratio in splits: + assert subset in valid, \ + "Subset name must be one of %s, but got %s" % (valid, subset) + assert 0.0 <= ratio and ratio <= 1.0, \ + "Ratio is expected to be in the range " \ + "[0, 1], but got %s for %s" % (ratio, subset) + snames.append(subset) + ratios.append(float(ratio)) + ratios = np.array(ratios) + + total_ratio = np.sum(ratios) + if not abs(total_ratio - 1.0) <= NEAR_ZERO: + raise Exception( + "Sum of ratios is expected to be 1, got %s, which is %s" + % (splits, total_ratio) + ) + return snames, ratios + + @staticmethod + def _get_required(ratio): + min_value = np.max(ratio) + for i in ratio: + if NEAR_ZERO < i and i < min_value: + min_value = i + required = int(np.around(1.0) / min_value) + return required + + @staticmethod + def _get_sections(dataset_size, ratio): + n_splits = [int(np.around(dataset_size * r)) for r in ratio[:-1]] + n_splits.append(dataset_size - np.sum(n_splits)) + + # if there are splits with zero samples even if ratio is not 0, + # borrow one from the split who has one or more. + for ii, num_split in enumerate(n_splits): + if num_split == 0 and NEAR_ZERO < ratio[ii]: + midx = np.argmax(n_splits) + if n_splits[midx] > 0: + n_splits[ii] += 1 + n_splits[midx] -= 1 + sections = np.add.accumulate(n_splits[:-1]) + return sections + + @staticmethod + def _group_by_attr(items): + """ + Args: + items: list of (idx, ann). 
    def _split_by_attr(self, datasets, snames, ratio, out_splits,
            dataset_key="label"):
        """Distribute sample indices over subsets, one attribute group
        at a time, extending `out_splits` in place.

        Parameters
        ----------
        datasets : dict of { key : list of (index, annotation) }
        snames : list of subset names, parallel to `ratio`
        ratio : numpy.ndarray of per-subset ratios (sums to 1)
        out_splits : dict of { subset name : list of indices }
        dataset_key : str
            Used only in warning messages (default: "label").
        """
        # minimum group size for every non-zero split to get a sample
        required = self._get_required(ratio)
        for key, items in datasets.items():
            # shuffle in place so the np.array_split below is random
            np.random.shuffle(items)
            # samples sharing an attribute set are split proportionally
            by_attributes = self._group_by_attr(items)
            for attributes, indice in by_attributes.items():
                gname = "%s: %s, attrs: %s" % (dataset_key, key, attributes)
                splits = self._split_indice(indice, gname, ratio, required)
                for subset, split in zip(snames, splits):
                    if 0 < len(split):
                        out_splits[subset].extend(split)
    def _split_dataset(self):
        """Compute the train/val/test assignment for every item.

        Groups items by label, then (inside _split_by_attr) by attribute
        combination, and splits each group according to the requested
        ratios. The result is stored via _set_parts(); this is invoked
        lazily from __iter__().
        """
        np.random.seed(self._seed)

        # support only single label for a DatasetItem
        # 1. group by label
        by_labels = dict()
        annotations = self._get_uniq_annotations(self._extractor)
        for idx, ann in enumerate(annotations):
            # annotations without a label all land in the None group
            label = getattr(ann, 'label', None)
            if label not in by_labels:
                by_labels[label] = []
            by_labels[label].append((idx, ann))

        by_splits = dict()
        for subset in self._subsets:
            by_splits[subset] = []

        # 2. group by attributes and split each group proportionally
        self._split_by_attr(by_labels, self._snames, self._sratio, by_splits)
        self._set_parts(by_splits)
    def __init__(self, dataset, splits, test_splits, pid_name="PID", seed=None):
        """
        Parameters
        ----------
        dataset : Dataset
        splits : list
            A list of (subset(str), ratio(float)).
            Subset is expected to be one of ["train", "val", "test"].
            The sum of ratios is expected to be 1.
        test_splits : list
            A list of (subset(str), ratio(float)).
            Subset is expected to be one of ["gallery", "query"].
            The sum of ratios is expected to be 1.
        pid_name : str
            Attribute name representing the person id (default: PID).
        seed : int, optional
            Seed for numpy's RNG used during splitting.
        """
        super().__init__(dataset, splits, seed)

        # stored for the lazy _split_dataset() call
        self._test_splits = test_splits
        self._pid_name = pid_name
+ test = id_ratio[id_snames.index("test")] if "test" in id_snames else 0 + if NEAR_ZERO < test: # has testset + split_ratio = np.array([test, 1.0 - test]) + person_ids = list(by_pid.keys()) + np.random.shuffle(person_ids) + sections = self._get_sections(len(person_ids), split_ratio) + splits = np.array_split(person_ids, sections) + testset = {pid: by_pid[pid] for pid in splits[0]} + trval = {pid: by_pid[pid] for pid in splits[1]} + + # follow the ratio of datasetitems as possible. + # naive heuristic: exchange the best item one by one. + expected_count = int(len(self._extractor) * split_ratio[0]) + testset_total = int(np.sum([len(v) for v in testset.values()])) + self._rebalancing(testset, trval, expected_count, testset_total) + else: + testset = dict() + trval = by_pid + + by_splits = dict() + for subset in self._subsets: + by_splits[subset] = [] + + # 2. split 'test' into 'gallery' and 'query' + if 0 < len(testset): + for person_id, items in testset.items(): + indice = [idx for idx, _ in items] + by_splits["test"].extend(indice) + + valid = ["gallery", "query"] + test_splits = self._test_splits + test_snames, test_ratio = self._validate_splits(test_splits, valid) + by_groups = {s: [] for s in test_snames} + self._split_by_attr(testset, test_snames, test_ratio, by_groups, + dataset_key=pid_name) + + # tag using group + for idx, item in enumerate(self._extractor): + for subset, split in by_groups.items(): + if idx in split: + group_id = self._group_map[subset] + item.annotations[0].group = group_id + break + + # 3. 
    @staticmethod
    def _rebalancing(test, trval, expected_count, testset_total):
        """Swap person IDs between `test` and `trval` (both mutated in
        place) so the item count of `test` approaches `expected_count`.

        Greedy heuristic: each (test id, trval id) exchange changes the
        test-set size by len(trval items) - len(test items); repeatedly
        pick the exchange whose delta moves the size closest to the
        target, stopping when no exchange improves it.

        Parameters
        ----------
        test, trval : dict of { person id : list of (index, annotation) }
        expected_count : int
            Desired number of items in the test set.
        testset_total : int
            Current number of items in the test set.
        """
        # diffs: size delta of a swap -> list of candidate (test, trval) pairs
        diffs = dict()
        for id_test, items_test in test.items():
            count_test = len(items_test)
            for id_trval, items_trval in trval.items():
                count_trval = len(items_trval)
                diff = count_trval - count_test
                if diff == 0:
                    continue # exchange has no effect
                if diff not in diffs:
                    diffs[diff] = [(id_test, id_trval)]
                else:
                    diffs[diff].append((id_test, id_trval))
        exchanges = []
        while True:
            target_diff = expected_count - testset_total
            # find nearest diff.
            # NOTE(review): assumes `diffs` is non-empty here — an empty
            # dict would make argmin fail; TODO confirm against callers.
            keys = np.array(list(diffs.keys()))
            idx = (np.abs(keys - target_diff)).argmin()
            nearest = keys[idx]
            # stop when the best exchange would not bring us closer
            if abs(target_diff) <= abs(target_diff - nearest):
                break
            choice = np.random.choice(range(len(diffs[nearest])))
            pid_test, pid_trval = diffs[nearest][choice]
            testset_total += nearest
            # drop every candidate pair touching either chosen id
            new_diffs = dict()
            for diff, person_ids in diffs.items():
                new_list = []
                for id1, id2 in person_ids:
                    if id1 == pid_test or id2 == pid_trval:
                        continue
                    new_list.append((id1, id2))
                if 0 < len(new_list):
                    new_diffs[diff] = new_list
            diffs = new_diffs
            exchanges.append((pid_test, pid_trval))
        # exchange
        for pid_test, pid_trval in exchanges:
            test[pid_trval] = trval.pop(pid_trval)
            trval[pid_test] = test.pop(pid_test)
+ seed : int, optional + """ + super().__init__(dataset, splits, seed) + + @staticmethod + def _group_by_bbox_labels(dataset): + by_labels = dict() + for idx, item in enumerate(dataset): + bbox_anns = [a for a in item.annotations + if a.type == AnnotationType.bbox] + assert 0 < len(bbox_anns), \ + "Expected more than one bbox annotation in the dataset" + for ann in bbox_anns: + label = getattr(ann, 'label', None) + if label not in by_labels: + by_labels[label] = [(idx, ann)] + else: + by_labels[label].append((idx, ann)) + return by_labels + + def _split_dataset(self): + np.random.seed(self._seed) + + subsets, sratio = self._snames, self._sratio + + # 1. group by bbox label + by_labels = self._group_by_bbox_labels(self._extractor) + + # 2. group by attributes + by_combinations = dict() + for label, items in by_labels.items(): + by_attributes = self._group_by_attr(items) + for attributes, indice in by_attributes.items(): + gname = "label: %s, attributes: %s" % (label, attributes) + by_combinations[gname] = indice + + # total number of GT samples per label-attr combinations + n_combs = {k: len(v) for k, v in by_combinations.items()} + + # 3-1. 
initially count per-image GT samples + scores_all = {} + init_scores = {} + for idx, _ in enumerate(self._extractor): + counts = {k: v.count(idx) for k, v in by_combinations.items()} + scores_all[idx] = counts + init_scores[idx] = np.sum( + [v / n_combs[k] for k, v in counts.items()] + ) + + by_splits = dict() + for sname in self._subsets: + by_splits[sname] = [] + + total = len(self._extractor) + target_size = dict() + expected = [] # expected numbers of per split GT samples + for sname, ratio in zip(subsets, sratio): + target_size[sname] = total * ratio + expected.append( + (sname, {k: v * ratio for k, v in n_combs.items()}) + ) + + ## + # functions for keep the # of annotations not exceed the expected num + def compute_penalty(counts, n_combs): + p = 0 + for k, v in counts.items(): + p += max(0, (v / n_combs[k]) - 1.0) + return p + + def update_nc(counts, n_combs): + for k, v in counts.items(): + n_combs[k] = max(0, n_combs[k] - v) + if n_combs[k] == 0: + n_combs[k] = -1 + return n_combs + + ## + + # 3-2. 
assign each DatasetItem to a split, one by one + for idx, _ in sorted( + init_scores.items(), key=lambda item: item[1], reverse=True + ): + counts = scores_all[idx] + + # shuffling split order to add randomness + # when two or more splits have the same penalty value + np.random.shuffle(expected) + + pp = [] + for sname, nc in expected: + if target_size[sname] <= len(by_splits[sname]): + # the split has enough images, + # stop adding more images to this split + pp.append(1e08) + else: + # compute penalty based on the number of GT samples + # added in the split + pp.append(compute_penalty(counts, nc)) + + # we push an image to a split with the minimum penalty + midx = np.argmin(pp) + + sname, nc = expected[midx] + by_splits[sname].append(idx) + update_nc(counts, nc) + + self._set_parts(by_splits) diff --git a/testbed/openvinotoolkit__datumaro/datumaro/plugins/tf_detection_api_format/converter.py b/testbed/openvinotoolkit__datumaro/datumaro/plugins/tf_detection_api_format/converter.py new file mode 100644 index 0000000000000000000000000000000000000000..382149dc3449508fb41db15e2c52117445493129 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/datumaro/plugins/tf_detection_api_format/converter.py @@ -0,0 +1,212 @@ + +# Copyright (C) 2019-2020 Intel Corporation +# +# SPDX-License-Identifier: MIT + +import codecs +from collections import OrderedDict +import hashlib +import logging as log +import os +import os.path as osp +import string + +from datumaro.components.extractor import (AnnotationType, DEFAULT_SUBSET_NAME, + LabelCategories +) +from datumaro.components.converter import Converter +from datumaro.util.image import encode_image, ByteImage +from datumaro.util.annotation_util import (max_bbox, + find_group_leader, find_instances) +from datumaro.util.mask_tools import merge_masks +from datumaro.util.tf_util import import_tf as _import_tf + +from .format import DetectionApiPath +tf = _import_tf() + + +# filter out non-ASCII characters, otherwise training will crash 
+_printable = set(string.printable) +def _make_printable(s): + return ''.join(filter(lambda x: x in _printable, s)) + +def int64_feature(value): + return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) + +def int64_list_feature(value): + return tf.train.Feature(int64_list=tf.train.Int64List(value=value)) + +def bytes_feature(value): + return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) + +def bytes_list_feature(value): + return tf.train.Feature(bytes_list=tf.train.BytesList(value=value)) + +def float_list_feature(value): + return tf.train.Feature(float_list=tf.train.FloatList(value=value)) + +class TfDetectionApiConverter(Converter): + DEFAULT_IMAGE_EXT = DetectionApiPath.DEFAULT_IMAGE_EXT + + @classmethod + def build_cmdline_parser(cls, **kwargs): + parser = super().build_cmdline_parser(**kwargs) + parser.add_argument('--save-masks', action='store_true', + help="Include instance masks (default: %(default)s)") + return parser + + def __init__(self, extractor, save_dir, save_masks=False, **kwargs): + super().__init__(extractor, save_dir, **kwargs) + + self._save_masks = save_masks + + def apply(self): + os.makedirs(self._save_dir, exist_ok=True) + + label_categories = self._extractor.categories().get(AnnotationType.label, + LabelCategories()) + get_label = lambda label_id: label_categories.items[label_id].name \ + if label_id is not None else '' + label_ids = OrderedDict((label.name, 1 + idx) + for idx, label in enumerate(label_categories.items)) + map_label_id = lambda label_id: label_ids.get(get_label(label_id), 0) + self._get_label = get_label + self._get_label_id = map_label_id + + for subset_name, subset in self._extractor.subsets().items(): + labelmap_path = osp.join(self._save_dir, DetectionApiPath.LABELMAP_FILE) + with codecs.open(labelmap_path, 'w', encoding='utf8') as f: + for label, idx in label_ids.items(): + f.write( + 'item {\n' + + ('\tid: %s\n' % (idx)) + + ("\tname: '%s'\n" % (label)) + + '}\n\n' + ) + + anno_path = 
osp.join(self._save_dir, '%s.tfrecord' % (subset_name)) + with tf.io.TFRecordWriter(anno_path) as writer: + for item in subset: + tf_example = self._make_tf_example(item) + writer.write(tf_example.SerializeToString()) + + @staticmethod + def _find_instances(annotations): + return find_instances(a for a in annotations + if a.type in { AnnotationType.bbox, AnnotationType.mask }) + + def _find_instance_parts(self, group, img_width, img_height): + boxes = [a for a in group if a.type == AnnotationType.bbox] + masks = [a for a in group if a.type == AnnotationType.mask] + + anns = boxes + masks + leader = find_group_leader(anns) + bbox = max_bbox(anns) + + mask = None + if self._save_masks: + mask = merge_masks([m.image for m in masks]) + + return [leader, mask, bbox] + + def _export_instances(self, instances, width, height): + xmins = [] # List of normalized left x coordinates of bounding boxes (1 per box) + xmaxs = [] # List of normalized right x coordinates of bounding boxes (1 per box) + ymins = [] # List of normalized top y coordinates of bounding boxes (1 per box) + ymaxs = [] # List of normalized bottom y coordinates of bounding boxes (1 per box) + classes_text = [] # List of class names of bounding boxes (1 per box) + classes = [] # List of class ids of bounding boxes (1 per box) + masks = [] # List of PNG-encoded instance masks (1 per box) + + for leader, mask, box in instances: + label = _make_printable(self._get_label(leader.label)) + classes_text.append(label.encode('utf-8')) + classes.append(self._get_label_id(leader.label)) + + xmins.append(box[0] / width) + xmaxs.append((box[0] + box[2]) / width) + ymins.append(box[1] / height) + ymaxs.append((box[1] + box[3]) / height) + + if self._save_masks: + if mask is not None: + mask = encode_image(mask, '.png') + else: + mask = b'' + masks.append(mask) + + result = {} + if classes: + result = { + 'image/object/bbox/xmin': float_list_feature(xmins), + 'image/object/bbox/xmax': float_list_feature(xmaxs), + 
    def _make_tf_example(self, item):
        """Build a tf.train.Example for one dataset item.

        Encodes image metadata, optionally the image bytes (when
        save_images is on), and all instance annotations. Raises an
        Exception when the item carries no image size information.
        """
        features = {
            'image/source_id': bytes_feature(
                str(item.attributes.get('source_id') or '').encode('utf-8')
            ),
        }

        filename = self._make_image_filename(item)
        features['image/filename'] = bytes_feature(filename.encode('utf-8'))

        if not item.has_image:
            raise Exception("Failed to export dataset item '%s': "
                "item has no image info" % item.id)
        height, width = item.image.size

        features.update({
            'image/height': int64_feature(height),
            'image/width': int64_feature(width),
        })

        # defaults; overwritten below when image data is actually saved
        features.update({
            'image/encoded': bytes_feature(b''),
            'image/format': bytes_feature(b''),
            'image/key/sha256': bytes_feature(b''),
        })
        if self._save_images:
            if item.has_image and item.image.has_data:
                buffer, fmt = self._save_image(item, filename)
                key = hashlib.sha256(buffer).hexdigest()

                features.update({
                    'image/encoded': bytes_feature(buffer),
                    'image/format': bytes_feature(fmt.encode('utf-8')),
                    'image/key/sha256': bytes_feature(key.encode('utf8')),
                })
            else:
                log.warning("Item '%s' has no image" % item.id)

        # one (leader, mask, bbox) triple per instance group
        instances = self._find_instances(item.annotations)
        instances = [self._find_instance_parts(i, width, height) for i in instances]
        features.update(self._export_instances(instances, width, height))

        tf_example = tf.train.Example(
            features=tf.train.Features(feature=features))

        return tf_example
def clamp(value, _min, _max):
    """Clamp `value` into [`_min`, `_max`] (upper bound applied first)."""
    upper_bounded = min(_max, value)
    return max(upper_bounded, _min)
in sorted(labels.items(), key=lambda item: item[1]) + ) + return { AnnotationType.label: label_categories } + + @classmethod + def _parse_labelmap(cls, text): + id_pattern = r'(?:id\s*:\s*(?P\d+))' + name_pattern = r'(?:name\s*:\s*[\'\"](?P.*?)[\'\"])' + entry_pattern = r'(\{(?:[\s\n]*(?:%(id)s|%(name)s)[\s\n]*){2}\})+' % \ + {'id': id_pattern, 'name': name_pattern} + matches = re.finditer(entry_pattern, text) + + labelmap = {} + for match in matches: + label_id = match.group('id') + label_name = match.group('name') + if label_id is not None and label_name is not None: + labelmap[label_name] = int(label_id) + + return labelmap + + @classmethod + def _parse_tfrecord_file(cls, filepath, subset, images_dir): + dataset = tf.data.TFRecordDataset(filepath) + features = { + 'image/filename': tf.io.FixedLenFeature([], tf.string), + 'image/source_id': tf.io.FixedLenFeature([], tf.string), + 'image/height': tf.io.FixedLenFeature([], tf.int64), + 'image/width': tf.io.FixedLenFeature([], tf.int64), + 'image/encoded': tf.io.FixedLenFeature([], tf.string), + 'image/format': tf.io.FixedLenFeature([], tf.string), + + # use varlen to avoid errors when this field is missing + 'image/key/sha256': tf.io.VarLenFeature(tf.string), + + # Object boxes and classes. 
+ 'image/object/bbox/xmin': tf.io.VarLenFeature(tf.float32), + 'image/object/bbox/xmax': tf.io.VarLenFeature(tf.float32), + 'image/object/bbox/ymin': tf.io.VarLenFeature(tf.float32), + 'image/object/bbox/ymax': tf.io.VarLenFeature(tf.float32), + 'image/object/class/label': tf.io.VarLenFeature(tf.int64), + 'image/object/class/text': tf.io.VarLenFeature(tf.string), + 'image/object/mask': tf.io.VarLenFeature(tf.string), + } + + dataset_labels = OrderedDict() + labelmap_path = osp.join(osp.dirname(filepath), + DetectionApiPath.LABELMAP_FILE) + if osp.exists(labelmap_path): + with open(labelmap_path, 'r', encoding='utf-8') as f: + labelmap_text = f.read() + dataset_labels.update({ label: id - 1 + for label, id in cls._parse_labelmap(labelmap_text).items() + }) + + dataset_items = [] + + for record in dataset: + parsed_record = tf.io.parse_single_example(record, features) + frame_id = parsed_record['image/source_id'].numpy().decode('utf-8') + frame_filename = \ + parsed_record['image/filename'].numpy().decode('utf-8') + frame_height = tf.cast( + parsed_record['image/height'], tf.int64).numpy().item() + frame_width = tf.cast( + parsed_record['image/width'], tf.int64).numpy().item() + frame_image = parsed_record['image/encoded'].numpy() + xmins = tf.sparse.to_dense( + parsed_record['image/object/bbox/xmin']).numpy() + ymins = tf.sparse.to_dense( + parsed_record['image/object/bbox/ymin']).numpy() + xmaxs = tf.sparse.to_dense( + parsed_record['image/object/bbox/xmax']).numpy() + ymaxs = tf.sparse.to_dense( + parsed_record['image/object/bbox/ymax']).numpy() + label_ids = tf.sparse.to_dense( + parsed_record['image/object/class/label']).numpy() + labels = tf.sparse.to_dense( + parsed_record['image/object/class/text'], + default_value=b'').numpy() + masks = tf.sparse.to_dense( + parsed_record['image/object/mask'], + default_value=b'').numpy() + + for label, label_id in zip(labels, label_ids): + label = label.decode('utf-8') + if not label: + continue + if label_id <= 0: + 
continue + if label in dataset_labels: + continue + dataset_labels[label] = label_id - 1 + + item_id = osp.splitext(frame_filename)[0] + + annotations = [] + for shape_id, shape in enumerate( + np.dstack((labels, xmins, ymins, xmaxs, ymaxs))[0]): + label = shape[0].decode('utf-8') + + mask = None + if len(masks) != 0: + mask = masks[shape_id] + + if mask is not None: + if isinstance(mask, bytes): + mask = lazy_image(mask, decode_image) + annotations.append(Mask(image=mask, + label=dataset_labels.get(label) + )) + else: + x = clamp(shape[1] * frame_width, 0, frame_width) + y = clamp(shape[2] * frame_height, 0, frame_height) + w = clamp(shape[3] * frame_width, 0, frame_width) - x + h = clamp(shape[4] * frame_height, 0, frame_height) - y + annotations.append(Bbox(x, y, w, h, + label=dataset_labels.get(label) + )) + + image_size = None + if frame_height and frame_width: + image_size = (frame_height, frame_width) + + image_params = {} + if frame_image: + image_params['data'] = frame_image + if frame_filename: + image_params['path'] = osp.join(images_dir, frame_filename) + + image = None + if image_params: + image = ByteImage(**image_params, size=image_size) + + dataset_items.append(DatasetItem(id=item_id, subset=subset, + image=image, annotations=annotations, + attributes={'source_id': frame_id})) + + return dataset_items, dataset_labels + +class TfDetectionApiImporter(Importer): + @classmethod + def find_sources(cls, path): + return cls._find_sources_recursive(path, '.tfrecord', 'tf_detection_api') \ No newline at end of file diff --git a/testbed/openvinotoolkit__datumaro/datumaro/plugins/transforms.py b/testbed/openvinotoolkit__datumaro/datumaro/plugins/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..c5030251f281bf17339cb21ee9798ef99526a210 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/datumaro/plugins/transforms.py @@ -0,0 +1,559 @@ +# Copyright (C) 2020 Intel Corporation +# +# SPDX-License-Identifier: MIT + +from 
collections import Counter +from enum import Enum +import logging as log +import os.path as osp +import random +import re + +import pycocotools.mask as mask_utils + +from datumaro.components.extractor import (Transform, AnnotationType, + RleMask, Polygon, Bbox, Label, DEFAULT_SUBSET_NAME, + LabelCategories, MaskCategories, PointsCategories +) +from datumaro.components.cli_plugin import CliPlugin +import datumaro.util.mask_tools as mask_tools +from datumaro.util.annotation_util import find_group_leader, find_instances + + +class CropCoveredSegments(Transform, CliPlugin): + def transform_item(self, item): + annotations = [] + segments = [] + for ann in item.annotations: + if ann.type in {AnnotationType.polygon, AnnotationType.mask}: + segments.append(ann) + else: + annotations.append(ann) + if not segments: + return item + + if not item.has_image: + raise Exception("Image info is required for this transform") + h, w = item.image.size + segments = self.crop_segments(segments, w, h) + + annotations += segments + return self.wrap_item(item, annotations=annotations) + + @classmethod + def crop_segments(cls, segment_anns, img_width, img_height): + segment_anns = sorted(segment_anns, key=lambda x: x.z_order) + + segments = [] + for s in segment_anns: + if s.type == AnnotationType.polygon: + segments.append(s.points) + elif s.type == AnnotationType.mask: + if isinstance(s, RleMask): + rle = s.rle + else: + rle = mask_tools.mask_to_rle(s.image) + segments.append(rle) + + segments = mask_tools.crop_covered_segments( + segments, img_width, img_height) + + new_anns = [] + for ann, new_segment in zip(segment_anns, segments): + fields = {'z_order': ann.z_order, 'label': ann.label, + 'id': ann.id, 'group': ann.group, 'attributes': ann.attributes + } + if ann.type == AnnotationType.polygon: + if fields['group'] is None: + fields['group'] = cls._make_group_id( + segment_anns + new_anns, fields['id']) + for polygon in new_segment: + new_anns.append(Polygon(points=polygon, **fields)) 
+ else: + rle = mask_tools.mask_to_rle(new_segment) + rle = mask_utils.frPyObjects(rle, *rle['size']) + new_anns.append(RleMask(rle=rle, **fields)) + + return new_anns + + @staticmethod + def _make_group_id(anns, ann_id): + if ann_id: + return ann_id + max_gid = max(anns, default=0, key=lambda x: x.group) + return max_gid + 1 + +class MergeInstanceSegments(Transform, CliPlugin): + """ + Replaces instance masks and, optionally, polygons with a single mask. + """ + + @classmethod + def build_cmdline_parser(cls, **kwargs): + parser = super().build_cmdline_parser(**kwargs) + parser.add_argument('--include-polygons', action='store_true', + help="Include polygons") + return parser + + def __init__(self, extractor, include_polygons=False): + super().__init__(extractor) + + self._include_polygons = include_polygons + + def transform_item(self, item): + annotations = [] + segments = [] + for ann in item.annotations: + if ann.type in {AnnotationType.polygon, AnnotationType.mask}: + segments.append(ann) + else: + annotations.append(ann) + if not segments: + return item + + if not item.has_image: + raise Exception("Image info is required for this transform") + h, w = item.image.size + instances = self.find_instances(segments) + segments = [self.merge_segments(i, w, h, self._include_polygons) + for i in instances] + segments = sum(segments, []) + + annotations += segments + return self.wrap_item(item, annotations=annotations) + + @classmethod + def merge_segments(cls, instance, img_width, img_height, + include_polygons=False): + polygons = [a for a in instance if a.type == AnnotationType.polygon] + masks = [a for a in instance if a.type == AnnotationType.mask] + if not polygons and not masks: + return [] + + leader = find_group_leader(polygons + masks) + instance = [] + + # Build the resulting mask + mask = None + + if include_polygons and polygons: + polygons = [p.points for p in polygons] + mask = mask_tools.rles_to_mask(polygons, img_width, img_height) + else: + instance += 
polygons # keep unused polygons + + if masks: + masks = [m.image for m in masks] + if mask is not None: + masks += [mask] + mask = mask_tools.merge_masks(masks) + + if mask is None: + return instance + + mask = mask_tools.mask_to_rle(mask) + mask = mask_utils.frPyObjects(mask, *mask['size']) + instance.append( + RleMask(rle=mask, label=leader.label, z_order=leader.z_order, + id=leader.id, attributes=leader.attributes, group=leader.group + ) + ) + return instance + + @staticmethod + def find_instances(annotations): + return find_instances(a for a in annotations + if a.type in {AnnotationType.polygon, AnnotationType.mask}) + +class PolygonsToMasks(Transform, CliPlugin): + def transform_item(self, item): + annotations = [] + for ann in item.annotations: + if ann.type == AnnotationType.polygon: + if not item.has_image: + raise Exception("Image info is required for this transform") + h, w = item.image.size + annotations.append(self.convert_polygon(ann, h, w)) + else: + annotations.append(ann) + + return self.wrap_item(item, annotations=annotations) + + @staticmethod + def convert_polygon(polygon, img_h, img_w): + rle = mask_utils.frPyObjects([polygon.points], img_h, img_w)[0] + + return RleMask(rle=rle, label=polygon.label, z_order=polygon.z_order, + id=polygon.id, attributes=polygon.attributes, group=polygon.group) + +class BoxesToMasks(Transform, CliPlugin): + def transform_item(self, item): + annotations = [] + for ann in item.annotations: + if ann.type == AnnotationType.bbox: + if not item.has_image: + raise Exception("Image info is required for this transform") + h, w = item.image.size + annotations.append(self.convert_bbox(ann, h, w)) + else: + annotations.append(ann) + + return self.wrap_item(item, annotations=annotations) + + @staticmethod + def convert_bbox(bbox, img_h, img_w): + rle = mask_utils.frPyObjects([bbox.as_polygon()], img_h, img_w)[0] + + return RleMask(rle=rle, label=bbox.label, z_order=bbox.z_order, + id=bbox.id, attributes=bbox.attributes, 
group=bbox.group) + +class MasksToPolygons(Transform, CliPlugin): + def transform_item(self, item): + annotations = [] + for ann in item.annotations: + if ann.type == AnnotationType.mask: + polygons = self.convert_mask(ann) + if not polygons: + log.debug("[%s]: item %s: " + "Mask conversion to polygons resulted in too " + "small polygons, which were discarded" % \ + (self._get_name(__class__), item.id)) + annotations.extend(polygons) + else: + annotations.append(ann) + + return self.wrap_item(item, annotations=annotations) + + @staticmethod + def convert_mask(mask): + polygons = mask_tools.mask_to_polygons(mask.image) + + return [ + Polygon(points=p, label=mask.label, z_order=mask.z_order, + id=mask.id, attributes=mask.attributes, group=mask.group) + for p in polygons + ] + +class ShapesToBoxes(Transform, CliPlugin): + def transform_item(self, item): + annotations = [] + for ann in item.annotations: + if ann.type in { AnnotationType.mask, AnnotationType.polygon, + AnnotationType.polyline, AnnotationType.points, + }: + annotations.append(self.convert_shape(ann)) + else: + annotations.append(ann) + + return self.wrap_item(item, annotations=annotations) + + @staticmethod + def convert_shape(shape): + bbox = shape.get_bbox() + return Bbox(*bbox, label=shape.label, z_order=shape.z_order, + id=shape.id, attributes=shape.attributes, group=shape.group) + +class Reindex(Transform, CliPlugin): + @classmethod + def build_cmdline_parser(cls, **kwargs): + parser = super().build_cmdline_parser(**kwargs) + parser.add_argument('-s', '--start', type=int, default=1, + help="Start value for item ids") + return parser + + def __init__(self, extractor, start=1): + super().__init__(extractor) + self._length = 'parent' + self._start = start + + def __iter__(self): + for i, item in enumerate(self._extractor): + yield self.wrap_item(item, id=i + self._start) + +class MapSubsets(Transform, CliPlugin): + @staticmethod + def _mapping_arg(s): + parts = s.split(':') + if len(parts) != 2: + 
import argparse + raise argparse.ArgumentTypeError() + return parts + + @classmethod + def build_cmdline_parser(cls, **kwargs): + parser = super().build_cmdline_parser(**kwargs) + parser.add_argument('-s', '--subset', action='append', + type=cls._mapping_arg, dest='mapping', + help="Subset mapping of the form: 'src:dst' (repeatable)") + return parser + + def __init__(self, extractor, mapping=None): + super().__init__(extractor) + + if mapping is None: + mapping = {} + elif not isinstance(mapping, dict): + mapping = dict(tuple(m) for m in mapping) + self._mapping = mapping + + if extractor._subsets: + counts = Counter(mapping.get(s, s) or DEFAULT_SUBSET_NAME + for s in extractor._subsets) + if all(c == 1 for c in counts.values()): + self._length = 'parent' + self._subsets = set(counts) + + def transform_item(self, item): + return self.wrap_item(item, + subset=self._mapping.get(item.subset, item.subset)) + +class RandomSplit(Transform, CliPlugin): + """ + Joins all subsets into one and splits the result into few parts. 
+ It is expected that item ids are unique and subset ratios sum up to 1.|n + |n + Example:|n + |s|s%(prog)s --subset train:.67 --subset test:.33 + """ + + # avoid https://bugs.python.org/issue16399 + _default_split = [('train', 0.67), ('test', 0.33)] + + @staticmethod + def _split_arg(s): + parts = s.split(':') + if len(parts) != 2: + import argparse + raise argparse.ArgumentTypeError() + return (parts[0], float(parts[1])) + + @classmethod + def build_cmdline_parser(cls, **kwargs): + parser = super().build_cmdline_parser(**kwargs) + parser.add_argument('-s', '--subset', action='append', + type=cls._split_arg, dest='splits', + help="Subsets in the form: ':' " + "(repeatable, default: %s)" % dict(cls._default_split)) + parser.add_argument('--seed', type=int, help="Random seed") + return parser + + def __init__(self, extractor, splits, seed=None): + super().__init__(extractor) + + if splits is None: + splits = self._default_split + + assert 0 < len(splits), "Expected at least one split" + assert all(0.0 <= r and r <= 1.0 for _, r in splits), \ + "Ratios are expected to be in the range [0; 1], but got %s" % splits + + total_ratio = sum(s[1] for s in splits) + if not abs(total_ratio - 1.0) <= 1e-7: + raise Exception( + "Sum of ratios is expected to be 1, got %s, which is %s" % + (splits, total_ratio)) + + dataset_size = len(extractor) + indices = list(range(dataset_size)) + random.seed(seed) + random.shuffle(indices) + parts = [] + s = 0 + lower_boundary = 0 + for split_idx, (subset, ratio) in enumerate(splits): + s += ratio + upper_boundary = int(s * dataset_size) + if split_idx == len(splits) - 1: + upper_boundary = dataset_size + subset_indices = set(indices[lower_boundary : upper_boundary]) + parts.append((subset_indices, subset)) + lower_boundary = upper_boundary + self._parts = parts + + self._subsets = set(s[0] for s in splits) + self._length = 'parent' + + def _find_split(self, index): + for subset_indices, subset in self._parts: + if index in subset_indices: + 
return subset + return subset # all the possible remainder goes to the last split + + def __iter__(self): + for i, item in enumerate(self._extractor): + yield self.wrap_item(item, subset=self._find_split(i)) + +class IdFromImageName(Transform, CliPlugin): + def transform_item(self, item): + if item.has_image and item.image.path: + name = osp.splitext(osp.basename(item.image.path))[0] + return self.wrap_item(item, id=name) + else: + log.debug("Can't change item id for item '%s': " + "item has no image info" % item.id) + return item + +class Rename(Transform, CliPlugin): + """ + Renames items in the dataset. Supports regular expressions. + The first character in the expression is a delimiter for + the pattern and replacement parts. Replacement part can also + contain string.format tokens with 'item' object available.|n + |n + Examples:|n + - Replace 'pattern' with 'replacement':|n + |s|srename -e '|pattern|replacement|'|n + - Remove 'frame_' from item ids:|n + |s|srename -e '|frame_(\d+)|\\1|' + """ + + @classmethod + def build_cmdline_parser(cls, **kwargs): + parser = super().build_cmdline_parser(**kwargs) + parser.add_argument('-e', '--regex', + help="Regex for renaming.") + return parser + + def __init__(self, extractor, regex): + super().__init__(extractor) + + assert regex and isinstance(regex, str) + parts = regex.split(regex[0], maxsplit=3) + regex, sub = parts[1:3] + self._re = re.compile(regex) + self._sub = sub + + def transform_item(self, item): + return self.wrap_item(item, id=self._re.sub(self._sub, item.id) \ + .format(item=item)) + +class RemapLabels(Transform, CliPlugin): + """ + Changes labels in the dataset.|n + Examples:|n + - Rename 'person' to 'car' and 'cat' to 'dog', keep 'bus', remove others:|n + |s|sremap_labels -l person:car -l bus:bus -l cat:dog --default delete + """ + + DefaultAction = Enum('DefaultAction', ['keep', 'delete']) + + @staticmethod + def _split_arg(s): + parts = s.split(':') + if len(parts) != 2: + import argparse + raise 
argparse.ArgumentTypeError() + return (parts[0], parts[1]) + + @classmethod + def build_cmdline_parser(cls, **kwargs): + parser = super().build_cmdline_parser(**kwargs) + parser.add_argument('-l', '--label', action='append', + type=cls._split_arg, dest='mapping', + help="Label in the form of: ':' (repeatable)") + parser.add_argument('--default', + choices=[a.name for a in cls.DefaultAction], + default=cls.DefaultAction.keep.name, + help="Action for unspecified labels (default: %(default)s)") + return parser + + def __init__(self, extractor, mapping, default=None): + super().__init__(extractor) + + assert isinstance(default, (str, self.DefaultAction)) + if isinstance(default, str): + default = self.DefaultAction[default] + + assert isinstance(mapping, (dict, list)) + if isinstance(mapping, list): + mapping = dict(mapping) + + self._categories = {} + + src_label_cat = self._extractor.categories().get(AnnotationType.label) + if src_label_cat is not None: + self._make_label_id_map(src_label_cat, mapping, default) + + src_mask_cat = self._extractor.categories().get(AnnotationType.mask) + if src_mask_cat is not None: + assert src_label_cat is not None + dst_mask_cat = MaskCategories(attributes=src_mask_cat.attributes) + dst_mask_cat.colormap = { + id: src_mask_cat.colormap[id] + for id, _ in enumerate(src_label_cat.items) + if self._map_id(id) or id == 0 + } + self._categories[AnnotationType.mask] = dst_mask_cat + + src_points_cat = self._extractor.categories().get(AnnotationType.points) + if src_points_cat is not None: + assert src_label_cat is not None + dst_points_cat = PointsCategories(attributes=src_points_cat.attributes) + dst_points_cat.items = { + id: src_points_cat.items[id] + for id, item in enumerate(src_label_cat.items) + if self._map_id(id) or id == 0 + } + self._categories[AnnotationType.points] = dst_points_cat + + def _make_label_id_map(self, src_label_cat, label_mapping, default_action): + dst_label_cat = 
LabelCategories(attributes=src_label_cat.attributes) + id_mapping = {} + for src_index, src_label in enumerate(src_label_cat.items): + dst_label = label_mapping.get(src_label.name) + if not dst_label and default_action == self.DefaultAction.keep: + dst_label = src_label.name # keep unspecified as is + if not dst_label: + continue + + dst_index = dst_label_cat.find(dst_label)[0] + if dst_index is None: + dst_index = dst_label_cat.add(dst_label, + src_label.parent, src_label.attributes) + id_mapping[src_index] = dst_index + + if log.getLogger().isEnabledFor(log.DEBUG): + log.debug("Label mapping:") + for src_id, src_label in enumerate(src_label_cat.items): + if id_mapping.get(src_id): + log.debug("#%s '%s' -> #%s '%s'", + src_id, src_label.name, id_mapping[src_id], + dst_label_cat.items[id_mapping[src_id]].name + ) + else: + log.debug("#%s '%s' -> ", src_id, src_label.name) + + self._map_id = lambda src_id: id_mapping.get(src_id, None) + self._categories[AnnotationType.label] = dst_label_cat + + def categories(self): + return self._categories + + def transform_item(self, item): + annotations = [] + for ann in item.annotations: + if ann.type in { AnnotationType.label, AnnotationType.mask, + AnnotationType.points, AnnotationType.polygon, + AnnotationType.polyline, AnnotationType.bbox + } and ann.label is not None: + conv_label = self._map_id(ann.label) + if conv_label is not None: + annotations.append(ann.wrap(label=conv_label)) + else: + annotations.append(ann.wrap()) + return item.wrap(annotations=annotations) + +class AnnsToLabels(Transform, CliPlugin): + """ + Collects all labels from annotations (of all types) and + transforms them into a set of annotations of type Label + """ + + def transform_item(self, item): + labels = set(p.label for p in item.annotations + if getattr(p, 'label') != None) + annotations = [] + for label in labels: + annotations.append(Label(label=label)) + + return item.wrap(annotations=annotations) \ No newline at end of file diff --git 
a/testbed/openvinotoolkit__datumaro/datumaro/plugins/vgg_face2_format.py b/testbed/openvinotoolkit__datumaro/datumaro/plugins/vgg_face2_format.py new file mode 100644 index 0000000000000000000000000000000000000000..8d08f399b5bc2adcc4d6f86a515ca4b4510c5798 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/datumaro/plugins/vgg_face2_format.py @@ -0,0 +1,139 @@ +# Copyright (C) 2020 Intel Corporation +# +# SPDX-License-Identifier: MIT + +import csv +import os +import os.path as osp +from glob import glob + +from datumaro.components.converter import Converter +from datumaro.components.extractor import (AnnotationType, Bbox, DatasetItem, + Importer, Points, LabelCategories, SourceExtractor) + + +class VggFace2Path: + ANNOTATION_DIR = "bb_landmark" + IMAGE_EXT = '.jpg' + BBOXES_FILE = 'loose_bb_' + LANDMARKS_FILE = 'loose_landmark_' + +class VggFace2Extractor(SourceExtractor): + def __init__(self, path): + if not osp.isfile(path): + raise Exception("Can't read .csv annotation file '%s'" % path) + self._path = path + self._dataset_dir = osp.dirname(osp.dirname(path)) + + subset = osp.splitext(osp.basename(path))[0] + if subset.startswith(VggFace2Path.LANDMARKS_FILE): + subset = subset.split('_')[2] + super().__init__(subset=subset) + + self._load_categories() + self._items = list(self._load_items(path).values()) + + def _load_categories(self): + self._categories[AnnotationType.label] = LabelCategories() + + def _load_items(self, path): + items = {} + with open(path) as content: + landmarks_table = list(csv.DictReader(content)) + + for row in landmarks_table: + item_id = row['NAME_ID'] + image_path = osp.join(self._dataset_dir, self._subset, + item_id + VggFace2Path.IMAGE_EXT) + annotations = [] + if len([p for p in row if row[p] == '']) == 0 and len(row) == 11: + annotations.append(Points( + [float(row[p]) for p in row if p != 'NAME_ID'])) + if item_id in items and 0 < len(annotations): + annotation = items[item_id].annotations + annotation.append(annotations[0]) + 
else: + items[item_id] = DatasetItem(id=item_id, subset=self._subset, + image=image_path, annotations=annotations) + + bboxes_path = osp.join(self._dataset_dir, VggFace2Path.ANNOTATION_DIR, + VggFace2Path.BBOXES_FILE + self._subset + '.csv') + if osp.isfile(bboxes_path): + with open(bboxes_path) as content: + bboxes_table = list(csv.DictReader(content)) + for row in bboxes_table: + if len([p for p in row if row[p] == '']) == 0 and len(row) == 5: + item_id = row['NAME_ID'] + annotations = items[item_id].annotations + annotations.append(Bbox(int(row['X']), int(row['Y']), + int(row['W']), int(row['H']))) + return items + +class VggFace2Importer(Importer): + @classmethod + def find_sources(cls, path): + subset_paths = [p for p in glob(osp.join(path, + VggFace2Path.ANNOTATION_DIR, '**.csv'), recursive=True) + if not osp.basename(p).startswith(VggFace2Path.BBOXES_FILE)] + sources = [] + for subset_path in subset_paths: + sources += cls._find_sources_recursive( + subset_path, '.csv', 'vgg_face2') + return sources + +class VggFace2Converter(Converter): + DEFAULT_IMAGE_EXT = '.jpg' + + def apply(self): + save_dir = self._save_dir + + os.makedirs(save_dir, exist_ok=True) + for subset_name, subset in self._extractor.subsets().items(): + subset_dir = osp.join(save_dir, subset_name) + bboxes_table = [] + landmarks_table = [] + for item in subset: + if item.has_image and self._save_images: + self._save_image(item, osp.join(save_dir, subset_dir, + item.id + VggFace2Path.IMAGE_EXT)) + + landmarks = [a for a in item.annotations + if a.type == AnnotationType.points] + if landmarks: + for landmark in landmarks: + points = landmark.points + landmarks_table.append({'NAME_ID': item.id, + 'P1X': points[0], 'P1Y': points[1], + 'P2X': points[2], 'P2Y': points[3], + 'P3X': points[4], 'P3Y': points[5], + 'P4X': points[6], 'P4Y': points[7], + 'P5X': points[8], 'P5Y': points[9]}) + else: + landmarks_table.append({'NAME_ID': item.id}) + + bboxes = [a for a in item.annotations + if a.type == 
AnnotationType.bbox] + if bboxes: + for bbox in bboxes: + bboxes_table.append({'NAME_ID': item.id, 'X': int(bbox.x), + 'Y': int(bbox.y), 'W': int(bbox.w), 'H': int(bbox.h)}) + + landmarks_path = osp.join(save_dir, VggFace2Path.ANNOTATION_DIR, + VggFace2Path.LANDMARKS_FILE + subset_name + '.csv') + os.makedirs(osp.dirname(landmarks_path), exist_ok=True) + with open(landmarks_path, 'w', newline='') as file: + columns = ['NAME_ID', 'P1X', 'P1Y', 'P2X', 'P2Y', + 'P3X', 'P3Y', 'P4X', 'P4Y', 'P5X', 'P5Y'] + writer = csv.DictWriter(file, fieldnames=columns) + writer.writeheader() + writer.writerows(landmarks_table) + + if bboxes_table: + bboxes_path = osp.join(save_dir, VggFace2Path.ANNOTATION_DIR, + VggFace2Path.BBOXES_FILE + subset_name + '.csv') + os.makedirs(osp.dirname(bboxes_path), exist_ok=True) + with open(bboxes_path, 'w', newline='') as file: + columns = ['NAME_ID', 'X', 'Y', 'W', 'H'] + writer = csv.DictWriter(file, fieldnames=columns) + writer.writeheader() + writer.writerows(bboxes_table) diff --git a/testbed/openvinotoolkit__datumaro/datumaro/plugins/voc_format/__init__.py b/testbed/openvinotoolkit__datumaro/datumaro/plugins/voc_format/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/openvinotoolkit__datumaro/datumaro/plugins/voc_format/converter.py b/testbed/openvinotoolkit__datumaro/datumaro/plugins/voc_format/converter.py new file mode 100644 index 0000000000000000000000000000000000000000..c88e15d7baececac84621f5f0487a6ac1bd9eb61 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/datumaro/plugins/voc_format/converter.py @@ -0,0 +1,579 @@ + +# Copyright (C) 2020 Intel Corporation +# +# SPDX-License-Identifier: MIT + +import logging as log +import os +import os.path as osp +from collections import OrderedDict, defaultdict +from enum import Enum +from itertools import chain + +from lxml import etree as ET + +from datumaro.components.converter import Converter 
+from datumaro.components.extractor import (DEFAULT_SUBSET_NAME, AnnotationType, + CompiledMask, LabelCategories) +from datumaro.util import find, str_to_bool +from datumaro.util.image import save_image +from datumaro.util.mask_tools import paint_mask, remap_mask + +from .format import (VocTask, VocPath, VocInstColormap, + parse_label_map, make_voc_label_map, make_voc_categories, write_label_map +) + + +def _convert_attr(name, attributes, type_conv, default=None, warn=True): + d = object() + value = attributes.get(name, d) + if value is d: + return default + + try: + return type_conv(value) + except Exception as e: + log.warning("Failed to convert attribute '%s'='%s': %s" % \ + (name, value, e)) + return default + +def _write_xml_bbox(bbox, parent_elem): + x, y, w, h = bbox + bbox_elem = ET.SubElement(parent_elem, 'bndbox') + ET.SubElement(bbox_elem, 'xmin').text = str(x) + ET.SubElement(bbox_elem, 'ymin').text = str(y) + ET.SubElement(bbox_elem, 'xmax').text = str(x + w) + ET.SubElement(bbox_elem, 'ymax').text = str(y + h) + return bbox_elem + + +LabelmapType = Enum('LabelmapType', ['voc', 'source']) + +class VocConverter(Converter): + DEFAULT_IMAGE_EXT = VocPath.IMAGE_EXT + + @staticmethod + def _split_tasks_string(s): + return [VocTask[i.strip()] for i in s.split(',')] + + @staticmethod + def _get_labelmap(s): + if osp.isfile(s): + return s + try: + return LabelmapType[s].name + except KeyError: + import argparse + raise argparse.ArgumentTypeError() + + @classmethod + def build_cmdline_parser(cls, **kwargs): + parser = super().build_cmdline_parser(**kwargs) + + parser.add_argument('--apply-colormap', type=str_to_bool, default=True, + help="Use colormap for class and instance masks " + "(default: %(default)s)") + parser.add_argument('--label-map', type=cls._get_labelmap, default=None, + help="Labelmap file path or one of %s" % \ + ', '.join(t.name for t in LabelmapType)) + parser.add_argument('--allow-attributes', + type=str_to_bool, default=True, + help="Allow 
export of attributes (default: %(default)s)") + parser.add_argument('--tasks', type=cls._split_tasks_string, + help="VOC task filter, comma-separated list of {%s} " + "(default: all)" % ', '.join(t.name for t in VocTask)) + + return parser + + def __init__(self, extractor, save_dir, + tasks=None, apply_colormap=True, label_map=None, + allow_attributes=True, **kwargs): + super().__init__(extractor, save_dir, **kwargs) + + assert tasks is None or isinstance(tasks, (VocTask, list, set)) + if tasks is None: + tasks = set(VocTask) + elif isinstance(tasks, VocTask): + tasks = {tasks} + else: + tasks = set(t if t in VocTask else VocTask[t] for t in tasks) + self._tasks = tasks + + self._apply_colormap = apply_colormap + self._allow_attributes = allow_attributes + + if label_map is None: + label_map = LabelmapType.source.name + self._load_categories(label_map) + + def apply(self): + self.make_dirs() + self.save_subsets() + self.save_label_map() + + def make_dirs(self): + save_dir = self._save_dir + subsets_dir = osp.join(save_dir, VocPath.SUBSETS_DIR) + cls_subsets_dir = osp.join(subsets_dir, + VocPath.TASK_DIR[VocTask.classification]) + action_subsets_dir = osp.join(subsets_dir, + VocPath.TASK_DIR[VocTask.action_classification]) + layout_subsets_dir = osp.join(subsets_dir, + VocPath.TASK_DIR[VocTask.person_layout]) + segm_subsets_dir = osp.join(subsets_dir, + VocPath.TASK_DIR[VocTask.segmentation]) + ann_dir = osp.join(save_dir, VocPath.ANNOTATIONS_DIR) + img_dir = osp.join(save_dir, VocPath.IMAGES_DIR) + segm_dir = osp.join(save_dir, VocPath.SEGMENTATION_DIR) + inst_dir = osp.join(save_dir, VocPath.INSTANCES_DIR) + images_dir = osp.join(save_dir, VocPath.IMAGES_DIR) + + os.makedirs(subsets_dir, exist_ok=True) + os.makedirs(ann_dir, exist_ok=True) + os.makedirs(img_dir, exist_ok=True) + os.makedirs(segm_dir, exist_ok=True) + os.makedirs(inst_dir, exist_ok=True) + os.makedirs(images_dir, exist_ok=True) + + self._subsets_dir = subsets_dir + self._cls_subsets_dir = 
cls_subsets_dir + self._action_subsets_dir = action_subsets_dir + self._layout_subsets_dir = layout_subsets_dir + self._segm_subsets_dir = segm_subsets_dir + self._ann_dir = ann_dir + self._img_dir = img_dir + self._segm_dir = segm_dir + self._inst_dir = inst_dir + self._images_dir = images_dir + + def get_label(self, label_id): + return self._extractor. \ + categories()[AnnotationType.label].items[label_id].name + + def save_subsets(self): + for subset_name, subset in self._extractor.subsets().items(): + class_lists = OrderedDict() + clsdet_list = OrderedDict() + action_list = OrderedDict() + layout_list = OrderedDict() + segm_list = OrderedDict() + + for item in subset: + log.debug("Converting item '%s'", item.id) + + image_filename = self._make_image_filename(item) + if self._save_images: + if item.has_image and item.image.has_data: + self._save_image(item, + osp.join(self._images_dir, image_filename)) + else: + log.debug("Item '%s' has no image", item.id) + + labels = [] + bboxes = [] + masks = [] + for a in item.annotations: + if a.type == AnnotationType.label: + labels.append(a) + elif a.type == AnnotationType.bbox: + bboxes.append(a) + elif a.type == AnnotationType.mask: + masks.append(a) + + if self._tasks is None and bboxes or \ + self._tasks & {VocTask.detection, VocTask.person_layout, + VocTask.action_classification}: + root_elem = ET.Element('annotation') + if '_' in item.id: + folder = item.id[ : item.id.find('_')] + else: + folder = '' + ET.SubElement(root_elem, 'folder').text = folder + ET.SubElement(root_elem, 'filename').text = image_filename + + source_elem = ET.SubElement(root_elem, 'source') + ET.SubElement(source_elem, 'database').text = 'Unknown' + ET.SubElement(source_elem, 'annotation').text = 'Unknown' + ET.SubElement(source_elem, 'image').text = 'Unknown' + + if item.has_image: + h, w = item.image.size + size_elem = ET.SubElement(root_elem, 'size') + ET.SubElement(size_elem, 'width').text = str(w) + ET.SubElement(size_elem, 'height').text 
= str(h) + ET.SubElement(size_elem, 'depth').text = '' + + item_segmented = 0 < len(masks) + ET.SubElement(root_elem, 'segmented').text = \ + str(int(item_segmented)) + + objects_with_parts = [] + objects_with_actions = defaultdict(dict) + + main_bboxes = [] + layout_bboxes = [] + for bbox in bboxes: + label = self.get_label(bbox.label) + if self._is_part(label): + layout_bboxes.append(bbox) + elif self._is_label(label): + main_bboxes.append(bbox) + + for new_obj_id, obj in enumerate(main_bboxes): + attr = obj.attributes + + obj_elem = ET.SubElement(root_elem, 'object') + + obj_label = self.get_label(obj.label) + ET.SubElement(obj_elem, 'name').text = obj_label + + if 'pose' in attr: + ET.SubElement(obj_elem, 'pose').text = \ + str(attr['pose']) + + if 'truncated' in attr: + truncated = _convert_attr('truncated', attr, int, 0) + ET.SubElement(obj_elem, 'truncated').text = \ + '%d' % truncated + + if 'difficult' in attr: + difficult = _convert_attr('difficult', attr, int, 0) + ET.SubElement(obj_elem, 'difficult').text = \ + '%d' % difficult + + if 'occluded' in attr: + occluded = _convert_attr('occluded', attr, int, 0) + ET.SubElement(obj_elem, 'occluded').text = \ + '%d' % occluded + + bbox = obj.get_bbox() + if bbox is not None: + _write_xml_bbox(bbox, obj_elem) + + for part_bbox in filter( + lambda x: obj.group and obj.group == x.group, + layout_bboxes): + part_elem = ET.SubElement(obj_elem, 'part') + ET.SubElement(part_elem, 'name').text = \ + self.get_label(part_bbox.label) + _write_xml_bbox(part_bbox.get_bbox(), part_elem) + + objects_with_parts.append(new_obj_id) + + label_actions = self._get_actions(obj_label) + actions_elem = ET.Element('actions') + for action in label_actions: + present = 0 + if action in attr: + present = _convert_attr(action, attr, + lambda v: int(v == True), 0) + ET.SubElement(actions_elem, action).text = \ + '%d' % present + + objects_with_actions[new_obj_id][action] = present + if len(actions_elem) != 0: + 
obj_elem.append(actions_elem) + + if self._allow_attributes: + native_attrs = {'difficult', 'pose', + 'truncated', 'occluded' } + native_attrs.update(label_actions) + + attrs_elem = ET.Element('attributes') + for k, v in attr.items(): + if k in native_attrs: + continue + attr_elem = ET.SubElement(attrs_elem, 'attribute') + ET.SubElement(attr_elem, 'name').text = str(k) + ET.SubElement(attr_elem, 'value').text = str(v) + if len(attrs_elem): + obj_elem.append(attrs_elem) + + if self._tasks & {VocTask.detection, VocTask.person_layout, + VocTask.action_classification}: + ann_path = osp.join(self._ann_dir, item.id + '.xml') + os.makedirs(osp.dirname(ann_path), exist_ok=True) + with open(ann_path, 'w') as f: + f.write(ET.tostring(root_elem, + encoding='unicode', pretty_print=True)) + + clsdet_list[item.id] = True + layout_list[item.id] = objects_with_parts + action_list[item.id] = objects_with_actions + + for label_ann in labels: + label = self.get_label(label_ann.label) + if not self._is_label(label): + continue + class_list = class_lists.get(item.id, set()) + class_list.add(label_ann.label) + class_lists[item.id] = class_list + + clsdet_list[item.id] = True + + if masks: + compiled_mask = CompiledMask.from_instance_masks(masks, + instance_labels=[self._label_id_mapping(m.label) + for m in masks]) + + self.save_segm( + osp.join(self._segm_dir, item.id + VocPath.SEGM_EXT), + compiled_mask.class_mask) + self.save_segm( + osp.join(self._inst_dir, item.id + VocPath.SEGM_EXT), + compiled_mask.instance_mask, + colormap=VocInstColormap) + + segm_list[item.id] = True + + if len(item.annotations) == 0: + clsdet_list[item.id] = None + layout_list[item.id] = None + action_list[item.id] = None + segm_list[item.id] = None + + if self._tasks & {VocTask.classification, VocTask.detection, + VocTask.action_classification, VocTask.person_layout}: + self.save_clsdet_lists(subset_name, clsdet_list) + if self._tasks & {VocTask.classification}: + self.save_class_lists(subset_name, 
class_lists) + if self._tasks & {VocTask.action_classification}: + self.save_action_lists(subset_name, action_list) + if self._tasks & {VocTask.person_layout}: + self.save_layout_lists(subset_name, layout_list) + if self._tasks & {VocTask.segmentation}: + self.save_segm_lists(subset_name, segm_list) + + def save_action_lists(self, subset_name, action_list): + if not action_list: + return + + os.makedirs(self._action_subsets_dir, exist_ok=True) + + ann_file = osp.join(self._action_subsets_dir, subset_name + '.txt') + with open(ann_file, 'w') as f: + for item in action_list: + f.write('%s\n' % item) + + if len(action_list) == 0: + return + + all_actions = set(chain(*(self._get_actions(l) + for l in self._label_map))) + for action in all_actions: + ann_file = osp.join(self._action_subsets_dir, + '%s_%s.txt' % (action, subset_name)) + with open(ann_file, 'w') as f: + for item, objs in action_list.items(): + if not objs: + continue + for obj_id, obj_actions in objs.items(): + presented = obj_actions[action] + f.write('%s %s % d\n' % \ + (item, 1 + obj_id, 1 if presented else -1)) + + def save_class_lists(self, subset_name, class_lists): + if not class_lists: + return + + os.makedirs(self._cls_subsets_dir, exist_ok=True) + + for label in self._label_map: + ann_file = osp.join(self._cls_subsets_dir, + '%s_%s.txt' % (label, subset_name)) + with open(ann_file, 'w') as f: + for item, item_labels in class_lists.items(): + if not item_labels: + continue + item_labels = [self.get_label(l) for l in item_labels] + presented = label in item_labels + f.write('%s % d\n' % (item, 1 if presented else -1)) + + def save_clsdet_lists(self, subset_name, clsdet_list): + if not clsdet_list: + return + + os.makedirs(self._cls_subsets_dir, exist_ok=True) + + ann_file = osp.join(self._cls_subsets_dir, subset_name + '.txt') + with open(ann_file, 'w') as f: + for item in clsdet_list: + f.write('%s\n' % item) + + def save_segm_lists(self, subset_name, segm_list): + if not segm_list: + return + + 
os.makedirs(self._segm_subsets_dir, exist_ok=True) + + ann_file = osp.join(self._segm_subsets_dir, subset_name + '.txt') + with open(ann_file, 'w') as f: + for item in segm_list: + f.write('%s\n' % item) + + def save_layout_lists(self, subset_name, layout_list): + if not layout_list: + return + + os.makedirs(self._layout_subsets_dir, exist_ok=True) + + ann_file = osp.join(self._layout_subsets_dir, subset_name + '.txt') + with open(ann_file, 'w') as f: + for item, item_layouts in layout_list.items(): + if item_layouts: + for obj_id in item_layouts: + f.write('%s % d\n' % (item, 1 + obj_id)) + else: + f.write('%s\n' % (item)) + + def save_segm(self, path, mask, colormap=None): + if self._apply_colormap: + if colormap is None: + colormap = self._categories[AnnotationType.mask].colormap + mask = paint_mask(mask, colormap) + save_image(path, mask, create_dir=True) + + def save_label_map(self): + path = osp.join(self._save_dir, VocPath.LABELMAP_FILE) + write_label_map(path, self._label_map) + + def _load_categories(self, label_map_source): + if label_map_source == LabelmapType.voc.name: + # use the default VOC colormap + label_map = make_voc_label_map() + + elif label_map_source == LabelmapType.source.name and \ + AnnotationType.mask not in self._extractor.categories(): + # generate colormap for input labels + labels = self._extractor.categories() \ + .get(AnnotationType.label, LabelCategories()) + label_map = OrderedDict((item.name, [None, [], []]) + for item in labels.items) + + elif label_map_source == LabelmapType.source.name and \ + AnnotationType.mask in self._extractor.categories(): + # use source colormap + labels = self._extractor.categories()[AnnotationType.label] + colors = self._extractor.categories()[AnnotationType.mask] + label_map = OrderedDict() + for idx, item in enumerate(labels.items): + color = colors.colormap.get(idx) + if color is not None: + label_map[item.name] = [color, [], []] + + elif isinstance(label_map_source, dict): + label_map = 
OrderedDict( + sorted(label_map_source.items(), key=lambda e: e[0])) + + elif isinstance(label_map_source, str) and osp.isfile(label_map_source): + label_map = parse_label_map(label_map_source) + + else: + raise Exception("Wrong labelmap specified, " + "expected one of %s or a file path" % \ + ', '.join(t.name for t in LabelmapType)) + + # There must always be a label with color (0, 0, 0) at index 0 + bg_label = find(label_map.items(), lambda x: x[1][0] == (0, 0, 0)) + if bg_label is not None: + bg_label = bg_label[0] + else: + bg_label = 'background' + if bg_label not in label_map: + has_colors = any(v[0] is not None for v in label_map.values()) + color = (0, 0, 0) if has_colors else None + label_map[bg_label] = [color, [], []] + label_map.move_to_end(bg_label, last=False) + + self._categories = make_voc_categories(label_map) + + # Update colors with assigned values + colormap = self._categories[AnnotationType.mask].colormap + for label_id, color in colormap.items(): + label_desc = label_map[ + self._categories[AnnotationType.label].items[label_id].name] + label_desc[0] = color + + self._label_map = label_map + self._label_id_mapping = self._make_label_id_map() + + def _is_label(self, s): + return self._label_map.get(s) is not None + + def _is_part(self, s): + for label_desc in self._label_map.values(): + if s in label_desc[1]: + return True + return False + + def _is_action(self, label, s): + return s in self._get_actions(label) + + def _get_actions(self, label): + label_desc = self._label_map.get(label) + if not label_desc: + return [] + return label_desc[2] + + def _make_label_id_map(self): + source_labels = { + id: label.name for id, label in + enumerate(self._extractor.categories().get( + AnnotationType.label, LabelCategories()).items) + } + target_labels = { + label.name: id for id, label in + enumerate(self._categories[AnnotationType.label].items) + } + id_mapping = { + src_id: target_labels.get(src_label, 0) + for src_id, src_label in source_labels.items() 
+ } + + void_labels = [src_label for src_id, src_label in source_labels.items() + if src_label not in target_labels] + if void_labels: + log.warning("The following labels are remapped to background: %s" % + ', '.join(void_labels)) + log.debug("Saving segmentations with the following label mapping: \n%s" % + '\n'.join(["#%s '%s' -> #%s '%s'" % + ( + src_id, src_label, id_mapping[src_id], + self._categories[AnnotationType.label] \ + .items[id_mapping[src_id]].name + ) + for src_id, src_label in source_labels.items() + ]) + ) + + def map_id(src_id): + return id_mapping.get(src_id, 0) + return map_id + + def _remap_mask(self, mask): + return remap_mask(mask, self._label_id_mapping) + +class VocClassificationConverter(VocConverter): + def __init__(self, *args, **kwargs): + kwargs['tasks'] = VocTask.classification + super().__init__(*args, **kwargs) + +class VocDetectionConverter(VocConverter): + def __init__(self, *args, **kwargs): + kwargs['tasks'] = VocTask.detection + super().__init__(*args, **kwargs) + +class VocLayoutConverter(VocConverter): + def __init__(self, *args, **kwargs): + kwargs['tasks'] = VocTask.person_layout + super().__init__(*args, **kwargs) + +class VocActionConverter(VocConverter): + def __init__(self, *args, **kwargs): + kwargs['tasks'] = VocTask.action_classification + super().__init__(*args, **kwargs) + +class VocSegmentationConverter(VocConverter): + def __init__(self, *args, **kwargs): + kwargs['tasks'] = VocTask.segmentation + super().__init__(*args, **kwargs) diff --git a/testbed/openvinotoolkit__datumaro/datumaro/plugins/voc_format/format.py b/testbed/openvinotoolkit__datumaro/datumaro/plugins/voc_format/format.py new file mode 100644 index 0000000000000000000000000000000000000000..a03446d511aa1165e7f783b9abfdb56827ed8b43 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/datumaro/plugins/voc_format/format.py @@ -0,0 +1,206 @@ + +# Copyright (C) 2019-2020 Intel Corporation +# +# SPDX-License-Identifier: MIT + +from collections import 
# Copyright (C) 2019-2020 Intel Corporation
#
# SPDX-License-Identifier: MIT

from collections import OrderedDict
from enum import Enum
from itertools import chain

import numpy as np

from datumaro.components.extractor import (AnnotationType,
    LabelCategories, MaskCategories
)


VocTask = Enum('VocTask', [
    'classification',
    'detection',
    'segmentation',
    'action_classification',
    'person_layout',
])

# Standard VOC labels; the numeric value is the index in the VOC colormap.
VocLabel = Enum('VocLabel', [
    ('background', 0),
    ('aeroplane', 1),
    ('bicycle', 2),
    ('bird', 3),
    ('boat', 4),
    ('bottle', 5),
    ('bus', 6),
    ('car', 7),
    ('cat', 8),
    ('chair', 9),
    ('cow', 10),
    ('diningtable', 11),
    ('dog', 12),
    ('horse', 13),
    ('motorbike', 14),
    ('person', 15),
    ('pottedplant', 16),
    ('sheep', 17),
    ('sofa', 18),
    ('train', 19),
    ('tvmonitor', 20),
    ('ignored', 255),
])

VocPose = Enum('VocPose', [
    'Unspecified',
    'Left',
    'Right',
    'Frontal',
    'Rear',
])

VocBodyPart = Enum('VocBodyPart', [
    'head',
    'hand',
    'foot',
])

VocAction = Enum('VocAction', [
    'other',
    'jumping',
    'phoning',
    'playinginstrument',
    'reading',
    'ridingbike',
    'ridinghorse',
    'running',
    'takingphoto',
    'usingcomputer',
    'walking',
])

def generate_colormap(length=256):
    """Generate the standard VOC colormap.

    Each index is encoded into an RGB color by spreading the index bits
    over the color channels (the canonical VOC devkit algorithm).

    Returns:
        OrderedDict: index -> (r, g, b) tuple, for indices [0, length).
    """
    def get_bit(number, index):
        return (number >> index) & 1

    colormap = np.zeros((length, 3), dtype=int)
    indices = np.arange(length, dtype=int)

    # Fill bits from the most significant down, consuming 3 index bits
    # (one per channel) per step.
    for j in range(7, -1, -1):
        for c in range(3):
            colormap[:, c] |= get_bit(indices, c) << j
        indices >>= 3

    return OrderedDict(
        (id, tuple(color)) for id, color in enumerate(colormap)
    )

# Colors only for the indices actually used by VOC labels
VocColormap = {id: color for id, color in generate_colormap(256).items()
    if id in [l.value for l in VocLabel]}
# Instance masks use the full 256-entry colormap
VocInstColormap = generate_colormap(256)

class VocPath:
    """Standard VOC dataset directory layout and file names."""
    IMAGES_DIR = 'JPEGImages'
    ANNOTATIONS_DIR = 'Annotations'
    SEGMENTATION_DIR = 'SegmentationClass'
    INSTANCES_DIR = 'SegmentationObject'
    SUBSETS_DIR = 'ImageSets'
    IMAGE_EXT = '.jpg'
    SEGM_EXT = '.png'
    LABELMAP_FILE = 'labelmap.txt'

    TASK_DIR = {
        VocTask.classification: 'Main',
        VocTask.detection: 'Main',
        VocTask.segmentation: 'Segmentation',
        VocTask.action_classification: 'Action',
        VocTask.person_layout: 'Layout',
    }


def make_voc_label_map():
    """Build the default VOC label map.

    Returns:
        OrderedDict: name -> [color, parts, actions], where 'person'
        carries the standard body parts and actions.
    """
    labels = sorted(VocLabel, key=lambda l: l.value)
    label_map = OrderedDict(
        (label.name, [VocColormap[label.value], [], []]) for label in labels)
    label_map[VocLabel.person.name][1] = [p.name for p in VocBodyPart]
    label_map[VocLabel.person.name][2] = [a.name for a in VocAction]
    return label_map

def parse_label_map(path):
    """Parse a 'name:r,g,b:parts:actions' label map file.

    Args:
        path: path to the file; falsy values yield None.

    Returns:
        OrderedDict: name -> [color or None, parts list, actions list]

    Raises:
        ValueError: on a duplicate label name.
    """
    if not path:
        return None

    label_map = OrderedDict()
    with open(path, 'r') as f:
        for line in f:
            # skip empty and commented lines
            line = line.strip()
            if not line or line[0] == '#':
                continue

            # name, color, parts, actions
            label_desc = line.split(':')
            name = label_desc[0]

            if name in label_map:
                raise ValueError("Label '%s' is already defined" % name)

            if 1 < len(label_desc) and len(label_desc[1]) != 0:
                color = label_desc[1].split(',')
                assert len(color) == 3, \
                    "Label '%s' has wrong color, expected 'r,g,b', got '%s'" % \
                    (name, color)
                color = tuple([int(c) for c in color])
            else:
                color = None

            if 2 < len(label_desc) and len(label_desc[2]) != 0:
                parts = label_desc[2].split(',')
            else:
                parts = []

            if 3 < len(label_desc) and len(label_desc[3]) != 0:
                actions = label_desc[3].split(',')
            else:
                actions = []

            label_map[name] = [color, parts, actions]
    return label_map

def write_label_map(path, label_map):
    """Write a label map in the 'name:r,g,b:parts:actions' format."""
    with open(path, 'w') as f:
        f.write('# label:color_rgb:parts:actions\n')
        for label_name, label_desc in label_map.items():
            if label_desc[0]:
                color_rgb = ','.join(str(c) for c in label_desc[0])
            else:
                color_rgb = ''

            parts = ','.join(str(p) for p in label_desc[1])
            actions = ','.join(str(a) for a in label_desc[2])

            f.write('%s\n' % ':'.join([label_name, color_rgb, parts, actions]))

def make_voc_categories(label_map=None):
    """Build dataset categories (labels + mask colormap) from a label map.

    Args:
        label_map: name -> [color, parts, actions]; None uses the VOC default.

    Returns:
        dict: {AnnotationType.label: LabelCategories,
               AnnotationType.mask: MaskCategories}
    """
    if label_map is None:
        label_map = make_voc_label_map()

    categories = {}

    label_categories = LabelCategories()
    label_categories.attributes.update(['difficult', 'truncated', 'occluded'])

    for label, desc in label_map.items():
        label_categories.add(label, attributes=desc[2])
    # Register body parts as labels too, preserving first-seen order
    for part in OrderedDict((k, None) for k in chain(
            *(desc[1] for desc in label_map.values()))):
        label_categories.add(part)
    categories[AnnotationType.label] = label_categories

    has_colors = any(v[0] is not None for v in label_map.values())
    if not has_colors: # generate new colors
        colormap = generate_colormap(len(label_map))
    else: # only copy defined colors
        label_id = lambda label: label_categories.find(label)[0]
        colormap = { label_id(name): desc[0]
            for name, desc in label_map.items() if desc[0] is not None }
    mask_categories = MaskCategories(colormap)
    # precompute the inverse mapping while it is cheap
    mask_categories.inverse_colormap # pylint: disable=pointless-statement
    categories[AnnotationType.mask] = mask_categories

    return categories

# --- datumaro/plugins/voc_format/importer.py ---

# Copyright (C) 2019-2020 Intel Corporation
#
# SPDX-License-Identifier: MIT

from glob import glob
import os.path as osp

from datumaro.components.extractor import Importer

from .format import VocTask, VocPath

def find_path(root_path, path, depth=4):
    """Search for a directory matching `path` under `root_path`.

    Descends up to `depth` wildcard levels; only the first glob match at
    each level is considered.

    Returns:
        The first matching directory path, or None if nothing was found.
        (Bug fix: previously a non-directory glob match from the last
        attempted level could leak out as the return value.)
    """
    for _ in range(depth):
        full_path = osp.join(root_path, path)
        paths = glob(full_path)
        if paths:
            full_path = paths[0] # ignore all after the first one
            if osp.isdir(full_path):
                return full_path

        root_path = osp.join(root_path, '*')

    return None

class VocImporter(Importer):
    # (task, extractor plugin name, ImageSets subdirectory)
    _TASKS = [
        (VocTask.classification, 'voc_classification', 'Main'),
        (VocTask.detection, 'voc_detection', 'Main'),
        (VocTask.segmentation, 'voc_segmentation', 'Segmentation'),
        (VocTask.person_layout, 'voc_layout', 'Layout'),
        (VocTask.action_classification, 'voc_action', 'Action'),
    ]

    def __call__(self, path, **extra_params):
        """Create a Project with one source per found VOC subset file.

        Raises:
            Exception: if no VOC task directories are found under `path`.
        """
        from datumaro.components.project import Project # cyclic import
        project = Project()

        subset_paths = self.find_sources(path)
        if len(subset_paths) == 0:
            raise Exception("Failed to find 'voc' dataset at '%s'" % path)

        for task, extractor_type, subset_path in subset_paths:
            project.add_source('%s-%s' %
                (task.name, osp.splitext(osp.basename(subset_path))[0]),
                {
                    'url': subset_path,
                    'format': extractor_type,
                    'options': dict(extra_params),
                })

        return project

    @classmethod
    def find_sources(cls, path):
        """Find subset list files (ImageSets/<task>/<subset>.txt) for each task."""
        # find root path for the dataset
        root_path = path
        for task, extractor_type, task_dir in cls._TASKS:
            task_path = find_path(root_path, osp.join(VocPath.SUBSETS_DIR, task_dir))
            if task_path:
                root_path = osp.dirname(osp.dirname(task_path))
                break

        subset_paths = []
        for task, extractor_type, task_dir in cls._TASKS:
            task_path = osp.join(root_path, VocPath.SUBSETS_DIR, task_dir)

            if not osp.isdir(task_path):
                continue
            # per-class lists contain '_' (e.g. 'cat_train.txt'); skip them
            task_subsets = [p for p in glob(osp.join(task_path, '*.txt'))
                if '_' not in osp.basename(p)]
            subset_paths += [(task, extractor_type, p) for p in task_subsets]
        return subset_paths
import os
import os.path as osp
import re

from datumaro.components.converter import Converter
from datumaro.components.extractor import (AnnotationType, Bbox, DatasetItem,
    Importer, SourceExtractor)


class WiderFacePath:
    """WIDER FACE dataset layout constants."""
    IMAGE_EXT = '.jpg'
    ANNOTATIONS_DIR = 'wider_face_split'
    IMAGES_DIR = 'images'
    SUBSET_DIR = 'WIDER_'
    # Order of the optional per-bbox attribute columns in the annotation file
    BBOX_ATTRIBUTES = ['blur', 'expression', 'illumination',
        'occluded', 'pose', 'invalid']

class WiderFaceExtractor(SourceExtractor):
    def __init__(self, path):
        """Read a 'wider_face_<subset>_bbx_gt.txt' annotation file.

        Raises:
            Exception: if `path` is not an existing file.
        """
        if not osp.isfile(path):
            raise Exception("Can't read annotation file '%s'" % path)
        self._path = path
        self._dataset_dir = osp.dirname(osp.dirname(path))

        subset = osp.splitext(osp.basename(path))[0]
        match = re.fullmatch(r'wider_face_\S+_bbx_gt', subset)
        if match:
            subset = subset.split('_')[2]
        super().__init__(subset=subset)

        self._items = list(self._load_items(path).values())

    def _load_items(self, path):
        """Parse the annotation file into DatasetItems keyed by item id.

        The file format is: an image line, a bbox-count line, then that
        many bbox lines ('x y w h [6 attribute columns]').
        """
        items = {}
        with open(path, 'r') as f:
            lines = f.readlines()

        image_ids = [image_id for image_id, line in enumerate(lines)
            if WiderFacePath.IMAGE_EXT in line]

        for image_id in image_ids:
            # Bug fix: strip only the newline instead of chopping the last
            # character - the final line of a file may lack a trailing '\n'.
            image = lines[image_id].rstrip('\n')
            image_path = osp.join(self._dataset_dir, WiderFacePath.SUBSET_DIR
                + self._subset, WiderFacePath.IMAGES_DIR, image)
            item_id = image[:-len(WiderFacePath.IMAGE_EXT)]

            bbox_count = lines[image_id + 1]
            bbox_lines = lines[image_id + 2 : image_id + int(bbox_count) + 2]
            annotations = []
            for bbox in bbox_lines:
                bbox_list = bbox.split()
                if len(bbox_list) >= 4:
                    attributes = {}
                    if len(bbox_list) == 10:
                        i = 4
                        for attr in WiderFacePath.BBOX_ATTRIBUTES:
                            if bbox_list[i] != '-':
                                attributes[attr] = int(bbox_list[i])
                            i += 1
                    annotations.append(Bbox(
                        int(bbox_list[0]), int(bbox_list[1]),
                        int(bbox_list[2]), int(bbox_list[3]),
                        attributes = attributes
                    ))

            items[item_id] = DatasetItem(id=item_id, subset=self._subset,
                image=image_path, annotations=annotations)
        return items

class WiderFaceImporter(Importer):
    @classmethod
    def find_sources(cls, path):
        """Find '.txt' annotation files inside the 'wider_face_split' dir."""
        return cls._find_sources_recursive(osp.join(path,
            WiderFacePath.ANNOTATIONS_DIR), '.txt', 'wider_face')

class WiderFaceConverter(Converter):
    DEFAULT_IMAGE_EXT = '.jpg'

    def apply(self):
        """Write the dataset in WIDER FACE layout under `self._save_dir`."""
        save_dir = self._save_dir

        os.makedirs(save_dir, exist_ok=True)

        for subset_name, subset in self._extractor.subsets().items():
            subset_dir = osp.join(save_dir, WiderFacePath.SUBSET_DIR + subset_name)

            wider_annotation = ''
            for item in subset:
                wider_annotation += '%s\n' % (item.id + WiderFacePath.IMAGE_EXT)
                if item.has_image and self._save_images:
                    # Bug fix: subset_dir already contains save_dir; the old
                    # osp.join(save_dir, subset_dir, ...) duplicated the save
                    # directory when save_dir was a relative path.
                    self._save_image(item, osp.join(subset_dir,
                        WiderFacePath.IMAGES_DIR, item.id + WiderFacePath.IMAGE_EXT))

                bboxes = [a for a in item.annotations
                    if a.type == AnnotationType.bbox]

                wider_annotation += '%s\n' % len(bboxes)
                for bbox in bboxes:
                    wider_bb = ' '.join('%d' % p for p in bbox.get_bbox())
                    wider_annotation += '%s ' % wider_bb
                    if bbox.attributes:
                        wider_attr = ''
                        attr_counter = 0
                        # keep the fixed attribute column order; '-' marks
                        # a missing attribute
                        for attr in WiderFacePath.BBOX_ATTRIBUTES:
                            if attr in bbox.attributes:
                                wider_attr += '%s ' % bbox.attributes[attr]
                                attr_counter += 1
                            else:
                                wider_attr += '- '
                        if attr_counter > 0:
                            wider_annotation += wider_attr
                    wider_annotation += '\n'
            annotation_path = osp.join(save_dir, WiderFacePath.ANNOTATIONS_DIR,
                'wider_face_' + subset_name + '_bbx_gt.txt')
            os.makedirs(osp.dirname(annotation_path), exist_ok=True)
            with open(annotation_path, 'w') as f:
                f.write(wider_annotation)
# Copyright (C) 2019-2020 Intel Corporation
#
# SPDX-License-Identifier: MIT

from collections import OrderedDict
import os.path as osp
import re

from datumaro.components.extractor import (SourceExtractor, Extractor,
    DatasetItem, AnnotationType, Bbox, LabelCategories, Importer
)
from datumaro.util import split_path
from datumaro.util.image import Image

from .format import YoloPath


class YoloExtractor(SourceExtractor):
    class Subset(Extractor):
        """Lazy view over one subset; items are resolved via the parent."""
        def __init__(self, name, parent):
            super().__init__()
            self._name = name
            self._parent = parent
            self.items = OrderedDict()

        def __iter__(self):
            for item_id in self.items:
                yield self._parent._get(item_id, self._name)

        def __len__(self):
            return len(self.items)

        def categories(self):
            return self._parent.categories()

    def __init__(self, config_path, image_info=None):
        """Parse a YOLO '.data' descriptor file.

        Args:
            config_path: path to the '.data' file.
            image_info: optional image sizes - either a dict
                {image_name: (h, w)} or a path to a meta file with
                'name h w' lines; defaults to 'images.meta' next to the
                config if present.

        Raises:
            Exception: on a missing config/meta/subset-list file, or when
                the config lacks a 'names' entry.
        """
        super().__init__()

        if not osp.isfile(config_path):
            raise Exception("Can't read dataset descriptor file '%s'" %
                config_path)

        rootpath = osp.dirname(config_path)
        self._path = rootpath

        assert image_info is None or isinstance(image_info, (str, dict))
        if image_info is None:
            image_info = osp.join(rootpath, YoloPath.IMAGE_META_FILE)
            if not osp.isfile(image_info):
                image_info = {}
        if isinstance(image_info, str):
            if not osp.isfile(image_info):
                raise Exception("Can't read image meta file '%s'" % image_info)
            with open(image_info) as f:
                image_info = {}
                for line in f:
                    image_name, h, w = line.strip().split()
                    image_info[image_name] = (int(h), int(w))
        self._image_info = image_info

        with open(config_path, 'r') as f:
            config_lines = f.readlines()

        subsets = OrderedDict()
        names_path = None

        for line in config_lines:
            match = re.match(r'(\w+)\s*=\s*(.+)$', line)
            if not match:
                continue

            key = match.group(1)
            value = match.group(2)
            if key == 'names':
                names_path = value
            elif key in YoloPath.SUBSET_NAMES:
                subsets[key] = value
            else:
                continue

        if not names_path:
            raise Exception("Failed to parse labels path from '%s'" % \
                config_path)

        for subset_name, list_path in subsets.items():
            list_path = osp.join(self._path, self.localize_path(list_path))
            if not osp.isfile(list_path):
                raise Exception("Not found '%s' subset list file" % subset_name)

            subset = YoloExtractor.Subset(subset_name, self)
            with open(list_path, 'r') as f:
                # Robustness fix: skip blank lines, which previously
                # produced bogus items with empty ids
                subset.items = OrderedDict(
                    (self.name_from_path(p), self.localize_path(p))
                    for p in f if p.strip()
                )
            subsets[subset_name] = subset

        self._subsets = subsets

        self._categories = {
            AnnotationType.label:
                self._load_categories(
                    osp.join(self._path, self.localize_path(names_path)))
        }

    @staticmethod
    def localize_path(path):
        """Strip whitespace and the conventional 'data/' prefix from a path."""
        path = path.strip()
        default_base = osp.join('data', '')
        if path.startswith(default_base): # default path
            path = path[len(default_base) : ]
        return path

    @classmethod
    def name_from_path(cls, path):
        """Derive an item id from an image path listed in a subset file."""
        path = cls.localize_path(path)
        parts = split_path(path)
        if 1 < len(parts) and not osp.isabs(path):
            # NOTE: when path is like [data/]<subset>/<image_name>
            # drop everything but <image_name>
            # <image_name> can be <a>/<b>/<c>/<name>, so not just basename()
            path = osp.join(*parts[1:])
        return osp.splitext(path)[0]

    def _get(self, item_id, subset_name):
        """Materialize (and cache) the DatasetItem for the given id."""
        subset = self._subsets[subset_name]
        item = subset.items[item_id]

        if isinstance(item, str):
            image_size = self._image_info.get(item_id)
            image = Image(path=osp.join(self._path, item), size=image_size)

            anno_path = osp.splitext(image.path)[0] + '.txt'
            annotations = self._parse_annotations(anno_path, image)

            item = DatasetItem(id=item_id, subset=subset_name,
                image=image, annotations=annotations)
            subset.items[item_id] = item

        return item

    @staticmethod
    def _parse_annotations(anno_path, image):
        """Parse 'label xc yc w h' lines (relative coords) into Bboxes.

        Raises:
            Exception: if the image size is needed but unknown.
        """
        lines = []
        with open(anno_path, 'r') as f:
            for line in f:
                line = line.strip()
                if line:
                    lines.append(line)

        annotations = []
        if lines:
            size = image.size # use image info as late as possible
            if size is None:
                raise Exception("Can't find image info for '%s'" % image.path)
            image_height, image_width = size
            for line in lines:
                label_id, xc, yc, w, h = line.split()
                label_id = int(label_id)
                w = float(w)
                h = float(h)
                # convert center-based relative coords to absolute x, y, w, h
                x = float(xc) - w * 0.5
                y = float(yc) - h * 0.5
                annotations.append(Bbox(
                    round(x * image_width, 1), round(y * image_height, 1),
                    round(w * image_width, 1), round(h * image_height, 1),
                    label=label_id
                ))

        return annotations

    @staticmethod
    def _load_categories(names_path):
        """Read label names, one per line."""
        label_categories = LabelCategories()

        with open(names_path, 'r') as f:
            for label in f:
                label_categories.add(label.strip())

        return label_categories

    def __iter__(self):
        for subset in self._subsets.values():
            for item in subset:
                yield item

    def __len__(self):
        return sum(len(s) for s in self._subsets.values())

    def get_subset(self, name):
        return self._subsets[name]

class YoloImporter(Importer):
    @classmethod
    def find_sources(cls, path):
        """Find YOLO '.data' descriptor files under `path`."""
        return cls._find_sources_recursive(path, '.data', 'yolo')

# --- datumaro/plugins/yolo_format/format.py ---

# Copyright (C) 2019-2020 Intel Corporation
#
# SPDX-License-Identifier: MIT


class YoloPath:
    """YOLO format constants."""
    DEFAULT_SUBSET_NAME = 'train'
    SUBSET_NAMES = ['train', 'valid']

    IMAGE_META_FILE = 'images.meta'
def find(iterable, pred=lambda x: True, default=None):
    """Return the first element of `iterable` satisfying `pred`, or `default`."""
    return next((x for x in iterable if pred(x)), default)

def dir_items(path, ext, truncate_ext=False):
    """List directory entries whose names end with `ext`.

    BUGFIX: the previous implementation used `rfind`, which also matched
    (and truncated at) an occurrence of `ext` in the middle of a name,
    e.g. 'a.txt.bak' was reported for ext='.txt'. Only a trailing
    extension matches now.

    :param truncate_ext: when True, `ext` is stripped from the results
    """
    items = []
    for f in os.listdir(path):
        if f.endswith(ext):
            if truncate_ext:
                f = f[: len(f) - len(ext)]
            items.append(f)
    return items

def split_path(path):
    """Split a path into its components, e.g. 'a/b/c' -> ['a', 'b', 'c']."""
    path = osp.normpath(path)
    parts = []

    while True:
        path, part = osp.split(path)
        if part:
            parts.append(part)
        else:
            if path:
                parts.append(path)
            break
    parts.reverse()

    return parts

def cast(value, type_conv, default=None):
    """Convert `value` with `type_conv`, returning `default` on None or failure."""
    if value is None:
        return default
    try:
        return type_conv(value)
    except Exception:
        return default

def to_snake_case(s):
    """Convert CamelCase to snake_case; acronym runs stay joined ('HTML' -> 'html')."""
    if not s:
        return ''

    name = [s[0].lower()]
    for idx, char in enumerate(s[1:], start=1):
        if char.isalpha() and char.isupper():
            prev_char = s[idx - 1]
            if not (prev_char.isalpha() and prev_char.isupper()):
                # avoid "HTML" -> "h_t_m_l"
                name.append('_')
            name.append(char.lower())
        else:
            name.append(char)
    return ''.join(name)

def pairs(iterable):
    """Iterate over non-overlapping pairs: [1, 2, 3, 4] -> (1, 2), (3, 4)."""
    a = iter(iterable)
    return zip(a, a)

def take_by(iterable, count):
    """
    Returns elements from the input iterable by batches of N items.
    ('abcdefg', 3) -> ['a', 'b', 'c'], ['d', 'e', 'f'], ['g']
    """

    it = iter(iterable)
    while True:
        batch = list(islice(it, count))
        if not batch:
            break
        yield batch

def str_to_bool(s):
    """Parse a human-friendly boolean string; raises ValueError otherwise."""
    t = s.lower()
    if t in {'true', '1', 'ok', 'yes', 'y'}:
        return True
    elif t in {'false', '0', 'no', 'n'}:
        return False
    else:
        raise ValueError("Can't convert value '%s' to bool" % s)

def filter_dict(d, exclude_keys):
    """Return a copy of `d` without the keys in `exclude_keys`."""
    return { k: v for k, v in d.items() if k not in exclude_keys }


def find_instances(instance_anns):
    """Group annotations into instances by their `group` id.

    Annotations with a falsy group each form a single-annotation instance.
    """
    instance_anns = sorted(instance_anns, key=lambda a: a.group)
    ann_groups = []
    for g_id, group in groupby(instance_anns, lambda a: a.group):
        if not g_id:
            ann_groups.extend(([a] for a in group))
        else:
            ann_groups.append(list(group))

    return ann_groups

def find_group_leader(group):
    """Return the annotation with the largest area in the group."""
    return max(group, key=lambda x: x.get_area())

def _get_bbox(ann):
    # Accepts either an annotation object or a raw [x, y, w, h] bbox
    if isinstance(ann, (_Shape, Mask)):
        return ann.get_bbox()
    else:
        return ann

def max_bbox(annotations):
    """Return the [x, y, w, h] bbox enclosing all input annotations/bboxes."""
    boxes = [_get_bbox(ann) for ann in annotations]
    x0 = min((b[0] for b in boxes), default=0)
    y0 = min((b[1] for b in boxes), default=0)
    x1 = max((b[0] + b[2] for b in boxes), default=0)
    y1 = max((b[1] + b[3] for b in boxes), default=0)
    return [x0, y0, x1 - x0, y1 - y0]
def softmax(x):
    """Softmax over a 1-d array of scores."""
    return np.exp(x) / sum(np.exp(x))

def nms(segments, iou_thresh=0.5):
    """
    Non-maxima suppression algorithm.

    Keeps the highest-scoring segments (by the 'score' attribute), removing
    the ones that overlap them with IoU above `iou_thresh`.

    BUGFIX: the IoU matrix was computed with an undefined name `iou`,
    which raised NameError on every call; `segment_iou` is used instead,
    as it handles boxes, polygons and masks alike.
    """

    indices = np.argsort([b.attributes['score'] for b in segments])
    ious = np.array([[segment_iou(a, b) for b in segments] for a in segments])

    predictions = []
    while len(indices) != 0:
        i = len(indices) - 1
        pred_idx = indices[i]
        to_remove = [i]
        predictions.append(segments[pred_idx])
        for i, box_idx in enumerate(indices[:i]):
            if iou_thresh < ious[pred_idx, box_idx]:
                to_remove.append(i)
        indices = np.delete(indices, to_remove)

    return predictions

def bbox_iou(a, b):
    """
    IoU computation for simple cases with bounding boxes.

    Returns -1 if there is no intersection, a value in [0; 1] otherwise.
    """
    bbox_a = _get_bbox(a)
    bbox_b = _get_bbox(b)

    aX, aY, aW, aH = bbox_a
    bX, bY, bW, bH = bbox_b
    in_right = min(aX + aW, bX + bW)
    in_left = max(aX, bX)
    in_top = max(aY, bY)
    in_bottom = min(aY + aH, bY + bH)

    in_w = max(0, in_right - in_left)
    in_h = max(0, in_bottom - in_top)
    intersection = in_w * in_h
    if not intersection:
        return -1

    a_area = aW * aH
    b_area = bW * bH
    union = a_area + b_area - intersection
    return intersection / union

def segment_iou(a, b):
    """
    Generic IoU computation with masks, polygons, and boxes.

    Returns -1 if no intersection, [0; 1] otherwise
    """
    from pycocotools import mask as mask_utils

    a_bbox = a.get_bbox()
    b_bbox = b.get_bbox()

    is_bbox = AnnotationType.bbox in [a.type, b.type]
    if is_bbox:
        a = [a_bbox]
        b = [b_bbox]
    else:
        w = max(a_bbox[0] + a_bbox[2], b_bbox[0] + b_bbox[2])
        h = max(a_bbox[1] + a_bbox[3], b_bbox[1] + b_bbox[3])

        def _to_rle(ann):
            if ann.type == AnnotationType.polygon:
                return mask_utils.frPyObjects([ann.points], h, w)
            elif isinstance(ann, RleMask):
                return [ann.rle]
            elif ann.type == AnnotationType.mask:
                return mask_utils.frPyObjects([mask_to_rle(ann.image)], h, w)
            else:
                raise TypeError("Unexpected arguments: %s, %s" % (a, b))
        a = _to_rle(a)
        b = _to_rle(b)
    return float(mask_utils.iou(a, b, [not is_bbox]))

def PDJ(a, b, eps=None, ratio=0.05, bbox=None):
    """
    Percentage of Detected Joints metric.
    Counts the number of matching points.

    :param eps: the distance threshold; when omitted it is derived as
        `ratio` of the diagonal of `bbox` (or of the mean bbox of a and b)
    """

    assert eps is not None or ratio is not None

    p1 = np.array(a.points).reshape((-1, 2))
    p2 = np.array(b.points).reshape((-1, 2))
    if len(p1) != len(p2):
        return 0

    if not eps:
        if bbox is None:
            bbox = mean_bbox([a, b])

        diag = (bbox[2] ** 2 + bbox[3] ** 2) ** 0.5
        eps = ratio * diag

    dists = np.linalg.norm(p1 - p2, axis=1)
    return np.sum(dists < eps) / len(p1)
def smooth_line(points, segments):
    """
    Resample a polyline into `segments` equal-length segments.

    :param points: flat list [x0, y0, x1, y1, ...] with at least 2 points
    :param segments: the desired number of segments
    :return: when the input already has exactly `segments` segments,
        the input is returned as is; otherwise a tuple of
        an (segments + 1, 2) vertex array and the length of one segment
    """
    assert len(points) % 2 == 0 and 2 <= len(points) // 2

    if segments == len(points) // 2:
        return points

    points = list(points)
    if len(points) == 2:
        points.extend(points)
    vertices = np.array(points).reshape((-1, 2))

    seg_lengths = np.linalg.norm(vertices[1:] - vertices[:-1], axis=1)
    cumulative = [0]
    for length in seg_lengths:
        cumulative.append(cumulative[-1] + length)

    step = cumulative[-1] / segments

    resampled = np.zeros((segments + 1, 2))
    resampled[0] = vertices[0]

    src = 0
    for dst in range(1, segments + 1):
        pos = dst * step
        # advance to the source segment containing `pos`
        while cumulative[src + 1] < pos and src + 2 < len(cumulative):
            src += 1

        # linear interpolation inside the source segment
        t = (pos - cumulative[src]) / seg_lengths[src]
        resampled[dst] = vertices[src] * (1 - t) + vertices[src + 1] * t

    return resampled, step


# Sentinel for "value not provided" in attrs-based APIs
_NOTSET = object()

def not_empty(inst, attribute, x):
    # attrs validator: the value must be a non-empty collection
    assert len(x) != 0, x
def ensure_cls(c):
    """attrs converter: pass instances of `c` through, build one from a mapping otherwise."""
    def converter(arg):
        return arg if isinstance(arg, c) else c(**arg)
    return converter


TargetKinds = Enum('TargetKinds',
    ['project', 'source', 'external_dataset', 'inference', 'image'])

def is_project_name(value, project):
    return value == project.config.project_name

def is_project_path(value):
    """Check that `value` points to a loadable project."""
    if not value:
        return False
    try:
        Project.load(value)
    except Exception:
        return False
    return True

def is_project(value, project=None):
    if is_project_path(value):
        return True
    if project is not None:
        return is_project_name(value, project)
    return False

def is_source(value, project=None):
    """Check that `value` names an existing source of `project`."""
    if project is None:
        return False
    try:
        project.get_source(value)
    except KeyError:
        return False
    return True

def is_external_source(value):
    return False  # not supported

def is_inference_path(value):
    return False  # not supported

def is_image_path(value):
    """Check that `value` is a readable image file."""
    try:
        return load_image(value) is not None
    except Exception:
        return False


class Target:
    """A (kind, test) pair describing an acceptable CLI target argument."""

    def __init__(self, kind, test, is_default=False, name=None):
        self.kind = kind              # a TargetKinds member
        self.test = test              # predicate: str -> bool
        self.is_default = is_default  # matches an empty value
        self.name = name              # human-readable description

    def _get_fields(self):
        return [self.kind, self.test, self.is_default, self.name]

    def __str__(self):
        return self.name or str(self.kind)

    def __len__(self):
        return len(self._get_fields())

    def __iter__(self):
        return iter(self._get_fields())

def ProjectTarget(kind=TargetKinds.project, test=None,
        is_default=False, name='project name or path',
        project=None):
    if test is None:
        test = lambda v: is_project(v, project=project)
    return Target(kind, test, is_default, name)

def SourceTarget(kind=TargetKinds.source, test=None,
        is_default=False, name='source name',
        project=None):
    if test is None:
        test = lambda v: is_source(v, project=project)
    return Target(kind, test, is_default, name)

def ExternalDatasetTarget(kind=TargetKinds.external_dataset,
        test=is_external_source,
        is_default=False, name='external dataset path'):
    return Target(kind, test, is_default, name)

def InferenceTarget(kind=TargetKinds.inference, test=is_inference_path,
        is_default=False, name='inference path'):
    return Target(kind, test, is_default, name)

def ImageTarget(kind=TargetKinds.image, test=is_image_path,
        is_default=False, name='image path'):
    return Target(kind, test, is_default, name)


def target_selector(*targets):
    """Build an argparse `type=` function choosing the first matching target kind."""
    def selector(value):
        for (kind, test, is_default, _) in targets:
            if (is_default and (value == '' or value is None)) or test(value):
                return (kind, value)
        raise argparse.ArgumentTypeError('Value should be one of: %s'
            % ', '.join(str(t) for t in targets))
    return selector
def load_image(path, dtype=np.float32):
    """
    Reads an image in the HWC Grayscale/BGR(A) float [0; 255] format.
    """

    if _IMAGE_BACKEND == _IMAGE_BACKENDS.cv2:
        import cv2
        image = cv2.imread(path, cv2.IMREAD_UNCHANGED)
        image = image.astype(dtype)
    elif _IMAGE_BACKEND == _IMAGE_BACKENDS.PIL:
        from PIL import Image
        image = Image.open(path)
        image = np.asarray(image, dtype=dtype)
        if len(image.shape) == 3 and image.shape[2] in {3, 4}:
            image[:, :, :3] = image[:, :, 2::-1] # RGB to BGR
    else:
        raise NotImplementedError()

    if image is None:
        raise ValueError("Can't open image '%s'" % path)
    assert len(image.shape) in {2, 3}
    if len(image.shape) == 3:
        assert image.shape[2] in {3, 4}
    return image

def _is_jpeg_ext(ext):
    # Both common JPEG extensions, in any letter case, count as JPEG
    return ext.lower() in {'.jpg', '.jpeg'}

def save_image(path, image, create_dir=False, dtype=np.uint8, **kwargs):
    """
    Saves an image (HWC BGR(A) [0; 255]) to `path`.

    :param create_dir: create the destination directory when missing
    :param kwargs: encoder options, e.g. jpeg_quality (int)

    BUGFIX:
    - the extension check compared the last 4 characters with '.JPG', so
      '.jpeg' files silently ignored `jpeg_quality`; both '.jpg' and
      '.jpeg' are now recognized case-insensitively;
    - the PIL branch always passed quality=None when `jpeg_quality` was
      not given, which breaks JPEG encoding; the option is now passed
      only when set.
    """
    # NOTE: Check destination path for existence.
    # OpenCV silently fails if the target directory does not exist.
    dst_dir = osp.dirname(path)
    if dst_dir:
        if create_dir:
            os.makedirs(dst_dir, exist_ok=True)
        elif not osp.isdir(dst_dir):
            raise FileNotFoundError("Directory does not exist: '%s'" % dst_dir)

    if not kwargs:
        kwargs = {}

    ext = osp.splitext(path)[1]
    if _IMAGE_BACKEND == _IMAGE_BACKENDS.cv2:
        import cv2

        params = []
        if _is_jpeg_ext(ext):
            params = [
                int(cv2.IMWRITE_JPEG_QUALITY), kwargs.get('jpeg_quality', 75)
            ]

        image = image.astype(dtype)
        cv2.imwrite(path, image, params=params)
    elif _IMAGE_BACKEND == _IMAGE_BACKENDS.PIL:
        from PIL import Image

        params = {}
        quality = kwargs.get('jpeg_quality')
        if quality is not None:
            params['quality'] = quality
        if quality == 100:
            # disable chroma subsampling for the maximum quality
            params['subsampling'] = 0

        image = image.astype(dtype)
        if len(image.shape) == 3 and image.shape[2] in {3, 4}:
            image[:, :, :3] = image[:, :, 2::-1] # BGR to RGB
        image = Image.fromarray(image)
        image.save(path, **params)
    else:
        raise NotImplementedError()

def encode_image(image, ext, dtype=np.uint8, **kwargs):
    """
    Encodes an image (HWC BGR(A) [0; 255]) into a byte string.

    :param ext: the target format extension, with or without the leading dot
    :param kwargs: encoder options, e.g. jpeg_quality (int)

    BUGFIX: the same JPEG extension and quality=None problems as in
    save_image are fixed here.
    """
    if not kwargs:
        kwargs = {}

    if _IMAGE_BACKEND == _IMAGE_BACKENDS.cv2:
        import cv2

        if not ext.startswith('.'):
            ext = '.' + ext

        params = []
        if _is_jpeg_ext(ext):
            params = [
                int(cv2.IMWRITE_JPEG_QUALITY), kwargs.get('jpeg_quality', 75)
            ]

        image = image.astype(dtype)
        success, result = cv2.imencode(ext, image, params=params)
        if not success:
            raise Exception("Failed to encode image to '%s' format" % (ext))
        return result.tobytes()
    elif _IMAGE_BACKEND == _IMAGE_BACKENDS.PIL:
        from PIL import Image

        if ext.startswith('.'):
            ext = ext[1:]

        params = {}
        quality = kwargs.get('jpeg_quality')
        if quality is not None:
            params['quality'] = quality
        if quality == 100:
            params['subsampling'] = 0

        image = image.astype(dtype)
        if len(image.shape) == 3 and image.shape[2] in {3, 4}:
            image[:, :, :3] = image[:, :, 2::-1] # BGR to RGB
        image = Image.fromarray(image)
        with BytesIO() as buffer:
            image.save(buffer, format=ext, **params)
            return buffer.getvalue()
    else:
        raise NotImplementedError()
class lazy_image:
    """A deferred image loader with optional caching.

    Calling the instance loads the image (or takes it from the cache).
    """

    def __init__(self, path, loader=None, cache=None):
        self.path = path
        self.loader = load_image if loader is None else loader

        # Cache semantics:
        # - False: do not cache
        # - None: use the global cache
        # - object: use this object as the cache
        assert cache in {None, False} or isinstance(cache, object)
        self.cache = cache

    def __call__(self):
        # The path is not necessarily hashable or a file path,
        # so the object hash is used as the cache key
        image_id = hash(self)

        cache = self._get_cache(self.cache)
        image = cache.get(image_id) if cache is not None else None

        if image is None:
            image = self.loader(self.path)
            if cache is not None:
                cache.push(image_id, image)
        return image

    @staticmethod
    def _get_cache(cache):
        if cache is None:
            return _ImageCache.get_instance()
        if cache == False: # pylint: disable=singleton-comparison
            return None
        return cache

    def __hash__(self):
        return hash((id(self), self.path, self.loader))

class Image:
    """An image defined by a numpy array, a path, or a loader callable.

    The pixel data is produced lazily on first access.
    """

    def __init__(self, data=None, path=None, loader=None, cache=None,
            size=None):
        # size is (H, W)
        assert size is None or len(size) == 2
        if size is not None:
            assert len(size) == 2 and 0 < size[0] and 0 < size[1], size
            size = tuple(size)
        self._size = size

        assert path is None or isinstance(path, str)
        if path is None:
            path = ''
        self._path = path

        assert data is not None or path or loader, "Image can not be empty"
        if data is not None:
            assert callable(data) or isinstance(data, np.ndarray), type(data)
        if data is None and (path or loader):
            if osp.isfile(path) or loader:
                data = lazy_image(path, loader=loader, cache=cache)
        self._data = data

    @property
    def path(self):
        """The path of the image file, '' when unknown."""
        return self._path

    @property
    def ext(self):
        """The file extension of the image path, including the dot."""
        return osp.splitext(osp.basename(self._path))[1]

    @property
    def data(self):
        """The pixel data; loaded lazily when backed by a loader."""
        data = self._data
        return data() if callable(data) else data

    @property
    def has_data(self):
        return self._data is not None

    @property
    def size(self):
        """The (height, width) of the image, probing the data if needed."""
        if self._size is None:
            data = self.data
            if data is not None:
                self._size = data.shape[:2]
        return self._size

    def __eq__(self, other):
        # A raw array is compared against the pixel data directly
        if isinstance(other, np.ndarray):
            return self.has_data and np.array_equal(self.data, other)

        if not isinstance(other, Image):
            return False
        if not np.array_equal(self.size, other.size):
            return False
        if self.has_data != other.has_data:
            return False
        return not self.has_data or np.array_equal(self.data, other.data)
# Global cache instance, created lazily by ImageCache.get_instance()
_instance = None

DEFAULT_CAPACITY = 2

class ImageCache:
    """A small LRU cache for decoded images."""

    @staticmethod
    def get_instance():
        """Returns the global cache, creating it on first use."""
        global _instance
        if _instance is None:
            _instance = ImageCache()
        return _instance

    def __init__(self, capacity=DEFAULT_CAPACITY):
        self.capacity = int(capacity)
        self.items = OrderedDict()

    def push(self, item_id, image):
        # Recently used entries are kept at the front (see get()),
        # so the back end holds the eviction candidate
        if len(self.items) >= self.capacity:
            self.items.popitem(last=True)
        self.items[item_id] = image

    def get(self, item_id):
        sentinel = object()
        item = self.items.get(item_id, sentinel)
        if item is sentinel:
            return None

        self.items.move_to_end(item_id, last=False) # naive splay tree
        return item

    def size(self):
        return len(self.items)

    def clear(self):
        self.items.clear()
@contextmanager
def logging_disabled(max_level=logging.CRITICAL):
    """Temporarily suppress log records up to `max_level` (inclusive)."""
    previous_level = logging.root.manager.disable
    logging.disable(max_level)
    try:
        yield
    finally:
        logging.disable(previous_level)


def generate_colormap(length=256):
    """
    Generates colors using PASCAL VOC algorithm.

    Returns index -> (R, G, B) mapping.
    """

    def get_bit(number, index):
        return (number >> index) & 1

    colormap = np.zeros((length, 3), dtype=int)
    indices = np.arange(length, dtype=int)

    # Spread the low bits of each index over the high bits
    # of the R, G and B channels, 3 bits per round
    for j in range(7, -1, -1):
        for c in range(3):
            colormap[:, c] |= get_bit(indices, c) << j
        indices >>= 3

    return { index: tuple(color) for index, color in enumerate(colormap) }

def invert_colormap(colormap):
    """Builds the inverse (R, G, B) -> index mapping."""
    return { tuple(color): index for index, color in colormap.items() }

def check_is_mask(mask):
    # A mask is expected to be a HW or HW1 array
    assert len(mask.shape) in {2, 3}
    if len(mask.shape) == 3:
        assert mask.shape[2] == 1

_default_colormap = generate_colormap()
_default_unpaint_colormap = invert_colormap(_default_colormap)

def unpaint_mask(painted_mask, inverse_colormap=None):
    """Converts a color (painted) mask to an index mask.

    :param painted_mask: a HWC BGR [0; 255] image
    :param inverse_colormap: a (R, G, B) -> index mapping or a callable
        f(r, g, b) -> index; the default VOC colormap is used when omitted
    :return: a HW float32 index mask
    """
    assert len(painted_mask.shape) == 3
    if inverse_colormap is None:
        inverse_colormap = _default_unpaint_colormap

    if callable(inverse_colormap):
        map_fn = lambda a: inverse_colormap(
            (a >> 16) & 255, (a >> 8) & 255, a & 255
        )
    else:
        map_fn = lambda a: inverse_colormap[(
            (a >> 16) & 255, (a >> 8) & 255, a & 255
        )]

    # Pack each BGR pixel into one integer, then map only
    # the unique values for speed
    painted_mask = painted_mask.astype(int)
    packed = painted_mask[:, :, 0] + \
        (painted_mask[:, :, 1] << 8) + \
        (painted_mask[:, :, 2] << 16)
    uvals, inverse = np.unique(packed, return_inverse=True)
    palette = np.array([map_fn(v) for v in uvals], dtype=np.float32)
    return palette[inverse].reshape(packed.shape[:2])
def remap_mask(mask, map_fn):
    """Changes mask elements from one colormap to another.

    :param mask: a HW(C) [0; 255] index mask
    :param map_fn: an index -> index mapping function
    """
    check_is_mask(mask)

    # Build a full 256-entry translation table once, apply it in bulk
    table = np.array([map_fn(c) for c in range(256)], dtype=np.uint8)
    return table[mask]

def make_index_mask(binary_mask, index):
    """Returns a mask holding `index` where the binary mask is set, 0 elsewhere."""
    return np.choose(binary_mask, np.array([0, index], dtype=np.uint8))

def make_binary_mask(mask):
    # NOTE(review): despite the name, this returns np.nonzero() indices
    # (a tuple of index arrays), not a boolean mask — confirm callers
    # rely on this before changing
    return np.nonzero(mask)


def load_mask(path, inverse_colormap=None):
    """Loads an index mask from a file, unpainting a color mask when needed."""
    mask = load_image(path, dtype=np.uint8)
    if inverse_colormap is not None:
        if len(mask.shape) == 3 and mask.shape[2] != 1:
            mask = unpaint_mask(mask, inverse_colormap)
    return mask

def lazy_mask(path, inverse_colormap=None):
    """Returns a deferred loader for the mask file."""
    return lazy_image(path, lambda path: load_mask(path, inverse_colormap))

def mask_to_rle(binary_mask):
    """Encodes a binary mask into uncompressed COCO RLE.

    :return: a dict { 'counts': run lengths, 'size': [H, W] }
    """
    # walk in row-major order, as the COCO format specifies
    bounded = binary_mask.ravel(order='F')

    # add borders to the sequence, then locate run boundaries
    difs = np.diff(bounded, prepend=[1 - bounded[0]], append=[1 - bounded[-1]])
    counts, = np.where(difs != 0)

    # COCO RLE starts with a (possibly empty) run of zeros
    if bounded[0] != 0:
        counts = np.diff(counts, prepend=[0])
    else:
        counts = np.diff(counts)

    return {
        'counts': counts,
        'size': list(binary_mask.shape)
    }
measure.find_contours(padded_mask, 0.5) + # Fix coordinates after padding + contours = np.subtract(contours, 1) + + for contour in contours: + if not np.array_equal(contour[0], contour[-1]): + contour = np.vstack((contour, contour[0])) # make polygon closed + + contour = measure.approximate_polygon(contour, tolerance) + if len(contour) <= 2: + continue + + contour = np.flip(contour, axis=1).flatten().clip(0) # [x0, y0, ...] + + # Check if the polygon is big enough + rle = mask_utils.frPyObjects([contour], mask.shape[0], mask.shape[1]) + area = sum(mask_utils.area(rle)) + if area_threshold <= area: + polygons.append(contour) + return polygons + +def crop_covered_segments(segments, width, height, + iou_threshold=0.0, ratio_tolerance=0.001, area_threshold=1, + return_masks=False): + """ + Find all segments occluded by others and crop them to the visible part only. + Input segments are expected to be sorted from background to foreground. + + Args: + segments: 1d list of segment RLEs (in COCO format) + width: width of the image + height: height of the image + iou_threshold: IoU threshold for objects to be counted as intersected + By default is set to 0 to process any intersected objects + ratio_tolerance: an IoU "handicap" value for a situation + when an object is (almost) fully covered by another one and we + don't want make a "hole" in the background object + area_threshold: minimal area of included segments + + Returns: + A list of input segments' parts (in the same order as input): + [ + [[x1,y1, x2,y2 ...], ...], # input segment #0 parts + mask1, # input segment #1 mask (if source segment is mask) + [], # when source segment is too small + ... 
+ ] + """ + from pycocotools import mask as mask_utils + + segments = [[s] for s in segments] + input_rles = [mask_utils.frPyObjects(s, height, width) for s in segments] + + for i, rle_bottom in enumerate(input_rles): + area_bottom = sum(mask_utils.area(rle_bottom)) + if area_bottom < area_threshold: + segments[i] = [] if not return_masks else None + continue + + rles_top = [] + for j in range(i + 1, len(input_rles)): + rle_top = input_rles[j] + iou = sum(mask_utils.iou(rle_bottom, rle_top, [0, 0]))[0] + + if iou <= iou_threshold: + continue + + area_top = sum(mask_utils.area(rle_top)) + area_ratio = area_top / area_bottom + + # If a segment is fully inside another one, skip this segment + if abs(area_ratio - iou) < ratio_tolerance: + continue + + # Check if the bottom segment is fully covered by the top one. + # There is a mistake in the annotation, keep the background one + if abs(1 / area_ratio - iou) < ratio_tolerance: + rles_top = [] + break + + rles_top += rle_top + + if not rles_top and not isinstance(segments[i][0], dict) \ + and not return_masks: + continue + + rle_bottom = rle_bottom[0] + bottom_mask = mask_utils.decode(rle_bottom).astype(np.uint8) + + if rles_top: + rle_top = mask_utils.merge(rles_top) + top_mask = mask_utils.decode(rle_top).astype(np.uint8) + + bottom_mask -= top_mask + bottom_mask[bottom_mask != 1] = 0 + + if not return_masks and not isinstance(segments[i][0], dict): + segments[i] = mask_to_polygons(bottom_mask, + area_threshold=area_threshold) + else: + segments[i] = bottom_mask + + return segments + +def rles_to_mask(rles, width, height): + from pycocotools import mask as mask_utils + + rles = mask_utils.frPyObjects(rles, height, width) + rles = mask_utils.merge(rles) + mask = mask_utils.decode(rles) + return mask + +def find_mask_bbox(mask): + cols = np.any(mask, axis=0) + rows = np.any(mask, axis=1) + x0, x1 = np.where(cols)[0][[0, -1]] + y0, y1 = np.where(rows)[0][[0, -1]] + return [x0, y0, x1 - x0, y1 - y0] + +def 
merge_masks(masks): + """ + Merges masks into one, mask order is responsible for z order. + """ + if not masks: + return None + + merged_mask = masks[0] + for m in masks[1:]: + merged_mask = np.where(m != 0, m, merged_mask) + + return merged_mask \ No newline at end of file diff --git a/testbed/openvinotoolkit__datumaro/datumaro/util/os_util.py b/testbed/openvinotoolkit__datumaro/datumaro/util/os_util.py new file mode 100644 index 0000000000000000000000000000000000000000..b4d05e376db29e38b31cd09f5476007b1fb3f279 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/datumaro/util/os_util.py @@ -0,0 +1,17 @@ + +# Copyright (C) 2020 Intel Corporation +# +# SPDX-License-Identifier: MIT + +import subprocess + + +def check_instruction_set(instruction): + return instruction == str.strip( + # Let's ignore a warning from bandit about using shell=True. + # In this case it isn't a security issue and we use some + # shell features like pipes. + subprocess.check_output( + 'lscpu | grep -o "%s" | head -1' % instruction, + shell=True).decode('utf-8') # nosec + ) \ No newline at end of file diff --git a/testbed/openvinotoolkit__datumaro/datumaro/util/test_utils.py b/testbed/openvinotoolkit__datumaro/datumaro/util/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..08ffa4ed53afc02346ee7fb32abd3a8281f791b5 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/datumaro/util/test_utils.py @@ -0,0 +1,143 @@ + +# Copyright (C) 2019-2020 Intel Corporation +# +# SPDX-License-Identifier: MIT + +import inspect +import os +import os.path as osp +import shutil +import tempfile + +from datumaro.components.extractor import AnnotationType +from datumaro.components.project import Project +from datumaro.util import find + + +def current_function_name(depth=1): + return inspect.getouterframes(inspect.currentframe())[depth].function + +class FileRemover: + def __init__(self, path, is_dir=False, ignore_errors=False): + self.path = path + self.is_dir = is_dir + 
self.ignore_errors = ignore_errors + + def __enter__(self): + return self.path + + # pylint: disable=redefined-builtin + def __exit__(self, type=None, value=None, traceback=None): + if self.is_dir: + shutil.rmtree(self.path, ignore_errors=self.ignore_errors) + else: + os.remove(self.path) + # pylint: enable=redefined-builtin + +class TestDir(FileRemover): + def __init__(self, path=None, ignore_errors=False): + if path is None: + path = osp.abspath('temp_%s-' % current_function_name(2)) + path = tempfile.mkdtemp(dir=os.getcwd(), prefix=path) + else: + os.makedirs(path, exist_ok=ignore_errors) + + super().__init__(path, is_dir=True, ignore_errors=ignore_errors) + +def compare_categories(test, expected, actual): + test.assertEqual( + sorted(expected, key=lambda t: t.value), + sorted(actual, key=lambda t: t.value) + ) + + if AnnotationType.label in expected: + test.assertEqual( + expected[AnnotationType.label].items, + actual[AnnotationType.label].items, + ) + if AnnotationType.mask in expected: + test.assertEqual( + expected[AnnotationType.mask].colormap, + actual[AnnotationType.mask].colormap, + ) + if AnnotationType.points in expected: + test.assertEqual( + expected[AnnotationType.points].items, + actual[AnnotationType.points].items, + ) + +def _compare_annotations(expected, actual, ignored_attrs=None): + if not ignored_attrs: + return expected == actual + + a_attr = expected.attributes + b_attr = actual.attributes + + expected.attributes = {k:v for k,v in a_attr.items() if k not in ignored_attrs} + actual.attributes = {k:v for k,v in b_attr.items() if k not in ignored_attrs} + r = expected == actual + + expected.attributes = a_attr + actual.attributes = b_attr + return r + +def compare_datasets(test, expected, actual, ignored_attrs=None, + require_images=False): + compare_categories(test, expected.categories(), actual.categories()) + + test.assertEqual(sorted(expected.subsets()), sorted(actual.subsets())) + test.assertEqual(len(expected), len(actual)) + for item_a 
in expected: + item_b = find(actual, lambda x: x.id == item_a.id and \ + x.subset == item_a.subset) + test.assertFalse(item_b is None, item_a.id) + test.assertEqual(item_a.attributes, item_b.attributes) + if (require_images and item_a.has_image and item_a.image.has_data) or \ + item_a.has_image and item_a.image.has_data and \ + item_b.has_image and item_b.image.has_data: + test.assertEqual(item_a.image, item_b.image, item_a.id) + test.assertEqual(len(item_a.annotations), len(item_b.annotations)) + for ann_a in item_a.annotations: + # We might find few corresponding items, so check them all + ann_b_matches = [x for x in item_b.annotations + if x.type == ann_a.type] + test.assertFalse(len(ann_b_matches) == 0, 'ann id: %s' % ann_a.id) + + ann_b = find(ann_b_matches, lambda x: + _compare_annotations(x, ann_a, ignored_attrs=ignored_attrs)) + if ann_b is None: + test.fail('ann %s, candidates %s' % (ann_a, ann_b_matches)) + item_b.annotations.remove(ann_b) # avoid repeats + +def compare_datasets_strict(test, expected, actual): + # Compares datasets for strong equality + + test.assertEqual(expected.categories(), actual.categories()) + + test.assertListEqual(sorted(expected.subsets()), sorted(actual.subsets())) + test.assertEqual(len(expected), len(actual)) + + for subset_name in expected.subsets(): + e_subset = expected.get_subset(subset_name) + a_subset = actual.get_subset(subset_name) + test.assertEqual(len(e_subset), len(a_subset)) + for idx, (item_a, item_b) in enumerate(zip(e_subset, a_subset)): + test.assertEqual(item_a, item_b, + '%s:\n%s\nvs.\n%s\n' % \ + (idx, item_a, item_b)) + +def test_save_and_load(test, source_dataset, converter, test_dir, importer, + target_dataset=None, importer_args=None, compare=None): + converter(source_dataset, test_dir) + + if importer_args is None: + importer_args = {} + parsed_dataset = Project.import_from(test_dir, importer, **importer_args) \ + .make_dataset() + + if target_dataset is None: + target_dataset = source_dataset + + if 
not compare: + compare = compare_datasets + compare(test, expected=target_dataset, actual=parsed_dataset) \ No newline at end of file diff --git a/testbed/openvinotoolkit__datumaro/datumaro/util/tf_util.py b/testbed/openvinotoolkit__datumaro/datumaro/util/tf_util.py new file mode 100644 index 0000000000000000000000000000000000000000..9eda97bab9c82d741baa671ab7faafe685793031 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/datumaro/util/tf_util.py @@ -0,0 +1,80 @@ + +# Copyright (C) 2019-2020 Intel Corporation +# +# SPDX-License-Identifier: MIT + + +def check_import(): + # Workaround for checking import availability: + # Official TF builds include AVX instructions. Once we try to import, + # the program crashes. We raise an exception instead. + + import subprocess + import sys + + from .os_util import check_instruction_set + + result = subprocess.run([sys.executable, '-c', 'import tensorflow'], + timeout=60, + universal_newlines=True, # use text mode for output stream + stdout=subprocess.PIPE, stderr=subprocess.PIPE) # capture output + + if result.returncode != 0: + message = result.stderr + if not message: + message = "Can't import tensorflow. " \ + "Test process exit code: %s." % result.returncode + if not check_instruction_set('avx'): + # The process has probably crashed for AVX unavalability + message += " This is likely because your CPU does not " \ + "support AVX instructions, " \ + "which are required for tensorflow." 
+ + raise ImportError(message) + +def import_tf(check=True): + import sys + + not_found = object() + tf = sys.modules.get('tensorflow', not_found) + if tf is None: + import tensorflow as tf # emit default error + elif tf is not not_found: + return tf + + # Reduce output noise, https://stackoverflow.com/questions/38073432/how-to-suppress-verbose-tensorflow-logging + import os + os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' + + if check: + try: + check_import() + except Exception: + sys.modules['tensorflow'] = None # prevent further import + raise + + import tensorflow as tf + + try: + tf.get_logger().setLevel('WARNING') + except AttributeError: + pass + try: + tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.WARN) + except AttributeError: + pass + + # Enable eager execution in early versions to unlock dataset operations + eager_enabled = False + try: + tf.compat.v1.enable_eager_execution() + eager_enabled = True + except AttributeError: + pass + try: + if not eager_enabled: + tf.enable_eager_execution() + except AttributeError: + pass + + return tf diff --git a/testbed/openvinotoolkit__datumaro/datumaro/version.py b/testbed/openvinotoolkit__datumaro/datumaro/version.py new file mode 100644 index 0000000000000000000000000000000000000000..a0901263b2ade3d0af6aa27f5e514cf07fdd0fa6 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/datumaro/version.py @@ -0,0 +1 @@ +VERSION = '0.1.4' \ No newline at end of file diff --git a/testbed/openvinotoolkit__datumaro/docs/cli_design.mm b/testbed/openvinotoolkit__datumaro/docs/cli_design.mm new file mode 100644 index 0000000000000000000000000000000000000000..0ff17cb29940dfc2d204bdd22fa7df32c07e2085 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/docs/cli_design.mm @@ -0,0 +1,65 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/testbed/openvinotoolkit__datumaro/docs/design.md b/testbed/openvinotoolkit__datumaro/docs/design.md 
new file mode 100644 index 0000000000000000000000000000000000000000..528b2adf754934e13f6ed7bf1913cdb6794ae341 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/docs/design.md @@ -0,0 +1,185 @@ +# Datumaro + + + +## Table of contents + +- [Concept](#concept) +- [RC 1 vision](#rc-1-vision) + +## Concept + +Datumaro is: +- a tool to build composite datasets and iterate over them +- a tool to create and maintain datasets + - Version control of annotations and images + - Publication (with removal of sensitive information) + - Editing + - Joining and splitting + - Exporting, format changing + - Image preprocessing +- a dataset storage +- a tool to debug datasets + - A network can be used to generate + informative data subsets (e.g. with false-positives) + to be analyzed further + +### Requirements + +- User interfaces + - a library + - a console tool with visualization means +- Targets: single datasets, composite datasets, single images / videos +- Built-in support for well-known annotation formats and datasets: + CVAT, COCO, PASCAL VOC, Cityscapes, ImageNet +- Extensibility with user-provided components +- Lightweightness - it should be easy to start working with Datumaro + - Minimal dependency on environment and configuration + - It should be easier to use Datumaro than writing own code + for computation of statistics or dataset manipulations + +### Functionality and ideas + +- Blur sensitive areas on dataset images +- Dataset annotation filters, relabelling etc. 
+- Dataset augmentation
+- Calculation of statistics:
+  - Mean & std, custom stats
+- "Edit" command to modify annotations
+- Versioning (for images, annotations, subsets, sources etc., comparison)
+- Documentation generation
+- Provision of iterators for user code
+- Dataset downloading
+- Dataset generation
+- Dataset building (export in a specific format, indexation, statistics, documentation)
+- Dataset exporting to other formats
+- Dataset debugging (run inference, generate dataset slices, compute statistics)
+- "Explainable AI" - highlight network attention areas ([paper](https://arxiv.org/abs/1901.04592))
+  - Black-box approach
+    - Classification, Detection, Segmentation, Captioning
+  - White-box approach
+
+### Research topics
+
+- exploration of network prediction uncertainty (aka Bayesian approach)
+  Use case: explanation of network "quality", "stability", "certainty"
+- adversarial attacks on networks
+- dataset minification / reduction
+  Use case: removal of redundant information to reach the same network quality with lesser training time
+- dataset expansion and filtration of additions
+  Use case: add only important data
+- guidance for key frame selection for tracking ([paper](https://arxiv.org/abs/1903.11779))
+  Use case: more effective annotation, better predictions
+
+## RC 1 vision
+
+In the first version Datumaro should be a project manager for CVAT.
+It should only consume data from CVAT. The collected dataset
+can be downloaded by the user to be operated on with Datumaro CLI.
+ + +``` + User + | + v + +------------------+ + | CVAT | + +--------v---------+ +------------------+ +--------------+ + | Datumaro module | ----> | Datumaro project | <---> | Datumaro CLI | <--- User + +------------------+ +------------------+ +--------------+ +``` + + +### Interfaces + +- [x] Python API for user code + - [x] Installation as a package +- [x] A command-line tool for dataset manipulations + +### Features + +- Dataset format support (reading, writing) + - [x] Own format + - [x] CVAT + - [x] COCO + - [x] PASCAL VOC + - [x] YOLO + - [x] TF Detection API + - [ ] Cityscapes + - [ ] ImageNet + +- Dataset visualization (`show`) + - [ ] Ability to visualize a dataset + - [ ] with TensorBoard + +- Calculation of statistics for datasets + - [x] Pixel mean, std + - [x] Object counts (detection scenario) + - [x] Image-Class distribution (classification scenario) + - [x] Pixel-Class distribution (segmentation scenario) + - [ ] Image similarity clusters + - [ ] Custom statistics + +- Dataset building + - [x] Composite dataset building + - [x] Class remapping + - [x] Subset splitting + - [x] Dataset filtering (`extract`) + - [x] Dataset merging (`merge`) + - [ ] Dataset item editing (`edit`) + +- Dataset comparison (`diff`) + - [x] Annotation-annotation comparison + - [x] Annotation-inference comparison + - [x] Annotation quality estimation (for CVAT) + - Provide a simple method to check + annotation quality with a model and generate summary + +- Dataset and model debugging + - [x] Inference explanation (`explain`) + - [x] Black-box approach ([RISE paper](https://arxiv.org/abs/1806.07421)) + - [x] Ability to run a model on a dataset and read the results + +- CVAT-integration features + - [x] Task export + - [x] Datumaro project export + - [x] Dataset export + - [x] Original raw data (images, a video file) can be downloaded (exported) + together with annotations or just have links + on CVAT server (in future, support S3, etc) + - [x] Be able to use local files 
instead of remote links + - [ ] Specify cache directory + - [x] Use case "annotate for model training" + - create a task + - annotate + - export the task + - convert to a training format + - train a DL model + - [x] Use case "annotate - reannotate problematic images - merge" + - [x] Use case "annotate and estimate quality" + - create a task + - annotate + - estimate quality of annotations + +### Optional features + +- Dataset publishing + - [ ] Versioning (for annotations, subsets, sources, etc.) + - [ ] Blur sensitive areas on images + - [ ] Tracking of legal information + - [ ] Documentation generation + +- Dataset building + - [ ] Dataset minification / Extraction of the most representative subset + - Use case: generate low-precision calibration dataset + +- Dataset and model debugging + - [ ] Training visualization + - [ ] Inference explanation (`explain`) + - [ ] White-box approach + +### Properties + +- Lightweightness +- Modularity +- Extensibility diff --git a/testbed/openvinotoolkit__datumaro/docs/developer_guide.md b/testbed/openvinotoolkit__datumaro/docs/developer_guide.md new file mode 100644 index 0000000000000000000000000000000000000000..6317cee99979800b7ebbff8d919d63204c4feb59 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/docs/developer_guide.md @@ -0,0 +1,295 @@ +# Dataset Management Framework (Datumaro) API and developer manual + +## Basics + +The center part of the library is the `Dataset` class, which represents +a dataset and allows to iterate over its elements. +`DatasetItem`, an element of a dataset, represents a single +dataset entry with annotations - an image, video sequence, audio track etc. +It can contain only annotated data or meta information, only annotations, or +all of this. + +Basic library usage and data flow: + +```lang-none +Extractors -> Dataset -> Converter + | + Filtration + Transformations + Statistics + Merging + Inference + Quality Checking + Comparison + ... +``` + +1. 
Data is read (or produced) by one or many `Extractor`s and merged + into a `Dataset` +1. The dataset is processed in some way +1. The dataset is saved with a `Converter` + +Datumaro has a number of dataset and annotation features: +- iteration over dataset elements +- filtering of datasets and annotations by a custom criteria +- working with subsets (e.g. `train`, `val`, `test`) +- computing of dataset statistics +- comparison and merging of datasets +- various annotation operations + +```python +from datumaro.components.project import Environment, Dataset +from datumaro.components.extractor import Bbox, Polygon, DatasetItem + +# Import and save a dataset +env = Environment() +dataset = env.make_importer('voc')('src/dir').make_dataset() +env.converters.get('coco').convert(dataset, save_dir='dst/dir') + +# Create a dataset, convert polygons to masks, save in PASCAL VOC format +dataset = Dataset.from_iterable([ + DatasetItem(id='image1', annotations=[ + Bbox(x=1, y=2, w=3, h=4, label=1), + Polygon([1, 2, 3, 2, 4, 4], label=2, attributes={'occluded': True}), + ]), +], categories=['cat', 'dog', 'person']) +dataset = dataset.transform(env.transforms.get('polygons_to_masks')) +env.converters.get('voc').convert(dataset, save_dir='dst/dir') +``` + +### The Dataset class + +The `Dataset` class from the `datumaro.components.project` module represents +a dataset, consisting of multiple `DatasetItem`s. Annotations are +represented by members of the `datumaro.components.extractor` module, +such as `Label`, `Mask` or `Polygon`. A dataset can contain items from one or +multiple subsets (e.g. `train`, `test`, `val` etc.), the list of dataset subsets +is available at `dataset.subsets`. + +Datasets typically have annotations, and these annotations can +require additional information to be interpreted correctly. For instance, it +can include class names, class hierarchy, keypoint connections, +class colors for masks, class attributes. 
+This information is stored in `dataset.categories`, which is a mapping from
+`AnnotationType` to a corresponding `...Categories` class. Each annotation type
+can have its `Categories`. Typically, there will be a `LabelCategories` object.
+Annotations and other categories address dataset labels
+by their indices in this object.
+
+The main operation for a dataset is iteration over its elements.
+An item corresponds to a single image, a video sequence, etc. There are also
+a few other operations available, such as filtration (`dataset.select`) and
+transformations (`dataset.transform`). A dataset can be created from extractors
+or other datasets with `dataset.from_extractors` and directly from items with
+`dataset.from_iterable`. A dataset is an extractor itself. If it is created from
+multiple extractors, their categories must match, and their contents will be
+merged.
+
+A dataset item is an element of a dataset. Its `id` is the name of a
+corresponding image. There can be some image `attributes`,
+an `image` and `annotations`.
+ +```python +# create a dataset from other datasets +dataset = Dataset.from_extractors(dataset1, dataset2) + +# or directly from items +dataset = Dataset.from_iterable([ + DatasetItem(id='image1', annotations=[ + Bbox(x=1, y=2, w=3, h=4, label=1), + Polygon([1, 2, 3, 2, 4, 4], label=2), + ]), +], categories=['cat', 'dog', 'person']) + +# keep only annotated images +dataset = dataset.select(lambda item: len(item.annotations) != 0) + +# change dataset labels +dataset = dataset.transform(project.env.transforms.get('remap_labels'), + {'cat': 'dog', # rename cat to dog + 'truck': 'car', # rename truck to car + 'person': '', # remove this label + }, default='delete') + +# iterate over elements +for item in dataset: + print(item.id, item.annotations) + +# iterate over subsets +for subset_name in dataset.subsets(): + subset = dataset.get_subset(subset_name) # a dataset, again + for item in subset: + print(item.id, item.annotations) +``` + +### Projects + +Projects are intended for complex use of Datumaro. They provide means of +persistence, of extending, and CLI operation for Datasets. A project can +be converted to a Dataset with `project.make_dataset`. Project datasets +can have multiple data sources, which are merged on dataset creation. They +can have a hierarchy. Project configuration is available in `project.config`. + +The `Environment` class is responsible for accessing built-in and +project-specific plugins. For a project, there is an instance of +related `Environment` in `project.env`. + +## Library contents + +### Dataset Formats + +The framework provides functions to read and write datasets in specific formats. +It is supported by `Extractor`s, `Importer`s, and `Converter`s. + +Dataset reading is supported by `Extractor`s and `Importer`s: +- An `Extractor` produces a list of `DatasetItem`s corresponding +to the dataset. 
Annotations are available in the `DatasetItem.annotations` list +- An `Importer` creates a project from a data source location + +It is possible to add custom `Extractor`s and `Importer`s. To do this, you need +to put an `Extractor` and `Importer` implementations to a plugin directory. + +Dataset writing is supported by `Converter`s. +A `Converter` produces a dataset of a specific format from dataset items. +It is possible to add custom `Converter`s. To do this, you need to put a +`Converter` implementation script to a plugin directory. + + +### Dataset Conversions ("Transforms") + +A `Transform` is a function for altering a dataset and producing a new one. +It can update dataset items, annotations, classes, and other properties. +A list of available transforms for dataset conversions can be extended by +adding a `Transform` implementation script into a plugin directory. + +### Model launchers + +A list of available launchers for model execution can be extended by +adding a `Launcher` implementation script into a plugin directory. + +## Plugins + +Datumaro comes with a number of built-in formats and other tools, +but it also can be extended by plugins. Plugins are optional components, +which dependencies are not installed by default. +In Datumaro there are several types of plugins, which include: +- `extractor` - produces dataset items from data source +- `importer` - recognizes dataset type and creates project +- `converter` - exports dataset to a specific format +- `transformation` - modifies dataset items or other properties +- `launcher` - executes models + +A plugin is a regular Python module. 
It must be present in a plugin directory: +- `/.datumaro/plugins` for project-specific plugins +- `/plugins` for global plugins + +A plugin can be used either via the `Environment` class instance, +or by regular module importing: + +```python +from datumaro.components.project import Environment, Project +from datumaro.plugins.yolo_format.converter import YoloConverter + +# Import a dataset +dataset = Environment().make_importer('voc')(src_dir).make_dataset() + +# Load an existing project, save the dataset in some project-specific format +project = Project.load('project/dir') +project.env.converters.get('custom_format').convert(dataset, save_dir=dst_dir) + +# Save the dataset in some built-in format +Environment().converters.get('yolo').convert(dataset, save_dir=dst_dir) +YoloConverter.convert(dataset, save_dir=dst_dir) +``` + +### Writing a plugin + +A plugin is a Python module with any name, which exports some symbols. +To export a symbol, inherit it from one of special classes: + +```python +from datumaro.components.extractor import Importer, SourceExtractor, Transform +from datumaro.components.launcher import Launcher +from datumaro.components.converter import Converter +``` + +The `exports` list of the module can be used to override default behaviour: +```python +class MyComponent1: ... +class MyComponent2: ... +exports = [MyComponent2] # exports only MyComponent2 +``` + +There is also an additional class to modify plugin appearance in command line: + +```python +from datumaro.components.cli_plugin import CliPlugin +``` + +#### Plugin example + + + +``` +datumaro/plugins/ +- my_plugin1/file1.py +- my_plugin1/file2.py +- my_plugin2.py +``` + + + +`my_plugin1/file2.py` contents: + +```python +from datumaro.components.extractor import Transform, CliPlugin +from .file1 import something, useful + +class MyTransform(Transform, CliPlugin): + NAME = "custom_name" # could be generated automatically + + """ + Some description. 
The text will be displayed in the command line output. + """ + + @classmethod + def build_cmdline_parser(cls, **kwargs): + parser = super().build_cmdline_parser(**kwargs) + parser.add_argument('-q', help="Very useful parameter") + return parser + + def __init__(self, extractor, q): + super().__init__(extractor) + self.q = q + + def transform_item(self, item): + return item +``` + +`my_plugin2.py` contents: + +```python +from datumaro.components.extractor import SourceExtractor + +class MyFormat: ... +class MyFormatExtractor(SourceExtractor): ... + +exports = [MyFormat] # explicit exports declaration +# MyFormatExtractor won't be exported +``` + +## Command-line + +Basically, the interface is divided on contexts and single commands. +Contexts are semantically grouped commands, related to a single topic or target. +Single commands are handy shorter alternatives for the most used commands +and also special commands, which are hard to be put into any specific context. +[Docker](https://www.docker.com/) is an example of similar approach. + +![cli-design-image](images/cli_design.png) + +- The diagram above was created with [FreeMind](http://freemind.sourceforge.net/wiki/index.php/Main_Page) + +Model-View-ViewModel (MVVM) UI pattern is used. 
+ +![mvvm-image](images/mvvm.png) diff --git a/testbed/openvinotoolkit__datumaro/docs/user_manual.md b/testbed/openvinotoolkit__datumaro/docs/user_manual.md new file mode 100644 index 0000000000000000000000000000000000000000..fc75fe783a6e10dd88ed5e1315ec05c9bf8d69a8 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/docs/user_manual.md @@ -0,0 +1,1019 @@ +# User manual + +## Contents + +- [Installation](#installation) +- [Interfaces](#interfaces) +- [Supported dataset formats and annotations](#supported-formats) +- [Command line workflow](#command-line-workflow) + - [Project structure](#project-structure) +- [Command reference](#command-reference) + - [Convert datasets](#convert-datasets) + - [Create project](#create-project) + - [Add and remove data](#add-and-remove-data) + - [Import project](#import-project) + - [Filter project](#filter-project) + - [Update project (merge)](#update-project) + - [Merge projects](#merge-projects) + - [Export project](#export-project) + - [Compare projects](#compare-projects) + - [Obtaining project info](#get-project-info) + - [Obtaining project statistics](#get-project-statistics) + - [Register model](#register-model) + - [Run inference](#run-inference) + - [Run inference explanation](#explain-inference) + - [Transform project](#transform-project) +- [Extending](#extending) +- [Links](#links) + +## Installation + +### Dependencies + +- Python (3.6+) +- Optional: OpenVINO, TensforFlow, PyTorch, MxNet, Caffe, Accuracy Checker + +### Installation steps + +Optionally, set up a virtual environment: + +``` bash +python -m pip install virtualenv +python -m virtualenv venv +. venv/bin/activate +``` + +Install: +``` bash +pip install 'git+https://github.com/openvinotoolkit/datumaro' +``` + +> You can change the installation branch with `...@` +> Also note `--force-reinstall` parameter in this case. 
+ +## Interfaces + +As a standalone tool: + +``` bash +datum --help +``` + +As a python module: +> The directory containing Datumaro should be in the `PYTHONPATH` +> environment variable or `cvat/datumaro/` should be the current directory. + +``` bash +python -m datumaro --help +python datumaro/ --help +python datum.py --help +``` + +As a python library: + +``` python +import datumaro +``` + +## Supported Formats + +List of supported formats: +- MS COCO (`image_info`, `instances`, `person_keypoints`, `captions`, `labels`*) + - [Format specification](http://cocodataset.org/#format-data) + - [Dataset example](../tests/assets/coco_dataset) + - `labels` are our extension - like `instances` with only `category_id` +- PASCAL VOC (`classification`, `detection`, `segmentation` (class, instances), `action_classification`, `person_layout`) + - [Format specification](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/htmldoc/index.html) + - [Dataset example](../tests/assets/voc_dataset) +- YOLO (`bboxes`) + - [Format specification](https://github.com/AlexeyAB/darknet#how-to-train-pascal-voc-data) + - [Dataset example](../tests/assets/yolo_dataset) +- TF Detection API (`bboxes`, `masks`) + - Format specifications: [bboxes](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/using_your_own_dataset.md), [masks](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/instance_segmentation.md) + - [Dataset example](../tests/assets/tf_detection_api_dataset) +- WIDER Face (`bboxes`) + - [Format specification](http://shuoyang1213.me/WIDERFACE/) + - [Dataset example](../tests/assets/wider_dataset) +- VGGFace2 (`landmarks`, `bboxes`) + - [Format specification](https://github.com/ox-vgg/vgg_face2) + - [Dataset example](../tests/assets/vgg_face2_dataset) +- MOT sequences + - [Format specification](https://arxiv.org/pdf/1906.04567.pdf) + - [Dataset example](../tests/assets/mot_dataset) +- MOTS (png) + - [Format 
specification](https://www.vision.rwth-aachen.de/page/mots) + - [Dataset example](../tests/assets/mots_dataset) +- ImageNet (`classification`, `detection`) + - [Dataset example](../tests/assets/imagenet_dataset) + - [Dataset example (txt for classification)](../tests/assets/imagenet_txt_dataset) + - Detection format is the same as in PASCAL VOC +- CamVid (`segmentation`) + - [Format specification](http://mi.eng.cam.ac.uk/research/projects/VideoRec/CamVid/) + - [Dataset example](../tests/assets/camvid_dataset) +- CVAT + - [Format specification](https://github.com/opencv/cvat/blob/develop/cvat/apps/documentation/xml_format.md) + - [Dataset example](../tests/assets/cvat_dataset) +- LabelMe + - [Format specification](http://labelme.csail.mit.edu/Release3.0) + - [Dataset example](../tests/assets/labelme_dataset) + +List of supported annotation types: +- Labels +- Bounding boxes +- Polygons +- Polylines +- (Segmentation) Masks +- (Key-)Points +- Captions + +## Command line workflow + +The key object is a project, so most CLI commands operate on projects. +However, there are few commands operating on datasets directly. +A project is a combination of a project's own dataset, a number of +external data sources and an environment. +An empty Project can be created by `project create` command, +an existing dataset can be imported with `project import` command. +A typical way to obtain projects is to export tasks in CVAT UI. + +If you want to interact with models, you need to add them to project first. + +### Project structure + + +``` +└── project/ + ├── .datumaro/ + | ├── config.yml + │   ├── .git/ + │   ├── models/ + │   └── plugins/ + │   ├── plugin1/ + │   | ├── file1.py + │   | └── file2.py + │   ├── plugin2.py + │   ├── custom_extractor1.py + │   └── ... + ├── dataset/ + └── sources/ + ├── source1 + └── ... 
+``` + + +## Command reference + +> **Note**: command invocation syntax is subject to change, +> **always refer to command --help output** + +Available CLI commands: +![CLI design doc](images/cli_design.png) + +### Convert datasets + +This command allows to convert a dataset from one format into another. In fact, this +command is a combination of `project import` and `project export` and just provides a simpler +way to obtain the same result when no extra options is needed. A list of supported +formats can be found in the `--help` output of this command. + +Usage: + +``` bash +datum convert --help + +datum convert \ + -i \ + -if \ + -o \ + -f \ + -- [extra parameters for output format] +``` + +Example: convert a VOC-like dataset to a COCO-like one: + +``` bash +datum convert --input-format voc --input-path \ + --output-format coco +``` + +### Import project + +This command creates a Project from an existing dataset. + +Supported formats are listed in the command help. Check [extending tips](#extending) +for information on extra format support. + +Usage: + +``` bash +datum project import --help + +datum project import \ + -i \ + -o \ + -f +``` + +Example: create a project from COCO-like dataset + +``` bash +datum project import \ + -i /home/coco_dir \ + -o /home/project_dir \ + -f coco +``` + +An _MS COCO_-like dataset should have the following directory structure: + + +``` +COCO/ +├── annotations/ +│   ├── instances_val2017.json +│   ├── instances_train2017.json +├── images/ +│   ├── val2017 +│   ├── train2017 +``` + + +Everything after the last `_` is considered a subset name in the COCO format. + +### Create project + +The command creates an empty project. Once a Project is created, there are +a few options to interact with it. 
+ +Usage: + +``` bash +datum project create --help + +datum project create \ + -o +``` + +Example: create an empty project `my_dataset` + +``` bash +datum project create -o my_dataset/ +``` + +### Add and remove data + +A Project can contain a number of external Data Sources. Each Data Source +describes a way to produce dataset items. A Project combines dataset items from +all the sources and its own dataset into one composite dataset. You can manage +project sources by commands in the `source` command line context. + +Datasets come in a wide variety of formats. Each dataset +format defines its own data structure and rules on how to +interpret the data. For example, the following data structure +is used in COCO format: + +``` +/dataset/ +- /images/.jpg +- /annotations/ +``` + + +Supported formats are listed in the command help. Check [extending tips](#extending) +for information on extra format support. + +Usage: + +``` bash +datum source add --help +datum source remove --help + +datum source add \ + path \ + -p \ + -n + +datum source remove \ + -p \ + -n +``` + +Example: create a project from a bunch of different annotations and images, +and generate TFrecord for TF Detection API for model training + +``` bash +datum project create +# 'default' is the name of the subset below +datum source add path -f coco_instances +datum source add path -f cvat +datum source add path -f voc_detection +datum source add path -f datumaro +datum source add path -f image_dir +datum project export -f tf_detection_api +``` + +### Filter project + +This command allows to create a sub-Project from a Project. The new project +includes only items satisfying some condition. [XPath](https://devhints.io/xpath) +is used as a query format. + +There are several filtering modes available (`-m/--mode` parameter). 
+Supported modes: +- `i`, `items` +- `a`, `annotations` +- `i+a`, `a+i`, `items+annotations`, `annotations+items` + +When filtering annotations, use the `items+annotations` +mode to point that annotation-less dataset items should be +removed. To select an annotation, write an XPath that +returns `annotation` elements (see examples). + +Usage: + +``` bash +datum project filter --help + +datum project filter \ + -p \ + -e '' +``` + +Example: extract a dataset with only images which `width` < `height` + +``` bash +datum project filter \ + -p test_project \ + -e '/item[image/width < image/height]' +``` + +Example: extract a dataset with only large annotations of class `cat` and any non-`persons` + +``` bash +datum project filter \ + -p test_project \ + --mode annotations -e '/item/annotation[(label="cat" and area > 99.5) or label!="person"]' +``` + +Example: extract a dataset with only occluded annotations, remove empty images + +``` bash +datum project filter \ + -p test_project \ + -m i+a -e '/item/annotation[occluded="True"]' +``` + +Item representations are available with `--dry-run` parameter: + +``` xml + + 290768 + minival2014 + + 612 + 612 + 3 + + + 80154 + bbox + 39 + 264.59 + 150.25 + 11.199999999999989 + 42.31 + 473.87199999999956 + + + 669839 + bbox + 41 + 163.58 + 191.75 + 76.98999999999998 + 73.63 + 5668.773699999998 + + ... + +``` + +### Update project + +This command updates items in a project from another one +(check [Merge Projects](#merge-projects) for complex merging). + +Usage: + +``` bash +datum project merge --help + +datum project merge \ + -p \ + -o \ + +``` + +Example: update annotations in the `first_project` with annotations +from the `second_project` and save the result as `merged_project` + +``` bash +datum project merge \ + -p first_project \ + -o merged_project \ + second_project +``` + +### Merge projects + +This command merges items from 2 or more projects and checks annotations for errors. 
+ +Spatial annotations are compared by distance and intersected, labels and attributes +are selected by voting. +Merge conflicts, missing items and annotations, other errors are saved into a `.json` file. + +Usage: + +``` bash +datum merge --help + +datum merge +``` + +Example: merge 4 (partially-)intersecting projects, +- consider voting succeeded when there are 3+ same votes +- consider shapes intersecting when IoU >= 0.6 +- check annotation groups to have `person`, `hand`, `head` and `foot` (`?` for optional) + +``` bash +datum merge project1/ project2/ project3/ project4/ \ + --quorum 3 \ + -iou 0.6 \ + --groups 'person,hand?,head,foot?' +``` + +### Export project + +This command exports a Project as a dataset in some format. + +Supported formats are listed in the command help. Check [extending tips](#extending) +for information on extra format support. + +Usage: + +``` bash +datum project export --help + +datum project export \ + -p \ + -o \ + -f \ + -- [additional format parameters] +``` + +Example: save project as VOC-like dataset, include images, convert images to `PNG` + +``` bash +datum project export \ + -p test_project \ + -o test_project-export \ + -f voc \ + -- --save-images --image-ext='.png' +``` + +### Get project info + +This command outputs project status information. 
+ +Usage: + +``` bash +datum project info --help + +datum project info \ + -p +``` + +Example: + +``` bash +datum project info -p /test_project + +Project: + name: test_project + location: /test_project +Sources: + source 'instances_minival2014': + format: coco_instances + url: /coco_like/annotations/instances_minival2014.json +Dataset: + length: 5000 + categories: label + label: + count: 80 + labels: person, bicycle, car, motorcycle (and 76 more) + subsets: minival2014 + subset 'minival2014': + length: 5000 + categories: label + label: + count: 80 + labels: person, bicycle, car, motorcycle (and 76 more) +``` + +### Get project statistics + +This command computes various project statistics, such as: +- image mean and std. dev. +- class and attribute balance +- mask pixel balance +- segment area distribution + +Usage: + +``` bash +datum project stats --help + +datum project stats \ + -p +``` + +Example: + +
+ +``` bash +datum project stats -p /test_project + +{ + "annotations": { + "labels": { + "attributes": { + "gender": { + "count": 358, + "distribution": { + "female": [ + 149, + 0.41620111731843573 + ], + "male": [ + 209, + 0.5837988826815642 + ] + }, + "values count": 2, + "values present": [ + "female", + "male" + ] + }, + "view": { + "count": 340, + "distribution": { + "__undefined__": [ + 4, + 0.011764705882352941 + ], + "front": [ + 54, + 0.1588235294117647 + ], + "left": [ + 14, + 0.041176470588235294 + ], + "rear": [ + 235, + 0.6911764705882353 + ], + "right": [ + 33, + 0.09705882352941177 + ] + }, + "values count": 5, + "values present": [ + "__undefined__", + "front", + "left", + "rear", + "right" + ] + } + }, + "count": 2038, + "distribution": { + "car": [ + 340, + 0.16683022571148184 + ], + "cyclist": [ + 194, + 0.09519136408243375 + ], + "head": [ + 354, + 0.17369970559371933 + ], + "ignore": [ + 100, + 0.04906771344455348 + ], + "left_hand": [ + 238, + 0.11678115799803729 + ], + "person": [ + 358, + 0.17566241413150147 + ], + "right_hand": [ + 77, + 0.037782139352306184 + ], + "road_arrows": [ + 326, + 0.15996074582924436 + ], + "traffic_sign": [ + 51, + 0.025024533856722278 + ] + } + }, + "segments": { + "area distribution": [ + { + "count": 1318, + "max": 11425.1, + "min": 0.0, + "percent": 0.9627465303140978 + }, + { + "count": 1, + "max": 22850.2, + "min": 11425.1, + "percent": 0.0007304601899196494 + }, + { + "count": 0, + "max": 34275.3, + "min": 22850.2, + "percent": 0.0 + }, + { + "count": 0, + "max": 45700.4, + "min": 34275.3, + "percent": 0.0 + }, + { + "count": 0, + "max": 57125.5, + "min": 45700.4, + "percent": 0.0 + }, + { + "count": 0, + "max": 68550.6, + "min": 57125.5, + "percent": 0.0 + }, + { + "count": 0, + "max": 79975.7, + "min": 68550.6, + "percent": 0.0 + }, + { + "count": 0, + "max": 91400.8, + "min": 79975.7, + "percent": 0.0 + }, + { + "count": 0, + "max": 102825.90000000001, + "min": 91400.8, + "percent": 0.0 + }, + { + 
"count": 50, + "max": 114251.0, + "min": 102825.90000000001, + "percent": 0.036523009495982466 + } + ], + "avg. area": 5411.624543462382, + "pixel distribution": { + "car": [ + 13655, + 0.0018431496518735067 + ], + "cyclist": [ + 939005, + 0.12674674030446592 + ], + "head": [ + 0, + 0.0 + ], + "ignore": [ + 5501200, + 0.7425510702956085 + ], + "left_hand": [ + 0, + 0.0 + ], + "person": [ + 954654, + 0.12885903974805205 + ], + "right_hand": [ + 0, + 0.0 + ], + "road_arrows": [ + 0, + 0.0 + ], + "traffic_sign": [ + 0, + 0.0 + ] + } + } + }, + "annotations by type": { + "bbox": { + "count": 548 + }, + "caption": { + "count": 0 + }, + "label": { + "count": 0 + }, + "mask": { + "count": 0 + }, + "points": { + "count": 669 + }, + "polygon": { + "count": 821 + }, + "polyline": { + "count": 0 + } + }, + "annotations count": 2038, + "dataset": { + "image mean": [ + 107.06903686941979, + 79.12831698580979, + 52.95829558185416 + ], + "image std": [ + 49.40237673503467, + 43.29600731496902, + 35.47373007603151 + ], + "images count": 100 + }, + "images count": 100, + "subsets": {}, + "unannotated images": [ + "img00051", + "img00052", + "img00053", + "img00054", + "img00055", + ], + "unannotated images count": 5 +} +``` + +
+ +### Register model + +Supported models: +- OpenVINO +- Custom models via custom `launchers` + +Usage: + +``` bash +datum model add --help +``` + +Example: register an OpenVINO model + +A model consists of a graph description and weights. There is also a script +used to convert model outputs to internal data structures. + +``` bash +datum project create +datum model add \ + -n openvino \ + -d -w -i +``` + +Interpretation script for an OpenVINO detection model (`convert.py`): + +``` python +from datumaro.components.extractor import * + +max_det = 10 +conf_thresh = 0.1 + +def process_outputs(inputs, outputs): + # inputs = model input, array or images, shape = (N, C, H, W) + # outputs = model output, shape = (N, 1, K, 7) + # results = conversion result, [ [ Annotation, ... ], ... ] + results = [] + for input, output in zip(inputs, outputs): + input_height, input_width = input.shape[:2] + detections = output[0] + image_results = [] + for i, det in enumerate(detections): + label = int(det[1]) + conf = det[2] + if conf <= conf_thresh: + continue + + x = max(int(det[3] * input_width), 0) + y = max(int(det[4] * input_height), 0) + w = min(int(det[5] * input_width - x), input_width) + h = min(int(det[6] * input_height - y), input_height) + image_results.append(Bbox(x, y, w, h, + label=label, attributes={'score': conf} )) + + results.append(image_results[:max_det]) + + return results + +def get_categories(): + # Optionally, provide output categories - label map etc. + # Example: + label_categories = LabelCategories() + label_categories.add('person') + label_categories.add('car') + return { AnnotationType.label: label_categories } +``` + +### Run model + +This command applies model to dataset images and produces a new project. 
+ +Usage: + +``` bash +datum model run --help + +datum model run \ + -p \ + -m \ + -o +``` + +Example: launch inference on a dataset + +``` bash +datum project import <...> +datum model add mymodel <...> +datum model run -m mymodel -o inference +``` + +### Compare projects + +The command compares two datasets and saves the results in the +specified directory. The current project is considered to be +"ground truth". + +``` bash +datum project diff --help + +datum project diff -o +``` + +Example: compare a dataset with model inference + +``` bash +datum project import <...> +datum model add mymodel <...> +datum project transform <...> -o inference +datum project diff inference -o diff +``` + +### Explain inference + +Usage: + +``` bash +datum explain --help + +datum explain \ + -m \ + -o \ + -t \ + \ + +``` + +Example: run inference explanation on a single image with visualization + +``` bash +datum project create <...> +datum model add mymodel <...> +datum explain \ + -m mymodel \ + -t 'image.png' \ + rise \ + -s 1000 --progressive +``` + +### Transform Project + +This command allows to modify images or annotations in a project all at once. 
 + +``` bash +datum project transform --help + +datum project transform \ + -p \ + -o \ + -t \ + -- [extra transform options] +``` + +Example: split a dataset randomly to `train` and `test` subsets, ratio is 2:1 + +``` bash +datum project transform -t random_split -- --subset train:.67 --subset test:.33 +``` + +Example: convert polygons to masks, masks to boxes etc.: + +``` bash +datum project transform -t boxes_to_masks +datum project transform -t masks_to_polygons +datum project transform -t polygons_to_masks +datum project transform -t shapes_to_boxes +``` + +Example: remap dataset labels, `person` to `car` and `cat` to `dog`, keep `bus`, remove others + +``` bash +datum project transform -t remap_labels -- \ + -l person:car -l bus:bus -l cat:dog \ + --default delete +``` + +Example: rename dataset items by a regular expression +- Replace `pattern` with `replacement` +- Remove `frame_` from item ids + +``` bash +datum project transform -t rename -- -e '|pattern|replacement|' +datum project transform -t rename -- -e '|frame_(\d+)|\\1|' +``` + +## Extending + +There are a few ways to extend and customize Datumaro behaviour, which is supported by plugins. +Check [our contribution guide](../CONTRIBUTING.md) for details on plugin implementation. +In general, a plugin is a Python code file. It must be put into a plugin directory: +- `/.datumaro/plugins` for project-specific plugins +- `/plugins` for global plugins + +### Dataset Formats + +Dataset reading is supported by Extractors and Importers. +An Extractor produces a list of dataset items corresponding +to the dataset. An Importer creates a project from the data source location. +It is possible to add custom Extractors and Importers. To do this, you need +to put Extractor and Importer implementation scripts into a plugin directory. + +Dataset writing is supported by Converters. +A Converter produces a dataset of a specific format from dataset items. +It is possible to add custom Converters. 
To do this, you need to put a Converter +implementation script to a plugin directory. + +### Dataset Conversions ("Transforms") + +A Transform is a function for altering a dataset and producing a new one. It can update +dataset items, annotations, classes, and other properties. +A list of available transforms for dataset conversions can be extended by adding a Transform +implementation script into a plugin directory. + +### Model launchers + +A list of available launchers for model execution can be extended by adding a Launcher +implementation script into a plugin directory. + +## Links +- [TensorFlow detection model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) +- [How to convert model to OpenVINO format](https://docs.openvinotoolkit.org/latest/_docs_MO_DG_prepare_model_convert_model_tf_specific_Convert_Object_Detection_API_Models.html) +- [Model conversion script example](https://github.com/opencv/cvat/blob/3e09503ba6c6daa6469a6c4d275a5a8b168dfa2c/components/tf_annotation/install.sh#L23) diff --git a/testbed/openvinotoolkit__datumaro/requirements.txt b/testbed/openvinotoolkit__datumaro/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..6bc3c7ee79919fe6cdf1b679240775db3a79341c --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/requirements.txt @@ -0,0 +1,12 @@ +attrs>=19.3.0 +Cython>=0.27.3 # include before pycocotools +defusedxml>=0.6.0 +GitPython>=3.0.8 +lxml>=4.4.1 +matplotlib>=3.3.1 +opencv-python-headless>=4.1.0.25 +Pillow>=6.1.0 +pycocotools>=2.0.0 +PyYAML>=5.3.1 +scikit-image>=0.15.0 +tensorboardX>=1.8 diff --git a/testbed/openvinotoolkit__datumaro/setup.py b/testbed/openvinotoolkit__datumaro/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..7e08249839e73727871e7b58695ce7125d62d01c --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/setup.py @@ -0,0 +1,86 @@ + +# Copyright (C) 2019-2020 Intel Corporation +# +# 
SPDX-License-Identifier: MIT + +from distutils.util import strtobool +import os +import os.path as osp +import re +import setuptools + + +def find_version(file_path=None): + if not file_path: + file_path = osp.join(osp.dirname(osp.abspath(__file__)), + 'datumaro', 'version.py') + + with open(file_path, 'r') as version_file: + version_text = version_file.read() + + # PEP440: + # https://www.python.org/dev/peps/pep-0440/#appendix-b-parsing-version-strings-with-regular-expressions + pep_regex = r'([1-9]\d*!)?(0|[1-9]\d*)(\.(0|[1-9]\d*))*((a|b|rc)(0|[1-9]\d*))?(\.post(0|[1-9]\d*))?(\.dev(0|[1-9]\d*))?' + version_regex = r'VERSION\s*=\s*.(' + pep_regex + ').' + match = re.match(version_regex, version_text) + if not match: + raise RuntimeError("Failed to find version string in '%s'" % file_path) + + version = version_text[match.start(1) : match.end(1)] + return version + +def get_requirements(): + requirements = [ + 'attrs>=19.3.0', + 'defusedxml', + 'GitPython', + 'lxml', + 'matplotlib', + 'numpy>=1.17.3', + 'Pillow', + 'pycocotools', + 'PyYAML', + 'scikit-image', + 'tensorboardX', + ] + if strtobool(os.getenv('DATUMARO_HEADLESS', '0').lower()): + requirements.append('opencv-python-headless') + else: + requirements.append('opencv-python') + + return requirements + +with open('README.md', 'r') as fh: + long_description = fh.read() + +setuptools.dist.Distribution().fetch_build_eggs([ + 'Cython>=0.27.3' # required for pycocotools and others, if need to compile +]) + +setuptools.setup( + name="datumaro", + version=find_version(), + author="Intel", + author_email="maxim.zhiltsov@intel.com", + description="Dataset Management Framework (Datumaro)", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/opencv/cvat/datumaro", + packages=setuptools.find_packages(exclude=['tests*']), + classifiers=[ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", + 
], + python_requires='>=3.5', + install_requires=get_requirements(), + extras_require={ + 'tf': ['tensorflow'], + 'tf-gpu': ['tensorflow-gpu'], + }, + entry_points={ + 'console_scripts': [ + 'datum=datumaro.cli.__main__:main', + ], + }, +) diff --git a/testbed/openvinotoolkit__datumaro/tests/__init__.py b/testbed/openvinotoolkit__datumaro/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/coco_dataset/annotations/instances_val.json b/testbed/openvinotoolkit__datumaro/tests/assets/coco_dataset/annotations/instances_val.json new file mode 100644 index 0000000000000000000000000000000000000000..b5d9bd8697b77fe5a84c32430416661ab9d861cd --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/coco_dataset/annotations/instances_val.json @@ -0,0 +1,59 @@ +{ + "licenses": [ + { + "name": "", + "id": 0, + "url": "" + } + ], + "info": { + "contributor": "", + "date_created": "", + "description": "", + "url": "", + "version": "", + "year": "" + }, + "categories": [ + { + "id": 1, + "name": "TEST", + "supercategory": "" + } + ], + "images": [ + { + "id": 1, + "width": 5, + "height": 10, + "file_name": "000000000001.jpg", + "license": 0, + "flickr_url": "", + "coco_url": "", + "date_captured": 0 + } + ], + "annotations": [ + { + "id": 1, + "image_id": 1, + "category_id": 1, + "segmentation": [[0, 0, 1, 0, 1, 2, 0, 2]], + "area": 2, + "bbox": [0, 0, 1, 2], + "iscrowd": 0 + }, + { + "id": 2, + "image_id": 1, + "category_id": 1, + "segmentation": { + "counts": [0, 10, 5, 5, 5, 5, 0, 10, 10, 0], + "size": [10, 5] + }, + "area": 30, + "bbox": [0, 0, 10, 4], + "iscrowd": 1 + } + ] + } diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/imagenet_txt_dataset/synsets.txt b/testbed/openvinotoolkit__datumaro/tests/assets/imagenet_txt_dataset/synsets.txt new file mode 100644 index 
0000000000000000000000000000000000000000..6d9c393d8668b15262d9085bb99bfb95766fffce --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/imagenet_txt_dataset/synsets.txt @@ -0,0 +1,10 @@ +label_0 +label_1 +label_2 +label_3 +label_4 +label_5 +label_6 +label_7 +label_8 +label_9 \ No newline at end of file diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/imagenet_txt_dataset/train.txt b/testbed/openvinotoolkit__datumaro/tests/assets/imagenet_txt_dataset/train.txt new file mode 100644 index 0000000000000000000000000000000000000000..624d1113460fde4bd71cd365ca5e36a054fbe64a --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/imagenet_txt_dataset/train.txt @@ -0,0 +1,4 @@ +1 0 +2 5 +3 3 +4 5 \ No newline at end of file diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/labelme_dataset/img1.xml b/testbed/openvinotoolkit__datumaro/tests/assets/labelme_dataset/img1.xml new file mode 100644 index 0000000000000000000000000000000000000000..ff8ae1b46e3b0de05ba9c4b66e481da89455942d --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/labelme_dataset/img1.xml @@ -0,0 +1 @@ +img1.pngexample_folderThe MIT-CSAIL database of objects and scenesLabelMe Webtoolwindow0025-May-2012 00:09:480admin433445344537433777102license plate00no27-Jul-2014 02:58:501brussell58666268img1_mask_1.png58666268img1_scribble_1.pngo100yesa13,415-Nov-2019 14:38:512anonymous3012422124261522181422122712q100nokj215-Nov-2019 14:39:003anonymous352143224028283131223225b100yeshg215-Nov-2019 14:39:094bounding_boxanonymous1319231923301330m100nod615-Nov-2019 14:39:305bounding_boxanonymous56147023img1_mask_5.png55137023img1_scribble_5.pnghg00nogfd lkj lkj hi515-Nov-2019 14:41:576anonymous642174247232623460276222 \ No newline at end of file diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/mot_dataset/gt/gt.txt b/testbed/openvinotoolkit__datumaro/tests/assets/mot_dataset/gt/gt.txt new file mode 100644 index 
0000000000000000000000000000000000000000..f4b7c0d469590fdc803460fe6f6abc9b9b125e47 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/mot_dataset/gt/gt.txt @@ -0,0 +1 @@ +1,-1,0,4,4,8,1,3,1.0 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/mot_dataset/gt/labels.txt b/testbed/openvinotoolkit__datumaro/tests/assets/mot_dataset/gt/labels.txt new file mode 100644 index 0000000000000000000000000000000000000000..6d9c393d8668b15262d9085bb99bfb95766fffce --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/mot_dataset/gt/labels.txt @@ -0,0 +1,10 @@ +label_0 +label_1 +label_2 +label_3 +label_4 +label_5 +label_6 +label_7 +label_8 +label_9 \ No newline at end of file diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/mots_dataset/train/instances/labels.txt b/testbed/openvinotoolkit__datumaro/tests/assets/mots_dataset/train/instances/labels.txt new file mode 100644 index 0000000000000000000000000000000000000000..27a7ea60561953234e709fe8f1caaf5892ce4481 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/mots_dataset/train/instances/labels.txt @@ -0,0 +1,4 @@ +a +b +c +d \ No newline at end of file diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/mots_dataset/val/instances/labels.txt b/testbed/openvinotoolkit__datumaro/tests/assets/mots_dataset/val/instances/labels.txt new file mode 100644 index 0000000000000000000000000000000000000000..27a7ea60561953234e709fe8f1caaf5892ce4481 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/mots_dataset/val/instances/labels.txt @@ -0,0 +1,4 @@ +a +b +c +d \ No newline at end of file diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/pytorch_launcher/__init__.py b/testbed/openvinotoolkit__datumaro/tests/assets/pytorch_launcher/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/pytorch_launcher/model_config.yml 
b/testbed/openvinotoolkit__datumaro/tests/assets/pytorch_launcher/model_config.yml new file mode 100644 index 0000000000000000000000000000000000000000..a3bef4fabc5384c0c3c4df25ea5bc4a5090b2c49 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/pytorch_launcher/model_config.yml @@ -0,0 +1,37 @@ +launcher: + framework: pytorch + module: samplenet.SampLeNet + python_path: '.' + checkpoint: 'samplenet.pth' + +# launcher returns raw result, so it should be converted +# to an appropriate representation with adapter +adapter: + type: classification + labels: + - label1 + - label2 + - label3 + - label4 + - label5 + - label6 + - label7 + - label8 + - label9 + - label10 + +# list of preprocessing, applied to each image during validation +# order of entries matters +preprocessing: + # resize input image to topology input size + # you may specify size to which image should be resized + # via dst_width, dst_height fields + - type: resize + size: 32 + # topology is trained on RGB images, but Datumaro reads in BGR + # so it must be converted to RGB + - type: bgr_to_rgb + # dataset mean and standard deviation + - type: normalization + mean: (125.307, 122.961, 113.8575) + std: (51.5865, 50.847, 51.255) \ No newline at end of file diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/pytorch_launcher/samplenet.py b/testbed/openvinotoolkit__datumaro/tests/assets/pytorch_launcher/samplenet.py new file mode 100644 index 0000000000000000000000000000000000000000..7282e43adfcb5160ce8bec14880edaa651625756 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/pytorch_launcher/samplenet.py @@ -0,0 +1,38 @@ +""" +Copyright (C) 2019-2020 Intel Corporation + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import torch.nn as nn +import torch.nn.functional as F + + +class SampLeNet(nn.Module): + def __init__(self): + super(SampLeNet, self).__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x): + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = x.view(-1, 16 * 5 * 5) + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + x = self.fc3(x) + return x diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/tf_detection_api_dataset/label_map.pbtxt b/testbed/openvinotoolkit__datumaro/tests/assets/tf_detection_api_dataset/label_map.pbtxt new file mode 100644 index 0000000000000000000000000000000000000000..dbf2b339b7b24b22d95dbb2169c83102faee9dad --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/tf_detection_api_dataset/label_map.pbtxt @@ -0,0 +1,50 @@ +item { + id: 1 + name: 'label_0' +} + +item { + id: 2 + name: 'label_1' +} + +item { + id: 3 + name: 'label_2' +} + +item { + id: 4 + name: 'label_3' +} + +item { + id: 5 + name: 'label_4' +} + +item { + id: 6 + name: 'label_5' +} + +item { + id: 7 + name: 'label_6' +} + +item { + id: 8 + name: 'label_7' +} + +item { + id: 9 + name: 'label_8' +} + +item { + id: 10 + name: 'label_9' +} + diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/tf_detection_api_dataset/test.tfrecord b/testbed/openvinotoolkit__datumaro/tests/assets/tf_detection_api_dataset/test.tfrecord new file mode 100644 
index 0000000000000000000000000000000000000000..81dafa705b5016e1aca25a4d245a292681d529fb Binary files /dev/null and b/testbed/openvinotoolkit__datumaro/tests/assets/tf_detection_api_dataset/test.tfrecord differ diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/tf_detection_api_dataset/train.tfrecord b/testbed/openvinotoolkit__datumaro/tests/assets/tf_detection_api_dataset/train.tfrecord new file mode 100644 index 0000000000000000000000000000000000000000..3ca383316369bea7f45a1f5d28f9e4342e82b56a Binary files /dev/null and b/testbed/openvinotoolkit__datumaro/tests/assets/tf_detection_api_dataset/train.tfrecord differ diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/tf_detection_api_dataset/val.tfrecord b/testbed/openvinotoolkit__datumaro/tests/assets/tf_detection_api_dataset/val.tfrecord new file mode 100644 index 0000000000000000000000000000000000000000..34fa9ce1cda4db79315a991aacee180e47c616fe Binary files /dev/null and b/testbed/openvinotoolkit__datumaro/tests/assets/tf_detection_api_dataset/val.tfrecord differ diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/vgg_face2_dataset/bb_landmark/loose_bb_train.csv b/testbed/openvinotoolkit__datumaro/tests/assets/vgg_face2_dataset/bb_landmark/loose_bb_train.csv new file mode 100644 index 0000000000000000000000000000000000000000..365734f28095c60513038dec5e1c9e3e73b739c7 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/vgg_face2_dataset/bb_landmark/loose_bb_train.csv @@ -0,0 +1,3 @@ +NAME_ID,X,Y,W,H +n000001/0001_01,2,2,1,2 +n000002/0002_01,1,3,1,1 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/vgg_face2_dataset/bb_landmark/loose_landmark_train.csv b/testbed/openvinotoolkit__datumaro/tests/assets/vgg_face2_dataset/bb_landmark/loose_landmark_train.csv new file mode 100644 index 0000000000000000000000000000000000000000..7ca5c1a3b0e36a64d57591c32d6798636dcfe469 --- /dev/null +++ 
b/testbed/openvinotoolkit__datumaro/tests/assets/vgg_face2_dataset/bb_landmark/loose_landmark_train.csv @@ -0,0 +1,3 @@ +NAME_ID,P1X,P1Y,P2X,P2Y,P3X,P3Y,P4X,P4Y,P5X,P5Y +n000001/0001_01,2.787,2.898,2.965,2.79,2.8,2.456,2.81,2.32,2.89,2.3 +n000002/0002_01,1.2,3.8,1.8,3.82,1.51,3.634,1.43,3.34,1.65,3.32 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/Annotations/2007_000001.xml b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/Annotations/2007_000001.xml new file mode 100644 index 0000000000000000000000000000000000000000..4f1e25a2112f3b26dd18373ac09d43a2722af0ad --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/Annotations/2007_000001.xml @@ -0,0 +1,54 @@ + + + VOC2007 + 2007_000001.jpg + + 10 + 20 + 3 + + 1 + + cat + Unspecified + 1 + 0 + + 1 + 2 + 3 + 4 + + + + person + + 4 + 5 + 6 + 7 + + + head + + 5.5 + 6 + 7.5 + 8 + + + + 1 + 0 + 1 + 0 + 1 + 0 + 1 + 0 + 1 + 0 + 1 + + + diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Action/test.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Action/test.txt new file mode 100644 index 0000000000000000000000000000000000000000..c9fdc2510e1f09596d04eb90e72468cd8bf4f9b4 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Action/test.txt @@ -0,0 +1 @@ +2007_000002 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Action/train.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Action/train.txt new file mode 100644 index 0000000000000000000000000000000000000000..640b0d53ff2f69185a3a2b1788bb4170d1763527 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Action/train.txt @@ -0,0 +1 @@ +2007_000001 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Layout/test.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Layout/test.txt new 
file mode 100644 index 0000000000000000000000000000000000000000..c9fdc2510e1f09596d04eb90e72468cd8bf4f9b4 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Layout/test.txt @@ -0,0 +1 @@ +2007_000002 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Layout/train.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Layout/train.txt new file mode 100644 index 0000000000000000000000000000000000000000..640b0d53ff2f69185a3a2b1788bb4170d1763527 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Layout/train.txt @@ -0,0 +1 @@ +2007_000001 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/aeroplane_train.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/aeroplane_train.txt new file mode 100644 index 0000000000000000000000000000000000000000..a3decd42ad8b6305460493ada0d1cb0c83db4303 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/aeroplane_train.txt @@ -0,0 +1 @@ +2007_000001 1 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/background_train.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/background_train.txt new file mode 100644 index 0000000000000000000000000000000000000000..d4385b69787868cb7adb43fac13a82e17ac9ff5b --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/background_train.txt @@ -0,0 +1 @@ +2007_000001 -1 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/bicycle_train.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/bicycle_train.txt new file mode 100644 index 0000000000000000000000000000000000000000..d4385b69787868cb7adb43fac13a82e17ac9ff5b --- /dev/null +++ 
b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/bicycle_train.txt @@ -0,0 +1 @@ +2007_000001 -1 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/bird_train.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/bird_train.txt new file mode 100644 index 0000000000000000000000000000000000000000..a3decd42ad8b6305460493ada0d1cb0c83db4303 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/bird_train.txt @@ -0,0 +1 @@ +2007_000001 1 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/boat_train.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/boat_train.txt new file mode 100644 index 0000000000000000000000000000000000000000..d4385b69787868cb7adb43fac13a82e17ac9ff5b --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/boat_train.txt @@ -0,0 +1 @@ +2007_000001 -1 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/bottle_train.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/bottle_train.txt new file mode 100644 index 0000000000000000000000000000000000000000..a3decd42ad8b6305460493ada0d1cb0c83db4303 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/bottle_train.txt @@ -0,0 +1 @@ +2007_000001 1 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/bus_train.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/bus_train.txt new file mode 100644 index 0000000000000000000000000000000000000000..d4385b69787868cb7adb43fac13a82e17ac9ff5b --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/bus_train.txt @@ -0,0 +1 @@ +2007_000001 -1 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/car_train.txt 
b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/car_train.txt new file mode 100644 index 0000000000000000000000000000000000000000..a3decd42ad8b6305460493ada0d1cb0c83db4303 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/car_train.txt @@ -0,0 +1 @@ +2007_000001 1 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/cat_train.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/cat_train.txt new file mode 100644 index 0000000000000000000000000000000000000000..d4385b69787868cb7adb43fac13a82e17ac9ff5b --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/cat_train.txt @@ -0,0 +1 @@ +2007_000001 -1 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/chair_train.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/chair_train.txt new file mode 100644 index 0000000000000000000000000000000000000000..a3decd42ad8b6305460493ada0d1cb0c83db4303 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/chair_train.txt @@ -0,0 +1 @@ +2007_000001 1 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/cow_train.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/cow_train.txt new file mode 100644 index 0000000000000000000000000000000000000000..d4385b69787868cb7adb43fac13a82e17ac9ff5b --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/cow_train.txt @@ -0,0 +1 @@ +2007_000001 -1 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/diningtable_train.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/diningtable_train.txt new file mode 100644 index 0000000000000000000000000000000000000000..a3decd42ad8b6305460493ada0d1cb0c83db4303 --- /dev/null +++ 
b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/diningtable_train.txt @@ -0,0 +1 @@ +2007_000001 1 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/dog_train.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/dog_train.txt new file mode 100644 index 0000000000000000000000000000000000000000..d4385b69787868cb7adb43fac13a82e17ac9ff5b --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/dog_train.txt @@ -0,0 +1 @@ +2007_000001 -1 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/horse_train.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/horse_train.txt new file mode 100644 index 0000000000000000000000000000000000000000..a3decd42ad8b6305460493ada0d1cb0c83db4303 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/horse_train.txt @@ -0,0 +1 @@ +2007_000001 1 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/ignored_train.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/ignored_train.txt new file mode 100644 index 0000000000000000000000000000000000000000..a3decd42ad8b6305460493ada0d1cb0c83db4303 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/ignored_train.txt @@ -0,0 +1 @@ +2007_000001 1 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/motorbike_train.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/motorbike_train.txt new file mode 100644 index 0000000000000000000000000000000000000000..d4385b69787868cb7adb43fac13a82e17ac9ff5b --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/motorbike_train.txt @@ -0,0 +1 @@ +2007_000001 -1 diff --git 
a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/person_train.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/person_train.txt new file mode 100644 index 0000000000000000000000000000000000000000..a3decd42ad8b6305460493ada0d1cb0c83db4303 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/person_train.txt @@ -0,0 +1 @@ +2007_000001 1 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/pottedplant_train.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/pottedplant_train.txt new file mode 100644 index 0000000000000000000000000000000000000000..d4385b69787868cb7adb43fac13a82e17ac9ff5b --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/pottedplant_train.txt @@ -0,0 +1 @@ +2007_000001 -1 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/sheep_train.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/sheep_train.txt new file mode 100644 index 0000000000000000000000000000000000000000..a3decd42ad8b6305460493ada0d1cb0c83db4303 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/sheep_train.txt @@ -0,0 +1 @@ +2007_000001 1 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/sofa_train.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/sofa_train.txt new file mode 100644 index 0000000000000000000000000000000000000000..d4385b69787868cb7adb43fac13a82e17ac9ff5b --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/sofa_train.txt @@ -0,0 +1 @@ +2007_000001 -1 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/test.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/test.txt new file mode 100644 index 
0000000000000000000000000000000000000000..c9fdc2510e1f09596d04eb90e72468cd8bf4f9b4 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/test.txt @@ -0,0 +1 @@ +2007_000002 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/train.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/train.txt new file mode 100644 index 0000000000000000000000000000000000000000..640b0d53ff2f69185a3a2b1788bb4170d1763527 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/train.txt @@ -0,0 +1 @@ +2007_000001 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/train_train.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/train_train.txt new file mode 100644 index 0000000000000000000000000000000000000000..a3decd42ad8b6305460493ada0d1cb0c83db4303 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/train_train.txt @@ -0,0 +1 @@ +2007_000001 1 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/tvmonitor_train.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/tvmonitor_train.txt new file mode 100644 index 0000000000000000000000000000000000000000..d4385b69787868cb7adb43fac13a82e17ac9ff5b --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Main/tvmonitor_train.txt @@ -0,0 +1 @@ +2007_000001 -1 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Segmentation/test.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Segmentation/test.txt new file mode 100644 index 0000000000000000000000000000000000000000..c9fdc2510e1f09596d04eb90e72468cd8bf4f9b4 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Segmentation/test.txt @@ -0,0 +1 @@ +2007_000002 diff --git 
a/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Segmentation/train.txt b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Segmentation/train.txt new file mode 100644 index 0000000000000000000000000000000000000000..640b0d53ff2f69185a3a2b1788bb4170d1763527 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/voc_dataset/ImageSets/Segmentation/train.txt @@ -0,0 +1 @@ +2007_000001 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/widerface_dataset/wider_face_split/wider_face_train_bbx_gt.txt b/testbed/openvinotoolkit__datumaro/tests/assets/widerface_dataset/wider_face_split/wider_face_train_bbx_gt.txt new file mode 100644 index 0000000000000000000000000000000000000000..09109f70101c2b15b69396202d1cdb57884942ef --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/widerface_dataset/wider_face_split/wider_face_train_bbx_gt.txt @@ -0,0 +1,7 @@ +0--Parade/0_Parade_image_01.jpg +1 +1 2 2 2 0 0 0 0 0 0 +1--Handshaking/1_Handshaking_image_02.jpg +2 +1 1 2 2 0 0 1 0 0 0 +5 1 2 2 0 0 1 0 0 0 \ No newline at end of file diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/widerface_dataset/wider_face_split/wider_face_val_bbx_gt.txt b/testbed/openvinotoolkit__datumaro/tests/assets/widerface_dataset/wider_face_split/wider_face_val_bbx_gt.txt new file mode 100644 index 0000000000000000000000000000000000000000..04573e82685b9f6cd42fcdd4a1b3811d6ae664b0 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/widerface_dataset/wider_face_split/wider_face_val_bbx_gt.txt @@ -0,0 +1,5 @@ +0--Parade/0_Parade_image_03.jpg +3 +0 0 1 1 2 0 0 0 2 0 +3 2 1 2 0 0 0 1 0 0 +5 6 1 1 2 0 0 0 2 0 \ No newline at end of file diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/yolo_dataset/obj.data b/testbed/openvinotoolkit__datumaro/tests/assets/yolo_dataset/obj.data new file mode 100644 index 0000000000000000000000000000000000000000..16ca4090f427906bcb60cf86c584e3b6a3edc9e6 --- /dev/null +++ 
b/testbed/openvinotoolkit__datumaro/tests/assets/yolo_dataset/obj.data @@ -0,0 +1,4 @@ +classes = 10 +train = data/train.txt +names = data/obj.names +backup = backup/ diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/yolo_dataset/obj.names b/testbed/openvinotoolkit__datumaro/tests/assets/yolo_dataset/obj.names new file mode 100644 index 0000000000000000000000000000000000000000..b24c644df629b792558021687e13c231bead2d28 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/yolo_dataset/obj.names @@ -0,0 +1,10 @@ +label_0 +label_1 +label_2 +label_3 +label_4 +label_5 +label_6 +label_7 +label_8 +label_9 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/yolo_dataset/obj_train_data/1.txt b/testbed/openvinotoolkit__datumaro/tests/assets/yolo_dataset/obj_train_data/1.txt new file mode 100644 index 0000000000000000000000000000000000000000..1f507909e2849e9d68459f2fcb22931dbee635e7 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/yolo_dataset/obj_train_data/1.txt @@ -0,0 +1,2 @@ +2 0.133333 0.300000 0.266667 0.200000 +4 0.266667 0.450000 0.133333 0.300000 diff --git a/testbed/openvinotoolkit__datumaro/tests/assets/yolo_dataset/train.txt b/testbed/openvinotoolkit__datumaro/tests/assets/yolo_dataset/train.txt new file mode 100644 index 0000000000000000000000000000000000000000..f55beb7362590470bbde800ec9d8baf71be0ea3c --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/assets/yolo_dataset/train.txt @@ -0,0 +1 @@ +data/obj_train_data/1.jpg diff --git a/testbed/openvinotoolkit__datumaro/tests/test_RISE.py b/testbed/openvinotoolkit__datumaro/tests/test_RISE.py new file mode 100644 index 0000000000000000000000000000000000000000..04772287f43a21a4e69da47db18178d4944fb412 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/test_RISE.py @@ -0,0 +1,231 @@ +from collections import namedtuple +import numpy as np + +from unittest import TestCase + +from datumaro.components.extractor import Label, Bbox +from 
datumaro.components.launcher import Launcher +from datumaro.components.algorithms.rise import RISE + + +class RiseTest(TestCase): + def test_rise_can_be_applied_to_classification_model(self): + class TestLauncher(Launcher): + def __init__(self, class_count, roi, **kwargs): + self.class_count = class_count + self.roi = roi + + def launch(self, inputs): + for inp in inputs: + yield self._process(inp) + + def _process(self, image): + roi = self.roi + roi_area = (roi[1] - roi[0]) * (roi[3] - roi[2]) + if 0.5 * roi_area < np.sum(image[roi[0]:roi[1], roi[2]:roi[3], 0]): + cls = 0 + else: + cls = 1 + + cls_conf = 0.5 + other_conf = (1.0 - cls_conf) / (self.class_count - 1) + + return [ + Label(i, attributes={ + 'score': cls_conf if cls == i else other_conf }) \ + for i in range(self.class_count) + ] + + roi = [70, 90, 7, 90] + model = TestLauncher(class_count=3, roi=roi) + + rise = RISE(model, max_samples=(7 * 7) ** 2, mask_width=7, mask_height=7) + + image = np.ones((100, 100, 3)) + heatmaps = next(rise.apply(image)) + + self.assertEqual(1, len(heatmaps)) + + heatmap = heatmaps[0] + self.assertEqual(image.shape[:2], heatmap.shape) + + h_sum = np.sum(heatmap) + h_area = np.prod(heatmap.shape) + roi_sum = np.sum(heatmap[roi[0]:roi[1], roi[2]:roi[3]]) + roi_area = (roi[1] - roi[0]) * (roi[3] - roi[2]) + roi_den = roi_sum / roi_area + hrest_den = (h_sum - roi_sum) / (h_area - roi_area) + self.assertLess(hrest_den, roi_den) + + def test_rise_can_be_applied_to_detection_model(self): + ROI = namedtuple('ROI', + ['threshold', 'x', 'y', 'w', 'h', 'label']) + + class TestLauncher(Launcher): + def __init__(self, rois, class_count, fp_count=4, pixel_jitter=20, **kwargs): + self.rois = rois + self.roi_base_sums = [None, ] * len(rois) + self.class_count = class_count + self.fp_count = fp_count + self.pixel_jitter = pixel_jitter + + @staticmethod + def roi_value(roi, image): + return np.sum( + image[roi.y:roi.y + roi.h, roi.x:roi.x + roi.w, :]) + + def launch(self, inputs): + for inp 
in inputs: + yield self._process(inp) + + def _process(self, image): + detections = [] + for i, roi in enumerate(self.rois): + roi_sum = self.roi_value(roi, image) + roi_base_sum = self.roi_base_sums[i] + first_run = roi_base_sum is None + if first_run: + roi_base_sum = roi_sum + self.roi_base_sums[i] = roi_base_sum + + cls_conf = roi_sum / roi_base_sum + + if roi.threshold < roi_sum / roi_base_sum: + cls = roi.label + detections.append( + Bbox(roi.x, roi.y, roi.w, roi.h, + label=cls, attributes={'score': cls_conf}) + ) + + if first_run: + continue + for j in range(self.fp_count): + if roi.threshold < cls_conf: + cls = roi.label + else: + cls = (i + j) % self.class_count + box = [roi.x, roi.y, roi.w, roi.h] + offset = (np.random.rand(4) - 0.5) * self.pixel_jitter + detections.append( + Bbox(*(box + offset), + label=cls, attributes={'score': cls_conf}) + ) + + return detections + + rois = [ + ROI(0.3, 10, 40, 30, 10, 0), + ROI(0.5, 70, 90, 7, 10, 0), + ROI(0.7, 5, 20, 40, 60, 2), + ROI(0.9, 30, 20, 10, 40, 1), + ] + model = model = TestLauncher(class_count=3, rois=rois) + + rise = RISE(model, max_samples=(7 * 7) ** 2, mask_width=7, mask_height=7) + + image = np.ones((100, 100, 3)) + heatmaps = next(rise.apply(image)) + heatmaps_class_count = len(set([roi.label for roi in rois])) + self.assertEqual(heatmaps_class_count + len(rois), len(heatmaps)) + + # import cv2 + # roi_image = image.copy() + # for i, roi in enumerate(rois): + # cv2.rectangle(roi_image, (roi.x, roi.y), (roi.x + roi.w, roi.y + roi.h), (32 * i) * 3) + # cv2.imshow('img', roi_image) + + for c in range(heatmaps_class_count): + class_roi = np.zeros(image.shape[:2]) + for i, roi in enumerate(rois): + if roi.label != c: + continue + class_roi[roi.y:roi.y + roi.h, roi.x:roi.x + roi.w] \ + += roi.threshold + + heatmap = heatmaps[c] + + roi_pixels = heatmap[class_roi != 0] + h_sum = np.sum(roi_pixels) + h_area = np.sum(roi_pixels != 0) + h_den = h_sum / h_area + + rest_pixels = heatmap[class_roi == 0] + r_sum 
= np.sum(rest_pixels) + r_area = np.sum(rest_pixels != 0) + r_den = r_sum / r_area + + # print(r_den, h_den) + # cv2.imshow('class %s' % c, heatmap) + self.assertLess(r_den, h_den) + + for i, roi in enumerate(rois): + heatmap = heatmaps[heatmaps_class_count + i] + h_sum = np.sum(heatmap) + h_area = np.prod(heatmap.shape) + roi_sum = np.sum(heatmap[roi.y:roi.y + roi.h, roi.x:roi.x + roi.w]) + roi_area = roi.h * roi.w + roi_den = roi_sum / roi_area + hrest_den = (h_sum - roi_sum) / (h_area - roi_area) + # print(hrest_den, h_den) + # cv2.imshow('roi %s' % i, heatmap) + self.assertLess(hrest_den, roi_den) + # cv2.waitKey(0) + + @staticmethod + def DISABLED_test_roi_nms(): + ROI = namedtuple('ROI', + ['conf', 'x', 'y', 'w', 'h', 'label']) + + class_count = 3 + noisy_count = 3 + rois = [ + ROI(0.3, 10, 40, 30, 10, 0), + ROI(0.5, 70, 90, 7, 10, 0), + ROI(0.7, 5, 20, 40, 60, 2), + ROI(0.9, 30, 20, 10, 40, 1), + ] + pixel_jitter = 10 + + detections = [] + for i, roi in enumerate(rois): + detections.append( + Bbox(roi.x, roi.y, roi.w, roi.h, + label=roi.label, attributes={'score': roi.conf}) + ) + + for j in range(noisy_count): + cls_conf = roi.conf * j / noisy_count + cls = (i + j) % class_count + box = [roi.x, roi.y, roi.w, roi.h] + offset = (np.random.rand(4) - 0.5) * pixel_jitter + detections.append( + Bbox(*(box + offset), + label=cls, attributes={'score': cls_conf}) + ) + + import cv2 + image = np.zeros((100, 100, 3)) + for i, det in enumerate(detections): + roi = ROI(det.attributes['score'], *det.get_bbox(), det.label) + p1 = (int(roi.x), int(roi.y)) + p2 = (int(roi.x + roi.w), int(roi.y + roi.h)) + c = (0, 1 * (i % (1 + noisy_count) == 0), 1) + cv2.rectangle(image, p1, p2, c) + cv2.putText(image, 'd%s-%s-%.2f' % (i, roi.label, roi.conf), + p1, cv2.FONT_HERSHEY_SIMPLEX, 0.25, c) + cv2.imshow('nms_image', image) + cv2.waitKey(0) + + nms_boxes = RISE.nms(detections, iou_thresh=0.25) + print(len(detections), len(nms_boxes)) + + for i, det in enumerate(nms_boxes): + roi = 
ROI(det.attributes['score'], *det.get_bbox(), det.label) + p1 = (int(roi.x), int(roi.y)) + p2 = (int(roi.x + roi.w), int(roi.y + roi.h)) + c = (0, 1, 0) + cv2.rectangle(image, p1, p2, c) + cv2.putText(image, 'p%s-%s-%.2f' % (i, roi.label, roi.conf), + p1, cv2.FONT_HERSHEY_SIMPLEX, 0.25, c) + cv2.imshow('nms_image', image) + cv2.waitKey(0) \ No newline at end of file diff --git a/testbed/openvinotoolkit__datumaro/tests/test_camvid_format.py b/testbed/openvinotoolkit__datumaro/tests/test_camvid_format.py new file mode 100644 index 0000000000000000000000000000000000000000..12192279b915c7dbcb8ec1fa1984bddf8c53406b --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/test_camvid_format.py @@ -0,0 +1,229 @@ +import os.path as osp +from collections import OrderedDict +from functools import partial +from unittest import TestCase + +import datumaro.plugins.camvid_format as Camvid +import numpy as np +from datumaro.components.extractor import (AnnotationType, DatasetItem, + Extractor, LabelCategories, Mask) +from datumaro.components.project import Dataset, Project +from datumaro.plugins.camvid_format import CamvidConverter, CamvidImporter +from datumaro.util.test_utils import (TestDir, compare_datasets, + test_save_and_load) + + +class CamvidFormatTest(TestCase): + def test_can_write_and_parse_labelmap(self): + src_label_map = Camvid.CamvidLabelMap + + with TestDir() as test_dir: + file_path = osp.join(test_dir, 'label_colors.txt') + Camvid.write_label_map(file_path, src_label_map) + dst_label_map = Camvid.parse_label_map(file_path) + + self.assertEqual(src_label_map, dst_label_map) + +DUMMY_DATASET_DIR = osp.join(osp.dirname(__file__), 'assets', 'camvid_dataset') + +class TestExtractorBase(Extractor): + def _label(self, camvid_label): + return self.categories()[AnnotationType.label].find(camvid_label)[0] + + def categories(self): + return Camvid.make_camvid_categories() + +class CamvidImportTest(TestCase): + def test_can_import(self): + source_dataset = 
Dataset.from_iterable([ + DatasetItem(id='0001TP_008550', subset='test', + image=np.ones((1, 5, 3)), + annotations=[ + Mask(image=np.array([[1, 1, 0, 0, 0]]), label=1), + Mask(image=np.array([[0, 0, 1, 0, 0]]), label=18), + Mask(image=np.array([[0, 0, 0, 1, 1]]), label=22), + ] + ), + DatasetItem(id='0001TP_008580', subset='test', + image=np.ones((1, 5, 3)), + annotations=[ + Mask(image=np.array([[1, 1, 0, 0, 0]]), label=2), + Mask(image=np.array([[0, 0, 1, 0, 0]]), label=4), + Mask(image=np.array([[0, 0, 0, 1, 1]]), label=27), + ] + ), + DatasetItem(id='0001TP_006690', subset='train', + image=np.ones((1, 5, 3)), + annotations=[ + Mask(image=np.array([[1, 1, 0, 1, 1]]), label=3), + Mask(image=np.array([[0, 0, 1, 0, 0]]), label=18), + ] + ), + DatasetItem(id='0016E5_07959', subset = 'val', + image=np.ones((1, 5, 3)), + annotations=[ + Mask(image=np.array([[1, 1, 1, 0, 0]]), label=1), + Mask(image=np.array([[0, 0, 0, 1, 1]]), label=8), + ] + ), + ], categories=Camvid.make_camvid_categories()) + + parsed_dataset = Project.import_from(DUMMY_DATASET_DIR, 'camvid').make_dataset() + + compare_datasets(self, source_dataset, parsed_dataset) + + def test_can_detect_camvid(self): + self.assertTrue(CamvidImporter.detect(DUMMY_DATASET_DIR)) + +class CamvidConverterTest(TestCase): + def _test_save_and_load(self, source_dataset, converter, test_dir, + target_dataset=None, importer_args=None): + return test_save_and_load(self, source_dataset, converter, test_dir, + importer='camvid', + target_dataset=target_dataset, importer_args=importer_args) + + def test_can_save_camvid_segm(self): + class TestExtractor(TestExtractorBase): + def __iter__(self): + return iter([ + DatasetItem(id='a/b/1', subset='test', + image=np.ones((1, 5, 3)), annotations=[ + Mask(image=np.array([[0, 0, 0, 1, 0]]), label=0), + Mask(image=np.array([[0, 1, 1, 0, 0]]), label=3), + Mask(image=np.array([[1, 0, 0, 0, 1]]), label=4), + ]), + ]) + + with TestDir() as test_dir: + 
self._test_save_and_load(TestExtractor(), + partial(CamvidConverter.convert, label_map='camvid'), + test_dir) + + def test_can_save_camvid_segm_unpainted(self): + class TestExtractor(TestExtractorBase): + def __iter__(self): + return iter([ + DatasetItem(id=1, subset='a', image=np.ones((1, 5, 3)), annotations=[ + Mask(image=np.array([[0, 0, 0, 1, 0]]), label=0), + Mask(image=np.array([[0, 1, 1, 0, 0]]), label=3), + Mask(image=np.array([[1, 0, 0, 0, 1]]), label=4), + ]), + ]) + + class DstExtractor(TestExtractorBase): + def __iter__(self): + return iter([ + DatasetItem(id=1, subset='a', image=np.ones((1, 5, 3)), annotations=[ + Mask(image=np.array([[0, 0, 0, 1, 0]]), label=0), + Mask(image=np.array([[0, 1, 1, 0, 0]]), label=3), + Mask(image=np.array([[1, 0, 0, 0, 1]]), label=4), + ]), + ]) + + with TestDir() as test_dir: + self._test_save_and_load(TestExtractor(), + partial(CamvidConverter.convert, + label_map='camvid', apply_colormap=False), + test_dir, target_dataset=DstExtractor()) + + def test_can_save_dataset_with_no_subsets(self): + class TestExtractor(TestExtractorBase): + def __iter__(self): + return iter([ + DatasetItem(id=1, image=np.ones((1, 5, 3)), annotations=[ + Mask(image=np.array([[1, 0, 0, 1, 0]]), label=0), + Mask(image=np.array([[0, 1, 1, 0, 1]]), label=3), + ]), + + DatasetItem(id=2, image=np.ones((1, 5, 3)), annotations=[ + Mask(image=np.array([[1, 1, 0, 1, 0]]), label=1), + Mask(image=np.array([[0, 0, 1, 0, 1]]), label=2), + ]), + ]) + + with TestDir() as test_dir: + self._test_save_and_load(TestExtractor(), + partial(CamvidConverter.convert, label_map='camvid'), test_dir) + + def test_can_save_with_no_masks(self): + class TestExtractor(TestExtractorBase): + def __iter__(self): + return iter([ + DatasetItem(id='a/b/1', subset='test', + image=np.ones((2, 5, 3)), + ), + ]) + + with TestDir() as test_dir: + self._test_save_and_load(TestExtractor(), + partial(CamvidConverter.convert, label_map='camvid'), + test_dir) + + def 
test_dataset_with_source_labelmap_undefined(self): + class SrcExtractor(TestExtractorBase): + def __iter__(self): + yield DatasetItem(id=1, image=np.ones((1, 5, 3)), annotations=[ + Mask(image=np.array([[1, 1, 0, 1, 0]]), label=0), + Mask(image=np.array([[0, 0, 1, 0, 0]]), label=1), + ]) + + def categories(self): + label_cat = LabelCategories() + label_cat.add('Label_1') + label_cat.add('label_2') + return { + AnnotationType.label: label_cat, + } + + class DstExtractor(TestExtractorBase): + def __iter__(self): + yield DatasetItem(id=1, image=np.ones((1, 5, 3)), annotations=[ + Mask(image=np.array([[1, 1, 0, 1, 0]]), label=self._label('Label_1')), + Mask(image=np.array([[0, 0, 1, 0, 0]]), label=self._label('label_2')), + ]) + + def categories(self): + label_map = OrderedDict() + label_map['background'] = None + label_map['Label_1'] = None + label_map['label_2'] = None + return Camvid.make_camvid_categories(label_map) + + with TestDir() as test_dir: + self._test_save_and_load(SrcExtractor(), + partial(CamvidConverter.convert, label_map='source'), + test_dir, target_dataset=DstExtractor()) + + def test_dataset_with_source_labelmap_defined(self): + class SrcExtractor(TestExtractorBase): + def __iter__(self): + yield DatasetItem(id=1, image=np.ones((1, 5, 3)), annotations=[ + Mask(image=np.array([[1, 1, 0, 1, 0]]), label=1), + Mask(image=np.array([[0, 0, 1, 0, 1]]), label=2), + ]) + + def categories(self): + label_map = OrderedDict() + label_map['background'] = (0, 0, 0) + label_map['label_1'] = (1, 2, 3) + label_map['label_2'] = (3, 2, 1) + return Camvid.make_camvid_categories(label_map) + + class DstExtractor(TestExtractorBase): + def __iter__(self): + yield DatasetItem(id=1, image=np.ones((1, 5, 3)), annotations=[ + Mask(image=np.array([[1, 1, 0, 1, 0]]), label=self._label('label_1')), + Mask(image=np.array([[0, 0, 1, 0, 1]]), label=self._label('label_2')), + ]) + + def categories(self): + label_map = OrderedDict() + label_map['background'] = (0, 0, 0) + 
label_map['label_1'] = (1, 2, 3) + label_map['label_2'] = (3, 2, 1) + return Camvid.make_camvid_categories(label_map) + + with TestDir() as test_dir: + self._test_save_and_load(SrcExtractor(), + partial(CamvidConverter.convert, label_map='source'), + test_dir, target_dataset=DstExtractor()) diff --git a/testbed/openvinotoolkit__datumaro/tests/test_coco_format.py b/testbed/openvinotoolkit__datumaro/tests/test_coco_format.py new file mode 100644 index 0000000000000000000000000000000000000000..a25f15f56160e2125800b973c7cf580347658595 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/test_coco_format.py @@ -0,0 +1,510 @@ +from functools import partial +import numpy as np +import os.path as osp + +from unittest import TestCase + +from datumaro.components.project import Project, Dataset +from datumaro.components.extractor import (DatasetItem, + AnnotationType, Label, Mask, Points, Polygon, Bbox, Caption, + LabelCategories, PointsCategories +) +from datumaro.plugins.coco_format.converter import ( + CocoConverter, + CocoImageInfoConverter, + CocoCaptionsConverter, + CocoInstancesConverter, + CocoPersonKeypointsConverter, + CocoLabelsConverter, +) +from datumaro.plugins.coco_format.importer import CocoImporter +from datumaro.util.image import Image +from datumaro.util.test_utils import (TestDir, compare_datasets, + test_save_and_load) + + +DUMMY_DATASET_DIR = osp.join(osp.dirname(__file__), 'assets', 'coco_dataset') + +class CocoImporterTest(TestCase): + def test_can_import(self): + expected_dataset = Dataset.from_iterable([ + DatasetItem(id='000000000001', image=np.ones((10, 5, 3)), + subset='val', attributes={'id': 1}, + annotations=[ + Polygon([0, 0, 1, 0, 1, 2, 0, 2], label=0, + id=1, group=1, attributes={'is_crowd': False}), + Mask(np.array( + [[1, 0, 0, 1, 0]] * 5 + + [[1, 1, 1, 1, 0]] * 5 + ), label=0, + id=2, group=2, attributes={'is_crowd': True}), + ] + ), + ], categories=['TEST',]) + + dataset = Project.import_from(DUMMY_DATASET_DIR, 'coco') \ + 
.make_dataset() + + compare_datasets(self, expected_dataset, dataset) + + def test_can_detect(self): + self.assertTrue(CocoImporter.detect(DUMMY_DATASET_DIR)) + +class CocoConverterTest(TestCase): + def _test_save_and_load(self, source_dataset, converter, test_dir, + target_dataset=None, importer_args=None): + return test_save_and_load(self, source_dataset, converter, test_dir, + importer='coco', + target_dataset=target_dataset, importer_args=importer_args) + + def test_can_save_and_load_captions(self): + expected_dataset = Dataset.from_iterable([ + DatasetItem(id=1, subset='train', + annotations=[ + Caption('hello', id=1, group=1), + Caption('world', id=2, group=2), + ], attributes={'id': 1}), + DatasetItem(id=2, subset='train', + annotations=[ + Caption('test', id=3, group=3), + ], attributes={'id': 2}), + + DatasetItem(id=3, subset='val', + annotations=[ + Caption('word', id=1, group=1), + ], attributes={'id': 1}), + ]) + + with TestDir() as test_dir: + self._test_save_and_load(expected_dataset, + CocoCaptionsConverter.convert, test_dir) + + def test_can_save_and_load_instances(self): + source_dataset = Dataset.from_iterable([ + DatasetItem(id=1, subset='train', image=np.ones((4, 4, 3)), + annotations=[ + # Bbox + single polygon + Bbox(0, 1, 2, 2, + label=2, group=1, id=1, + attributes={ 'is_crowd': False }), + Polygon([0, 1, 2, 1, 2, 3, 0, 3], + attributes={ 'is_crowd': False }, + label=2, group=1, id=1), + ], attributes={'id': 1}), + DatasetItem(id=2, subset='train', image=np.ones((4, 4, 3)), + annotations=[ + # Mask + bbox + Mask(np.array([ + [0, 1, 0, 0], + [0, 1, 0, 0], + [0, 1, 1, 1], + [0, 0, 0, 0]], + ), + attributes={ 'is_crowd': True }, + label=4, group=3, id=3), + Bbox(1, 0, 2, 2, label=4, group=3, id=3, + attributes={ 'is_crowd': True }), + ], attributes={'id': 2}), + + DatasetItem(id=3, subset='val', image=np.ones((4, 4, 3)), + annotations=[ + # Bbox + mask + Bbox(0, 1, 2, 2, label=4, group=3, id=3, + attributes={ 'is_crowd': True }), + 
Mask(np.array([ + [0, 0, 0, 0], + [1, 1, 1, 0], + [1, 1, 0, 0], + [0, 0, 0, 0]], + ), + attributes={ 'is_crowd': True }, + label=4, group=3, id=3), + ], attributes={'id': 1}), + ], categories=[str(i) for i in range(10)]) + + target_dataset = Dataset.from_iterable([ + DatasetItem(id=1, subset='train', image=np.ones((4, 4, 3)), + annotations=[ + Polygon([0, 1, 2, 1, 2, 3, 0, 3], + attributes={ 'is_crowd': False }, + label=2, group=1, id=1), + ], attributes={'id': 1}), + DatasetItem(id=2, subset='train', image=np.ones((4, 4, 3)), + annotations=[ + Mask(np.array([ + [0, 1, 0, 0], + [0, 1, 0, 0], + [0, 1, 1, 1], + [0, 0, 0, 0]], + ), + attributes={ 'is_crowd': True }, + label=4, group=3, id=3), + ], attributes={'id': 2}), + + DatasetItem(id=3, subset='val', image=np.ones((4, 4, 3)), + annotations=[ + Mask(np.array([ + [0, 0, 0, 0], + [1, 1, 1, 0], + [1, 1, 0, 0], + [0, 0, 0, 0]], + ), + attributes={ 'is_crowd': True }, + label=4, group=3, id=3), + ], attributes={'id': 1}) + ], categories=[str(i) for i in range(10)]) + + with TestDir() as test_dir: + self._test_save_and_load(source_dataset, + CocoInstancesConverter.convert, test_dir, + target_dataset=target_dataset) + + def test_can_merge_polygons_on_loading(self): + source_dataset = Dataset.from_iterable([ + DatasetItem(id=1, image=np.zeros((6, 10, 3)), + annotations=[ + Polygon([0, 0, 4, 0, 4, 4], + label=3, id=4, group=4), + Polygon([5, 0, 9, 0, 5, 5], + label=3, id=4, group=4), + ] + ), + ], categories=[str(i) for i in range(10)]) + + target_dataset = Dataset.from_iterable([ + DatasetItem(id=1, image=np.zeros((6, 10, 3)), + annotations=[ + Mask(np.array([ + [0, 1, 1, 1, 0, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], + # only internal fragment (without the border), + # but not everywhere... 
+ ), + label=3, id=4, group=4, + attributes={ 'is_crowd': False }), + ], attributes={'id': 1} + ), + ], categories=[str(i) for i in range(10)]) + + with TestDir() as test_dir: + self._test_save_and_load(source_dataset, + CocoInstancesConverter.convert, test_dir, + importer_args={'merge_instance_polygons': True}, + target_dataset=target_dataset) + + def test_can_crop_covered_segments(self): + source_dataset = Dataset.from_iterable([ + DatasetItem(id=1, image=np.zeros((5, 5, 3)), + annotations=[ + Mask(np.array([ + [0, 0, 1, 1, 1], + [0, 0, 1, 1, 1], + [1, 1, 0, 1, 1], + [1, 1, 1, 0, 0], + [1, 1, 1, 0, 0]], + ), + label=2, id=1, z_order=0), + Polygon([1, 1, 4, 1, 4, 4, 1, 4], + label=1, id=2, z_order=1), + ] + ), + ], categories=[str(i) for i in range(10)]) + + target_dataset = Dataset.from_iterable([ + DatasetItem(id=1, image=np.zeros((5, 5, 3)), + annotations=[ + Mask(np.array([ + [0, 0, 1, 1, 1], + [0, 0, 0, 0, 1], + [1, 0, 0, 0, 1], + [1, 0, 0, 0, 0], + [1, 1, 1, 0, 0]], + ), + attributes={ 'is_crowd': True }, + label=2, id=1, group=1), + + Polygon([1, 1, 4, 1, 4, 4, 1, 4], + label=1, id=2, group=2, + attributes={ 'is_crowd': False }), + ], attributes={'id': 1} + ), + ], categories=[str(i) for i in range(10)]) + + with TestDir() as test_dir: + self._test_save_and_load(source_dataset, + partial(CocoInstancesConverter.convert, crop_covered=True), + test_dir, target_dataset=target_dataset) + + def test_can_convert_polygons_to_mask(self): + source_dataset = Dataset.from_iterable([ + DatasetItem(id=1, image=np.zeros((6, 10, 3)), + annotations=[ + Polygon([0, 0, 4, 0, 4, 4], + label=3, id=4, group=4), + Polygon([5, 0, 9, 0, 5, 5], + label=3, id=4, group=4), + ] + ), + ], categories=[str(i) for i in range(10)]) + + target_dataset = Dataset.from_iterable([ + DatasetItem(id=1, image=np.zeros((6, 10, 3)), + annotations=[ + Mask(np.array([ + [0, 1, 1, 1, 0, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], 
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], + # only internal fragment (without the border), + # but not everywhere... + ), + attributes={ 'is_crowd': True }, + label=3, id=4, group=4), + ], attributes={'id': 1} + ), + ], categories=[str(i) for i in range(10)]) + + with TestDir() as test_dir: + self._test_save_and_load(source_dataset, + partial(CocoInstancesConverter.convert, segmentation_mode='mask'), + test_dir, target_dataset=target_dataset) + + def test_can_convert_masks_to_polygons(self): + source_dataset = Dataset.from_iterable([ + DatasetItem(id=1, image=np.zeros((5, 10, 3)), + annotations=[ + Mask(np.array([ + [0, 1, 1, 1, 0, 1, 1, 1, 1, 0], + [0, 0, 1, 1, 0, 1, 1, 1, 0, 0], + [0, 0, 0, 1, 0, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + ]), + label=3, id=4, group=4), + ] + ), + ], categories=[str(i) for i in range(10)]) + + target_dataset = Dataset.from_iterable([ + DatasetItem(id=1, image=np.zeros((5, 10, 3)), + annotations=[ + Polygon( + [3.0, 2.5, 1.0, 0.0, 3.5, 0.0, 3.0, 2.5], + label=3, id=4, group=4, + attributes={ 'is_crowd': False }), + Polygon( + [5.0, 3.5, 4.5, 0.0, 8.0, 0.0, 5.0, 3.5], + label=3, id=4, group=4, + attributes={ 'is_crowd': False }), + ], attributes={'id': 1} + ), + ], categories=[str(i) for i in range(10)]) + + with TestDir() as test_dir: + self._test_save_and_load(source_dataset, + partial(CocoInstancesConverter.convert, segmentation_mode='polygons'), + test_dir, + target_dataset=target_dataset) + + def test_can_save_and_load_images(self): + expected_dataset = Dataset.from_iterable([ + DatasetItem(id=1, subset='train', attributes={'id': 1}), + DatasetItem(id=2, subset='train', attributes={'id': 2}), + + DatasetItem(id=2, subset='val', attributes={'id': 2}), + DatasetItem(id=3, subset='val', attributes={'id': 3}), + DatasetItem(id=4, subset='val', attributes={'id': 4}), + + DatasetItem(id=5, subset='test', attributes={'id': 1}), + ]) + + with TestDir() as test_dir: + 
self._test_save_and_load(expected_dataset, + CocoImageInfoConverter.convert, test_dir) + + def test_can_save_and_load_labels(self): + expected_dataset = Dataset.from_iterable([ + DatasetItem(id=1, subset='train', + annotations=[ + Label(4, id=1, group=1), + Label(9, id=2, group=2), + ], attributes={'id': 1}), + ], categories=[str(i) for i in range(10)]) + + with TestDir() as test_dir: + self._test_save_and_load(expected_dataset, + CocoLabelsConverter.convert, test_dir) + + def test_can_save_and_load_keypoints(self): + source_dataset = Dataset.from_iterable([ + DatasetItem(id=1, subset='train', image=np.zeros((5, 5, 3)), + annotations=[ + # Full instance annotations: polygon + keypoints + Points([0, 0, 0, 2, 4, 1], [0, 1, 2], + label=3, group=1, id=1), + Polygon([0, 0, 4, 0, 4, 4], + label=3, group=1, id=1), + + # Full instance annotations: bbox + keypoints + Points([1, 2, 3, 4, 2, 3], group=2, id=2), + Bbox(1, 2, 2, 2, group=2, id=2), + + # Solitary keypoints + Points([1, 2, 0, 2, 4, 1], label=5, id=3), + + # Some other solitary annotations (bug #1387) + Polygon([0, 0, 4, 0, 4, 4], label=3, id=4), + + # Solitary keypoints with no label + Points([0, 0, 1, 2, 3, 4], [0, 1, 2], id=5), + ]), + ], categories={ + AnnotationType.label: LabelCategories.from_iterable( + str(i) for i in range(10)), + AnnotationType.points: PointsCategories.from_iterable( + (i, None, [[0, 1], [1, 2]]) for i in range(10) + ), + }) + + target_dataset = Dataset.from_iterable([ + DatasetItem(id=1, subset='train', image=np.zeros((5, 5, 3)), + annotations=[ + Points([0, 0, 0, 2, 4, 1], [0, 1, 2], + label=3, group=1, id=1, + attributes={'is_crowd': False}), + Polygon([0, 0, 4, 0, 4, 4], + label=3, group=1, id=1, + attributes={'is_crowd': False}), + + Points([1, 2, 3, 4, 2, 3], + group=2, id=2, + attributes={'is_crowd': False}), + Bbox(1, 2, 2, 2, + group=2, id=2, + attributes={'is_crowd': False}), + + Points([1, 2, 0, 2, 4, 1], + label=5, group=3, id=3, + attributes={'is_crowd': False}), + Bbox(0, 
1, 4, 1, + label=5, group=3, id=3, + attributes={'is_crowd': False}), + + Points([0, 0, 1, 2, 3, 4], [0, 1, 2], + group=5, id=5, + attributes={'is_crowd': False}), + Bbox(1, 2, 2, 2, + group=5, id=5, + attributes={'is_crowd': False}), + ], attributes={'id': 1}), + ], categories={ + AnnotationType.label: LabelCategories.from_iterable( + str(i) for i in range(10)), + AnnotationType.points: PointsCategories.from_iterable( + (i, None, [[0, 1], [1, 2]]) for i in range(10) + ), + }) + + with TestDir() as test_dir: + self._test_save_and_load(source_dataset, + CocoPersonKeypointsConverter.convert, test_dir, + target_dataset=target_dataset) + + def test_can_save_dataset_with_no_subsets(self): + test_dataset = Dataset.from_iterable([ + DatasetItem(id=1, attributes={'id': 1}), + DatasetItem(id=2, attributes={'id': 2}), + ]) + + with TestDir() as test_dir: + self._test_save_and_load(test_dataset, + CocoConverter.convert, test_dir) + + def test_can_save_dataset_with_image_info(self): + expected_dataset = Dataset.from_iterable([ + DatasetItem(id=1, image=Image(path='1.jpg', size=(10, 15)), + attributes={'id': 1}), + ]) + + with TestDir() as test_dir: + self._test_save_and_load(expected_dataset, + CocoImageInfoConverter.convert, test_dir) + + def test_relative_paths(self): + expected_dataset = Dataset.from_iterable([ + DatasetItem(id='1', image=np.ones((4, 2, 3)), + attributes={'id': 1}), + DatasetItem(id='subdir1/1', image=np.ones((2, 6, 3)), + attributes={'id': 2}), + DatasetItem(id='subdir2/1', image=np.ones((5, 4, 3)), + attributes={'id': 3}), + ]) + + with TestDir() as test_dir: + self._test_save_and_load(expected_dataset, + partial(CocoImageInfoConverter.convert, save_images=True), test_dir) + + def test_preserve_coco_ids(self): + expected_dataset = Dataset.from_iterable([ + DatasetItem(id='some/name1', image=np.ones((4, 2, 3)), + attributes={'id': 40}), + ]) + + with TestDir() as test_dir: + self._test_save_and_load(expected_dataset, + 
partial(CocoImageInfoConverter.convert, save_images=True), test_dir) + + def test_annotation_attributes(self): + expected_dataset = Dataset.from_iterable([ + DatasetItem(id=1, image=np.ones((4, 2, 3)), annotations=[ + Polygon([0, 0, 4, 0, 4, 4], label=5, group=1, id=1, + attributes={'is_crowd': False, 'x': 5, 'y': 'abc'}), + ], attributes={'id': 1}) + ], categories=[str(i) for i in range(10)]) + + with TestDir() as test_dir: + self._test_save_and_load(expected_dataset, + CocoConverter.convert, test_dir) + + def test_auto_annotation_ids(self): + source_dataset = Dataset.from_iterable([ + DatasetItem(id=2, image=np.ones((4, 2, 3)), annotations=[ + Polygon([0, 0, 4, 0, 4, 4], label=0), + ]) + ], categories=[str(i) for i in range(10)]) + + target_dataset = Dataset.from_iterable([ + DatasetItem(id=2, image=np.ones((4, 2, 3)), annotations=[ + Polygon([0, 0, 4, 0, 4, 4], label=0, id=1, group=1, + attributes={'is_crowd': False}), + ], attributes={'id': 1}) + ], categories=[str(i) for i in range(10)]) + + with TestDir() as test_dir: + self._test_save_and_load(source_dataset, + CocoConverter.convert, test_dir, target_dataset=target_dataset) + + def test_reindex(self): + source_dataset = Dataset.from_iterable([ + DatasetItem(id=2, image=np.ones((4, 2, 3)), annotations=[ + Polygon([0, 0, 4, 0, 4, 4], label=0, id=5), + ], attributes={'id': 22}) + ], categories=[str(i) for i in range(10)]) + + target_dataset = Dataset.from_iterable([ + DatasetItem(id=2, image=np.ones((4, 2, 3)), annotations=[ + Polygon([0, 0, 4, 0, 4, 4], label=0, id=1, group=1, + attributes={'is_crowd': False}), + ], attributes={'id': 1}) + ], categories=[str(i) for i in range(10)]) + + with TestDir() as test_dir: + self._test_save_and_load(source_dataset, + partial(CocoConverter.convert, reindex=True), + test_dir, target_dataset=target_dataset) diff --git a/testbed/openvinotoolkit__datumaro/tests/test_command_targets.py b/testbed/openvinotoolkit__datumaro/tests/test_command_targets.py new file mode 100644 
index 0000000000000000000000000000000000000000..5b8a69f31829f713faee929f5c63004412470f2d --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/test_command_targets.py @@ -0,0 +1,128 @@ +import numpy as np +import os.path as osp + +from unittest import TestCase + +from datumaro.components.project import Project +from datumaro.util.command_targets import ProjectTarget, \ + ImageTarget, SourceTarget +from datumaro.util.image import save_image +from datumaro.util.test_utils import TestDir + + +class CommandTargetsTest(TestCase): + def test_image_false_when_no_file(self): + target = ImageTarget() + + status = target.test('somepath.jpg') + + self.assertFalse(status) + + def test_image_false_when_false(self): + with TestDir() as test_dir: + path = osp.join(test_dir, 'test.jpg') + with open(path, 'w+') as f: + f.write('qwerty123') + + target = ImageTarget() + + status = target.test(path) + + self.assertFalse(status) + + def test_image_true_when_true(self): + with TestDir() as test_dir: + path = osp.join(test_dir, 'test.jpg') + save_image(path, np.ones([10, 7, 3])) + + target = ImageTarget() + + status = target.test(path) + + self.assertTrue(status) + + def test_project_false_when_no_file(self): + target = ProjectTarget() + + status = target.test('somepath.jpg') + + self.assertFalse(status) + + def test_project_false_when_no_name(self): + target = ProjectTarget(project=Project()) + + status = target.test('') + + self.assertFalse(status) + + def test_project_true_when_project_file(self): + with TestDir() as test_dir: + path = osp.join(test_dir, 'test.jpg') + Project().save(path) + + target = ProjectTarget() + + status = target.test(path) + + self.assertTrue(status) + + def test_project_true_when_project_name(self): + project_name = 'qwerty' + project = Project({ + 'project_name': project_name + }) + target = ProjectTarget(project=project) + + status = target.test(project_name) + + self.assertTrue(status) + + def test_project_false_when_not_project_name(self): + 
project_name = 'qwerty' + project = Project({ + 'project_name': project_name + }) + target = ProjectTarget(project=project) + + status = target.test(project_name + '123') + + self.assertFalse(status) + + def test_project_false_when_not_project_file(self): + with TestDir() as test_dir: + path = osp.join(test_dir, 'test.jpg') + with open(path, 'w+') as f: + f.write('wqererw') + + target = ProjectTarget() + + status = target.test(path) + + self.assertFalse(status) + + def test_source_false_when_no_project(self): + target = SourceTarget() + + status = target.test('qwerty123') + + self.assertFalse(status) + + def test_source_true_when_source_exists(self): + source_name = 'qwerty' + project = Project() + project.add_source(source_name) + target = SourceTarget(project=project) + + status = target.test(source_name) + + self.assertTrue(status) + + def test_source_false_when_source_doesnt_exist(self): + source_name = 'qwerty' + project = Project() + project.add_source(source_name) + target = SourceTarget(project=project) + + status = target.test(source_name + '123') + + self.assertFalse(status) \ No newline at end of file diff --git a/testbed/openvinotoolkit__datumaro/tests/test_cvat_format.py b/testbed/openvinotoolkit__datumaro/tests/test_cvat_format.py new file mode 100644 index 0000000000000000000000000000000000000000..99dc485c4a7b28e48655da124450691e352e3be1 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/test_cvat_format.py @@ -0,0 +1,288 @@ +from functools import partial +import numpy as np +import os.path as osp + +from unittest import TestCase +from datumaro.components.project import Dataset +from datumaro.components.extractor import (DatasetItem, + AnnotationType, Points, Polygon, PolyLine, Bbox, Label, + LabelCategories, +) +from datumaro.plugins.cvat_format.extractor import CvatImporter +from datumaro.plugins.cvat_format.converter import CvatConverter +from datumaro.util.image import Image +from datumaro.util.test_utils import (TestDir, 
compare_datasets, + test_save_and_load) + + +DUMMY_IMAGE_DATASET_DIR = osp.join(osp.dirname(__file__), + 'assets', 'cvat_dataset', 'for_images') + +DUMMY_VIDEO_DATASET_DIR = osp.join(osp.dirname(__file__), + 'assets', 'cvat_dataset', 'for_video') + +class CvatImporterTest(TestCase): + def test_can_detect_image(self): + self.assertTrue(CvatImporter.detect(DUMMY_IMAGE_DATASET_DIR)) + + def test_can_detect_video(self): + self.assertTrue(CvatImporter.detect(DUMMY_VIDEO_DATASET_DIR)) + + def test_can_load_image(self): + expected_dataset = Dataset.from_iterable([ + DatasetItem(id='img0', subset='train', + image=np.ones((8, 8, 3)), + annotations=[ + Bbox(0, 2, 4, 2, label=0, z_order=1, + attributes={ + 'occluded': True, + 'a1': True, 'a2': 'v3' + }), + PolyLine([1, 2, 3, 4, 5, 6, 7, 8], + attributes={'occluded': False}), + ], attributes={'frame': 0}), + DatasetItem(id='img1', subset='train', + image=np.ones((10, 10, 3)), + annotations=[ + Polygon([1, 2, 3, 4, 6, 5], z_order=1, + attributes={'occluded': False}), + Points([1, 2, 3, 4, 5, 6], label=1, z_order=2, + attributes={'occluded': False}), + ], attributes={'frame': 1}), + ], categories={ + AnnotationType.label: LabelCategories.from_iterable([ + ['label1', '', {'a1', 'a2'}], + ['label2'], + ]) + }) + + parsed_dataset = CvatImporter()(DUMMY_IMAGE_DATASET_DIR).make_dataset() + + compare_datasets(self, expected_dataset, parsed_dataset) + + def test_can_load_video(self): + expected_dataset = Dataset.from_iterable([ + DatasetItem(id='frame_000010', subset='annotations', + image=255 * np.ones((20, 25, 3)), + annotations=[ + Bbox(3, 4, 7, 1, label=2, + id=0, + attributes={ + 'occluded': True, + 'outside': False, 'keyframe': True, + 'track_id': 0 + }), + Points([21.95, 8.00, 2.55, 15.09, 2.23, 3.16], + label=0, + id=1, + attributes={ + 'occluded': False, + 'outside': False, 'keyframe': True, + 'track_id': 1, 'hgl': 'hgkf', + }), + ], attributes={'frame': 10}), + DatasetItem(id='frame_000013', subset='annotations', + image=255 
* np.ones((20, 25, 3)), + annotations=[ + Bbox(7, 6, 7, 2, label=2, + id=0, + attributes={ + 'occluded': False, + 'outside': True, 'keyframe': True, + 'track_id': 0 + }), + Points([21.95, 8.00, 9.55, 15.09, 5.23, 1.16], + label=0, + id=1, + attributes={ + 'occluded': False, + 'outside': True, 'keyframe': True, + 'track_id': 1, 'hgl': 'jk', + }), + PolyLine([7.85, 13.88, 3.50, 6.67, 15.90, 2.00, 13.31, 7.21], + label=2, + id=2, + attributes={ + 'occluded': False, + 'outside': False, 'keyframe': True, + 'track_id': 2, + }), + ], attributes={'frame': 13}), + DatasetItem(id='frame_000016', subset='annotations', + image=Image(path='frame_0000016.png', size=(20, 25)), + annotations=[ + Bbox(8, 7, 6, 10, label=2, + id=0, + attributes={ + 'occluded': False, + 'outside': True, 'keyframe': True, + 'track_id': 0 + }), + PolyLine([7.85, 13.88, 3.50, 6.67, 15.90, 2.00, 13.31, 7.21], + label=2, + id=2, + attributes={ + 'occluded': False, + 'outside': True, 'keyframe': True, + 'track_id': 2, + }), + ], attributes={'frame': 16}), + ], categories={ + AnnotationType.label: LabelCategories.from_iterable([ + ['klhg', '', {'hgl'}], + ['z U k'], + ['II'] + ]), + }) + + parsed_dataset = CvatImporter()(DUMMY_VIDEO_DATASET_DIR).make_dataset() + + compare_datasets(self, expected_dataset, parsed_dataset) + +class CvatConverterTest(TestCase): + def _test_save_and_load(self, source_dataset, converter, test_dir, + target_dataset=None, importer_args=None): + return test_save_and_load(self, source_dataset, converter, test_dir, + importer='cvat', + target_dataset=target_dataset, importer_args=importer_args) + + def test_can_save_and_load(self): + label_categories = LabelCategories() + for i in range(10): + label_categories.add(str(i)) + label_categories.items[2].attributes.update(['a1', 'a2', 'empty']) + label_categories.attributes.update(['occluded']) + + source_dataset = Dataset.from_iterable([ + DatasetItem(id=0, subset='s1', image=np.zeros((5, 10, 3)), + annotations=[ + Polygon([0, 0, 4, 0, 4, 
4], + label=1, group=4, + attributes={ 'occluded': True}), + Points([1, 1, 3, 2, 2, 3], + label=2, + attributes={ 'a1': 'x', 'a2': 42, 'empty': '', + 'unknown': 'bar' }), + Label(1), + Label(2, attributes={ 'a1': 'y', 'a2': 44 }), + ] + ), + DatasetItem(id=1, subset='s1', + annotations=[ + PolyLine([0, 0, 4, 0, 4, 4], + label=3, id=4, group=4), + Bbox(5, 0, 1, 9, + label=3, id=4, group=4), + ] + ), + + DatasetItem(id=2, subset='s2', image=np.ones((5, 10, 3)), + annotations=[ + Polygon([0, 0, 4, 0, 4, 4], z_order=1, + label=3, group=4, + attributes={ 'occluded': False }), + PolyLine([5, 0, 9, 0, 5, 5]), # will be skipped as no label + ] + ), + + DatasetItem(id=3, subset='s3', image=Image( + path='3.jpg', size=(2, 4))), + ], categories={ + AnnotationType.label: label_categories, + }) + + target_dataset = Dataset.from_iterable([ + DatasetItem(id=0, subset='s1', image=np.zeros((5, 10, 3)), + annotations=[ + Polygon([0, 0, 4, 0, 4, 4], + label=1, group=4, + attributes={ 'occluded': True }), + Points([1, 1, 3, 2, 2, 3], + label=2, + attributes={ 'occluded': False, 'empty': '', + 'a1': 'x', 'a2': 42 }), + Label(1), + Label(2, attributes={ 'a1': 'y', 'a2': 44 }), + ], attributes={'frame': 0} + ), + DatasetItem(id=1, subset='s1', + annotations=[ + PolyLine([0, 0, 4, 0, 4, 4], + label=3, group=4, + attributes={ 'occluded': False }), + Bbox(5, 0, 1, 9, + label=3, group=4, + attributes={ 'occluded': False }), + ], attributes={'frame': 1} + ), + + DatasetItem(id=2, subset='s2', image=np.ones((5, 10, 3)), + annotations=[ + Polygon([0, 0, 4, 0, 4, 4], z_order=1, + label=3, group=4, + attributes={ 'occluded': False }), + ], attributes={'frame': 0} + ), + + DatasetItem(id=3, subset='s3', image=Image( + path='3.jpg', size=(2, 4)), + attributes={'frame': 0}), + ], categories={ + AnnotationType.label: label_categories, + }) + + with TestDir() as test_dir: + self._test_save_and_load(source_dataset, + partial(CvatConverter.convert, save_images=True), test_dir, + 
target_dataset=target_dataset) + + def test_relative_paths(self): + source_dataset = Dataset.from_iterable([ + DatasetItem(id='1', image=np.ones((4, 2, 3))), + DatasetItem(id='subdir1/1', image=np.ones((2, 6, 3))), + DatasetItem(id='subdir2/1', image=np.ones((5, 4, 3))), + ], categories={ AnnotationType.label: LabelCategories() }) + + target_dataset = Dataset.from_iterable([ + DatasetItem(id='1', image=np.ones((4, 2, 3)), + attributes={'frame': 0}), + DatasetItem(id='subdir1/1', image=np.ones((2, 6, 3)), + attributes={'frame': 1}), + DatasetItem(id='subdir2/1', image=np.ones((5, 4, 3)), + attributes={'frame': 2}), + ], categories={ + AnnotationType.label: LabelCategories() + }) + + with TestDir() as test_dir: + self._test_save_and_load(source_dataset, + partial(CvatConverter.convert, save_images=True), test_dir, + target_dataset=target_dataset) + + def test_preserve_frame_ids(self): + expected_dataset = Dataset.from_iterable([ + DatasetItem(id='some/name1', image=np.ones((4, 2, 3)), + attributes={'frame': 40}), + ], categories={ + AnnotationType.label: LabelCategories() + }) + + with TestDir() as test_dir: + self._test_save_and_load(expected_dataset, + CvatConverter.convert, test_dir) + + def test_reindex(self): + source_dataset = Dataset.from_iterable([ + DatasetItem(id='some/name1', image=np.ones((4, 2, 3)), + attributes={'frame': 40}), + ], categories={ AnnotationType.label: LabelCategories() }) + + expected_dataset = Dataset.from_iterable([ + DatasetItem(id='some/name1', image=np.ones((4, 2, 3)), + attributes={'frame': 0}), + ], categories={ AnnotationType.label: LabelCategories() }) + + with TestDir() as test_dir: + self._test_save_and_load(source_dataset, + partial(CvatConverter.convert, reindex=True), test_dir, + target_dataset=expected_dataset) diff --git a/testbed/openvinotoolkit__datumaro/tests/test_datumaro_format.py b/testbed/openvinotoolkit__datumaro/tests/test_datumaro_format.py new file mode 100644 index 
0000000000000000000000000000000000000000..cc3bb5d3ec52741bd18c8a8be9129f79c93c80e3 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/test_datumaro_format.py @@ -0,0 +1,100 @@ +from functools import partial +import numpy as np + +from unittest import TestCase +from datumaro.components.project import Dataset +from datumaro.components.extractor import (DatasetItem, + AnnotationType, Label, Mask, Points, Polygon, + PolyLine, Bbox, Caption, + LabelCategories, MaskCategories, PointsCategories +) +from datumaro.plugins.datumaro_format.extractor import DatumaroImporter +from datumaro.plugins.datumaro_format.converter import DatumaroConverter +from datumaro.util.mask_tools import generate_colormap +from datumaro.util.image import Image +from datumaro.util.test_utils import (TestDir, compare_datasets_strict, + test_save_and_load) + + +class DatumaroConverterTest(TestCase): + def _test_save_and_load(self, source_dataset, converter, test_dir, + target_dataset=None, importer_args=None): + return test_save_and_load(self, source_dataset, converter, test_dir, + importer='datumaro', + target_dataset=target_dataset, importer_args=importer_args, + compare=compare_datasets_strict) + + @property + def test_dataset(self): + label_categories = LabelCategories() + for i in range(5): + label_categories.add('cat' + str(i)) + + mask_categories = MaskCategories( + generate_colormap(len(label_categories.items))) + + points_categories = PointsCategories() + for index, _ in enumerate(label_categories.items): + points_categories.add(index, ['cat1', 'cat2'], joints=[[0, 1]]) + + return Dataset.from_iterable([ + DatasetItem(id=100, subset='train', image=np.ones((10, 6, 3)), + annotations=[ + Caption('hello', id=1), + Caption('world', id=2, group=5), + Label(2, id=3, attributes={ + 'x': 1, + 'y': '2', + }), + Bbox(1, 2, 3, 4, label=4, id=4, z_order=1, attributes={ + 'score': 1.0, + }), + Bbox(5, 6, 7, 8, id=5, group=5), + Points([1, 2, 2, 0, 1, 1], label=0, id=5, z_order=4), + 
Mask(label=3, id=5, z_order=2, image=np.ones((2, 3))), + ]), + DatasetItem(id=21, subset='train', + annotations=[ + Caption('test'), + Label(2), + Bbox(1, 2, 3, 4, label=5, id=42, group=42) + ]), + + DatasetItem(id=2, subset='val', + annotations=[ + PolyLine([1, 2, 3, 4, 5, 6, 7, 8], id=11, z_order=1), + Polygon([1, 2, 3, 4, 5, 6, 7, 8], id=12, z_order=4), + ]), + + DatasetItem(id=42, subset='test', + attributes={'a1': 5, 'a2': '42'}), + + DatasetItem(id=42), + DatasetItem(id=43, image=Image(path='1/b/c.qq', size=(2, 4))), + ], categories={ + AnnotationType.label: label_categories, + AnnotationType.mask: mask_categories, + AnnotationType.points: points_categories, + }) + + def test_can_save_and_load(self): + with TestDir() as test_dir: + self._test_save_and_load(self.test_dataset, + partial(DatumaroConverter.convert, save_images=True), test_dir) + + def test_can_detect(self): + with TestDir() as test_dir: + DatumaroConverter.convert(self.test_dataset, save_dir=test_dir) + + self.assertTrue(DatumaroImporter.detect(test_dir)) + + def test_relative_paths(self): + test_dataset = Dataset.from_iterable([ + DatasetItem(id='1', image=np.ones((4, 2, 3))), + DatasetItem(id='subdir1/1', image=np.ones((2, 6, 3))), + DatasetItem(id='subdir2/1', image=np.ones((5, 4, 3))), + ]) + + with TestDir() as test_dir: + self._test_save_and_load(test_dataset, + partial(DatumaroConverter.convert, save_images=True), test_dir) diff --git a/testbed/openvinotoolkit__datumaro/tests/test_diff.py b/testbed/openvinotoolkit__datumaro/tests/test_diff.py new file mode 100644 index 0000000000000000000000000000000000000000..bb25991ed27074f4ea0b15d9138bb71c515c6259 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/test_diff.py @@ -0,0 +1,251 @@ +import numpy as np + +from datumaro.components.extractor import (DatasetItem, Label, Bbox, + Caption, Mask, Points, DEFAULT_SUBSET_NAME) +from datumaro.components.project import Dataset +from datumaro.components.operations import DistanceComparator, 
ExactComparator + +from unittest import TestCase + + +class DistanceComparatorTest(TestCase): + def test_no_bbox_diff_with_same_item(self): + detections = 3 + anns = [ + Bbox(i * 10, 10, 10, 10, label=i) + for i in range(detections) + ] + item = DatasetItem(id=0, annotations=anns) + + iou_thresh = 0.5 + comp = DistanceComparator(iou_threshold=iou_thresh) + + result = comp.match_boxes(item, item) + + matches, mispred, a_greater, b_greater = result + self.assertEqual(0, len(mispred)) + self.assertEqual(0, len(a_greater)) + self.assertEqual(0, len(b_greater)) + self.assertEqual(len(item.annotations), len(matches)) + for a_bbox, b_bbox in matches: + self.assertLess(iou_thresh, a_bbox.iou(b_bbox)) + self.assertEqual(a_bbox.label, b_bbox.label) + + def test_can_find_bbox_with_wrong_label(self): + detections = 3 + class_count = 2 + item1 = DatasetItem(id=1, annotations=[ + Bbox(i * 10, 10, 10, 10, label=i) + for i in range(detections) + ]) + item2 = DatasetItem(id=2, annotations=[ + Bbox(i * 10, 10, 10, 10, label=(i + 1) % class_count) + for i in range(detections) + ]) + + iou_thresh = 0.5 + comp = DistanceComparator(iou_threshold=iou_thresh) + + result = comp.match_boxes(item1, item2) + + matches, mispred, a_greater, b_greater = result + self.assertEqual(len(item1.annotations), len(mispred)) + self.assertEqual(0, len(a_greater)) + self.assertEqual(0, len(b_greater)) + self.assertEqual(0, len(matches)) + for a_bbox, b_bbox in mispred: + self.assertLess(iou_thresh, a_bbox.iou(b_bbox)) + self.assertEqual((a_bbox.label + 1) % class_count, b_bbox.label) + + def test_can_find_missing_boxes(self): + detections = 3 + class_count = 2 + item1 = DatasetItem(id=1, annotations=[ + Bbox(i * 10, 10, 10, 10, label=i) + for i in range(detections) if i % 2 == 0 + ]) + item2 = DatasetItem(id=2, annotations=[ + Bbox(i * 10, 10, 10, 10, label=(i + 1) % class_count) + for i in range(detections) if i % 2 == 1 + ]) + + iou_thresh = 0.5 + comp = DistanceComparator(iou_threshold=iou_thresh) + + 
result = comp.match_boxes(item1, item2) + + matches, mispred, a_greater, b_greater = result + self.assertEqual(0, len(mispred)) + self.assertEqual(len(item1.annotations), len(a_greater)) + self.assertEqual(len(item2.annotations), len(b_greater)) + self.assertEqual(0, len(matches)) + + def test_no_label_diff_with_same_item(self): + detections = 3 + anns = [ Label(i) for i in range(detections) ] + item = DatasetItem(id=1, annotations=anns) + + result = DistanceComparator().match_labels(item, item) + + matches, a_greater, b_greater = result + self.assertEqual(0, len(a_greater)) + self.assertEqual(0, len(b_greater)) + self.assertEqual(len(item.annotations), len(matches)) + + def test_can_find_wrong_label(self): + item1 = DatasetItem(id=1, annotations=[ + Label(0), + Label(1), + Label(2), + ]) + item2 = DatasetItem(id=2, annotations=[ + Label(2), + Label(3), + Label(4), + ]) + + result = DistanceComparator().match_labels(item1, item2) + + matches, a_greater, b_greater = result + self.assertEqual(2, len(a_greater)) + self.assertEqual(2, len(b_greater)) + self.assertEqual(1, len(matches)) + + def test_can_match_points(self): + item1 = DatasetItem(id=1, annotations=[ + Points([1, 2, 2, 0, 1, 1], label=0), + + Points([3, 5, 5, 7, 5, 3], label=0), + ]) + item2 = DatasetItem(id=2, annotations=[ + Points([1.5, 2, 2, 0.5, 1, 1.5], label=0), + + Points([5, 7, 7, 7, 7, 5], label=0), + ]) + + result = DistanceComparator().match_points(item1, item2) + + matches, mismatches, a_greater, b_greater = result + self.assertEqual(1, len(a_greater)) + self.assertEqual(1, len(b_greater)) + self.assertEqual(1, len(matches)) + self.assertEqual(0, len(mismatches)) + +class ExactComparatorTest(TestCase): + def test_class_comparison(self): + a = Dataset.from_iterable([], categories=['a', 'b', 'c']) + b = Dataset.from_iterable([], categories=['b', 'c']) + + comp = ExactComparator() + _, _, _, _, errors = comp.compare_datasets(a, b) + + self.assertEqual(1, len(errors), errors) + + def 
test_item_comparison(self): + a = Dataset.from_iterable([ + DatasetItem(id=1, subset='train'), + DatasetItem(id=2, subset='test', attributes={'x': 1}), + ], categories=['a', 'b', 'c']) + + b = Dataset.from_iterable([ + DatasetItem(id=2, subset='test'), + DatasetItem(id=3), + ], categories=['a', 'b', 'c']) + + comp = ExactComparator() + _, _, a_extra_items, b_extra_items, errors = comp.compare_datasets(a, b) + + self.assertEqual({('1', 'train')}, a_extra_items) + self.assertEqual({('3', DEFAULT_SUBSET_NAME)}, b_extra_items) + self.assertEqual(1, len(errors), errors) + + def test_annotation_comparison(self): + a = Dataset.from_iterable([ + DatasetItem(id=1, annotations=[ + Caption('hello'), # unmatched + Caption('world', group=5), + Label(2, attributes={ 'x': 1, 'y': '2', }), + Bbox(1, 2, 3, 4, label=4, z_order=1, attributes={ + 'score': 1.0, + }), + Bbox(5, 6, 7, 8, group=5), + Points([1, 2, 2, 0, 1, 1], label=0, z_order=4), + Mask(label=3, z_order=2, image=np.ones((2, 3))), + ]), + ], categories=['a', 'b', 'c', 'd']) + + b = Dataset.from_iterable([ + DatasetItem(id=1, annotations=[ + Caption('world', group=5), + Label(2, attributes={ 'x': 1, 'y': '2', }), + Bbox(1, 2, 3, 4, label=4, z_order=1, attributes={ + 'score': 1.0, + }), + Bbox(5, 6, 7, 8, group=5), + Bbox(5, 6, 7, 8, group=5), # unmatched + Points([1, 2, 2, 0, 1, 1], label=0, z_order=4), + Mask(label=3, z_order=2, image=np.ones((2, 3))), + ]), + ], categories=['a', 'b', 'c', 'd']) + + comp = ExactComparator() + matched, unmatched, _, _, errors = comp.compare_datasets(a, b) + + self.assertEqual(6, len(matched), matched) + self.assertEqual(2, len(unmatched), unmatched) + self.assertEqual(0, len(errors), errors) + + def test_image_comparison(self): + a = Dataset.from_iterable([ + DatasetItem(id=11, image=np.ones((5, 4, 3)), annotations=[ + Bbox(5, 6, 7, 8), + ]), + DatasetItem(id=12, image=np.ones((5, 4, 3)), annotations=[ + Bbox(1, 2, 3, 4), + Bbox(5, 6, 7, 8), + ]), + DatasetItem(id=13, image=np.ones((5, 4, 
3)), annotations=[ + Bbox(9, 10, 11, 12), # mismatch + ]), + + DatasetItem(id=14, image=np.zeros((5, 4, 3)), annotations=[ + Bbox(1, 2, 3, 4), + Bbox(5, 6, 7, 8), + ], attributes={ 'a': 1 }), + + DatasetItem(id=15, image=np.zeros((5, 5, 3)), annotations=[ + Bbox(1, 2, 3, 4), + Bbox(5, 6, 7, 8), + ]), + ], categories=['a', 'b', 'c', 'd']) + + b = Dataset.from_iterable([ + DatasetItem(id=21, image=np.ones((5, 4, 3)), annotations=[ + Bbox(5, 6, 7, 8), + ]), + DatasetItem(id=22, image=np.ones((5, 4, 3)), annotations=[ + Bbox(1, 2, 3, 4), + Bbox(5, 6, 7, 8), + ]), + DatasetItem(id=23, image=np.ones((5, 4, 3)), annotations=[ + Bbox(10, 10, 11, 12), # mismatch + ]), + + DatasetItem(id=24, image=np.zeros((5, 4, 3)), annotations=[ + Bbox(6, 6, 7, 8), # 1 ann missing, mismatch + ], attributes={ 'a': 2 }), + + DatasetItem(id=25, image=np.zeros((4, 4, 3)), annotations=[ + Bbox(6, 6, 7, 8), + ]), + ], categories=['a', 'b', 'c', 'd']) + + comp = ExactComparator(match_images=True) + matched_ann, unmatched_ann, a_unmatched, b_unmatched, errors = \ + comp.compare_datasets(a, b) + + self.assertEqual(3, len(matched_ann), matched_ann) + self.assertEqual(5, len(unmatched_ann), unmatched_ann) + self.assertEqual(1, len(a_unmatched), a_unmatched) + self.assertEqual(1, len(b_unmatched), b_unmatched) + self.assertEqual(1, len(errors), errors) \ No newline at end of file diff --git a/testbed/openvinotoolkit__datumaro/tests/test_image_dir_format.py b/testbed/openvinotoolkit__datumaro/tests/test_image_dir_format.py new file mode 100644 index 0000000000000000000000000000000000000000..f7f21b08888433a0d3be456b07f72542426fce46 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/test_image_dir_format.py @@ -0,0 +1,31 @@ +import numpy as np + +from unittest import TestCase + +from datumaro.components.project import Dataset +from datumaro.components.extractor import DatasetItem +from datumaro.plugins.image_dir import ImageDirConverter +from datumaro.util.test_utils import TestDir, 
test_save_and_load + + +class ImageDirFormatTest(TestCase): + def test_can_load(self): + dataset = Dataset.from_iterable([ + DatasetItem(id=1, image=np.ones((10, 6, 3))), + DatasetItem(id=2, image=np.ones((5, 4, 3))), + ]) + + with TestDir() as test_dir: + test_save_and_load(self, dataset, ImageDirConverter.convert, + test_dir, importer='image_dir') + + def test_relative_paths(self): + dataset = Dataset.from_iterable([ + DatasetItem(id='1', image=np.ones((4, 2, 3))), + DatasetItem(id='subdir1/1', image=np.ones((2, 6, 3))), + DatasetItem(id='subdir2/1', image=np.ones((5, 4, 3))), + ]) + + with TestDir() as test_dir: + test_save_and_load(self, dataset, ImageDirConverter.convert, + test_dir, importer='image_dir') \ No newline at end of file diff --git a/testbed/openvinotoolkit__datumaro/tests/test_imagenet_format.py b/testbed/openvinotoolkit__datumaro/tests/test_imagenet_format.py new file mode 100644 index 0000000000000000000000000000000000000000..b6bd4e0c9766b5f59804202ce6ea77f571bf4700 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/test_imagenet_format.py @@ -0,0 +1,111 @@ +from unittest import TestCase + +import numpy as np +import os.path as osp + +from datumaro.components.project import Project, Dataset +from datumaro.components.extractor import (DatasetItem, Label, + LabelCategories, AnnotationType +) +from datumaro.plugins.imagenet_format import ImagenetConverter +from datumaro.plugins.imagenet_format import ImagenetImporter +from datumaro.util.test_utils import TestDir, compare_datasets + +class ImagenetFormatTest(TestCase): + def test_can_save_and_load(self): + source_dataset = Dataset.from_iterable([ + DatasetItem(id='1', + image=np.ones((8, 8, 3)), + annotations=[Label(0)] + ), + DatasetItem(id='2', + image=np.ones((10, 10, 3)), + annotations=[Label(1)] + ), + DatasetItem(id='3', + image=np.ones((10, 10, 3)), + annotations=[Label(0)] + ), + DatasetItem(id='4', + image=np.ones((8, 8, 3)), + annotations=[Label(2)] + ), + ], categories={ + 
AnnotationType.label: LabelCategories.from_iterable( + 'label_' + str(label) for label in range(3)), + }) + + with TestDir() as test_dir: + ImagenetConverter.convert(source_dataset, test_dir, save_images=True) + + parsed_dataset = ImagenetImporter()(test_dir).make_dataset() + + compare_datasets(self, source_dataset, parsed_dataset, + require_images=True) + + def test_can_save_and_load_with_multiple_labels(self): + source_dataset = Dataset.from_iterable([ + DatasetItem(id='1', + image=np.ones((8, 8, 3)), + annotations=[Label(0), Label(1)] + ), + DatasetItem(id='2', + image=np.ones((10, 10, 3)), + annotations=[Label(0), Label(1)] + ), + DatasetItem(id='3', + image=np.ones((10, 10, 3)), + annotations=[Label(0), Label(2)] + ), + DatasetItem(id='4', + image=np.ones((8, 8, 3)), + annotations=[Label(2), Label(4)] + ), + DatasetItem(id='5', + image=np.ones((10, 10, 3)), + annotations=[Label(3), Label(4)] + ), + DatasetItem(id='6', + image=np.ones((10, 10, 3)), + ), + DatasetItem(id='7', + image=np.ones((8, 8, 3)) + ), + ], categories={ + AnnotationType.label: LabelCategories.from_iterable( + 'label_' + str(label) for label in range(5)), + }) + + with TestDir() as test_dir: + ImagenetConverter.convert(source_dataset, test_dir, save_images=True) + + parsed_dataset = ImagenetImporter()(test_dir).make_dataset() + + compare_datasets(self, source_dataset, parsed_dataset, + require_images=True) + + +DUMMY_DATASET_DIR = osp.join(osp.dirname(__file__), 'assets', 'imagenet_dataset') + +class ImagenetImporterTest(TestCase): + def test_can_import(self): + expected_dataset = Dataset.from_iterable([ + DatasetItem(id='1', + image=np.ones((8, 8, 3)), + annotations=[Label(0), Label(1)] + ), + DatasetItem(id='2', + image=np.ones((10, 10, 3)), + annotations=[Label(0)] + ), + ], categories={ + AnnotationType.label: LabelCategories.from_iterable( + 'label_' + str(label) for label in range(2)), + }) + + dataset = Project.import_from(DUMMY_DATASET_DIR, 'imagenet').make_dataset() + + 
compare_datasets(self, expected_dataset, dataset, require_images=True) + + def test_can_detect_imagenet(self): + self.assertTrue(ImagenetImporter.detect(DUMMY_DATASET_DIR)) diff --git a/testbed/openvinotoolkit__datumaro/tests/test_imagenet_txt_format.py b/testbed/openvinotoolkit__datumaro/tests/test_imagenet_txt_format.py new file mode 100644 index 0000000000000000000000000000000000000000..251c71fcc91ff32c70bc315c1724c80a86b95fa0 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/test_imagenet_txt_format.py @@ -0,0 +1,120 @@ +from unittest import TestCase + +import numpy as np +import os.path as osp + +from datumaro.components.project import Project, Dataset +from datumaro.components.extractor import (DatasetItem, Label, + LabelCategories, AnnotationType +) +from datumaro.plugins.imagenet_txt_format import ImagenetTxtConverter, ImagenetTxtImporter +from datumaro.util.test_utils import TestDir, compare_datasets + + +class ImagenetTxtFormatTest(TestCase): + def test_can_save_and_load(self): + source_dataset = Dataset.from_iterable([ + DatasetItem(id='1', subset='train', + annotations=[Label(0)] + ), + DatasetItem(id='2', subset='train', + annotations=[Label(0)] + ), + DatasetItem(id='3', subset='train', image=np.zeros((8, 8, 3)), + annotations=[Label(0)] + ), + DatasetItem(id='4', subset='train', + annotations=[Label(1)] + ), + DatasetItem(id='5', subset='train', image=np.zeros((4, 8, 3)), + annotations=[Label(1)] + ), + DatasetItem(id='6', subset='train', + annotations=[Label(5)] + ), + ], categories={ + AnnotationType.label: LabelCategories.from_iterable( + 'label_' + str(label) for label in range(10)), + }) + + with TestDir() as test_dir: + ImagenetTxtConverter.convert(source_dataset, test_dir, + save_images=True) + + parsed_dataset = ImagenetTxtImporter()(test_dir).make_dataset() + + compare_datasets(self, source_dataset, parsed_dataset, + require_images=True) + + def test_can_save_and_load_with_multiple_labels(self): + source_dataset = 
Dataset.from_iterable([ + DatasetItem(id='1', subset='train', + annotations=[Label(1), Label(3)] + ), + DatasetItem(id='2', subset='train', image=np.zeros((8, 6, 3)), + annotations=[Label(0)] + ), + DatasetItem(id='3', subset='train', image=np.zeros((2, 8, 3)), + ), + ], categories={ + AnnotationType.label: LabelCategories.from_iterable( + 'label_' + str(label) for label in range(10)), + }) + + with TestDir() as test_dir: + ImagenetTxtConverter.convert(source_dataset, test_dir, + save_images=True) + + parsed_dataset = ImagenetTxtImporter()(test_dir).make_dataset() + + compare_datasets(self, source_dataset, parsed_dataset, + require_images=True) + + def test_can_save_dataset_with_no_subsets(self): + source_dataset = Dataset.from_iterable([ + DatasetItem(id='a/b/c', image=np.zeros((8, 4, 3)), + annotations=[Label(1)] + ), + ], categories={ + AnnotationType.label: LabelCategories.from_iterable( + 'label_' + str(label) for label in range(10)), + }) + + with TestDir() as test_dir: + ImagenetTxtConverter.convert(source_dataset, test_dir, + save_images=True) + + parsed_dataset = ImagenetTxtImporter()(test_dir).make_dataset() + + compare_datasets(self, source_dataset, parsed_dataset, + require_images=True) + +DUMMY_DATASET_DIR = osp.join(osp.dirname(__file__), 'assets', 'imagenet_txt_dataset') + +class ImagenetTxtImporterTest(TestCase): + def test_can_import(self): + expected_dataset = Dataset.from_iterable([ + DatasetItem(id='1', subset='train', image=np.zeros((8, 6, 3)), + annotations=[Label(0)] + ), + DatasetItem(id='2', subset='train', image=np.zeros((2, 8, 3)), + annotations=[Label(5)] + ), + DatasetItem(id='3', subset='train', + annotations=[Label(3)] + ), + DatasetItem(id='4', subset='train', + annotations=[Label(5)] + ), + ], categories={ + AnnotationType.label: LabelCategories.from_iterable( + 'label_%s' % label for label in range(10)), + }) + + dataset = Project.import_from(DUMMY_DATASET_DIR, 'imagenet_txt') \ + .make_dataset() + + compare_datasets(self, 
expected_dataset, dataset, require_images=True) + + def test_can_detect_imagenet(self): + self.assertTrue(ImagenetTxtImporter.detect(DUMMY_DATASET_DIR)) diff --git a/testbed/openvinotoolkit__datumaro/tests/test_images.py b/testbed/openvinotoolkit__datumaro/tests/test_images.py new file mode 100644 index 0000000000000000000000000000000000000000..a003b8d426e28642641c6a9f584b35b60482a60c --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/test_images.py @@ -0,0 +1,122 @@ +import numpy as np +import os.path as osp + +from unittest import TestCase + +from datumaro.util.test_utils import TestDir +from datumaro.util.image import (lazy_image, load_image, save_image, \ + Image, ByteImage, encode_image) +from datumaro.util.image_cache import ImageCache + + +class LazyImageTest(TestCase): + def test_cache_works(self): + with TestDir() as test_dir: + image = np.ones((100, 100, 3), dtype=np.uint8) + image_path = osp.join(test_dir, 'image.jpg') + save_image(image_path, image) + + caching_loader = lazy_image(image_path, cache=None) + self.assertTrue(caching_loader() is caching_loader()) + + non_caching_loader = lazy_image(image_path, cache=False) + self.assertFalse(non_caching_loader() is non_caching_loader()) + +class ImageCacheTest(TestCase): + def test_cache_fifo_displacement(self): + capacity = 2 + cache = ImageCache(capacity) + + loaders = [lazy_image(None, loader=lambda p: object(), cache=cache) + for _ in range(capacity + 1)] + + first_request = [loader() for loader in loaders[1 : ]] + loaders[0]() # pop something from the cache + + second_request = [loader() for loader in loaders[2 : ]] + second_request.insert(0, loaders[1]()) + + matches = sum([a is b for a, b in zip(first_request, second_request)]) + self.assertEqual(matches, len(first_request) - 1) + + def test_global_cache_is_accessible(self): + loader = lazy_image(None, loader=lambda p: object()) + + ImageCache.get_instance().clear() + self.assertTrue(loader() is loader()) + 
self.assertEqual(ImageCache.get_instance().size(), 1) + +class ImageTest(TestCase): + def test_lazy_image_shape(self): + data = np.ones((5, 6, 3)) + + image_lazy = Image(data=data, size=(2, 4)) + image_eager = Image(data=data) + + self.assertEqual((2, 4), image_lazy.size) + self.assertEqual((5, 6), image_eager.size) + + def test_ctors(self): + with TestDir() as test_dir: + path = osp.join(test_dir, 'path.png') + image = np.ones([2, 4, 3]) + save_image(path, image) + + for args in [ + { 'data': image }, + { 'data': image, 'path': path }, + { 'data': image, 'path': path, 'size': (2, 4) }, + { 'data': image, 'path': path, 'loader': load_image, 'size': (2, 4) }, + { 'path': path }, + { 'path': path, 'loader': load_image }, + { 'path': 'somepath', 'loader': lambda p: image }, + { 'loader': lambda p: image }, + { 'path': path, 'size': (2, 4) }, + ]: + with self.subTest(**args): + img = Image(**args) + # pylint: disable=pointless-statement + self.assertTrue(img.has_data) + self.assertEqual(img, image) + self.assertEqual(img.size, tuple(image.shape[:2])) + # pylint: enable=pointless-statement + +class BytesImageTest(TestCase): + def test_lazy_image_shape(self): + data = encode_image(np.ones((5, 6, 3)), 'png') + + image_lazy = ByteImage(data=data, size=(2, 4)) + image_eager = ByteImage(data=data) + + self.assertEqual((2, 4), image_lazy.size) + self.assertEqual((5, 6), image_eager.size) + + def test_ctors(self): + with TestDir() as test_dir: + path = osp.join(test_dir, 'path.png') + image = np.ones([2, 4, 3]) + image_bytes = encode_image(image, 'png') + + for args in [ + { 'data': image_bytes }, + { 'data': lambda _: image_bytes }, + { 'data': lambda _: image_bytes, 'ext': '.jpg' }, + { 'data': image_bytes, 'path': path }, + { 'data': image_bytes, 'path': path, 'size': (2, 4) }, + { 'data': image_bytes, 'path': path, 'size': (2, 4) }, + { 'path': path }, + { 'path': path, 'size': (2, 4) }, + ]: + with self.subTest(**args): + img = ByteImage(**args) + # pylint: 
disable=pointless-statement + self.assertEqual('data' in args, img.has_data) + if img.has_data: + self.assertEqual(img, image) + self.assertEqual(img.get_bytes(), image_bytes) + img.size + if 'size' in args: + self.assertEqual(img.size, args['size']) + if 'ext' in args or 'path' in args: + self.assertEqual(img.ext, args.get('ext', '.png')) + # pylint: enable=pointless-statement diff --git a/testbed/openvinotoolkit__datumaro/tests/test_labelme_format.py b/testbed/openvinotoolkit__datumaro/tests/test_labelme_format.py new file mode 100644 index 0000000000000000000000000000000000000000..f51922224f92577153982857b2c6c3d2d272742b --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/test_labelme_format.py @@ -0,0 +1,186 @@ +from functools import partial +import numpy as np +import os.path as osp + +from unittest import TestCase +from datumaro.components.project import Dataset +from datumaro.components.extractor import (DatasetItem, + AnnotationType, Bbox, Mask, Polygon, LabelCategories +) +from datumaro.components.project import Project +from datumaro.plugins.labelme_format import LabelMeImporter, LabelMeConverter +from datumaro.util.test_utils import (TestDir, compare_datasets, + test_save_and_load) + + +class LabelMeConverterTest(TestCase): + def _test_save_and_load(self, source_dataset, converter, test_dir, + target_dataset=None, importer_args=None): + return test_save_and_load(self, source_dataset, converter, test_dir, + importer='label_me', + target_dataset=target_dataset, importer_args=importer_args) + + def test_can_save_and_load(self): + source_dataset = Dataset.from_iterable([ + DatasetItem(id='dir1/1', subset='train', + image=np.ones((16, 16, 3)), + annotations=[ + Bbox(0, 4, 4, 8, label=2, group=2), + Polygon([0, 4, 4, 4, 5, 6], label=3, attributes={ + 'occluded': True, + 'a1': 'qwe', + 'a2': True, + 'a3': 123, + }), + Mask(np.array([[0, 1], [1, 0], [1, 1]]), group=2, + attributes={ 'username': 'test' }), + Bbox(1, 2, 3, 4, group=3), + Mask(np.array([[0, 
0], [0, 0], [1, 1]]), group=3, + attributes={ 'occluded': True } + ), + ] + ), + ], categories={ + AnnotationType.label: LabelCategories.from_iterable( + 'label_' + str(label) for label in range(10)), + }) + + target_dataset = Dataset.from_iterable([ + DatasetItem(id='dir1/1', subset='train', + image=np.ones((16, 16, 3)), + annotations=[ + Bbox(0, 4, 4, 8, label=0, group=2, id=0, + attributes={ + 'occluded': False, 'username': '', + } + ), + Polygon([0, 4, 4, 4, 5, 6], label=1, id=1, + attributes={ + 'occluded': True, 'username': '', + 'a1': 'qwe', + 'a2': True, + 'a3': 123, + } + ), + Mask(np.array([[0, 1], [1, 0], [1, 1]]), group=2, + id=2, attributes={ + 'occluded': False, 'username': 'test' + } + ), + Bbox(1, 2, 3, 4, group=1, id=3, attributes={ + 'occluded': False, 'username': '', + }), + Mask(np.array([[0, 0], [0, 0], [1, 1]]), group=1, + id=4, attributes={ + 'occluded': True, 'username': '' + } + ), + ] + ), + ], categories={ + AnnotationType.label: LabelCategories.from_iterable([ + 'label_2', 'label_3']), + }) + + with TestDir() as test_dir: + self._test_save_and_load( + source_dataset, + partial(LabelMeConverter.convert, save_images=True), + test_dir, target_dataset=target_dataset) + + +DUMMY_DATASET_DIR = osp.join(osp.dirname(__file__), 'assets', 'labelme_dataset') + +class LabelMeImporterTest(TestCase): + def test_can_detect(self): + self.assertTrue(LabelMeImporter.detect(DUMMY_DATASET_DIR)) + + def test_can_import(self): + img1 = np.ones((77, 102, 3)) * 255 + img1[6:32, 7:41] = 0 + + mask1 = np.zeros((77, 102), dtype=int) + mask1[67:69, 58:63] = 1 + + mask2 = np.zeros((77, 102), dtype=int) + mask2[13:25, 54:71] = [ + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0], + [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0], + [0, 1, 1, 1, 
1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], + [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + ] + + target_dataset = Dataset.from_iterable([ + DatasetItem(id='example_folder/img1', image=img1, + annotations=[ + Polygon([43, 34, 45, 34, 45, 37, 43, 37], + label=0, id=0, + attributes={ + 'occluded': False, + 'username': 'admin' + } + ), + Mask(mask1, label=1, id=1, + attributes={ + 'occluded': False, + 'username': 'brussell' + } + ), + Polygon([30, 12, 42, 21, 24, 26, 15, 22, 18, 14, 22, 12, 27, 12], + label=2, group=2, id=2, + attributes={ + 'a1': True, + 'occluded': True, + 'username': 'anonymous' + } + ), + Polygon([35, 21, 43, 22, 40, 28, 28, 31, 31, 22, 32, 25], + label=3, group=2, id=3, + attributes={ + 'kj': True, + 'occluded': False, + 'username': 'anonymous' + } + ), + Bbox(13, 19, 10, 11, label=4, group=2, id=4, + attributes={ + 'hg': True, + 'occluded': True, + 'username': 'anonymous' + } + ), + Mask(mask2, label=5, group=1, id=5, + attributes={ + 'd': True, + 'occluded': False, + 'username': 'anonymous' + } + ), + Polygon([64, 21, 74, 24, 72, 32, 62, 34, 60, 27, 62, 22], + label=6, group=1, id=6, + attributes={ + 'gfd lkj lkj hi': True, + 'occluded': False, + 'username': 'anonymous' + } + ), + ] + ), + ], categories={ + AnnotationType.label: LabelCategories.from_iterable([ + 'window', 'license plate', 'o1', + 'q1', 'b1', 'm1', 'hg', + ]), + }) + + parsed = Project.import_from(DUMMY_DATASET_DIR, 'label_me') \ + .make_dataset() + compare_datasets(self, expected=target_dataset, actual=parsed) \ No newline at end of file diff --git a/testbed/openvinotoolkit__datumaro/tests/test_mot_format.py b/testbed/openvinotoolkit__datumaro/tests/test_mot_format.py new file mode 100644 index 
0000000000000000000000000000000000000000..fd647426bbfe9a1a96ca11d33b581124a952d897 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/test_mot_format.py @@ -0,0 +1,129 @@ +from functools import partial +import numpy as np +import os.path as osp + +from unittest import TestCase +from datumaro.components.project import Dataset +from datumaro.components.extractor import (DatasetItem, + AnnotationType, Bbox, LabelCategories +) +from datumaro.components.project import Project +from datumaro.plugins.mot_format import MotSeqGtConverter, MotSeqImporter +from datumaro.util.test_utils import (TestDir, compare_datasets, + test_save_and_load) + + +class MotConverterTest(TestCase): + def _test_save_and_load(self, source_dataset, converter, test_dir, + target_dataset=None, importer_args=None): + return test_save_and_load(self, source_dataset, converter, test_dir, + importer='mot_seq', + target_dataset=target_dataset, importer_args=importer_args) + + def test_can_save_bboxes(self): + source_dataset = Dataset.from_iterable([ + DatasetItem(id=1, subset='train', + image=np.ones((16, 16, 3)), + annotations=[ + Bbox(0, 4, 4, 8, label=2, attributes={ + 'occluded': True, + }), + Bbox(0, 4, 4, 4, label=3, attributes={ + 'visibility': 0.4, + }), + Bbox(2, 4, 4, 4, attributes={ + 'ignored': True + }), + ] + ), + + DatasetItem(id=2, subset='val', + image=np.ones((8, 8, 3)), + annotations=[ + Bbox(1, 2, 4, 2, label=3), + ] + ), + + DatasetItem(id=3, subset='test', + image=np.ones((5, 4, 3)) * 3, + ), + ], categories={ + AnnotationType.label: LabelCategories.from_iterable( + 'label_' + str(label) for label in range(10)), + }) + + target_dataset = Dataset.from_iterable([ + DatasetItem(id=1, + image=np.ones((16, 16, 3)), + annotations=[ + Bbox(0, 4, 4, 8, label=2, attributes={ + 'occluded': True, + 'visibility': 0.0, + 'ignored': False, + }), + Bbox(0, 4, 4, 4, label=3, attributes={ + 'occluded': False, + 'visibility': 0.4, + 'ignored': False, + }), + Bbox(2, 4, 4, 4, attributes={ + 
'occluded': False, + 'visibility': 1.0, + 'ignored': True, + }), + ] + ), + + DatasetItem(id=2, + image=np.ones((8, 8, 3)), + annotations=[ + Bbox(1, 2, 4, 2, label=3, attributes={ + 'occluded': False, + 'visibility': 1.0, + 'ignored': False, + }), + ] + ), + + DatasetItem(id=3, + image=np.ones((5, 4, 3)) * 3, + ), + ], categories={ + AnnotationType.label: LabelCategories.from_iterable( + 'label_' + str(label) for label in range(10)), + }) + + with TestDir() as test_dir: + self._test_save_and_load( + source_dataset, + partial(MotSeqGtConverter.convert, save_images=True), + test_dir, target_dataset=target_dataset) + + +DUMMY_DATASET_DIR = osp.join(osp.dirname(__file__), 'assets', 'mot_dataset') + +class MotImporterTest(TestCase): + def test_can_detect(self): + self.assertTrue(MotSeqImporter.detect(DUMMY_DATASET_DIR)) + + def test_can_import(self): + expected_dataset = Dataset.from_iterable([ + DatasetItem(id=1, + image=np.ones((16, 16, 3)), + annotations=[ + Bbox(0, 4, 4, 8, label=2, attributes={ + 'occluded': False, + 'visibility': 1.0, + 'ignored': False, + }), + ] + ), + ], categories={ + AnnotationType.label: LabelCategories.from_iterable( + 'label_' + str(label) for label in range(10)), + }) + + dataset = Project.import_from(DUMMY_DATASET_DIR, 'mot_seq') \ + .make_dataset() + + compare_datasets(self, expected_dataset, dataset) \ No newline at end of file diff --git a/testbed/openvinotoolkit__datumaro/tests/test_ops.py b/testbed/openvinotoolkit__datumaro/tests/test_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..c0c206067ce7a5941838a64348ecdf23c4b8cb58 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/test_ops.py @@ -0,0 +1,498 @@ +from unittest import TestCase + +import numpy as np + +from datumaro.components.extractor import (Bbox, Caption, DatasetItem, + Extractor, Label, Mask, Points, Polygon, PolyLine, DEFAULT_SUBSET_NAME, + LabelCategories, PointsCategories, MaskCategories, AnnotationType) +from 
datumaro.components.operations import (FailedAttrVotingError, + IntersectMerge, NoMatchingAnnError, NoMatchingItemError, WrongGroupError, + compute_ann_statistics, mean_std) +from datumaro.components.project import Dataset +from datumaro.util.test_utils import compare_datasets + + +class TestOperations(TestCase): + def test_mean_std(self): + expected_mean = [100, 50, 150] + expected_std = [20, 50, 10] + + dataset = Dataset.from_iterable([ + DatasetItem(id=1, image=np.random.normal( + expected_mean, expected_std, size=(w, h, 3)) + ) + for i, (w, h) in enumerate([ + (3000, 100), (800, 600), (400, 200), (700, 300) + ]) + ]) + + actual_mean, actual_std = mean_std(dataset) + + for em, am in zip(expected_mean, actual_mean): + self.assertAlmostEqual(em, am, places=0) + for estd, astd in zip(expected_std, actual_std): + self.assertAlmostEqual(estd, astd, places=0) + + def test_stats(self): + dataset = Dataset.from_iterable([ + DatasetItem(id=1, image=np.ones((5, 5, 3)), annotations=[ + Caption('hello'), + Caption('world'), + Label(2, attributes={ 'x': 1, 'y': '2', }), + Bbox(1, 2, 2, 2, label=2, attributes={ 'score': 0.5, }), + Bbox(5, 6, 2, 2, attributes={ + 'x': 1, 'y': '3', 'occluded': True, + }), + Points([1, 2, 2, 0, 1, 1], label=0), + Mask(label=3, image=np.array([ + [0, 0, 1, 1, 1], + [0, 0, 1, 1, 1], + [0, 0, 1, 1, 1], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + ])), + ]), + DatasetItem(id=2, image=np.ones((2, 4, 3)), annotations=[ + Label(2, attributes={ 'x': 2, 'y': '2', }), + Bbox(1, 2, 2, 2, label=3, attributes={ 'score': 0.5, }), + Bbox(5, 6, 2, 2, attributes={ + 'x': 2, 'y': '3', 'occluded': False, + }), + ]), + DatasetItem(id=3), + ], categories=['label_%s' % i for i in range(4)]) + + expected = { + 'images count': 3, + 'annotations count': 10, + 'unannotated images count': 1, + 'unannotated images': ['3'], + 'annotations by type': { + 'label': { 'count': 2, }, + 'polygon': { 'count': 0, }, + 'polyline': { 'count': 0, }, + 'bbox': { 'count': 4, }, + 'mask': { 
'count': 1, }, + 'points': { 'count': 1, }, + 'caption': { 'count': 2, }, + }, + 'annotations': { + 'labels': { + 'count': 6, + 'distribution': { + 'label_0': [1, 1/6], + 'label_1': [0, 0.0], + 'label_2': [3, 3/6], + 'label_3': [2, 2/6], + }, + 'attributes': { + 'x': { + 'count': 2, # annotations with no label are skipped + 'values count': 2, + 'values present': ['1', '2'], + 'distribution': { + '1': [1, 1/2], + '2': [1, 1/2], + }, + }, + 'y': { + 'count': 2, # annotations with no label are skipped + 'values count': 1, + 'values present': ['2'], + 'distribution': { + '2': [2, 2/2], + }, + }, + # must not include "special" attributes like "occluded" + } + }, + 'segments': { + 'avg. area': (4 * 2 + 9 * 1) / 3, + 'area distribution': [ + {'min': 4.0, 'max': 4.5, 'count': 2, 'percent': 2/3}, + {'min': 4.5, 'max': 5.0, 'count': 0, 'percent': 0.0}, + {'min': 5.0, 'max': 5.5, 'count': 0, 'percent': 0.0}, + {'min': 5.5, 'max': 6.0, 'count': 0, 'percent': 0.0}, + {'min': 6.0, 'max': 6.5, 'count': 0, 'percent': 0.0}, + {'min': 6.5, 'max': 7.0, 'count': 0, 'percent': 0.0}, + {'min': 7.0, 'max': 7.5, 'count': 0, 'percent': 0.0}, + {'min': 7.5, 'max': 8.0, 'count': 0, 'percent': 0.0}, + {'min': 8.0, 'max': 8.5, 'count': 0, 'percent': 0.0}, + {'min': 8.5, 'max': 9.0, 'count': 1, 'percent': 1/3}, + ], + 'pixel distribution': { + 'label_0': [0, 0.0], + 'label_1': [0, 0.0], + 'label_2': [4, 4/17], + 'label_3': [13, 13/17], + }, + } + }, + } + + actual = compute_ann_statistics(dataset) + + self.assertEqual(expected, actual) + + def test_stats_with_empty_dataset(self): + dataset = Dataset.from_iterable([ + DatasetItem(id=1), + DatasetItem(id=3), + ], categories=['label_%s' % i for i in range(4)]) + + expected = { + 'images count': 2, + 'annotations count': 0, + 'unannotated images count': 2, + 'unannotated images': ['1', '3'], + 'annotations by type': { + 'label': { 'count': 0, }, + 'polygon': { 'count': 0, }, + 'polyline': { 'count': 0, }, + 'bbox': { 'count': 0, }, + 'mask': { 
'count': 0, }, + 'points': { 'count': 0, }, + 'caption': { 'count': 0, }, + }, + 'annotations': { + 'labels': { + 'count': 0, + 'distribution': { + 'label_0': [0, 0.0], + 'label_1': [0, 0.0], + 'label_2': [0, 0.0], + 'label_3': [0, 0.0], + }, + 'attributes': {} + }, + 'segments': { + 'avg. area': 0, + 'area distribution': [], + 'pixel distribution': { + 'label_0': [0, 0.0], + 'label_1': [0, 0.0], + 'label_2': [0, 0.0], + 'label_3': [0, 0.0], + }, + } + }, + } + + actual = compute_ann_statistics(dataset) + + self.assertEqual(expected, actual) + +class TestMultimerge(TestCase): + def test_can_match_items(self): + # items 1 and 3 are unique, item 2 is common and should be merged + + source0 = Dataset.from_iterable([ + DatasetItem(1, annotations=[ Label(0), ]), + DatasetItem(2, annotations=[ Label(0), ]), + ], categories=['a', 'b']) + + source1 = Dataset.from_iterable([ + DatasetItem(2, annotations=[ Label(1), ]), + DatasetItem(3, annotations=[ Label(0), ]), + ], categories=['a', 'b']) + + source2 = Dataset.from_iterable([ + DatasetItem(2, annotations=[ Label(0), Bbox(1, 2, 3, 4) ]), + ], categories=['a', 'b']) + + expected = Dataset.from_iterable([ + DatasetItem(1, annotations=[ + Label(0, attributes={'score': 1/3}), + ]), + DatasetItem(2, annotations=[ + Label(0, attributes={'score': 2/3}), + Label(1, attributes={'score': 1/3}), + Bbox(1, 2, 3, 4, attributes={'score': 1.0}), + ]), + DatasetItem(3, annotations=[ + Label(0, attributes={'score': 1/3}), + ]), + ], categories=['a', 'b']) + + merger = IntersectMerge() + merged = merger([source0, source1, source2]) + + compare_datasets(self, expected, merged) + self.assertEqual( + [ + NoMatchingItemError(item_id=('1', DEFAULT_SUBSET_NAME), + sources={1, 2}), + NoMatchingItemError(item_id=('3', DEFAULT_SUBSET_NAME), + sources={0, 2}), + ], + sorted((e for e in merger.errors + if isinstance(e, NoMatchingItemError)), + key=lambda e: e.item_id) + ) + self.assertEqual( + [ + NoMatchingAnnError(item_id=('2', DEFAULT_SUBSET_NAME), 
+ sources={0, 1}, ann=source2.get('2').annotations[1]), + ], + sorted((e for e in merger.errors + if isinstance(e, NoMatchingAnnError)), + key=lambda e: e.item_id) + ) + + def test_can_match_shapes(self): + source0 = Dataset.from_iterable([ + DatasetItem(1, annotations=[ + # unique + Bbox(1, 2, 3, 4, label=1), + + # common + Mask(label=2, z_order=2, image=np.array([ + [0, 0, 0, 0], + [0, 0, 0, 0], + [1, 1, 1, 0], + [1, 1, 1, 0], + ])), + Polygon([1, 0, 3, 2, 1, 2]), + + # an instance with keypoints + Bbox(4, 5, 2, 4, label=2, z_order=1, group=1), + Points([5, 6], label=0, group=1), + Points([6, 8], label=1, group=1), + + PolyLine([1, 1, 2, 1, 3, 1]), + ]), + ], categories=['a', 'b', 'c']) + + source1 = Dataset.from_iterable([ + DatasetItem(1, annotations=[ + # common + Mask(label=2, image=np.array([ + [0, 0, 0, 0], + [0, 1, 1, 1], + [0, 1, 1, 1], + [0, 1, 1, 1], + ])), + Polygon([0, 2, 2, 0, 2, 1]), + + # an instance with keypoints + Bbox(4, 4, 2, 5, label=2, z_order=1, group=2), + Points([5.5, 6.5], label=0, group=2), + Points([6, 8], label=1, group=2), + + PolyLine([1, 1.5, 2, 1.5]), + ]), + ], categories=['a', 'b', 'c']) + + source2 = Dataset.from_iterable([ + DatasetItem(1, annotations=[ + # common + Mask(label=2, z_order=3, image=np.array([ + [0, 0, 1, 1], + [0, 1, 1, 1], + [1, 1, 1, 1], + [1, 1, 1, 0], + ])), + Polygon([3, 1, 2, 2, 0, 1]), + + # an instance with keypoints, one is missing + Bbox(3, 6, 2, 3, label=2, z_order=4, group=3), + Points([4.5, 5.5], label=0, group=3), + + PolyLine([1, 1.25, 3, 1, 4, 2]), + ]), + ], categories=['a', 'b', 'c']) + + expected = Dataset.from_iterable([ + DatasetItem(1, annotations=[ + # unique + Bbox(1, 2, 3, 4, label=1), + + # common + # nearest to mean bbox + Mask(label=2, z_order=3, image=np.array([ + [0, 0, 0, 0], + [0, 1, 1, 1], + [0, 1, 1, 1], + [0, 1, 1, 1], + ])), + Polygon([1, 0, 3, 2, 1, 2]), + + # an instance with keypoints + Bbox(4, 5, 2, 4, label=2, z_order=4, group=1), + Points([5, 6], label=0, group=1), + 
Points([6, 8], label=1, group=1), + + PolyLine([1, 1.25, 3, 1, 4, 2]), + ]), + ], categories=['a', 'b', 'c']) + + merger = IntersectMerge(conf={'quorum': 1, 'pairwise_dist': 0.1}) + merged = merger([source0, source1, source2]) + + compare_datasets(self, expected, merged, ignored_attrs={'score'}) + self.assertEqual( + [ + NoMatchingAnnError(item_id=('1', DEFAULT_SUBSET_NAME), + sources={2}, ann=source0.get('1').annotations[5]), + NoMatchingAnnError(item_id=('1', DEFAULT_SUBSET_NAME), + sources={1, 2}, ann=source0.get('1').annotations[0]), + ], + sorted((e for e in merger.errors + if isinstance(e, NoMatchingAnnError)), + key=lambda e: len(e.sources)) + ) + + def test_attributes(self): + source0 = Dataset.from_iterable([ + DatasetItem(1, annotations=[ + Label(2, attributes={ + 'unique': 1, + 'common_under_quorum': 2, + 'common_over_quorum': 3, + 'ignored': 'q', + }), + ]), + ], categories=['a', 'b', 'c']) + + source1 = Dataset.from_iterable([ + DatasetItem(1, annotations=[ + Label(2, attributes={ + 'common_under_quorum': 2, + 'common_over_quorum': 3, + 'ignored': 'q', + }), + ]), + ], categories=['a', 'b', 'c']) + + source2 = Dataset.from_iterable([ + DatasetItem(1, annotations=[ + Label(2, attributes={ + 'common_over_quorum': 3, + 'ignored': 'q', + }), + ]), + ], categories=['a', 'b', 'c']) + + expected = Dataset.from_iterable([ + DatasetItem(1, annotations=[ + Label(2, attributes={ 'common_over_quorum': 3 }), + ]), + ], categories=['a', 'b', 'c']) + + merger = IntersectMerge(conf={ + 'quorum': 3, 'ignored_attributes': {'ignored'}}) + merged = merger([source0, source1, source2]) + + compare_datasets(self, expected, merged, ignored_attrs={'score'}) + self.assertEqual(2, len([e for e in merger.errors + if isinstance(e, FailedAttrVotingError)]) + ) + + def test_group_checks(self): + dataset = Dataset.from_iterable([ + DatasetItem(1, annotations=[ + Bbox(0, 0, 0, 0, label=0, group=1), # misses an optional label + Bbox(0, 0, 0, 0, label=1, group=1), + + Bbox(0, 0, 0, 0, 
label=2, group=2), # misses a mandatory label - error + Bbox(0, 0, 0, 0, label=2, group=2), + + Bbox(0, 0, 0, 0, label=4), # misses an optional label + Bbox(0, 0, 0, 0, label=5), # misses a mandatory label - error + Bbox(0, 0, 0, 0, label=0), # misses a mandatory label - error + + Bbox(0, 0, 0, 0, label=3), # not listed - not checked + ]), + ], categories=['a', 'a_g1', 'a_g2_opt', 'b', 'c', 'c_g1_opt']) + + merger = IntersectMerge(conf={'groups': [ + ['a', 'a_g1', 'a_g2_opt?'], ['c', 'c_g1_opt?'] + ]}) + merger([dataset, dataset]) + + self.assertEqual(3, len([e for e in merger.errors + if isinstance(e, WrongGroupError)]), merger.errors + ) + + def test_can_merge_classes(self): + source0 = Dataset.from_iterable([ + DatasetItem(1, annotations=[ + Label(0), + Label(1), + Bbox(0, 0, 1, 1, label=1), + ]), + ], categories=['a', 'b']) + + source1 = Dataset.from_iterable([ + DatasetItem(1, annotations=[ + Label(0), + Label(1), + Bbox(0, 0, 1, 1, label=0), + Bbox(0, 0, 1, 1, label=1), + ]), + ], categories=['b', 'c']) + + expected = Dataset.from_iterable([ + DatasetItem(1, annotations=[ + Label(0), + Label(1), + Label(2), + Bbox(0, 0, 1, 1, label=1), + Bbox(0, 0, 1, 1, label=2), + ]), + ], categories=['a', 'b', 'c']) + + merger = IntersectMerge() + merged = merger([source0, source1]) + + compare_datasets(self, expected, merged, ignored_attrs={'score'}) + + def test_can_merge_categories(self): + source0 = Dataset.from_iterable([ + DatasetItem(1, annotations=[ Label(0), ]), + ], categories={ + AnnotationType.label: LabelCategories.from_iterable(['a', 'b']), + AnnotationType.points: PointsCategories.from_iterable([ + (0, ['l0', 'l1']), + (1, ['l2', 'l3']), + ]), + AnnotationType.mask: MaskCategories({ + 0: (0, 1, 2), + 1: (1, 2, 3), + }), + }) + + source1 = Dataset.from_iterable([ + DatasetItem(1, annotations=[ Label(0), ]), + ], categories={ + AnnotationType.label: LabelCategories.from_iterable(['c', 'b']), + AnnotationType.points: PointsCategories.from_iterable([ + (0, []), 
+ (1, ['l2', 'l3']), + ]), + AnnotationType.mask: MaskCategories({ + 0: (0, 2, 4), + 1: (1, 2, 3), + }), + }) + + expected = Dataset.from_iterable([ + DatasetItem(1, annotations=[ Label(0), Label(2), ]), + ], categories={ + AnnotationType.label: LabelCategories.from_iterable(['a', 'b', 'c']), + AnnotationType.points: PointsCategories.from_iterable([ + (0, ['l0', 'l1']), + (1, ['l2', 'l3']), + (2, []), + ]), + AnnotationType.mask: MaskCategories({ + 0: (0, 1, 2), + 1: (1, 2, 3), + 2: (0, 2, 4), + }), + }) + + merger = IntersectMerge() + merged = merger([source0, source1]) + + compare_datasets(self, expected, merged, ignored_attrs={'score'}) \ No newline at end of file diff --git a/testbed/openvinotoolkit__datumaro/tests/test_splitter.py b/testbed/openvinotoolkit__datumaro/tests/test_splitter.py new file mode 100644 index 0000000000000000000000000000000000000000..276ed5f557392b8bf3a09a90440047a69883e4da --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/test_splitter.py @@ -0,0 +1,610 @@ +import numpy as np + +from unittest import TestCase + +from datumaro.components.project import Dataset +from datumaro.components.extractor import (DatasetItem, Label, Bbox, + LabelCategories, AnnotationType) + +import datumaro.plugins.splitter as splitter +from datumaro.components.operations import compute_ann_statistics + + +class SplitterTest(TestCase): + @staticmethod + def _get_subset(idx): + subsets = ["", "a", "b", "", "", "a", "", "b", "", "a"] + return subsets[idx % len(subsets)] + + def _generate_dataset(self, config): + # counts = {(0,0):20, (0,1):20, (0,2):30, (1,0):20, (1,1):10, (1,2):20} + # attr1 = ['attr1', 'attr2'] + # attr2 = ['attr1', 'attr3'] + # config = { "label1": { "attrs": attr1, "counts": counts }, + # "label2": { "attrs": attr2, "counts": counts }} + iterable = [] + label_cat = LabelCategories() + idx = 0 + for label_id, label in enumerate(config.keys()): + anames = config[label]["attrs"] + counts = config[label]["counts"] + label_cat.add(label, 
attributes=anames) + if isinstance(counts, dict): + for attrs, count in counts.items(): + attributes = dict() + if isinstance(attrs, tuple): + for aname, value in zip(anames, attrs): + attributes[aname] = value + else: + attributes[anames[0]] = attrs + for _ in range(count): + idx += 1 + iterable.append( + DatasetItem(idx, subset=self._get_subset(idx), + annotations=[ + Label(label_id, attributes=attributes) + ], + ) + ) + else: + for _ in range(counts): + idx += 1 + iterable.append( + DatasetItem(idx, subset=self._get_subset(idx), + annotations=[Label(label_id)]) + ) + categories = {AnnotationType.label: label_cat} + dataset = Dataset.from_iterable(iterable, categories) + return dataset + + def test_split_for_classification_multi_class_no_attr(self): + config = { + "label1": {"attrs": None, "counts": 10}, + "label2": {"attrs": None, "counts": 20}, + "label3": {"attrs": None, "counts": 30}, + } + source = self._generate_dataset(config) + + splits = [("train", 0.7), ("test", 0.3)] + actual = splitter.ClassificationSplit(source, splits) + + self.assertEqual(42, len(actual.get_subset("train"))) + self.assertEqual(18, len(actual.get_subset("test"))) + + # check stats for train + stat_train = compute_ann_statistics(actual.get_subset("train")) + dist_train = stat_train["annotations"]["labels"]["distribution"] + self.assertEqual(7, dist_train["label1"][0]) + self.assertEqual(14, dist_train["label2"][0]) + self.assertEqual(21, dist_train["label3"][0]) + + # check stats for test + stat_test = compute_ann_statistics(actual.get_subset("test")) + dist_test = stat_test["annotations"]["labels"]["distribution"] + self.assertEqual(3, dist_test["label1"][0]) + self.assertEqual(6, dist_test["label2"][0]) + self.assertEqual(9, dist_test["label3"][0]) + + def test_split_for_classification_single_class_single_attr(self): + counts = {0: 10, 1: 20, 2: 30} + config = {"label": {"attrs": ["attr"], "counts": counts}} + source = self._generate_dataset(config) + + splits = [("train", 0.7), 
("test", 0.3)] + actual = splitter.ClassificationSplit(source, splits) + + self.assertEqual(42, len(actual.get_subset("train"))) + self.assertEqual(18, len(actual.get_subset("test"))) + + # check stats for train + stat_train = compute_ann_statistics(actual.get_subset("train")) + attr_train = stat_train["annotations"]["labels"]["attributes"] + self.assertEqual(7, attr_train["attr"]["distribution"]["0"][0]) + self.assertEqual(14, attr_train["attr"]["distribution"]["1"][0]) + self.assertEqual(21, attr_train["attr"]["distribution"]["2"][0]) + + # check stats for test + stat_test = compute_ann_statistics(actual.get_subset("test")) + attr_test = stat_test["annotations"]["labels"]["attributes"] + self.assertEqual(3, attr_test["attr"]["distribution"]["0"][0]) + self.assertEqual(6, attr_test["attr"]["distribution"]["1"][0]) + self.assertEqual(9, attr_test["attr"]["distribution"]["2"][0]) + + def test_split_for_classification_single_class_multi_attr(self): + counts = { + (0, 0): 20, + (0, 1): 20, + (0, 2): 30, + (1, 0): 20, + (1, 1): 10, + (1, 2): 20, + } + attrs = ["attr1", "attr2"] + config = {"label": {"attrs": attrs, "counts": counts}} + source = self._generate_dataset(config) + + splits = [("train", 0.7), ("test", 0.3)] + actual = splitter.ClassificationSplit(source, splits) + + self.assertEqual(84, len(actual.get_subset("train"))) + self.assertEqual(36, len(actual.get_subset("test"))) + + # check stats for train + stat_train = compute_ann_statistics(actual.get_subset("train")) + attr_train = stat_train["annotations"]["labels"]["attributes"] + self.assertEqual(49, attr_train["attr1"]["distribution"]["0"][0]) + self.assertEqual(35, attr_train["attr1"]["distribution"]["1"][0]) + self.assertEqual(28, attr_train["attr2"]["distribution"]["0"][0]) + self.assertEqual(21, attr_train["attr2"]["distribution"]["1"][0]) + self.assertEqual(35, attr_train["attr2"]["distribution"]["2"][0]) + + # check stats for test + stat_test = compute_ann_statistics(actual.get_subset("test")) + 
attr_test = stat_test["annotations"]["labels"]["attributes"] + self.assertEqual(21, attr_test["attr1"]["distribution"]["0"][0]) + self.assertEqual(15, attr_test["attr1"]["distribution"]["1"][0]) + self.assertEqual(12, attr_test["attr2"]["distribution"]["0"][0]) + self.assertEqual(9, attr_test["attr2"]["distribution"]["1"][0]) + self.assertEqual(15, attr_test["attr2"]["distribution"]["2"][0]) + + def test_split_for_classification_multi_label_with_attr(self): + counts = { + (0, 0): 20, + (0, 1): 20, + (0, 2): 30, + (1, 0): 20, + (1, 1): 10, + (1, 2): 20, + } + attr1 = ["attr1", "attr2"] + attr2 = ["attr1", "attr3"] + config = { + "label1": {"attrs": attr1, "counts": counts}, + "label2": {"attrs": attr2, "counts": counts}, + } + source = self._generate_dataset(config) + + splits = [("train", 0.7), ("test", 0.3)] + actual = splitter.ClassificationSplit(source, splits) + + train = actual.get_subset("train") + test = actual.get_subset("test") + self.assertEqual(168, len(train)) + self.assertEqual(72, len(test)) + + # check stats for train + stat_train = compute_ann_statistics(train) + dist_train = stat_train["annotations"]["labels"]["distribution"] + self.assertEqual(84, dist_train["label1"][0]) + self.assertEqual(84, dist_train["label2"][0]) + attr_train = stat_train["annotations"]["labels"]["attributes"] + self.assertEqual(49 * 2, attr_train["attr1"]["distribution"]["0"][0]) + self.assertEqual(35 * 2, attr_train["attr1"]["distribution"]["1"][0]) + self.assertEqual(28, attr_train["attr2"]["distribution"]["0"][0]) + self.assertEqual(21, attr_train["attr2"]["distribution"]["1"][0]) + self.assertEqual(35, attr_train["attr2"]["distribution"]["2"][0]) + self.assertEqual(28, attr_train["attr3"]["distribution"]["0"][0]) + self.assertEqual(21, attr_train["attr3"]["distribution"]["1"][0]) + self.assertEqual(35, attr_train["attr3"]["distribution"]["2"][0]) + + # check stats for test + stat_test = compute_ann_statistics(test) + dist_test = 
stat_test["annotations"]["labels"]["distribution"] + self.assertEqual(36, dist_test["label1"][0]) + self.assertEqual(36, dist_test["label2"][0]) + attr_test = stat_test["annotations"]["labels"]["attributes"] + self.assertEqual(21 * 2, attr_test["attr1"]["distribution"]["0"][0]) + self.assertEqual(15 * 2, attr_test["attr1"]["distribution"]["1"][0]) + self.assertEqual(12, attr_test["attr2"]["distribution"]["0"][0]) + self.assertEqual(9, attr_test["attr2"]["distribution"]["1"][0]) + self.assertEqual(15, attr_test["attr2"]["distribution"]["2"][0]) + self.assertEqual(12, attr_test["attr3"]["distribution"]["0"][0]) + self.assertEqual(9, attr_test["attr3"]["distribution"]["1"][0]) + self.assertEqual(15, attr_test["attr3"]["distribution"]["2"][0]) + + # random seed test + r1 = splitter.ClassificationSplit(source, splits, seed=1234) + r2 = splitter.ClassificationSplit(source, splits, seed=1234) + r3 = splitter.ClassificationSplit(source, splits, seed=4321) + self.assertEqual( + list(r1.get_subset("test")), list(r2.get_subset("test")) + ) + self.assertNotEqual( + list(r1.get_subset("test")), list(r3.get_subset("test")) + ) + + def test_split_for_classification_gives_error(self): + with self.subTest("no label"): + source = Dataset.from_iterable([ + DatasetItem(1, annotations=[]), + DatasetItem(2, annotations=[]), + ], categories=["a", "b", "c"]) + + with self.assertRaisesRegex(Exception, "exactly one is expected"): + splits = [("train", 0.7), ("test", 0.3)] + actual = splitter.ClassificationSplit(source, splits) + len(actual.get_subset("train")) + + with self.subTest("multi label"): + source = Dataset.from_iterable([ + DatasetItem(1, annotations=[Label(0), Label(1)]), + DatasetItem(2, annotations=[Label(0), Label(2)]), + ], categories=["a", "b", "c"]) + + with self.assertRaisesRegex(Exception, "exactly one is expected"): + splits = [("train", 0.7), ("test", 0.3)] + splitter.ClassificationSplit(source, splits) + len(actual.get_subset("train")) + + source = 
Dataset.from_iterable([ + DatasetItem(1, annotations=[Label(0)]), + DatasetItem(2, annotations=[Label(1)]), + ], categories=["a", "b", "c"]) + + with self.subTest("wrong ratio"): + with self.assertRaisesRegex(Exception, "in the range"): + splits = [("train", -0.5), ("test", 1.5)] + splitter.ClassificationSplit(source, splits) + + with self.assertRaisesRegex(Exception, "Sum of ratios"): + splits = [("train", 0.5), ("test", 0.5), ("val", 0.5)] + splitter.ClassificationSplit(source, splits) + + with self.subTest("wrong subset name"): + with self.assertRaisesRegex(Exception, "Subset name"): + splits = [("train_", 0.5), ("val", 0.2), ("test", 0.3)] + splitter.ClassificationSplit(source, splits) + + def test_split_for_matching_reid(self): + counts = {i: (i % 3 + 1) * 7 for i in range(10)} + config = {"person": {"attrs": ["PID"], "counts": counts}} + source = self._generate_dataset(config) + + splits = [("train", 0.5), ("val", 0.2), ("test", 0.3)] + test_splits = [("query", 0.4 / 0.7), ("gallery", 0.3 / 0.7)] + actual = splitter.MatchingReIDSplit(source, splits, test_splits) + + stats = dict() + for sname in ["train", "val", "test"]: + subset = actual.get_subset(sname) + stat_subset = compute_ann_statistics(subset)["annotations"] + stat_attr = stat_subset["labels"]["attributes"]["PID"] + stats[sname] = stat_attr + + for sname in ["gallery", "query"]: + subset = actual.get_subset_by_group(sname) + stat_subset = compute_ann_statistics(subset)["annotations"] + stat_attr = stat_subset["labels"]["attributes"]["PID"] + stats[sname] = stat_attr + + self.assertEqual(65, stats["train"]["count"]) # depends on heuristic + self.assertEqual(26, stats["val"]["count"]) # depends on heuristic + self.assertEqual(42, stats["test"]["count"]) # depends on heuristic + + train_ids = stats["train"]["values present"] + self.assertEqual(7, len(train_ids)) + self.assertEqual(train_ids, stats["val"]["values present"]) + + trainval = stats["train"]["count"] + stats["val"]["count"] + 
self.assertEqual(int(trainval * 0.5 / 0.7), stats["train"]["count"]) + self.assertEqual(int(trainval * 0.2 / 0.7), stats["val"]["count"]) + + dist_train = stats["train"]["distribution"] + dist_val = stats["val"]["distribution"] + for pid in train_ids: + total = counts[int(pid)] + self.assertEqual(int(total * 0.5 / 0.7), dist_train[pid][0]) + self.assertEqual(int(total * 0.2 / 0.7), dist_val[pid][0]) + + test_ids = stats["test"]["values present"] + self.assertEqual(3, len(test_ids)) + self.assertEqual(test_ids, stats["gallery"]["values present"]) + self.assertEqual(test_ids, stats["query"]["values present"]) + + dist_test = stats["test"]["distribution"] + dist_gallery = stats["gallery"]["distribution"] + dist_query = stats["query"]["distribution"] + for pid in test_ids: + total = counts[int(pid)] + self.assertEqual(total, dist_test[pid][0]) + self.assertEqual(int(total * 0.3 / 0.7), dist_gallery[pid][0]) + self.assertEqual(int(total * 0.4 / 0.7), dist_query[pid][0]) + + # random seed test + splits = [("train", 0.5), ("test", 0.5)] + r1 = splitter.MatchingReIDSplit(source, splits, test_splits, seed=1234) + r2 = splitter.MatchingReIDSplit(source, splits, test_splits, seed=1234) + r3 = splitter.MatchingReIDSplit(source, splits, test_splits, seed=4321) + self.assertEqual( + list(r1.get_subset("test")), list(r2.get_subset("test")) + ) + self.assertNotEqual( + list(r1.get_subset("test")), list(r3.get_subset("test")) + ) + + def test_split_for_matching_reid_gives_error(self): + with self.subTest("no label"): + source = Dataset.from_iterable([ + DatasetItem(1, annotations=[]), + DatasetItem(2, annotations=[]), + ], categories=["a", "b", "c"]) + + with self.assertRaisesRegex(Exception, "exactly one is expected"): + splits = [("train", 0.5), ("val", 0.2), ("test", 0.3)] + test_splits = [("query", 0.4 / 0.7), ("gallery", 0.3 / 0.7)] + actual = splitter.MatchingReIDSplit(source, splits, test_splits) + len(actual.get_subset("train")) + + with self.subTest(msg="multi label"): + 
source = Dataset.from_iterable([ + DatasetItem(1, annotations=[Label(0), Label(1)]), + DatasetItem(2, annotations=[Label(0), Label(2)]), + ], categories=["a", "b", "c"]) + + with self.assertRaisesRegex(Exception, "exactly one is expected"): + splits = [("train", 0.5), ("val", 0.2), ("test", 0.3)] + test_splits = [("query", 0.4 / 0.7), ("gallery", 0.3 / 0.7)] + actual = splitter.MatchingReIDSplit(source, splits, test_splits) + len(actual.get_subset("train")) + + counts = {i: (i % 3 + 1) * 7 for i in range(10)} + config = {"person": {"attrs": ["PID"], "counts": counts}} + source = self._generate_dataset(config) + with self.subTest("wrong ratio"): + with self.assertRaisesRegex(Exception, "in the range"): + splits = [("train", -0.5), ("val", 0.2), ("test", 0.3)] + test_splits = [("query", 0.4 / 0.7), ("gallery", 0.3 / 0.7)] + splitter.MatchingReIDSplit(source, splits, test_splits) + + with self.assertRaisesRegex(Exception, "Sum of ratios"): + splits = [("train", 0.6), ("val", 0.2), ("test", 0.3)] + test_splits = [("query", 0.4 / 0.7), ("gallery", 0.3 / 0.7)] + splitter.MatchingReIDSplit(source, splits, test_splits) + + with self.assertRaisesRegex(Exception, "in the range"): + splits = [("train", 0.5), ("val", 0.2), ("test", 0.3)] + test_splits = [("query", -0.4 / 0.7), ("gallery", 0.3 / 0.7)] + actual = splitter.MatchingReIDSplit(source, splits, test_splits) + len(actual.get_subset_by_group("query")) + + with self.assertRaisesRegex(Exception, "Sum of ratios"): + splits = [("train", 0.5), ("val", 0.2), ("test", 0.3)] + test_splits = [("query", 0.5 / 0.7), ("gallery", 0.3 / 0.7)] + actual = splitter.MatchingReIDSplit(source, splits, test_splits) + len(actual.get_subset_by_group("query")) + + with self.subTest("wrong subset name"): + with self.assertRaisesRegex(Exception, "Subset name"): + splits = [("_train", 0.5), ("val", 0.2), ("test", 0.3)] + test_splits = [("query", 0.4 / 0.7), ("gallery", 0.3 / 0.7)] + splitter.MatchingReIDSplit(source, splits, test_splits) + + with 
self.assertRaisesRegex(Exception, "Subset name"): + splits = [("train", 0.5), ("val", 0.2), ("test", 0.3)] + test_splits = [("_query", 0.4 / 0.7), ("gallery", 0.3 / 0.7)] + actual = splitter.MatchingReIDSplit(source, splits, test_splits) + len(actual.get_subset_by_group("query")) + + with self.subTest("wrong attribute name for person id"): + splits = [("train", 0.5), ("val", 0.2), ("test", 0.3)] + test_splits = [("query", 0.4 / 0.7), ("gallery", 0.3 / 0.7)] + actual = splitter.MatchingReIDSplit(source, splits, test_splits) + + with self.assertRaisesRegex(Exception, "Unknown group"): + actual.get_subset_by_group("_gallery") + + def _generate_detection_dataset(self, **kwargs): + append_bbox = kwargs.get("append_bbox") + with_attr = kwargs.get("with_attr", False) + nimages = kwargs.get("nimages", 10) + + label_cat = LabelCategories() + for i in range(6): + label = "label%d" % (i + 1) + if with_attr is True: + attributes = {"attr0", "attr%d" % (i + 1)} + else: + attributes = {} + label_cat.add(label, attributes=attributes) + categories = {AnnotationType.label: label_cat} + + iterable = [] + attr_val = 0 + totals = np.zeros(3) + objects = [(1, 5, 2), (3, 4, 1), (2, 3, 4), (1, 1, 1), (2, 4, 2)] + for img_id in range(nimages): + cnts = objects[img_id % len(objects)] + totals += cnts + annotations = [] + for label_id, count in enumerate(cnts): + attributes = {} + if with_attr: + attr_val += 1 + attributes["attr0"] = attr_val % 3 + attributes["attr%d" % (label_id + 1)] = attr_val % 2 + for ann_id in range(count): + append_bbox(annotations, label_id=label_id, ann_id=ann_id, + attributes=attributes) + item = DatasetItem(img_id, subset=self._get_subset(img_id), + annotations=annotations, attributes={"id": img_id}) + iterable.append(item) + + dataset = Dataset.from_iterable(iterable, categories) + return dataset, totals + + @staticmethod + def _get_append_bbox(dataset_type): + def append_bbox_coco(annotations, **kwargs): + annotations.append( + Bbox(1, 1, 2, 2, 
label=kwargs["label_id"], + id=kwargs["ann_id"], + attributes=kwargs["attributes"], + group=kwargs["ann_id"], + ) + ) + annotations.append( + Label(kwargs["label_id"], attributes=kwargs["attributes"]) + ) + + def append_bbox_voc(annotations, **kwargs): + annotations.append( + Bbox(1, 1, 2, 2, label=kwargs["label_id"], + id=kwargs["ann_id"] + 1, + attributes=kwargs["attributes"], + group=kwargs["ann_id"], + ) + ) # obj + annotations.append( + Label(kwargs["label_id"], attributes=kwargs["attributes"]) + ) + annotations.append( + Bbox(1, 1, 2, 2, label=kwargs["label_id"] + 3, + group=kwargs["ann_id"], + ) + ) # part + annotations.append( + Label(kwargs["label_id"] + 3, attributes=kwargs["attributes"]) + ) + + def append_bbox_yolo(annotations, **kwargs): + annotations.append(Bbox(1, 1, 2, 2, label=kwargs["label_id"])) + annotations.append( + Label(kwargs["label_id"], attributes=kwargs["attributes"]) + ) + + def append_bbox_cvat(annotations, **kwargs): + annotations.append( + Bbox(1, 1, 2, 2, label=kwargs["label_id"], + id=kwargs["ann_id"], + attributes=kwargs["attributes"], + group=kwargs["ann_id"], + z_order=kwargs["ann_id"], + ) + ) + annotations.append( + Label(kwargs["label_id"], attributes=kwargs["attributes"]) + ) + + def append_bbox_labelme(annotations, **kwargs): + annotations.append( + Bbox(1, 1, 2, 2, label=kwargs["label_id"], + id=kwargs["ann_id"], + attributes=kwargs["attributes"], + ) + ) + annotations.append( + Label(kwargs["label_id"], attributes=kwargs["attributes"]) + ) + + def append_bbox_mot(annotations, **kwargs): + annotations.append( + Bbox(1, 1, 2, 2, label=kwargs["label_id"], + attributes=kwargs["attributes"], + ) + ) + annotations.append( + Label(kwargs["label_id"], attributes=kwargs["attributes"]) + ) + + def append_bbox_widerface(annotations, **kwargs): + annotations.append( + Bbox(1, 1, 2, 2, attributes=kwargs["attributes"]) + ) + annotations.append(Label(0, attributes=kwargs["attributes"])) + + functions = { + "coco": append_bbox_coco, + 
"voc": append_bbox_voc, + "yolo": append_bbox_yolo, + "cvat": append_bbox_cvat, + "labelme": append_bbox_labelme, + "mot": append_bbox_mot, + "widerface": append_bbox_widerface, + } + + func = functions.get(dataset_type, append_bbox_cvat) + return func + + def test_split_for_detection(self): + dtypes = ["coco", "voc", "yolo", "cvat", "labelme", "mot", "widerface"] + params = [] + for dtype in dtypes: + for with_attr in [False, True]: + params.append((dtype, with_attr, 10, 5, 3, 2)) + params.append((dtype, with_attr, 10, 7, 0, 3)) + + for dtype, with_attr, nimages, train, val, test in params: + source, _ = self._generate_detection_dataset( + append_bbox=self._get_append_bbox(dtype), + with_attr=with_attr, + nimages=nimages, + ) + total = np.sum([train, val, test]) + splits = [ + ("train", train / total), + ("val", val / total), + ("test", test / total), + ] + with self.subTest( + dtype=dtype, + with_attr=with_attr, + nimage=nimages, + train=train, + val=val, + test=test, + ): + actual = splitter.DetectionSplit(source, splits) + + self.assertEqual(train, len(actual.get_subset("train"))) + self.assertEqual(val, len(actual.get_subset("val"))) + self.assertEqual(test, len(actual.get_subset("test"))) + + # random seed test + source, _ = self._generate_detection_dataset( + append_bbox=self._get_append_bbox("cvat"), + with_attr=True, + nimages=10, + ) + + splits = [("train", 0.5), ("test", 0.5)] + r1 = splitter.DetectionSplit(source, splits, seed=1234) + r2 = splitter.DetectionSplit(source, splits, seed=1234) + r3 = splitter.DetectionSplit(source, splits, seed=4321) + self.assertEqual( + list(r1.get_subset("test")), list(r2.get_subset("test")) + ) + self.assertNotEqual( + list(r1.get_subset("test")), list(r3.get_subset("test")) + ) + + def test_split_for_detection_gives_error(self): + with self.subTest(msg="bbox annotation"): + source = Dataset.from_iterable([ + DatasetItem(1, annotations=[Label(0), Label(1)]), + DatasetItem(2, annotations=[Label(0), Label(2)]), + ], 
categories=["a", "b", "c"]) + + with self.assertRaisesRegex(Exception, "more than one bbox"): + splits = [("train", 0.5), ("val", 0.2), ("test", 0.3)] + actual = splitter.DetectionSplit(source, splits) + len(actual.get_subset("train")) + + source, _ = self._generate_detection_dataset( + append_bbox=self._get_append_bbox("cvat"), + with_attr=True, + nimages=5, + ) + + with self.subTest("wrong ratio"): + with self.assertRaisesRegex(Exception, "in the range"): + splits = [("train", -0.5), ("test", 1.5)] + splitter.DetectionSplit(source, splits) + + with self.assertRaisesRegex(Exception, "Sum of ratios"): + splits = [("train", 0.5), ("test", 0.5), ("val", 0.5)] + splitter.DetectionSplit(source, splits) + + with self.subTest("wrong subset name"): + with self.assertRaisesRegex(Exception, "Subset name"): + splits = [("train_", 0.5), ("val", 0.2), ("test", 0.3)] + splitter.DetectionSplit(source, splits) diff --git a/testbed/openvinotoolkit__datumaro/tests/test_vgg_face2_format.py b/testbed/openvinotoolkit__datumaro/tests/test_vgg_face2_format.py new file mode 100644 index 0000000000000000000000000000000000000000..5c3bc02465a55b42ff9b2db4d1a3bf6f11eece40 --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/test_vgg_face2_format.py @@ -0,0 +1,99 @@ +import os.path as osp +from unittest import TestCase + +import numpy as np +from datumaro.components.extractor import Bbox, DatasetItem, Points +from datumaro.components.project import Dataset, Project +from datumaro.plugins.vgg_face2_format import (VggFace2Converter, + VggFace2Importer) +from datumaro.util.test_utils import TestDir, compare_datasets + + +class VggFace2FormatTest(TestCase): + def test_can_save_and_load(self): + source_dataset = Dataset.from_iterable([ + DatasetItem(id='1', subset='train', image=np.ones((8, 8, 3)), + annotations=[ + Bbox(0, 2, 4, 2), + Points([3.2, 3.12, 4.11, 3.2, 2.11, + 2.5, 3.5, 2.11, 3.8, 2.13]), + ] + ), + DatasetItem(id='2', subset='train', image=np.ones((10, 10, 3)), + annotations=[ 
+ Points([4.23, 4.32, 5.34, 4.45, 3.54, + 3.56, 4.52, 3.51, 4.78, 3.34]), + ] + ), + DatasetItem(id='3', subset='val', image=np.ones((8, 8, 3))), + DatasetItem(id='4', subset='val', image=np.ones((10, 10, 3)), + annotations=[ + Bbox(0, 2, 4, 2), + Points([3.2, 3.12, 4.11, 3.2, 2.11, + 2.5, 3.5, 2.11, 3.8, 2.13]), + Bbox(2, 2, 1, 2), + Points([2.787, 2.898, 2.965, 2.79, 2.8, + 2.456, 2.81, 2.32, 2.89, 2.3]), + ] + ), + DatasetItem(id='5', subset='val', image=np.ones((8, 8, 3)), + annotations=[ + Bbox(2, 2, 2, 2), + ] + ), + ], categories=[]) + + with TestDir() as test_dir: + VggFace2Converter.convert(source_dataset, test_dir, save_images=True) + parsed_dataset = VggFace2Importer()(test_dir).make_dataset() + + compare_datasets(self, source_dataset, parsed_dataset) + + def test_can_save_dataset_with_no_subsets(self): + source_dataset = Dataset.from_iterable([ + DatasetItem(id='a/b/1', image=np.ones((8, 8, 3)), + annotations=[ + Bbox(0, 2, 4, 2), + Points([4.23, 4.32, 5.34, 4.45, 3.54, + 3.56, 4.52, 3.51, 4.78, 3.34]), + ] + ), + ], categories=[]) + + with TestDir() as test_dir: + VggFace2Converter.convert(source_dataset, test_dir, save_images=True) + parsed_dataset = VggFace2Importer()(test_dir).make_dataset() + + compare_datasets(self, source_dataset, parsed_dataset) + + +DUMMY_DATASET_DIR = osp.join(osp.dirname(__file__), 'assets', 'vgg_face2_dataset') + +class VggFace2ImporterTest(TestCase): + def test_can_detect(self): + self.assertTrue(VggFace2Importer.detect(DUMMY_DATASET_DIR)) + + def test_can_import(self): + expected_dataset = Dataset.from_iterable([ + DatasetItem(id='n000001/0001_01', subset='train', + image=np.ones((10, 15, 3)), + annotations=[ + Bbox(2, 2, 1, 2), + Points([2.787, 2.898, 2.965, 2.79, 2.8, + 2.456, 2.81, 2.32, 2.89, 2.3]), + ] + ), + DatasetItem(id='n000002/0002_01', subset='train', + image=np.ones((10, 15, 3)), + annotations=[ + Bbox(1, 3, 1, 1), + Points([1.2, 3.8, 1.8, 3.82, 1.51, + 3.634, 1.43, 3.34, 1.65, 3.32]) + ] + ), + ], 
categories=[]) + + dataset = Project.import_from(DUMMY_DATASET_DIR, 'vgg_face2') \ + .make_dataset() + + compare_datasets(self, expected_dataset, dataset) diff --git a/testbed/openvinotoolkit__datumaro/tests/test_voc_format.py b/testbed/openvinotoolkit__datumaro/tests/test_voc_format.py new file mode 100644 index 0000000000000000000000000000000000000000..b33aaa125f518bdc1a20ad7f6df0333a658b7ffc --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/test_voc_format.py @@ -0,0 +1,671 @@ +from collections import OrderedDict +from functools import partial +import numpy as np +import os.path as osp + +from unittest import TestCase + +from datumaro.components.extractor import (Extractor, DatasetItem, + AnnotationType, Label, Bbox, Mask, LabelCategories, +) +import datumaro.plugins.voc_format.format as VOC +from datumaro.plugins.voc_format.converter import ( + VocConverter, + VocClassificationConverter, + VocDetectionConverter, + VocLayoutConverter, + VocActionConverter, + VocSegmentationConverter, +) +from datumaro.plugins.voc_format.importer import VocImporter +from datumaro.components.project import Project +from datumaro.util.image import Image +from datumaro.util.test_utils import (TestDir, compare_datasets, + test_save_and_load) + + +class VocFormatTest(TestCase): + def test_colormap_generator(self): + reference = np.array([ + [ 0, 0, 0], + [128, 0, 0], + [ 0, 128, 0], + [128, 128, 0], + [ 0, 0, 128], + [128, 0, 128], + [ 0, 128, 128], + [128, 128, 128], + [ 64, 0, 0], + [192, 0, 0], + [ 64, 128, 0], + [192, 128, 0], + [ 64, 0, 128], + [192, 0, 128], + [ 64, 128, 128], + [192, 128, 128], + [ 0, 64, 0], + [128, 64, 0], + [ 0, 192, 0], + [128, 192, 0], + [ 0, 64, 128], + [224, 224, 192], # ignored + ]) + + self.assertTrue(np.array_equal(reference, list(VOC.VocColormap.values()))) + + def test_can_write_and_parse_labelmap(self): + src_label_map = VOC.make_voc_label_map() + src_label_map['qq'] = [None, ['part1', 'part2'], ['act1', 'act2']] + src_label_map['ww'] = 
[(10, 20, 30), [], ['act3']] + + with TestDir() as test_dir: + file_path = osp.join(test_dir, 'test.txt') + + VOC.write_label_map(file_path, src_label_map) + dst_label_map = VOC.parse_label_map(file_path) + + self.assertEqual(src_label_map, dst_label_map) + +class TestExtractorBase(Extractor): + def _label(self, voc_label): + return self.categories()[AnnotationType.label].find(voc_label)[0] + + def categories(self): + return VOC.make_voc_categories() + + +DUMMY_DATASET_DIR = osp.join(osp.dirname(__file__), 'assets', 'voc_dataset') + +class VocImportTest(TestCase): + def test_can_import(self): + class DstExtractor(TestExtractorBase): + def __iter__(self): + return iter([ + DatasetItem(id='2007_000001', subset='train', + image=Image(path='2007_000001.jpg', size=(10, 20)), + annotations=[ + Label(self._label(l.name)) + for l in VOC.VocLabel if l.value % 2 == 1 + ] + [ + Bbox(1, 2, 2, 2, label=self._label('cat'), + attributes={ + 'pose': VOC.VocPose(1).name, + 'truncated': True, + 'difficult': False, + 'occluded': False, + }, + id=1, group=1, + ), + Bbox(4, 5, 2, 2, label=self._label('person'), + attributes={ + 'truncated': False, + 'difficult': False, + 'occluded': False, + **{ + a.name: a.value % 2 == 1 + for a in VOC.VocAction + } + }, + id=2, group=2, + ), + Bbox(5.5, 6, 2, 2, label=self._label( + VOC.VocBodyPart(1).name), + group=2 + ), + Mask(image=np.ones([5, 10]), + label=self._label(VOC.VocLabel(2).name), + group=1, + ), + ] + ), + DatasetItem(id='2007_000002', subset='test', + image=np.ones((10, 20, 3))), + ]) + + dataset = Project.import_from(DUMMY_DATASET_DIR, 'voc').make_dataset() + + compare_datasets(self, DstExtractor(), dataset) + + def test_can_detect_voc(self): + self.assertTrue(VocImporter.detect(DUMMY_DATASET_DIR)) + +class VocConverterTest(TestCase): + def _test_save_and_load(self, source_dataset, converter, test_dir, + target_dataset=None, importer_args=None): + return test_save_and_load(self, source_dataset, converter, test_dir, + importer='voc', 
+ target_dataset=target_dataset, importer_args=importer_args) + + def test_can_save_voc_cls(self): + class TestExtractor(TestExtractorBase): + def __iter__(self): + return iter([ + DatasetItem(id='a/0', subset='a', annotations=[ + Label(1), + Label(2), + Label(3), + ]), + + DatasetItem(id=1, subset='b', annotations=[ + Label(4), + ]), + ]) + + with TestDir() as test_dir: + self._test_save_and_load(TestExtractor(), + partial(VocClassificationConverter.convert, label_map='voc'), + test_dir) + + def test_can_save_voc_det(self): + class TestExtractor(TestExtractorBase): + def __iter__(self): + return iter([ + DatasetItem(id='a/1', subset='a', annotations=[ + Bbox(2, 3, 4, 5, label=2, + attributes={ 'occluded': True } + ), + Bbox(2, 3, 4, 5, label=3, + attributes={ 'truncated': True }, + ), + ]), + + DatasetItem(id=2, subset='b', annotations=[ + Bbox(5, 4, 6, 5, label=3, + attributes={ 'difficult': True }, + ), + ]), + ]) + + class DstExtractor(TestExtractorBase): + def __iter__(self): + return iter([ + DatasetItem(id='a/1', subset='a', annotations=[ + Bbox(2, 3, 4, 5, label=2, id=1, group=1, + attributes={ + 'truncated': False, + 'difficult': False, + 'occluded': True, + } + ), + Bbox(2, 3, 4, 5, label=3, id=2, group=2, + attributes={ + 'truncated': True, + 'difficult': False, + 'occluded': False, + }, + ), + ]), + + DatasetItem(id=2, subset='b', annotations=[ + Bbox(5, 4, 6, 5, label=3, id=1, group=1, + attributes={ + 'truncated': False, + 'difficult': True, + 'occluded': False, + }, + ), + ]), + ]) + + with TestDir() as test_dir: + self._test_save_and_load(TestExtractor(), + partial(VocDetectionConverter.convert, label_map='voc'), + test_dir, target_dataset=DstExtractor()) + + def test_can_save_voc_segm(self): + class TestExtractor(TestExtractorBase): + def __iter__(self): + return iter([ + DatasetItem(id='a/b/1', subset='a', annotations=[ + # overlapping masks, the first should be truncated + # the second and third are different instances + Mask(image=np.array([[0, 
0, 0, 1, 0]]), label=3, + z_order=3), + Mask(image=np.array([[0, 1, 1, 1, 0]]), label=4, + z_order=1), + Mask(image=np.array([[1, 1, 0, 0, 0]]), label=3, + z_order=2), + ]), + ]) + + class DstExtractor(TestExtractorBase): + def __iter__(self): + return iter([ + DatasetItem(id='a/b/1', subset='a', annotations=[ + Mask(image=np.array([[0, 0, 1, 0, 0]]), label=4, + group=1), + Mask(image=np.array([[1, 1, 0, 0, 0]]), label=3, + group=2), + Mask(image=np.array([[0, 0, 0, 1, 0]]), label=3, + group=3), + ]), + ]) + + with TestDir() as test_dir: + self._test_save_and_load(TestExtractor(), + partial(VocSegmentationConverter.convert, label_map='voc'), + test_dir, target_dataset=DstExtractor()) + + def test_can_save_voc_segm_unpainted(self): + class TestExtractor(TestExtractorBase): + def __iter__(self): + return iter([ + DatasetItem(id=1, subset='a', annotations=[ + # overlapping masks, the first should be truncated + # the second and third are different instances + Mask(image=np.array([[0, 0, 0, 1, 0]]), label=3, + z_order=3), + Mask(image=np.array([[0, 1, 1, 1, 0]]), label=4, + z_order=1), + Mask(image=np.array([[1, 1, 0, 0, 0]]), label=3, + z_order=2), + ]), + ]) + + class DstExtractor(TestExtractorBase): + def __iter__(self): + return iter([ + DatasetItem(id=1, subset='a', annotations=[ + Mask(image=np.array([[0, 0, 1, 0, 0]]), label=4, + group=1), + Mask(image=np.array([[1, 1, 0, 0, 0]]), label=3, + group=2), + Mask(image=np.array([[0, 0, 0, 1, 0]]), label=3, + group=3), + ]), + ]) + + with TestDir() as test_dir: + self._test_save_and_load(TestExtractor(), + partial(VocSegmentationConverter.convert, + label_map='voc', apply_colormap=False), + test_dir, target_dataset=DstExtractor()) + + def test_can_save_voc_segm_with_many_instances(self): + def bit(x, y, shape): + mask = np.zeros(shape) + mask[y, x] = 1 + return mask + + class TestExtractor(TestExtractorBase): + def __iter__(self): + return iter([ + DatasetItem(id=1, subset='a', annotations=[ + Mask(image=bit(x, y, 
shape=[10, 10]), + label=self._label(VOC.VocLabel(3).name), + z_order=10 * y + x + 1 + ) + for y in range(10) for x in range(10) + ]), + ]) + + class DstExtractor(TestExtractorBase): + def __iter__(self): + return iter([ + DatasetItem(id=1, subset='a', annotations=[ + Mask(image=bit(x, y, shape=[10, 10]), + label=self._label(VOC.VocLabel(3).name), + group=10 * y + x + 1 + ) + for y in range(10) for x in range(10) + ]), + ]) + + with TestDir() as test_dir: + self._test_save_and_load(TestExtractor(), + partial(VocSegmentationConverter.convert, label_map='voc'), + test_dir, target_dataset=DstExtractor()) + + def test_can_save_voc_layout(self): + class TestExtractor(TestExtractorBase): + def __iter__(self): + return iter([ + DatasetItem(id='a/b/1', subset='a', annotations=[ + Bbox(2, 3, 4, 5, label=2, id=1, group=1, + attributes={ + 'pose': VOC.VocPose(1).name, + 'truncated': True, + 'difficult': False, + 'occluded': False, + } + ), + Bbox(2, 3, 1, 1, label=self._label( + VOC.VocBodyPart(1).name), group=1), + Bbox(5, 4, 3, 2, label=self._label( + VOC.VocBodyPart(2).name), group=1), + ]), + ]) + + with TestDir() as test_dir: + self._test_save_and_load(TestExtractor(), + partial(VocLayoutConverter.convert, label_map='voc'), test_dir) + + def test_can_save_voc_action(self): + class TestExtractor(TestExtractorBase): + def __iter__(self): + return iter([ + DatasetItem(id='a/b/1', subset='a', annotations=[ + Bbox(2, 3, 4, 5, label=2, + attributes={ + 'truncated': True, + VOC.VocAction(1).name: True, + VOC.VocAction(2).name: True, + } + ), + Bbox(5, 4, 3, 2, label=self._label('person'), + attributes={ + 'truncated': True, + VOC.VocAction(1).name: True, + VOC.VocAction(2).name: True, + } + ), + ]), + ]) + + class DstExtractor(TestExtractorBase): + def __iter__(self): + return iter([ + DatasetItem(id='a/b/1', subset='a', annotations=[ + Bbox(2, 3, 4, 5, label=2, + id=1, group=1, attributes={ + 'truncated': True, + 'difficult': False, + 'occluded': False, + # no attributes here 
in the label categories + } + ), + Bbox(5, 4, 3, 2, label=self._label('person'), + id=2, group=2, attributes={ + 'truncated': True, + 'difficult': False, + 'occluded': False, + VOC.VocAction(1).name: True, + VOC.VocAction(2).name: True, + **{ + a.name: False for a in VOC.VocAction + if a.value not in {1, 2} + } + } + ), + ]), + ]) + + with TestDir() as test_dir: + self._test_save_and_load(TestExtractor(), + partial(VocActionConverter.convert, + label_map='voc', allow_attributes=False), test_dir, + target_dataset=DstExtractor()) + + def test_can_save_dataset_with_no_subsets(self): + class TestExtractor(TestExtractorBase): + def __iter__(self): + return iter([ + DatasetItem(id=1, annotations=[ + Label(2), + Label(3), + ]), + + DatasetItem(id=2, annotations=[ + Label(3), + ]), + ]) + + with TestDir() as test_dir: + self._test_save_and_load(TestExtractor(), + partial(VocConverter.convert, label_map='voc'), test_dir) + + def test_can_save_dataset_with_images(self): + class TestExtractor(TestExtractorBase): + def __iter__(self): + return iter([ + DatasetItem(id=1, subset='a', image=np.ones([4, 5, 3])), + DatasetItem(id=2, subset='a', image=np.ones([5, 4, 3])), + + DatasetItem(id=3, subset='b', image=np.ones([2, 6, 3])), + ]) + + with TestDir() as test_dir: + self._test_save_and_load(TestExtractor(), + partial(VocConverter.convert, label_map='voc', save_images=True), + test_dir) + + def test_dataset_with_voc_labelmap(self): + class SrcExtractor(TestExtractorBase): + def __iter__(self): + yield DatasetItem(id=1, annotations=[ + Bbox(2, 3, 4, 5, label=self._label('cat'), id=1), + Bbox(1, 2, 3, 4, label=self._label('non_voc_label'), id=2), + ]) + + def categories(self): + label_cat = LabelCategories() + label_cat.add(VOC.VocLabel.cat.name) + label_cat.add('non_voc_label') + return { + AnnotationType.label: label_cat, + } + + class DstExtractor(TestExtractorBase): + def __iter__(self): + yield DatasetItem(id=1, annotations=[ + # drop non voc label + Bbox(2, 3, 4, 5, 
label=self._label('cat'), id=1, group=1, + attributes={ + 'truncated': False, + 'difficult': False, + 'occluded': False, + } + ), + ]) + + def categories(self): + return VOC.make_voc_categories() + + with TestDir() as test_dir: + self._test_save_and_load(SrcExtractor(), + partial(VocConverter.convert, label_map='voc'), + test_dir, target_dataset=DstExtractor()) + + def test_dataset_with_source_labelmap_undefined(self): + class SrcExtractor(TestExtractorBase): + def __iter__(self): + yield DatasetItem(id=1, annotations=[ + Bbox(2, 3, 4, 5, label=0, id=1), + Bbox(1, 2, 3, 4, label=1, id=2), + ]) + + def categories(self): + label_cat = LabelCategories() + label_cat.add('Label_1') + label_cat.add('label_2') + return { + AnnotationType.label: label_cat, + } + + class DstExtractor(TestExtractorBase): + def __iter__(self): + yield DatasetItem(id=1, annotations=[ + Bbox(2, 3, 4, 5, label=self._label('Label_1'), + id=1, group=1, attributes={ + 'truncated': False, + 'difficult': False, + 'occluded': False, + } + ), + Bbox(1, 2, 3, 4, label=self._label('label_2'), + id=2, group=2, attributes={ + 'truncated': False, + 'difficult': False, + 'occluded': False, + } + ), + ]) + + def categories(self): + label_map = OrderedDict() + label_map['background'] = [None, [], []] + label_map['Label_1'] = [None, [], []] + label_map['label_2'] = [None, [], []] + return VOC.make_voc_categories(label_map) + + with TestDir() as test_dir: + self._test_save_and_load(SrcExtractor(), + partial(VocConverter.convert, label_map='source'), + test_dir, target_dataset=DstExtractor()) + + def test_dataset_with_source_labelmap_defined(self): + class SrcExtractor(TestExtractorBase): + def __iter__(self): + yield DatasetItem(id=1, annotations=[ + Bbox(2, 3, 4, 5, label=0, id=1), + Bbox(1, 2, 3, 4, label=2, id=2), + ]) + + def categories(self): + label_map = OrderedDict() + label_map['label_1'] = [(1, 2, 3), [], []] + label_map['background'] = [(0, 0, 0), [], []] # can be not 0 + label_map['label_2'] = [(3, 
2, 1), [], []] + return VOC.make_voc_categories(label_map) + + class DstExtractor(TestExtractorBase): + def __iter__(self): + yield DatasetItem(id=1, annotations=[ + Bbox(2, 3, 4, 5, label=self._label('label_1'), + id=1, group=1, attributes={ + 'truncated': False, + 'difficult': False, + 'occluded': False, + } + ), + Bbox(1, 2, 3, 4, label=self._label('label_2'), + id=2, group=2, attributes={ + 'truncated': False, + 'difficult': False, + 'occluded': False, + } + ), + ]) + + def categories(self): + label_map = OrderedDict() + label_map['background'] = [(0, 0, 0), [], []] + label_map['label_1'] = [(1, 2, 3), [], []] + label_map['label_2'] = [(3, 2, 1), [], []] + return VOC.make_voc_categories(label_map) + + with TestDir() as test_dir: + self._test_save_and_load(SrcExtractor(), + partial(VocConverter.convert, label_map='source'), + test_dir, target_dataset=DstExtractor()) + + def test_dataset_with_fixed_labelmap(self): + class SrcExtractor(TestExtractorBase): + def __iter__(self): + yield DatasetItem(id=1, annotations=[ + Bbox(2, 3, 4, 5, label=self._label('foreign_label'), id=1), + Bbox(1, 2, 3, 4, label=self._label('label'), id=2, group=2, + attributes={'act1': True}), + Bbox(2, 3, 4, 5, label=self._label('label_part1'), group=2), + Bbox(2, 3, 4, 6, label=self._label('label_part2'), group=2), + ]) + + def categories(self): + label_cat = LabelCategories() + label_cat.add('foreign_label') + label_cat.add('label', attributes=['act1', 'act2']) + label_cat.add('label_part1') + label_cat.add('label_part2') + return { + AnnotationType.label: label_cat, + } + + label_map = OrderedDict([ + ('label', [None, ['label_part1', 'label_part2'], ['act1', 'act2']]) + ]) + + dst_label_map = OrderedDict([ + ('background', [None, [], []]), + ('label', [None, ['label_part1', 'label_part2'], ['act1', 'act2']]) + ]) + + class DstExtractor(TestExtractorBase): + def __iter__(self): + yield DatasetItem(id=1, annotations=[ + Bbox(1, 2, 3, 4, label=self._label('label'), id=1, group=1, + 
attributes={ + 'act1': True, + 'act2': False, + 'truncated': False, + 'difficult': False, + 'occluded': False, + } + ), + Bbox(2, 3, 4, 5, label=self._label('label_part1'), group=1), + Bbox(2, 3, 4, 6, label=self._label('label_part2'), group=1), + ]) + + def categories(self): + return VOC.make_voc_categories(dst_label_map) + + with TestDir() as test_dir: + self._test_save_and_load(SrcExtractor(), + partial(VocConverter.convert, label_map=label_map), + test_dir, target_dataset=DstExtractor()) + + def test_can_save_dataset_with_image_info(self): + class TestExtractor(TestExtractorBase): + def __iter__(self): + return iter([ + DatasetItem(id=1, image=Image(path='1.jpg', size=(10, 15))), + ]) + + with TestDir() as test_dir: + self._test_save_and_load(TestExtractor(), + partial(VocConverter.convert, label_map='voc'), test_dir) + + def test_relative_paths(self): + class TestExtractor(TestExtractorBase): + def __iter__(self): + return iter([ + DatasetItem(id='1', image=np.ones((4, 2, 3))), + DatasetItem(id='subdir1/1', image=np.ones((2, 6, 3))), + DatasetItem(id='subdir2/1', image=np.ones((5, 4, 3))), + ]) + + with TestDir() as test_dir: + self._test_save_and_load(TestExtractor(), + partial(VocConverter.convert, + label_map='voc', save_images=True), + test_dir) + + def test_can_save_attributes(self): + class TestExtractor(TestExtractorBase): + def __iter__(self): + return iter([ + DatasetItem(id='a', annotations=[ + Bbox(2, 3, 4, 5, label=2, + attributes={ 'occluded': True, 'x': 1, 'y': '2' } + ), + ]), + ]) + + class DstExtractor(TestExtractorBase): + def __iter__(self): + return iter([ + DatasetItem(id='a', annotations=[ + Bbox(2, 3, 4, 5, label=2, id=1, group=1, + attributes={ + 'truncated': False, + 'difficult': False, + 'occluded': True, + 'x': '1', 'y': '2', # can only read strings + } + ), + ]), + ]) + + with TestDir() as test_dir: + self._test_save_and_load(TestExtractor(), + partial(VocConverter.convert, label_map='voc'), test_dir, + 
target_dataset=DstExtractor()) diff --git a/testbed/openvinotoolkit__datumaro/tests/test_widerface_format.py b/testbed/openvinotoolkit__datumaro/tests/test_widerface_format.py new file mode 100644 index 0000000000000000000000000000000000000000..b3af94dda1cb64acd3d0ee03a3e9d24a81fbf74c --- /dev/null +++ b/testbed/openvinotoolkit__datumaro/tests/test_widerface_format.py @@ -0,0 +1,153 @@ +import os.path as osp +from unittest import TestCase + +import numpy as np +from datumaro.components.extractor import Bbox, DatasetItem +from datumaro.components.project import Dataset, Project +from datumaro.plugins.widerface_format import WiderFaceConverter, WiderFaceImporter +from datumaro.util.test_utils import TestDir, compare_datasets + + +class WiderFaceFormatTest(TestCase): + def test_can_save_and_load(self): + source_dataset = Dataset.from_iterable([ + DatasetItem(id='1', subset='train', image=np.ones((8, 8, 3)), + annotations=[ + Bbox(0, 2, 4, 2), + Bbox(0, 1, 2, 3, attributes = { + 'blur': 2, 'expression': 0, 'illumination': 0, + 'occluded': 0, 'pose': 2, 'invalid': 0}), + ] + ), + DatasetItem(id='2', subset='train', image=np.ones((10, 10, 3)), + annotations=[ + Bbox(0, 2, 4, 2, attributes = { + 'blur': 2, 'expression': 0, 'illumination': 1, + 'occluded': 0, 'pose': 1, 'invalid': 0}), + Bbox(3, 3, 2, 3, attributes = { + 'blur': 0, 'expression': 1, 'illumination': 0, + 'occluded': 0, 'pose': 2, 'invalid': 0}), + Bbox(2, 1, 2, 3, attributes = { + 'blur': 2, 'expression': 0, 'illumination': 0, + 'occluded': 0, 'pose': 0, 'invalid': 1}), + ] + ), + + DatasetItem(id='3', subset='val', image=np.ones((8, 8, 3)), + annotations=[ + Bbox(0, 1, 5, 2, attributes = { + 'blur': 2, 'expression': 1, 'illumination': 0, + 'occluded': 0, 'pose': 1, 'invalid': 0}), + Bbox(0, 2, 3, 2), + Bbox(0, 2, 4, 2), + Bbox(0, 7, 3, 2, attributes = { + 'blur': 2, 'expression': 1, 'illumination': 0, + 'occluded': 0, 'pose': 1, 'invalid': 0}), + ] + ), + + DatasetItem(id='4', subset='val', 
image=np.ones((8, 8, 3))), + ]) + + with TestDir() as test_dir: + WiderFaceConverter.convert(source_dataset, test_dir, save_images=True) + parsed_dataset = WiderFaceImporter()(test_dir).make_dataset() + + compare_datasets(self, source_dataset, parsed_dataset) + + def test_can_save_dataset_with_no_subsets(self): + source_dataset = Dataset.from_iterable([ + DatasetItem(id='a/b/1', image=np.ones((8, 8, 3)), + annotations=[ + Bbox(0, 2, 4, 2), + Bbox(0, 1, 2, 3, attributes = { + 'blur': 2, 'expression': 0, 'illumination': 0, + 'occluded': 0, 'pose': 2, 'invalid': 0}), + ] + ), + ]) + + with TestDir() as test_dir: + WiderFaceConverter.convert(source_dataset, test_dir, save_images=True) + parsed_dataset = WiderFaceImporter()(test_dir).make_dataset() + + compare_datasets(self, source_dataset, parsed_dataset) + + def test_can_save_dataset_with_non_widerface_attributes(self): + source_dataset = Dataset.from_iterable([ + DatasetItem(id='a/b/1', image=np.ones((8, 8, 3)), + annotations=[ + Bbox(0, 2, 4, 2), + Bbox(0, 1, 2, 3, attributes = { + 'non-widerface attribute': 0, + 'blur': 1, 'invalid': 1}), + Bbox(1, 1, 2, 2, attributes = { + 'non-widerface attribute': 0}), + ] + ), + ]) + + target_dataset = Dataset.from_iterable([ + DatasetItem(id='a/b/1', image=np.ones((8, 8, 3)), + annotations=[ + Bbox(0, 2, 4, 2), + Bbox(0, 1, 2, 3, attributes = { + 'blur': 1, 'invalid': 1}), + Bbox(1, 1, 2, 2), + ] + ), + ]) + + with TestDir() as test_dir: + WiderFaceConverter.convert(source_dataset, test_dir, save_images=True) + parsed_dataset = WiderFaceImporter()(test_dir).make_dataset() + + compare_datasets(self, target_dataset, parsed_dataset) + +DUMMY_DATASET_DIR = osp.join(osp.dirname(__file__), 'assets', 'widerface_dataset') + +class WiderFaceImporterTest(TestCase): + def test_can_detect(self): + self.assertTrue(WiderFaceImporter.detect(DUMMY_DATASET_DIR)) + + def test_can_import(self): + expected_dataset = Dataset.from_iterable([ + DatasetItem(id='0--Parade/0_Parade_image_01', 
subset='train', + image=np.ones((10, 15, 3)), + annotations=[ + Bbox(1, 2, 2, 2, attributes = { + 'blur': 0, 'expression': 0, 'illumination': 0, + 'occluded': 0, 'pose': 0, 'invalid': 0}), + ] + ), + DatasetItem(id='1--Handshaking/1_Handshaking_image_02', subset='train', + image=np.ones((10, 15, 3)), + annotations=[ + Bbox(1, 1, 2, 2, attributes = { + 'blur': 0, 'expression': 0, 'illumination': 1, + 'occluded': 0, 'pose': 0, 'invalid': 0}), + Bbox(5, 1, 2, 2, attributes = { + 'blur': 0, 'expression': 0, 'illumination': 1, + 'occluded': 0, 'pose': 0, 'invalid': 0}), + ] + ), + DatasetItem(id='0--Parade/0_Parade_image_03', subset='val', + image=np.ones((10, 15, 3)), + annotations=[ + Bbox(0, 0, 1, 1, attributes = { + 'blur': 2, 'expression': 0, 'illumination': 0, + 'occluded': 0, 'pose': 2, 'invalid': 0}), + Bbox(3, 2, 1, 2, attributes = { + 'blur': 0, 'expression': 0, 'illumination': 0, + 'occluded': 1, 'pose': 0, 'invalid': 0}), + Bbox(5, 6, 1, 1, attributes = { + 'blur': 2, 'expression': 0, 'illumination': 0, + 'occluded': 0, 'pose': 2, 'invalid': 0}), + ] + ), + ]) + + dataset = Project.import_from(DUMMY_DATASET_DIR, 'wider_face') \ + .make_dataset() + + compare_datasets(self, expected_dataset, dataset) diff --git a/testbed/pallets__flask/.github/ISSUE_TEMPLATE/bug-report.md b/testbed/pallets__flask/.github/ISSUE_TEMPLATE/bug-report.md new file mode 100644 index 0000000000000000000000000000000000000000..c2a15eeee8ddaa277247311d578119f3a2771c5b --- /dev/null +++ b/testbed/pallets__flask/.github/ISSUE_TEMPLATE/bug-report.md @@ -0,0 +1,27 @@ +--- +name: Bug report +about: Report a bug in Flask (not other projects which depend on Flask) +--- + + + + + + + +Environment: + +- Python version: +- Flask version: diff --git a/testbed/pallets__flask/.github/ISSUE_TEMPLATE/config.yml b/testbed/pallets__flask/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..abe3915622b4a599c8a422aff698654098cdb93c --- /dev/null +++ 
b/testbed/pallets__flask/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,11 @@ +blank_issues_enabled: false +contact_links: + - name: Security issue + url: security@palletsprojects.com + about: Do not report security issues publicly. Email our security contact. + - name: Questions + url: https://stackoverflow.com/questions/tagged/flask?tab=Frequent + about: Search for and ask questions about your code on Stack Overflow. + - name: Questions and discussions + url: https://discord.gg/pallets + about: Discuss questions about your code on our Discord chat. diff --git a/testbed/pallets__flask/.github/ISSUE_TEMPLATE/feature-request.md b/testbed/pallets__flask/.github/ISSUE_TEMPLATE/feature-request.md new file mode 100644 index 0000000000000000000000000000000000000000..52c2aed416c35b7bdeef1793a6a9fb2c4482682e --- /dev/null +++ b/testbed/pallets__flask/.github/ISSUE_TEMPLATE/feature-request.md @@ -0,0 +1,15 @@ +--- +name: Feature request +about: Suggest a new feature for Flask +--- + + + + diff --git a/testbed/pallets__flask/.github/SECURITY.md b/testbed/pallets__flask/.github/SECURITY.md new file mode 100644 index 0000000000000000000000000000000000000000..fcfac71bfcb33c0a53b3a30673f0c38fbad8c3cb --- /dev/null +++ b/testbed/pallets__flask/.github/SECURITY.md @@ -0,0 +1,19 @@ +# Security Policy + +If you believe you have identified a security issue with a Pallets +project, **do not open a public issue**. To responsibly report a +security issue, please email security@palletsprojects.com. A security +team member will contact you acknowledging the report and how to +continue. + +Be sure to include as much detail as necessary in your report. As with +reporting normal issues, a minimal reproducible example will help the +maintainers address the issue faster. If you are able, you may also +include a fix for the issue generated with `git format-patch`. 
+ +The current and previous release will receive security patches, with +older versions evaluated based on usage information and severity. + +After fixing an issue, we will make a security release along with an +announcement on our blog. We may obtain a CVE id as well. You may +include a name and link if you would like to be credited for the report. diff --git a/testbed/pallets__flask/.github/dependabot.yml b/testbed/pallets__flask/.github/dependabot.yml new file mode 100644 index 0000000000000000000000000000000000000000..90f94bc32b917f4902e37ebf40553590c3b3a521 --- /dev/null +++ b/testbed/pallets__flask/.github/dependabot.yml @@ -0,0 +1,9 @@ +version: 2 +updates: +- package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "monthly" + day: "monday" + time: "16:00" + timezone: "UTC" diff --git a/testbed/pallets__flask/.github/pull_request_template.md b/testbed/pallets__flask/.github/pull_request_template.md new file mode 100644 index 0000000000000000000000000000000000000000..29fd35f8557158826f728f846f4c631a18469400 --- /dev/null +++ b/testbed/pallets__flask/.github/pull_request_template.md @@ -0,0 +1,30 @@ + + + + +- fixes # + + + +Checklist: + +- [ ] Add tests that demonstrate the correct behavior of the change. Tests should fail without the change. +- [ ] Add or update relevant docs, in the docs folder and in code. +- [ ] Add an entry in `CHANGES.rst` summarizing the change and linking to the issue. +- [ ] Add `.. versionchanged::` entries in any relevant code docs. +- [ ] Run `pre-commit` hooks and fix any issues. +- [ ] Run `pytest` and `tox`, no tests failed. 
diff --git a/testbed/pallets__flask/.github/workflows/lock.yaml b/testbed/pallets__flask/.github/workflows/lock.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b4f763387092883b68eeb5d77f74a396db3b9453 --- /dev/null +++ b/testbed/pallets__flask/.github/workflows/lock.yaml @@ -0,0 +1,15 @@ +name: 'Lock threads' + +on: + schedule: + - cron: '0 0 * * *' + +jobs: + lock: + runs-on: ubuntu-latest + steps: + - uses: dessant/lock-threads@v3 + with: + github-token: ${{ github.token }} + issue-inactive-days: 14 + pr-inactive-days: 14 diff --git a/testbed/pallets__flask/.github/workflows/tests.yaml b/testbed/pallets__flask/.github/workflows/tests.yaml new file mode 100644 index 0000000000000000000000000000000000000000..733676b48d787fab80e410ca6cf05cf15d486c69 --- /dev/null +++ b/testbed/pallets__flask/.github/workflows/tests.yaml @@ -0,0 +1,51 @@ +name: Tests +on: + push: + branches: + - main + - '*.x' + paths-ignore: + - 'docs/**' + - '*.md' + - '*.rst' + pull_request: + branches: + - main + - '*.x' + paths-ignore: + - 'docs/**' + - '*.md' + - '*.rst' +jobs: + tests: + name: ${{ matrix.name }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + include: + - {name: Linux, python: '3.10', os: ubuntu-latest, tox: py310} + - {name: Windows, python: '3.10', os: windows-latest, tox: py310} + - {name: Mac, python: '3.10', os: macos-latest, tox: py310} + - {name: '3.11-dev', python: '3.11-dev', os: ubuntu-latest, tox: py311} + - {name: '3.9', python: '3.9', os: ubuntu-latest, tox: py39} + - {name: '3.8', python: '3.8', os: ubuntu-latest, tox: py38} + - {name: '3.7', python: '3.7', os: ubuntu-latest, tox: py37} + - {name: 'PyPy', python: 'pypy-3.7', os: ubuntu-latest, tox: pypy37} + - {name: 'Pallets Minimum Versions', python: '3.10', os: ubuntu-latest, tox: py-min} + - {name: 'Pallets Development Versions', python: '3.7', os: ubuntu-latest, tox: py-dev} + - {name: Typing, python: '3.10', os: ubuntu-latest, tox: typing} + steps: + - uses: 
actions/checkout@v2 + - uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python }} + cache: 'pip' + cache-dependency-path: 'requirements/*.txt' + - name: update pip + run: | + pip install -U wheel + pip install -U setuptools + python -m pip install -U pip + - run: pip install tox + - run: tox -e ${{ matrix.tox }} diff --git a/testbed/pallets__flask/docs/Makefile b/testbed/pallets__flask/docs/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..d4bb2cbb9eddb1bb1b4f366623044af8e4830919 --- /dev/null +++ b/testbed/pallets__flask/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/testbed/pallets__flask/docs/advanced_foreword.rst b/testbed/pallets__flask/docs/advanced_foreword.rst new file mode 100644 index 0000000000000000000000000000000000000000..9c36158a8df6604d5a014f9b06bc7483a8133901 --- /dev/null +++ b/testbed/pallets__flask/docs/advanced_foreword.rst @@ -0,0 +1,46 @@ +Foreword for Experienced Programmers +==================================== + +Thread-Locals in Flask +---------------------- + +One of the design decisions in Flask was that simple tasks should be simple; +they should not take a lot of code and yet they should not limit you. Because +of that, Flask has a few design choices that some people might find +surprising or unorthodox. 
For example, Flask uses thread-local objects +internally so that you don’t have to pass objects around from +function to function within a request in order to stay threadsafe. +This approach is convenient, but requires a valid +request context for dependency injection or when attempting to reuse code which +uses a value pegged to the request. The Flask project is honest about +thread-locals, does not hide them, and calls out in the code and documentation +where they are used. + +Develop for the Web with Caution +-------------------------------- + +Always keep security in mind when building web applications. + +If you write a web application, you are probably allowing users to register +and leave their data on your server. The users are entrusting you with data. +And even if you are the only user that might leave data in your application, +you still want that data to be stored securely. + +Unfortunately, there are many ways the security of a web application can be +compromised. Flask protects you against one of the most common security +problems of modern web applications: cross-site scripting (XSS). Unless you +deliberately mark insecure HTML as secure, Flask and the underlying Jinja2 +template engine have you covered. But there are many more ways to cause +security problems. + +The documentation will warn you about aspects of web development that require +attention to security. Some of these security concerns are far more complex +than one might think, and we all sometimes underestimate the likelihood that a +vulnerability will be exploited - until a clever attacker figures out a way to +exploit our applications. And don't think that your application is not +important enough to attract an attacker. Depending on the kind of attack, +chances are that automated bots are probing for ways to fill your database with +spam, links to malicious software, and the like. 
+ +Flask is no different from any other framework in that you the developer must +build with caution, watching for exploits when building to your requirements. diff --git a/testbed/pallets__flask/docs/api.rst b/testbed/pallets__flask/docs/api.rst new file mode 100644 index 0000000000000000000000000000000000000000..b3cffde26f44af3ee6e858b2502f6db178fbe9e0 --- /dev/null +++ b/testbed/pallets__flask/docs/api.rst @@ -0,0 +1,773 @@ +API +=== + +.. module:: flask + +This part of the documentation covers all the interfaces of Flask. For +parts where Flask depends on external libraries, we document the most +important right here and provide links to the canonical documentation. + + +Application Object +------------------ + +.. autoclass:: Flask + :members: + :inherited-members: + + +Blueprint Objects +----------------- + +.. autoclass:: Blueprint + :members: + :inherited-members: + +Incoming Request Data +--------------------- + +.. autoclass:: Request + :members: + :inherited-members: + :exclude-members: json_module + +.. attribute:: request + + To access incoming request data, you can use the global `request` + object. Flask parses incoming request data for you and gives you + access to it through that global object. Internally Flask makes + sure that you always get the correct data for the active thread if you + are in a multithreaded environment. + + This is a proxy. See :ref:`notes-on-proxies` for more information. + + The request object is an instance of a :class:`~flask.Request`. + + +Response Objects +---------------- + +.. autoclass:: flask.Response + :members: + :inherited-members: + :exclude-members: json_module + +Sessions +-------- + +If you have set :attr:`Flask.secret_key` (or configured it from +:data:`SECRET_KEY`) you can use sessions in Flask applications. A session makes +it possible to remember information from one request to another. The way Flask +does this is by using a signed cookie. 
The user can look at the session +contents, but can't modify it unless they know the secret key, so make sure to +set that to something complex and unguessable. + +To access the current session you can use the :class:`session` object: + +.. class:: session + + The session object works pretty much like an ordinary dict, with the + difference that it keeps track of modifications. + + This is a proxy. See :ref:`notes-on-proxies` for more information. + + The following attributes are interesting: + + .. attribute:: new + + ``True`` if the session is new, ``False`` otherwise. + + .. attribute:: modified + + ``True`` if the session object detected a modification. Be advised + that modifications on mutable structures are not picked up + automatically, in that situation you have to explicitly set the + attribute to ``True`` yourself. Here an example:: + + # this change is not picked up because a mutable object (here + # a list) is changed. + session['objects'].append(42) + # so mark it as modified yourself + session.modified = True + + .. attribute:: permanent + + If set to ``True`` the session lives for + :attr:`~flask.Flask.permanent_session_lifetime` seconds. The + default is 31 days. If set to ``False`` (which is the default) the + session will be deleted when the user closes the browser. + + +Session Interface +----------------- + +.. versionadded:: 0.8 + +The session interface provides a simple way to replace the session +implementation that Flask is using. + +.. currentmodule:: flask.sessions + +.. autoclass:: SessionInterface + :members: + +.. autoclass:: SecureCookieSessionInterface + :members: + +.. autoclass:: SecureCookieSession + :members: + +.. autoclass:: NullSession + :members: + +.. autoclass:: SessionMixin + :members: + +.. admonition:: Notice + + The ``PERMANENT_SESSION_LIFETIME`` config key can also be an integer + starting with Flask 0.8. 
Either catch this down yourself or use + the :attr:`~flask.Flask.permanent_session_lifetime` attribute on the + app which converts the result to an integer automatically. + + +Test Client +----------- + +.. currentmodule:: flask.testing + +.. autoclass:: FlaskClient + :members: + + +Test CLI Runner +--------------- + +.. currentmodule:: flask.testing + +.. autoclass:: FlaskCliRunner + :members: + + +Application Globals +------------------- + +.. currentmodule:: flask + +To share data that is valid for one request only from one function to +another, a global variable is not good enough because it would break in +threaded environments. Flask provides you with a special object that +ensures it is only valid for the active request and that will return +different values for each request. In a nutshell: it does the right +thing, like it does for :class:`request` and :class:`session`. + +.. data:: g + + A namespace object that can store data during an + :doc:`application context `. This is an instance of + :attr:`Flask.app_ctx_globals_class`, which defaults to + :class:`ctx._AppCtxGlobals`. + + This is a good place to store resources during a request. For + example, a ``before_request`` function could load a user object from + a session id, then set ``g.user`` to be used in the view function. + + This is a proxy. See :ref:`notes-on-proxies` for more information. + + .. versionchanged:: 0.10 + Bound to the application context instead of the request context. + +.. autoclass:: flask.ctx._AppCtxGlobals + :members: + + +Useful Functions and Classes +---------------------------- + +.. data:: current_app + + A proxy to the application handling the current request. This is + useful to access the application without needing to import it, or if + it can't be imported, such as when using the application factory + pattern or in blueprints and extensions. + + This is only available when an + :doc:`application context ` is pushed. 
This happens + automatically during requests and CLI commands. It can be controlled + manually with :meth:`~flask.Flask.app_context`. + + This is a proxy. See :ref:`notes-on-proxies` for more information. + +.. autofunction:: has_request_context + +.. autofunction:: copy_current_request_context + +.. autofunction:: has_app_context + +.. autofunction:: url_for + +.. autofunction:: abort + +.. autofunction:: redirect + +.. autofunction:: make_response + +.. autofunction:: after_this_request + +.. autofunction:: send_file + +.. autofunction:: send_from_directory + +.. autofunction:: escape + +.. autoclass:: Markup + :members: escape, unescape, striptags + +Message Flashing +---------------- + +.. autofunction:: flash + +.. autofunction:: get_flashed_messages + + +JSON Support +------------ + +.. module:: flask.json + +Flask uses the built-in :mod:`json` module for handling JSON. It will +use the current blueprint's or application's JSON encoder and decoder +for easier customization. By default it handles some extra data types: + +- :class:`datetime.datetime` and :class:`datetime.date` are serialized + to :rfc:`822` strings. This is the same as the HTTP date format. +- :class:`uuid.UUID` is serialized to a string. +- :class:`dataclasses.dataclass` is passed to + :func:`dataclasses.asdict`. +- :class:`~markupsafe.Markup` (or any object with a ``__html__`` + method) will call the ``__html__`` method to get a string. + +Jinja's ``|tojson`` filter is configured to use Flask's :func:`dumps` +function. The filter marks the output with ``|safe`` automatically. Use +the filter to render data inside `` + +.. autofunction:: jsonify + +.. autofunction:: dumps + +.. autofunction:: dump + +.. autofunction:: loads + +.. autofunction:: load + +.. autoclass:: JSONEncoder + :members: + +.. autoclass:: JSONDecoder + :members: + +.. automodule:: flask.json.tag + + +Template Rendering +------------------ + +.. currentmodule:: flask + +.. autofunction:: render_template + +.. 
autofunction:: render_template_string + +.. autofunction:: get_template_attribute + +Configuration +------------- + +.. autoclass:: Config + :members: + + +Stream Helpers +-------------- + +.. autofunction:: stream_with_context + +Useful Internals +---------------- + +.. autoclass:: flask.ctx.RequestContext + :members: + +.. data:: _request_ctx_stack + + The internal :class:`~werkzeug.local.LocalStack` that holds + :class:`~flask.ctx.RequestContext` instances. Typically, the + :data:`request` and :data:`session` proxies should be accessed + instead of the stack. It may be useful to access the stack in + extension code. + + The following attributes are always present on each layer of the + stack: + + `app` + the active Flask application. + + `url_adapter` + the URL adapter that was used to match the request. + + `request` + the current request object. + + `session` + the active session object. + + `g` + an object with all the attributes of the :data:`flask.g` object. + + `flashes` + an internal cache for the flashed messages. + + Example usage:: + + from flask import _request_ctx_stack + + def get_session(): + ctx = _request_ctx_stack.top + if ctx is not None: + return ctx.session + +.. autoclass:: flask.ctx.AppContext + :members: + +.. data:: _app_ctx_stack + + The internal :class:`~werkzeug.local.LocalStack` that holds + :class:`~flask.ctx.AppContext` instances. Typically, the + :data:`current_app` and :data:`g` proxies should be accessed instead + of the stack. Extensions can access the contexts on the stack as a + namespace to store data. + + .. versionadded:: 0.9 + +.. autoclass:: flask.blueprints.BlueprintSetupState + :members: + +.. _core-signals-list: + +Signals +------- + +.. versionadded:: 0.6 + +.. data:: signals.signals_available + + ``True`` if the signaling system is available. This is the case + when `blinker`_ is installed. + +The following signals exist in Flask: + +.. 
data:: template_rendered + + This signal is sent when a template was successfully rendered. The + signal is invoked with the instance of the template as `template` + and the context as dictionary (named `context`). + + Example subscriber:: + + def log_template_renders(sender, template, context, **extra): + sender.logger.debug('Rendering template "%s" with context %s', + template.name or 'string template', + context) + + from flask import template_rendered + template_rendered.connect(log_template_renders, app) + +.. data:: flask.before_render_template + :noindex: + + This signal is sent before template rendering process. The + signal is invoked with the instance of the template as `template` + and the context as dictionary (named `context`). + + Example subscriber:: + + def log_template_renders(sender, template, context, **extra): + sender.logger.debug('Rendering template "%s" with context %s', + template.name or 'string template', + context) + + from flask import before_render_template + before_render_template.connect(log_template_renders, app) + +.. data:: request_started + + This signal is sent when the request context is set up, before + any request processing happens. Because the request context is already + bound, the subscriber can access the request with the standard global + proxies such as :class:`~flask.request`. + + Example subscriber:: + + def log_request(sender, **extra): + sender.logger.debug('Request context is set up') + + from flask import request_started + request_started.connect(log_request, app) + +.. data:: request_finished + + This signal is sent right before the response is sent to the client. + It is passed the response to be sent named `response`. + + Example subscriber:: + + def log_response(sender, response, **extra): + sender.logger.debug('Request context is about to close down. ' + 'Response: %s', response) + + from flask import request_finished + request_finished.connect(log_response, app) + +.. 
data:: got_request_exception + + This signal is sent when an unhandled exception happens during + request processing, including when debugging. The exception is + passed to the subscriber as ``exception``. + + This signal is not sent for + :exc:`~werkzeug.exceptions.HTTPException`, or other exceptions that + have error handlers registered, unless the exception was raised from + an error handler. + + This example shows how to do some extra logging if a theoretical + ``SecurityException`` was raised: + + .. code-block:: python + + from flask import got_request_exception + + def log_security_exception(sender, exception, **extra): + if not isinstance(exception, SecurityException): + return + + security_logger.exception( + f"SecurityException at {request.url!r}", + exc_info=exception, + ) + + got_request_exception.connect(log_security_exception, app) + +.. data:: request_tearing_down + + This signal is sent when the request is tearing down. This is always + called, even if an exception is caused. Currently functions listening + to this signal are called after the regular teardown handlers, but this + is not something you can rely on. + + Example subscriber:: + + def close_db_connection(sender, **extra): + session.close() + + from flask import request_tearing_down + request_tearing_down.connect(close_db_connection, app) + + As of Flask 0.9, this will also be passed an `exc` keyword argument + that has a reference to the exception that caused the teardown if + there was one. + +.. data:: appcontext_tearing_down + + This signal is sent when the app context is tearing down. This is always + called, even if an exception is caused. Currently functions listening + to this signal are called after the regular teardown handlers, but this + is not something you can rely on. 
+ + Example subscriber:: + + def close_db_connection(sender, **extra): + session.close() + + from flask import appcontext_tearing_down + appcontext_tearing_down.connect(close_db_connection, app) + + This will also be passed an `exc` keyword argument that has a reference + to the exception that caused the teardown if there was one. + +.. data:: appcontext_pushed + + This signal is sent when an application context is pushed. The sender + is the application. This is usually useful for unittests in order to + temporarily hook in information. For instance it can be used to + set a resource early onto the `g` object. + + Example usage:: + + from contextlib import contextmanager + from flask import appcontext_pushed + + @contextmanager + def user_set(app, user): + def handler(sender, **kwargs): + g.user = user + with appcontext_pushed.connected_to(handler, app): + yield + + And in the testcode:: + + def test_user_me(self): + with user_set(app, 'john'): + c = app.test_client() + resp = c.get('/users/me') + assert resp.data == 'username=john' + + .. versionadded:: 0.10 + +.. data:: appcontext_popped + + This signal is sent when an application context is popped. The sender + is the application. This usually falls in line with the + :data:`appcontext_tearing_down` signal. + + .. versionadded:: 0.10 + + +.. data:: message_flashed + + This signal is sent when the application is flashing a message. The + messages is sent as `message` keyword argument and the category as + `category`. + + Example subscriber:: + + recorded = [] + def record(sender, message, category, **extra): + recorded.append((message, category)) + + from flask import message_flashed + message_flashed.connect(record, app) + + .. versionadded:: 0.10 + +.. class:: signals.Namespace + + An alias for :class:`blinker.base.Namespace` if blinker is available, + otherwise a dummy class that creates fake signals. 
This class is
+ available for Flask extensions that want to provide the same fallback
+ system as Flask itself.
+
+ .. method:: signal(name, doc=None)
+
+ Creates a new signal for this namespace if blinker is available,
+ otherwise returns a fake signal that has a send method that will
+ do nothing but will fail with a :exc:`RuntimeError` for all other
+ operations, including connecting.
+
+
+.. _blinker: https://pypi.org/project/blinker/
+
+
+Class-Based Views
+-----------------
+
+.. versionadded:: 0.7
+
+.. currentmodule:: None
+
+.. autoclass:: flask.views.View
+ :members:
+
+.. autoclass:: flask.views.MethodView
+ :members:
+
+.. _url-route-registrations:
+
+URL Route Registrations
+-----------------------
+
+Generally there are three ways to define rules for the routing system:
+
+1. You can use the :meth:`flask.Flask.route` decorator.
+2. You can use the :meth:`flask.Flask.add_url_rule` function.
+3. You can directly access the underlying Werkzeug routing system
+ which is exposed as :attr:`flask.Flask.url_map`.
+
+Variable parts in the route can be specified with angular brackets
+(``/user/<username>``). By default a variable part in the URL accepts any
+string without a slash however a different converter can be specified as
+well by using ``<converter:variable_name>``.
+
+Variable parts are passed to the view function as keyword arguments.
+
+The following converters are available:
+
+=========== ===============================================
+`string` accepts any text without a slash (the default)
+`int` accepts integers
+`float` like `int` but for floating point values
+`path` like the default but also accepts slashes
+`any` matches one of the items provided
+`uuid` accepts UUID strings
+=========== ===============================================
+
+Custom converters can be defined using :attr:`flask.Flask.url_map`. 
+
+Here are some examples::
+
+ @app.route('/')
+ def index():
+ pass
+
+ @app.route('/user/<username>')
+ def show_user(username):
+ pass
+
+ @app.route('/post/<int:post_id>')
+ def show_post(post_id):
+ pass
+
+An important detail to keep in mind is how Flask deals with trailing
+slashes. The idea is to keep each URL unique so the following rules
+apply:
+
+1. If a rule ends with a slash and is requested without a slash by the
+ user, the user is automatically redirected to the same page with a
+ trailing slash attached.
+2. If a rule does not end with a trailing slash and the user requests the
+ page with a trailing slash, a 404 not found is raised.
+
+This is consistent with how web servers deal with static files. This
+also makes it possible to use relative link targets safely.
+
+You can also define multiple rules for the same function. They have to be
+unique however. Defaults can also be specified. Here for example is a
+definition for a URL that accepts an optional page::
+
+ @app.route('/users/', defaults={'page': 1})
+ @app.route('/users/page/<int:page>')
+ def show_users(page):
+ pass
+
+This specifies that ``/users/`` will be the URL for page one and
+``/users/page/N`` will be the URL for page ``N``.
+
+If a URL contains a default value, it will be redirected to its simpler
+form with a 301 redirect. In the above example, ``/users/page/1`` will
+be redirected to ``/users/``. If your route handles ``GET`` and ``POST``
+requests, make sure the default route only handles ``GET``, as redirects
+can't preserve form data. ::
+
+ @app.route('/region/', defaults={'id': 1})
+ @app.route('/region/<int:id>', methods=['GET', 'POST'])
+ def region(id):
+ pass
+
+Here are the parameters that :meth:`~flask.Flask.route` and
+:meth:`~flask.Flask.add_url_rule` accept. The only difference is that
+with the route parameter the view function is defined with the decorator
+instead of the `view_func` parameter. 
+ +=============== ========================================================== +`rule` the URL rule as string +`endpoint` the endpoint for the registered URL rule. Flask itself + assumes that the name of the view function is the name + of the endpoint if not explicitly stated. +`view_func` the function to call when serving a request to the + provided endpoint. If this is not provided one can + specify the function later by storing it in the + :attr:`~flask.Flask.view_functions` dictionary with the + endpoint as key. +`defaults` A dictionary with defaults for this rule. See the + example above for how defaults work. +`subdomain` specifies the rule for the subdomain in case subdomain + matching is in use. If not specified the default + subdomain is assumed. +`**options` the options to be forwarded to the underlying + :class:`~werkzeug.routing.Rule` object. A change to + Werkzeug is handling of method options. methods is a list + of methods this rule should be limited to (``GET``, ``POST`` + etc.). By default a rule just listens for ``GET`` (and + implicitly ``HEAD``). Starting with Flask 0.6, ``OPTIONS`` is + implicitly added and handled by the standard request + handling. They have to be specified as keyword arguments. +=============== ========================================================== + + +View Function Options +--------------------- + +For internal usage the view functions can have some attributes attached to +customize behavior the view function would normally not have control over. +The following attributes can be provided optionally to either override +some defaults to :meth:`~flask.Flask.add_url_rule` or general behavior: + +- `__name__`: The name of a function is by default used as endpoint. If + endpoint is provided explicitly this value is used. Additionally this + will be prefixed with the name of the blueprint by default which + cannot be customized from the function itself. 
+ +- `methods`: If methods are not provided when the URL rule is added, + Flask will look on the view function object itself if a `methods` + attribute exists. If it does, it will pull the information for the + methods from there. + +- `provide_automatic_options`: if this attribute is set Flask will + either force enable or disable the automatic implementation of the + HTTP ``OPTIONS`` response. This can be useful when working with + decorators that want to customize the ``OPTIONS`` response on a per-view + basis. + +- `required_methods`: if this attribute is set, Flask will always add + these methods when registering a URL rule even if the methods were + explicitly overridden in the ``route()`` call. + +Full example:: + + def index(): + if request.method == 'OPTIONS': + # custom options handling here + ... + return 'Hello World!' + index.provide_automatic_options = False + index.methods = ['GET', 'OPTIONS'] + + app.add_url_rule('/', index) + +.. versionadded:: 0.8 + The `provide_automatic_options` functionality was added. + +Command Line Interface +---------------------- + +.. currentmodule:: flask.cli + +.. autoclass:: FlaskGroup + :members: + +.. autoclass:: AppGroup + :members: + +.. autoclass:: ScriptInfo + :members: + +.. autofunction:: load_dotenv + +.. autofunction:: with_appcontext + +.. autofunction:: pass_script_info + + Marks a function so that an instance of :class:`ScriptInfo` is passed + as first argument to the click callback. + +.. autodata:: run_command + +.. autodata:: shell_command diff --git a/testbed/pallets__flask/docs/appcontext.rst b/testbed/pallets__flask/docs/appcontext.rst new file mode 100644 index 0000000000000000000000000000000000000000..b214f254e6ddac714a581207c8251c4be8dcd531 --- /dev/null +++ b/testbed/pallets__flask/docs/appcontext.rst @@ -0,0 +1,157 @@ +.. 
currentmodule:: flask + +The Application Context +======================= + +The application context keeps track of the application-level data during +a request, CLI command, or other activity. Rather than passing the +application around to each function, the :data:`current_app` and +:data:`g` proxies are accessed instead. + +This is similar to :doc:`/reqcontext`, which keeps track of +request-level data during a request. A corresponding application context +is pushed when a request context is pushed. + +Purpose of the Context +---------------------- + +The :class:`Flask` application object has attributes, such as +:attr:`~Flask.config`, that are useful to access within views and +:doc:`CLI commands `. However, importing the ``app`` instance +within the modules in your project is prone to circular import issues. +When using the :doc:`app factory pattern ` or +writing reusable :doc:`blueprints ` or +:doc:`extensions ` there won't be an ``app`` instance to +import at all. + +Flask solves this issue with the *application context*. Rather than +referring to an ``app`` directly, you use the :data:`current_app` +proxy, which points to the application handling the current activity. + +Flask automatically *pushes* an application context when handling a +request. View functions, error handlers, and other functions that run +during a request will have access to :data:`current_app`. + +Flask will also automatically push an app context when running CLI +commands registered with :attr:`Flask.cli` using ``@app.cli.command()``. + + +Lifetime of the Context +----------------------- + +The application context is created and destroyed as necessary. When a +Flask application begins handling a request, it pushes an application +context and a :doc:`request context `. When the request +ends it pops the request context then the application context. +Typically, an application context will have the same lifetime as a +request. 
+ +See :doc:`/reqcontext` for more information about how the contexts work +and the full life cycle of a request. + + +Manually Push a Context +----------------------- + +If you try to access :data:`current_app`, or anything that uses it, +outside an application context, you'll get this error message: + +.. code-block:: pytb + + RuntimeError: Working outside of application context. + + This typically means that you attempted to use functionality that + needed to interface with the current application object in some way. + To solve this, set up an application context with app.app_context(). + +If you see that error while configuring your application, such as when +initializing an extension, you can push a context manually since you +have direct access to the ``app``. Use :meth:`~Flask.app_context` in a +``with`` block, and everything that runs in the block will have access +to :data:`current_app`. :: + + def create_app(): + app = Flask(__name__) + + with app.app_context(): + init_db() + + return app + +If you see that error somewhere else in your code not related to +configuring the application, it most likely indicates that you should +move that code into a view function or CLI command. + + +Storing Data +------------ + +The application context is a good place to store common data during a +request or CLI command. Flask provides the :data:`g object ` for this +purpose. It is a simple namespace object that has the same lifetime as +an application context. + +.. note:: + The ``g`` name stands for "global", but that is referring to the + data being global *within a context*. The data on ``g`` is lost + after the context ends, and it is not an appropriate place to store + data between requests. Use the :data:`session` or a database to + store data across requests. + +A common use for :data:`g` is to manage resources during a request. + +1. ``get_X()`` creates resource ``X`` if it does not exist, caching it + as ``g.X``. +2. 
``teardown_X()`` closes or otherwise deallocates the resource if it + exists. It is registered as a :meth:`~Flask.teardown_appcontext` + handler. + +For example, you can manage a database connection using this pattern:: + + from flask import g + + def get_db(): + if 'db' not in g: + g.db = connect_to_database() + + return g.db + + @app.teardown_appcontext + def teardown_db(exception): + db = g.pop('db', None) + + if db is not None: + db.close() + +During a request, every call to ``get_db()`` will return the same +connection, and it will be closed automatically at the end of the +request. + +You can use :class:`~werkzeug.local.LocalProxy` to make a new context +local from ``get_db()``:: + + from werkzeug.local import LocalProxy + db = LocalProxy(get_db) + +Accessing ``db`` will call ``get_db`` internally, in the same way that +:data:`current_app` works. + +---- + +If you're writing an extension, :data:`g` should be reserved for user +code. You may store internal data on the context itself, but be sure to +use a sufficiently unique name. The current context is accessed with +:data:`_app_ctx_stack.top <_app_ctx_stack>`. For more information see +:doc:`/extensiondev`. + + +Events and Signals +------------------ + +The application will call functions registered with +:meth:`~Flask.teardown_appcontext` when the application context is +popped. + +If :data:`~signals.signals_available` is true, the following signals are +sent: :data:`appcontext_pushed`, :data:`appcontext_tearing_down`, and +:data:`appcontext_popped`. diff --git a/testbed/pallets__flask/docs/async-await.rst b/testbed/pallets__flask/docs/async-await.rst new file mode 100644 index 0000000000000000000000000000000000000000..4c70f96142c1df6a01b7c579c1d83457ca6ea7a6 --- /dev/null +++ b/testbed/pallets__flask/docs/async-await.rst @@ -0,0 +1,131 @@ +.. _async_await: + +Using ``async`` and ``await`` +============================= + +.. 
versionadded:: 2.0 + +Routes, error handlers, before request, after request, and teardown +functions can all be coroutine functions if Flask is installed with the +``async`` extra (``pip install flask[async]``). This allows views to be +defined with ``async def`` and use ``await``. + +.. code-block:: python + + @app.route("/get-data") + async def get_data(): + data = await async_db_query(...) + return jsonify(data) + +Pluggable class-based views also support handlers that are implemented as +coroutines. This applies to the :meth:`~flask.views.View.dispatch_request` +method in views that inherit from the :class:`flask.views.View` class, as +well as all the HTTP method handlers in views that inherit from the +:class:`flask.views.MethodView` class. + +.. admonition:: Using ``async`` on Windows on Python 3.8 + + Python 3.8 has a bug related to asyncio on Windows. If you encounter + something like ``ValueError: set_wakeup_fd only works in main thread``, + please upgrade to Python 3.9. + +.. admonition:: Using ``async`` with greenlet + + When using gevent or eventlet to serve an application or patch the + runtime, greenlet>=1.0 is required. When using PyPy, PyPy>=7.3.7 is + required. + + +Performance +----------- + +Async functions require an event loop to run. Flask, as a WSGI +application, uses one worker to handle one request/response cycle. +When a request comes in to an async view, Flask will start an event loop +in a thread, run the view function there, then return the result. + +Each request still ties up one worker, even for async views. The upside +is that you can run async code within a view, for example to make +multiple concurrent database queries, HTTP requests to an external API, +etc. However, the number of requests your application can handle at one +time will remain the same. + +**Async is not inherently faster than sync code.** Async is beneficial +when performing concurrent IO-bound tasks, but will probably not improve +CPU-bound tasks. 
Traditional Flask views will still be appropriate for +most use cases, but Flask's async support enables writing and using +code that wasn't possible natively before. + + +Background tasks +---------------- + +Async functions will run in an event loop until they complete, at +which stage the event loop will stop. This means any additional +spawned tasks that haven't completed when the async function completes +will be cancelled. Therefore you cannot spawn background tasks, for +example via ``asyncio.create_task``. + +If you wish to use background tasks it is best to use a task queue to +trigger background work, rather than spawn tasks in a view +function. With that in mind you can spawn asyncio tasks by serving +Flask with an ASGI server and utilising the asgiref WsgiToAsgi adapter +as described in :ref:`asgi`. This works as the adapter creates an +event loop that runs continually. + + +When to use Quart instead +------------------------- + +Flask's async support is less performant than async-first frameworks due +to the way it is implemented. If you have a mainly async codebase it +would make sense to consider `Quart`_. Quart is a reimplementation of +Flask based on the `ASGI`_ standard instead of WSGI. This allows it to +handle many concurrent requests, long running requests, and websockets +without requiring multiple worker processes or threads. + +It has also already been possible to run Flask with Gevent or Eventlet +to get many of the benefits of async request handling. These libraries +patch low-level Python functions to accomplish this, whereas ``async``/ +``await`` and ASGI use standard, modern Python capabilities. Deciding +whether you should use Flask, Quart, or something else is ultimately up +to understanding the specific needs of your project. + +.. _Quart: https://gitlab.com/pgjones/quart +.. _ASGI: https://asgi.readthedocs.io/en/latest/ + + +Extensions +---------- + +Flask extensions predating Flask's async support do not expect async views. 
+If they provide decorators to add functionality to views, those will probably +not work with async views because they will not await the function or be +awaitable. Other functions they provide will not be awaitable either and +will probably be blocking if called within an async view. + +Extension authors can support async functions by utilising the +:meth:`flask.Flask.ensure_sync` method. For example, if the extension +provides a view function decorator add ``ensure_sync`` before calling +the decorated function, + +.. code-block:: python + + def extension(func): + @wraps(func) + def wrapper(*args, **kwargs): + ... # Extension logic + return current_app.ensure_sync(func)(*args, **kwargs) + + return wrapper + +Check the changelog of the extension you want to use to see if they've +implemented async support, or make a feature request or PR to them. + + +Other event loops +----------------- + +At the moment Flask only supports :mod:`asyncio`. It's possible to +override :meth:`flask.Flask.ensure_sync` to change how async functions +are wrapped to use a different library. diff --git a/testbed/pallets__flask/docs/becomingbig.rst b/testbed/pallets__flask/docs/becomingbig.rst new file mode 100644 index 0000000000000000000000000000000000000000..5e7a88e0308ac84b3c6e30fc2fb7edc6aef5cfcf --- /dev/null +++ b/testbed/pallets__flask/docs/becomingbig.rst @@ -0,0 +1,100 @@ +Becoming Big +============ + +Here are your options when growing your codebase or scaling your application. + +Read the Source. +---------------- + +Flask started in part to demonstrate how to build your own framework on top of +existing well-used tools Werkzeug (WSGI) and Jinja (templating), and as it +developed, it became useful to a wide audience. As you grow your codebase, +don't just use Flask -- understand it. Read the source. Flask's code is +written to be read; its documentation is published so you can use its internal +APIs. 
Flask sticks to documented APIs in upstream libraries, and documents its +internal utilities so that you can find the hook points needed for your +project. + +Hook. Extend. +------------- + +The :doc:`/api` docs are full of available overrides, hook points, and +:doc:`/signals`. You can provide custom classes for things like the +request and response objects. Dig deeper on the APIs you use, and look +for the customizations which are available out of the box in a Flask +release. Look for ways in which your project can be refactored into a +collection of utilities and Flask extensions. Explore the many +:doc:`/extensions` in the community, and look for patterns to build your +own extensions if you do not find the tools you need. + +Subclass. +--------- + +The :class:`~flask.Flask` class has many methods designed for subclassing. You +can quickly add or customize behavior by subclassing :class:`~flask.Flask` (see +the linked method docs) and using that subclass wherever you instantiate an +application class. This works well with :doc:`/patterns/appfactories`. +See :doc:`/patterns/subclassing` for an example. + +Wrap with middleware. +--------------------- + +The :doc:`/patterns/appdispatch` pattern shows in detail how to apply middleware. You +can introduce WSGI middleware to wrap your Flask instances and introduce fixes +and changes at the layer between your Flask application and your HTTP +server. Werkzeug includes several `middlewares +`_. + +Fork. +----- + +If none of the above options work, fork Flask. The majority of code of Flask +is within Werkzeug and Jinja2. These libraries do the majority of the work. +Flask is just the paste that glues those together. For every project there is +the point where the underlying framework gets in the way (due to assumptions +the original developers had). 
This is natural because if this would not be the +case, the framework would be a very complex system to begin with which causes a +steep learning curve and a lot of user frustration. + +This is not unique to Flask. Many people use patched and modified +versions of their framework to counter shortcomings. This idea is also +reflected in the license of Flask. You don't have to contribute any +changes back if you decide to modify the framework. + +The downside of forking is of course that Flask extensions will most +likely break because the new framework has a different import name. +Furthermore integrating upstream changes can be a complex process, +depending on the number of changes. Because of that, forking should be +the very last resort. + +Scale like a pro. +----------------- + +For many web applications the complexity of the code is less an issue than +the scaling for the number of users or data entries expected. Flask by +itself is only limited in terms of scaling by your application code, the +data store you want to use and the Python implementation and webserver you +are running on. + +Scaling well means for example that if you double the amount of servers +you get about twice the performance. Scaling bad means that if you add a +new server the application won't perform any better or would not even +support a second server. + +There is only one limiting factor regarding scaling in Flask which are +the context local proxies. They depend on context which in Flask is +defined as being either a thread, process or greenlet. If your server +uses some kind of concurrency that is not based on threads or greenlets, +Flask will no longer be able to support these global proxies. However the +majority of servers are using either threads, greenlets or separate +processes to achieve concurrency which are all methods well supported by +the underlying Werkzeug library. + +Discuss with the community. 
+--------------------------- + +The Flask developers keep the framework accessible to users with codebases big +and small. If you find an obstacle in your way, caused by Flask, don't hesitate +to contact the developers on the mailing list or Discord server. The best way for +the Flask and Flask extension developers to improve the tools for larger +applications is getting feedback from users. diff --git a/testbed/pallets__flask/docs/blueprints.rst b/testbed/pallets__flask/docs/blueprints.rst new file mode 100644 index 0000000000000000000000000000000000000000..af368bacf1c2e6b77cd84edff052b176aea892ff --- /dev/null +++ b/testbed/pallets__flask/docs/blueprints.rst @@ -0,0 +1,302 @@ +Modular Applications with Blueprints +==================================== + +.. currentmodule:: flask + +.. versionadded:: 0.7 + +Flask uses a concept of *blueprints* for making application components and +supporting common patterns within an application or across applications. +Blueprints can greatly simplify how large applications work and provide a +central means for Flask extensions to register operations on applications. +A :class:`Blueprint` object works similarly to a :class:`Flask` +application object, but it is not actually an application. Rather it is a +*blueprint* of how to construct or extend an application. + +Why Blueprints? +--------------- + +Blueprints in Flask are intended for these cases: + +* Factor an application into a set of blueprints. This is ideal for + larger applications; a project could instantiate an application object, + initialize several extensions, and register a collection of blueprints. +* Register a blueprint on an application at a URL prefix and/or subdomain. + Parameters in the URL prefix/subdomain become common view arguments + (with defaults) across all view functions in the blueprint. +* Register a blueprint multiple times on an application with different URL + rules. 
+* Provide template filters, static files, templates, and other utilities + through blueprints. A blueprint does not have to implement applications + or view functions. +* Register a blueprint on an application for any of these cases when + initializing a Flask extension. + +A blueprint in Flask is not a pluggable app because it is not actually an +application -- it's a set of operations which can be registered on an +application, even multiple times. Why not have multiple application +objects? You can do that (see :doc:`/patterns/appdispatch`), but your +applications will have separate configs and will be managed at the WSGI +layer. + +Blueprints instead provide separation at the Flask level, share +application config, and can change an application object as necessary with +being registered. The downside is that you cannot unregister a blueprint +once an application was created without having to destroy the whole +application object. + +The Concept of Blueprints +------------------------- + +The basic concept of blueprints is that they record operations to execute +when registered on an application. Flask associates view functions with +blueprints when dispatching requests and generating URLs from one endpoint +to another. + +My First Blueprint +------------------ + +This is what a very basic blueprint looks like. 
In this case we want to
+implement a blueprint that does simple rendering of static templates::
+
+ from flask import Blueprint, render_template, abort
+ from jinja2 import TemplateNotFound
+
+ simple_page = Blueprint('simple_page', __name__,
+ template_folder='templates')
+
+ @simple_page.route('/', defaults={'page': 'index'})
+ @simple_page.route('/<page>')
+ def show(page):
+ try:
+ return render_template(f'pages/{page}.html')
+ except TemplateNotFound:
+ abort(404)
+
+When you bind a function with the help of the ``@simple_page.route``
+decorator, the blueprint will record the intention of registering the
+function ``show`` on the application when it's later registered.
+Additionally it will prefix the endpoint of the function with the
+name of the blueprint which was given to the :class:`Blueprint`
+constructor (in this case also ``simple_page``). The blueprint's name
+does not modify the URL, only the endpoint.
+
+Registering Blueprints
+----------------------
+
+So how do you register that blueprint? Like this::
+
+ from flask import Flask
+ from yourapplication.simple_page import simple_page
+
+ app = Flask(__name__)
+ app.register_blueprint(simple_page)
+
+If you check the rules registered on the application, you will find
+these::
+
+ >>> app.url_map
+ Map([<Rule '/static/<filename>' (HEAD, OPTIONS, GET) -> static>,
+ <Rule '/<page>' (HEAD, OPTIONS, GET) -> simple_page.show>,
+ <Rule '/' (HEAD, OPTIONS, GET) -> simple_page.show>])
+
+The first one is obviously from the application itself for the static
+files. The other two are for the `show` function of the ``simple_page``
+blueprint. As you can see, they are also prefixed with the name of the
+blueprint and separated by a dot (``.``). 
+
+Blueprints however can also be mounted at different locations::
+
+    app.register_blueprint(simple_page, url_prefix='/pages')
+
+And sure enough, these are the generated rules::
+
+    >>> app.url_map
+    Map([<Rule '/static/<filename>' (HEAD, OPTIONS, GET) -> static>,
+     <Rule '/pages/<page>' (HEAD, OPTIONS, GET) -> simple_page.show>,
+     <Rule '/pages/' (HEAD, OPTIONS, GET) -> simple_page.show>])
+
+On top of that you can register blueprints multiple times though not every
+blueprint might respond properly to that. In fact it depends on how the
+blueprint is implemented if it can be mounted more than once.
+
+Nesting Blueprints
+------------------
+
+It is possible to register a blueprint on another blueprint.
+
+.. code-block:: python
+
+    parent = Blueprint('parent', __name__, url_prefix='/parent')
+    child = Blueprint('child', __name__, url_prefix='/child')
+    parent.register_blueprint(child)
+    app.register_blueprint(parent)
+
+The child blueprint will gain the parent's name as a prefix to its
+name, and child URLs will be prefixed with the parent's URL prefix.
+
+.. code-block:: python
+
+    url_for('parent.child.create')
+    /parent/child/create
+
+Blueprint-specific before request functions, etc. registered with the
+parent will trigger for the child. If a child does not have an error
+handler that can handle a given exception, the parent's will be tried.
+
+
+Blueprint Resources
+-------------------
+
+Blueprints can provide resources as well. Sometimes you might want to
+introduce a blueprint only for the resources it provides.
+
+Blueprint Resource Folder
+`````````````````````````
+
+Like for regular applications, blueprints are considered to be contained
+in a folder. While multiple blueprints can originate from the same folder,
+it does not have to be the case and it's usually not recommended.
+
+The folder is inferred from the second argument to :class:`Blueprint` which
+is usually `__name__`. This argument specifies what logical Python
+module or package corresponds to the blueprint.
If it points to an actual +Python package that package (which is a folder on the filesystem) is the +resource folder. If it's a module, the package the module is contained in +will be the resource folder. You can access the +:attr:`Blueprint.root_path` property to see what the resource folder is:: + + >>> simple_page.root_path + '/Users/username/TestProject/yourapplication' + +To quickly open sources from this folder you can use the +:meth:`~Blueprint.open_resource` function:: + + with simple_page.open_resource('static/style.css') as f: + code = f.read() + +Static Files +```````````` + +A blueprint can expose a folder with static files by providing the path +to the folder on the filesystem with the ``static_folder`` argument. +It is either an absolute path or relative to the blueprint's location:: + + admin = Blueprint('admin', __name__, static_folder='static') + +By default the rightmost part of the path is where it is exposed on the +web. This can be changed with the ``static_url_path`` argument. Because the +folder is called ``static`` here it will be available at the +``url_prefix`` of the blueprint + ``/static``. If the blueprint +has the prefix ``/admin``, the static URL will be ``/admin/static``. + +The endpoint is named ``blueprint_name.static``. You can generate URLs +to it with :func:`url_for` like you would with the static folder of the +application:: + + url_for('admin.static', filename='style.css') + +However, if the blueprint does not have a ``url_prefix``, it is not +possible to access the blueprint's static folder. This is because the +URL would be ``/static`` in this case, and the application's ``/static`` +route takes precedence. Unlike template folders, blueprint static +folders are not searched if the file does not exist in the application +static folder. 
+ +Templates +````````` + +If you want the blueprint to expose templates you can do that by providing +the `template_folder` parameter to the :class:`Blueprint` constructor:: + + admin = Blueprint('admin', __name__, template_folder='templates') + +For static files, the path can be absolute or relative to the blueprint +resource folder. + +The template folder is added to the search path of templates but with a lower +priority than the actual application's template folder. That way you can +easily override templates that a blueprint provides in the actual application. +This also means that if you don't want a blueprint template to be accidentally +overridden, make sure that no other blueprint or actual application template +has the same relative path. When multiple blueprints provide the same relative +template path the first blueprint registered takes precedence over the others. + + +So if you have a blueprint in the folder ``yourapplication/admin`` and you +want to render the template ``'admin/index.html'`` and you have provided +``templates`` as a `template_folder` you will have to create a file like +this: :file:`yourapplication/admin/templates/admin/index.html`. The reason +for the extra ``admin`` folder is to avoid getting our template overridden +by a template named ``index.html`` in the actual application template +folder. + +To further reiterate this: if you have a blueprint named ``admin`` and you +want to render a template called :file:`index.html` which is specific to this +blueprint, the best idea is to lay out your templates like this:: + + yourpackage/ + blueprints/ + admin/ + templates/ + admin/ + index.html + __init__.py + +And then when you want to render the template, use :file:`admin/index.html` as +the name to look up the template by. 
If you encounter problems loading +the correct templates enable the ``EXPLAIN_TEMPLATE_LOADING`` config +variable which will instruct Flask to print out the steps it goes through +to locate templates on every ``render_template`` call. + +Building URLs +------------- + +If you want to link from one page to another you can use the +:func:`url_for` function just like you normally would do just that you +prefix the URL endpoint with the name of the blueprint and a dot (``.``):: + + url_for('admin.index') + +Additionally if you are in a view function of a blueprint or a rendered +template and you want to link to another endpoint of the same blueprint, +you can use relative redirects by prefixing the endpoint with a dot only:: + + url_for('.index') + +This will link to ``admin.index`` for instance in case the current request +was dispatched to any other admin blueprint endpoint. + + +Blueprint Error Handlers +------------------------ + +Blueprints support the ``errorhandler`` decorator just like the :class:`Flask` +application object, so it is easy to make Blueprint-specific custom error +pages. + +Here is an example for a "404 Page Not Found" exception:: + + @simple_page.errorhandler(404) + def page_not_found(e): + return render_template('pages/404.html') + +Most errorhandlers will simply work as expected; however, there is a caveat +concerning handlers for 404 and 405 exceptions. These errorhandlers are only +invoked from an appropriate ``raise`` statement or a call to ``abort`` in another +of the blueprint's view functions; they are not invoked by, e.g., an invalid URL +access. This is because the blueprint does not "own" a certain URL space, so +the application instance has no way of knowing which blueprint error handler it +should run if given an invalid URL. 
If you would like to execute different +handling strategies for these errors based on URL prefixes, they may be defined +at the application level using the ``request`` proxy object:: + + @app.errorhandler(404) + @app.errorhandler(405) + def _handle_api_error(ex): + if request.path.startswith('/api/'): + return jsonify(error=str(ex)), ex.code + else: + return ex + +See :doc:`/errorhandling`. diff --git a/testbed/pallets__flask/docs/changes.rst b/testbed/pallets__flask/docs/changes.rst new file mode 100644 index 0000000000000000000000000000000000000000..955deaf27bc821a6367e818f36c016892fed3d33 --- /dev/null +++ b/testbed/pallets__flask/docs/changes.rst @@ -0,0 +1,4 @@ +Changes +======= + +.. include:: ../CHANGES.rst diff --git a/testbed/pallets__flask/docs/cli.rst b/testbed/pallets__flask/docs/cli.rst new file mode 100644 index 0000000000000000000000000000000000000000..4b40307e98af099b238b135cdebec56e9c9006e1 --- /dev/null +++ b/testbed/pallets__flask/docs/cli.rst @@ -0,0 +1,720 @@ +.. currentmodule:: flask + +Command Line Interface +====================== + +Installing Flask installs the ``flask`` script, a `Click`_ command line +interface, in your virtualenv. Executed from the terminal, this script gives +access to built-in, extension, and application-defined commands. The ``--help`` +option will give more information about any commands and options. + +.. _Click: https://click.palletsprojects.com/ + + +Application Discovery +--------------------- + +The ``flask`` command is installed by Flask, not your application; it must be +told where to find your application in order to use it. The ``FLASK_APP`` +environment variable is used to specify how to load the application. + +.. tabs:: + + .. group-tab:: Bash + + .. code-block:: text + + $ export FLASK_APP=hello + $ flask run + + .. group-tab:: Fish + + .. code-block:: text + + $ set -x FLASK_APP hello + $ flask run + + .. group-tab:: CMD + + .. code-block:: text + + > set FLASK_APP=hello + > flask run + + .. 
group-tab:: Powershell + + .. code-block:: text + + > $env:FLASK_APP = "hello" + > flask run + +While ``FLASK_APP`` supports a variety of options for specifying your +application, most use cases should be simple. Here are the typical values: + +(nothing) + The name "app" or "wsgi" is imported (as a ".py" file, or package), + automatically detecting an app (``app`` or ``application``) or + factory (``create_app`` or ``make_app``). + +``FLASK_APP=hello`` + The given name is imported, automatically detecting an app (``app`` + or ``application``) or factory (``create_app`` or ``make_app``). + +---- + +``FLASK_APP`` has three parts: an optional path that sets the current working +directory, a Python file or dotted import path, and an optional variable +name of the instance or factory. If the name is a factory, it can optionally +be followed by arguments in parentheses. The following values demonstrate these +parts: + +``FLASK_APP=src/hello`` + Sets the current working directory to ``src`` then imports ``hello``. + +``FLASK_APP=hello.web`` + Imports the path ``hello.web``. + +``FLASK_APP=hello:app2`` + Uses the ``app2`` Flask instance in ``hello``. + +``FLASK_APP="hello:create_app('dev')"`` + The ``create_app`` factory in ``hello`` is called with the string ``'dev'`` + as the argument. + +If ``FLASK_APP`` is not set, the command will try to import "app" or +"wsgi" (as a ".py" file, or package) and try to detect an application +instance or factory. + +Within the given import, the command looks for an application instance named +``app`` or ``application``, then any application instance. If no instance is +found, the command looks for a factory function named ``create_app`` or +``make_app`` that returns an instance. + +If parentheses follow the factory name, their contents are parsed as +Python literals and passed as arguments and keyword arguments to the +function. This means that strings must still be in quotes. 
+ + +Run the Development Server +-------------------------- + +The :func:`run ` command will start the development server. It +replaces the :meth:`Flask.run` method in most cases. :: + + $ flask run + * Serving Flask app "hello" + * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit) + +.. warning:: Do not use this command to run your application in production. + Only use the development server during development. The development server + is provided for convenience, but is not designed to be particularly secure, + stable, or efficient. See :doc:`/deploying/index` for how to run in production. + +If another program is already using port 5000, you'll see +``OSError: [Errno 98]`` or ``OSError: [WinError 10013]`` when the +server tries to start. See :ref:`address-already-in-use` for how to +handle that. + + +Open a Shell +------------ + +To explore the data in your application, you can start an interactive Python +shell with the :func:`shell ` command. An application +context will be active, and the app instance will be imported. :: + + $ flask shell + Python 3.10.0 (default, Oct 27 2021, 06:59:51) [GCC 11.1.0] on linux + App: example [production] + Instance: /home/david/Projects/pallets/flask/instance + >>> + +Use :meth:`~Flask.shell_context_processor` to add other automatic imports. + + +Environments +------------ + +.. versionadded:: 1.0 + +The environment in which the Flask app runs is set by the +:envvar:`FLASK_ENV` environment variable. If not set it defaults to +``production``. The other recognized environment is ``development``. +Flask and extensions may choose to enable behaviors based on the +environment. + +If the env is set to ``development``, the ``flask`` command will enable +debug mode and ``flask run`` will enable the interactive debugger and +reloader. + +.. tabs:: + + .. group-tab:: Bash + + .. 
code-block:: text + + $ export FLASK_ENV=development + $ flask run + * Serving Flask app "hello" + * Environment: development + * Debug mode: on + * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit) + * Restarting with inotify reloader + * Debugger is active! + * Debugger PIN: 223-456-919 + + .. group-tab:: Fish + + .. code-block:: text + + $ set -x FLASK_ENV development + $ flask run + * Serving Flask app "hello" + * Environment: development + * Debug mode: on + * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit) + * Restarting with inotify reloader + * Debugger is active! + * Debugger PIN: 223-456-919 + + .. group-tab:: CMD + + .. code-block:: text + + > set FLASK_ENV=development + > flask run + * Serving Flask app "hello" + * Environment: development + * Debug mode: on + * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit) + * Restarting with inotify reloader + * Debugger is active! + * Debugger PIN: 223-456-919 + + .. group-tab:: Powershell + + .. code-block:: text + + > $env:FLASK_ENV = "development" + > flask run + * Serving Flask app "hello" + * Environment: development + * Debug mode: on + * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit) + * Restarting with inotify reloader + * Debugger is active! + * Debugger PIN: 223-456-919 + + +Watch Extra Files with the Reloader +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +When using development mode, the reloader will trigger whenever your +Python code or imported modules change. The reloader can watch +additional files with the ``--extra-files`` option, or the +``FLASK_RUN_EXTRA_FILES`` environment variable. Multiple paths are +separated with ``:``, or ``;`` on Windows. + +.. tabs:: + + .. group-tab:: Bash + + .. code-block:: text + + $ flask run --extra-files file1:dirA/file2:dirB/ + # or + $ export FLASK_RUN_EXTRA_FILES=file1:dirA/file2:dirB/ + $ flask run + * Running on http://127.0.0.1:8000/ + * Detected change in '/path/to/file1', reloading + + .. group-tab:: Fish + + .. 
code-block:: text + + $ flask run --extra-files file1:dirA/file2:dirB/ + # or + $ set -x FLASK_RUN_EXTRA_FILES file1 dirA/file2 dirB/ + $ flask run + * Running on http://127.0.0.1:8000/ + * Detected change in '/path/to/file1', reloading + + .. group-tab:: CMD + + .. code-block:: text + + > flask run --extra-files file1:dirA/file2:dirB/ + # or + > set FLASK_RUN_EXTRA_FILES=file1:dirA/file2:dirB/ + > flask run + * Running on http://127.0.0.1:8000/ + * Detected change in '/path/to/file1', reloading + + .. group-tab:: Powershell + + .. code-block:: text + + > flask run --extra-files file1:dirA/file2:dirB/ + # or + > $env:FLASK_RUN_EXTRA_FILES = "file1:dirA/file2:dirB/" + > flask run + * Running on http://127.0.0.1:8000/ + * Detected change in '/path/to/file1', reloading + + +Ignore files with the Reloader +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The reloader can also ignore files using :mod:`fnmatch` patterns with +the ``--exclude-patterns`` option, or the ``FLASK_RUN_EXCLUDE_PATTERNS`` +environment variable. Multiple patterns are separated with ``:``, or +``;`` on Windows. + + +Debug Mode +---------- + +Debug mode will be enabled when :envvar:`FLASK_ENV` is ``development``, +as described above. If you want to control debug mode separately, use +:envvar:`FLASK_DEBUG`. The value ``1`` enables it, ``0`` disables it. + + +.. _dotenv: + +Environment Variables From dotenv +--------------------------------- + +Rather than setting ``FLASK_APP`` each time you open a new terminal, you can +use Flask's dotenv support to set environment variables automatically. + +If `python-dotenv`_ is installed, running the ``flask`` command will set +environment variables defined in the files :file:`.env` and :file:`.flaskenv`. +This can be used to avoid having to set ``FLASK_APP`` manually every time you +open a new terminal, and to set configuration using environment variables +similar to how some deployment services work. 
+ +Variables set on the command line are used over those set in :file:`.env`, +which are used over those set in :file:`.flaskenv`. :file:`.flaskenv` should be +used for public variables, such as ``FLASK_APP``, while :file:`.env` should not +be committed to your repository so that it can set private variables. + +Directories are scanned upwards from the directory you call ``flask`` +from to locate the files. The current working directory will be set to the +location of the file, with the assumption that that is the top level project +directory. + +The files are only loaded by the ``flask`` command or calling +:meth:`~Flask.run`. If you would like to load these files when running in +production, you should call :func:`~cli.load_dotenv` manually. + +.. _python-dotenv: https://github.com/theskumar/python-dotenv#readme + + +Setting Command Options +~~~~~~~~~~~~~~~~~~~~~~~ + +Click is configured to load default values for command options from +environment variables. The variables use the pattern +``FLASK_COMMAND_OPTION``. For example, to set the port for the run +command, instead of ``flask run --port 8000``: + +.. tabs:: + + .. group-tab:: Bash + + .. code-block:: text + + $ export FLASK_RUN_PORT=8000 + $ flask run + * Running on http://127.0.0.1:8000/ + + .. group-tab:: Fish + + .. code-block:: text + + $ set -x FLASK_RUN_PORT 8000 + $ flask run + * Running on http://127.0.0.1:8000/ + + .. group-tab:: CMD + + .. code-block:: text + + > set FLASK_RUN_PORT=8000 + > flask run + * Running on http://127.0.0.1:8000/ + + .. group-tab:: Powershell + + .. code-block:: text + + > $env:FLASK_RUN_PORT = 8000 + > flask run + * Running on http://127.0.0.1:8000/ + +These can be added to the ``.flaskenv`` file just like ``FLASK_APP`` to +control default command options. + + +Disable dotenv +~~~~~~~~~~~~~~ + +The ``flask`` command will show a message if it detects dotenv files but +python-dotenv is not installed. + +.. 
code-block:: bash + + $ flask run + * Tip: There are .env files present. Do "pip install python-dotenv" to use them. + +You can tell Flask not to load dotenv files even when python-dotenv is +installed by setting the ``FLASK_SKIP_DOTENV`` environment variable. +This can be useful if you want to load them manually, or if you're using +a project runner that loads them already. Keep in mind that the +environment variables must be set before the app loads or it won't +configure as expected. + +.. tabs:: + + .. group-tab:: Bash + + .. code-block:: text + + $ export FLASK_SKIP_DOTENV=1 + $ flask run + + .. group-tab:: Fish + + .. code-block:: text + + $ set -x FLASK_SKIP_DOTENV 1 + $ flask run + + .. group-tab:: CMD + + .. code-block:: text + + > set FLASK_SKIP_DOTENV=1 + > flask run + + .. group-tab:: Powershell + + .. code-block:: text + + > $env:FLASK_SKIP_DOTENV = 1 + > flask run + + +Environment Variables From virtualenv +------------------------------------- + +If you do not want to install dotenv support, you can still set environment +variables by adding them to the end of the virtualenv's :file:`activate` +script. Activating the virtualenv will set the variables. + +.. tabs:: + + .. group-tab:: Bash + + Unix Bash, :file:`venv/bin/activate`:: + + $ export FLASK_APP=hello + + .. group-tab:: Fish + + Fish, :file:`venv/bin/activate.fish`:: + + $ set -x FLASK_APP hello + + .. group-tab:: CMD + + Windows CMD, :file:`venv\\Scripts\\activate.bat`:: + + > set FLASK_APP=hello + + .. group-tab:: Powershell + + Windows Powershell, :file:`venv\\Scripts\\activate.ps1`:: + + > $env:FLASK_APP = "hello" + +It is preferred to use dotenv support over this, since :file:`.flaskenv` can be +committed to the repository so that it works automatically wherever the project +is checked out. + + +Custom Commands +--------------- + +The ``flask`` command is implemented using `Click`_. See that project's +documentation for full information about writing commands. 
+ +This example adds the command ``create-user`` that takes the argument +``name``. :: + + import click + from flask import Flask + + app = Flask(__name__) + + @app.cli.command("create-user") + @click.argument("name") + def create_user(name): + ... + +:: + + $ flask create-user admin + +This example adds the same command, but as ``user create``, a command in a +group. This is useful if you want to organize multiple related commands. :: + + import click + from flask import Flask + from flask.cli import AppGroup + + app = Flask(__name__) + user_cli = AppGroup('user') + + @user_cli.command('create') + @click.argument('name') + def create_user(name): + ... + + app.cli.add_command(user_cli) + +:: + + $ flask user create demo + +See :ref:`testing-cli` for an overview of how to test your custom +commands. + + +Registering Commands with Blueprints +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If your application uses blueprints, you can optionally register CLI +commands directly onto them. When your blueprint is registered onto your +application, the associated commands will be available to the ``flask`` +command. By default, those commands will be nested in a group matching +the name of the blueprint. + +.. code-block:: python + + from flask import Blueprint + + bp = Blueprint('students', __name__) + + @bp.cli.command('create') + @click.argument('name') + def create(name): + ... + + app.register_blueprint(bp) + +.. code-block:: text + + $ flask students create alice + +You can alter the group name by specifying the ``cli_group`` parameter +when creating the :class:`Blueprint` object, or later with +:meth:`app.register_blueprint(bp, cli_group='...') `. +The following are equivalent: + +.. code-block:: python + + bp = Blueprint('students', __name__, cli_group='other') + # or + app.register_blueprint(bp, cli_group='other') + +.. 
code-block:: text + + $ flask other create alice + +Specifying ``cli_group=None`` will remove the nesting and merge the +commands directly to the application's level: + +.. code-block:: python + + bp = Blueprint('students', __name__, cli_group=None) + # or + app.register_blueprint(bp, cli_group=None) + +.. code-block:: text + + $ flask create alice + + +Application Context +~~~~~~~~~~~~~~~~~~~ + +Commands added using the Flask app's :attr:`~Flask.cli` +:meth:`~cli.AppGroup.command` decorator will be executed with an application +context pushed, so your command and extensions have access to the app and its +configuration. If you create a command using the Click :func:`~click.command` +decorator instead of the Flask decorator, you can use +:func:`~cli.with_appcontext` to get the same behavior. :: + + import click + from flask.cli import with_appcontext + + @click.command() + @with_appcontext + def do_work(): + ... + + app.cli.add_command(do_work) + +If you're sure a command doesn't need the context, you can disable it:: + + @app.cli.command(with_appcontext=False) + def do_work(): + ... + + +Plugins +------- + +Flask will automatically load commands specified in the ``flask.commands`` +`entry point`_. This is useful for extensions that want to add commands when +they are installed. Entry points are specified in :file:`setup.py` :: + + from setuptools import setup + + setup( + name='flask-my-extension', + ..., + entry_points={ + 'flask.commands': [ + 'my-command=flask_my_extension.commands:cli' + ], + }, + ) + + +.. _entry point: https://packaging.python.org/tutorials/packaging-projects/#entry-points + +Inside :file:`flask_my_extension/commands.py` you can then export a Click +object:: + + import click + + @click.command() + def cli(): + ... + +Once that package is installed in the same virtualenv as your Flask project, +you can run ``flask my-command`` to invoke the command. + + +.. 
_custom-scripts: + +Custom Scripts +-------------- + +When you are using the app factory pattern, it may be more convenient to define +your own Click script. Instead of using ``FLASK_APP`` and letting Flask load +your application, you can create your own Click object and export it as a +`console script`_ entry point. + +Create an instance of :class:`~cli.FlaskGroup` and pass it the factory:: + + import click + from flask import Flask + from flask.cli import FlaskGroup + + def create_app(): + app = Flask('wiki') + # other setup + return app + + @click.group(cls=FlaskGroup, create_app=create_app) + def cli(): + """Management script for the Wiki application.""" + +Define the entry point in :file:`setup.py`:: + + from setuptools import setup + + setup( + name='flask-my-extension', + ..., + entry_points={ + 'console_scripts': [ + 'wiki=wiki:cli' + ], + }, + ) + +Install the application in the virtualenv in editable mode and the custom +script is available. Note that you don't need to set ``FLASK_APP``. :: + + $ pip install -e . + $ wiki run + +.. admonition:: Errors in Custom Scripts + + When using a custom script, if you introduce an error in your + module-level code, the reloader will fail because it can no longer + load the entry point. + + The ``flask`` command, being separate from your code, does not have + this issue and is recommended in most cases. + +.. _console script: https://packaging.python.org/tutorials/packaging-projects/#console-scripts + + +PyCharm Integration +------------------- + +PyCharm Professional provides a special Flask run configuration. For +the Community Edition, we need to configure it to call the ``flask run`` +CLI command with the correct environment variables. These instructions +should be similar for any other IDE you might want to use. + +In PyCharm, with your project open, click on *Run* from the menu bar and +go to *Edit Configurations*. You'll be greeted by a screen similar to +this: + +.. 
image:: _static/pycharm-runconfig.png + :align: center + :class: screenshot + :alt: Screenshot of PyCharms's run configuration settings. + +There's quite a few options to change, but once we've done it for one +command, we can easily copy the entire configuration and make a single +tweak to give us access to other commands, including any custom ones you +may implement yourself. + +Click the + (*Add New Configuration*) button and select *Python*. Give +the configuration a name such as "flask run". For the ``flask run`` +command, check "Single instance only" since you can't run the server +more than once at the same time. + +Select *Module name* from the dropdown (**A**) then input ``flask``. + +The *Parameters* field (**B**) is set to the CLI command to execute +(with any arguments). In this example we use ``run``, which will run +the development server. + +You can skip this next step if you're using :ref:`dotenv`. We need to +add an environment variable (**C**) to identify our application. Click +on the browse button and add an entry with ``FLASK_APP`` on the left and +the Python import or file on the right (``hello`` for example). Add an +entry with ``FLASK_ENV`` and set it to ``development``. + +Next we need to set the working directory (**D**) to be the folder where +our application resides. + +If you have installed your project as a package in your virtualenv, you +may untick the *PYTHONPATH* options (**E**). This will more accurately +match how you deploy the app later. + +Click *Apply* to save the configuration, or *OK* to save and close the +window. Select the configuration in the main PyCharm window and click +the play button next to it to run the server. + +Now that we have a configuration which runs ``flask run`` from within +PyCharm, we can copy that configuration and alter the *Script* argument +to run a different CLI command, e.g. ``flask shell``. 
diff --git a/testbed/pallets__flask/docs/conf.py b/testbed/pallets__flask/docs/conf.py new file mode 100644 index 0000000000000000000000000000000000000000..ae2922d7995825a56fb794aaca6c8fec24a9a8bf --- /dev/null +++ b/testbed/pallets__flask/docs/conf.py @@ -0,0 +1,98 @@ +import packaging.version +from pallets_sphinx_themes import get_version +from pallets_sphinx_themes import ProjectLink + +# Project -------------------------------------------------------------- + +project = "Flask" +copyright = "2010 Pallets" +author = "Pallets" +release, version = get_version("Flask") + +# General -------------------------------------------------------------- + +master_doc = "index" +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.intersphinx", + "sphinxcontrib.log_cabinet", + "pallets_sphinx_themes", + "sphinx_issues", + "sphinx_tabs.tabs", +] +autodoc_typehints = "description" +intersphinx_mapping = { + "python": ("https://docs.python.org/3/", None), + "werkzeug": ("https://werkzeug.palletsprojects.com/", None), + "click": ("https://click.palletsprojects.com/", None), + "jinja": ("https://jinja.palletsprojects.com/", None), + "itsdangerous": ("https://itsdangerous.palletsprojects.com/", None), + "sqlalchemy": ("https://docs.sqlalchemy.org/", None), + "wtforms": ("https://wtforms.readthedocs.io/", None), + "blinker": ("https://pythonhosted.org/blinker/", None), +} +issues_github_path = "pallets/flask" + +# HTML ----------------------------------------------------------------- + +html_theme = "flask" +html_theme_options = {"index_sidebar_logo": False} +html_context = { + "project_links": [ + ProjectLink("Donate", "https://palletsprojects.com/donate"), + ProjectLink("PyPI Releases", "https://pypi.org/project/Flask/"), + ProjectLink("Source Code", "https://github.com/pallets/flask/"), + ProjectLink("Issue Tracker", "https://github.com/pallets/flask/issues/"), + ProjectLink("Website", "https://palletsprojects.com/p/flask/"), + ProjectLink("Twitter", 
"https://twitter.com/PalletsTeam"), + ProjectLink("Chat", "https://discord.gg/pallets"), + ] +} +html_sidebars = { + "index": ["project.html", "localtoc.html", "searchbox.html", "ethicalads.html"], + "**": ["localtoc.html", "relations.html", "searchbox.html", "ethicalads.html"], +} +singlehtml_sidebars = {"index": ["project.html", "localtoc.html", "ethicalads.html"]} +html_static_path = ["_static"] +html_favicon = "_static/flask-icon.png" +html_logo = "_static/flask-icon.png" +html_title = f"Flask Documentation ({version})" +html_show_sourcelink = False + +# LaTeX ---------------------------------------------------------------- + +latex_documents = [(master_doc, f"Flask-{version}.tex", html_title, author, "manual")] + +# Local Extensions ----------------------------------------------------- + + +def github_link(name, rawtext, text, lineno, inliner, options=None, content=None): + app = inliner.document.settings.env.app + release = app.config.release + base_url = "https://github.com/pallets/flask/tree/" + + if text.endswith(">"): + words, text = text[:-1].rsplit("<", 1) + words = words.strip() + else: + words = None + + if packaging.version.parse(release).is_devrelease: + url = f"{base_url}main/{text}" + else: + url = f"{base_url}{release}/{text}" + + if words is None: + words = url + + from docutils.nodes import reference + from docutils.parsers.rst.roles import set_classes + + options = options or {} + set_classes(options) + node = reference(rawtext, words, refuri=url, **options) + return [node], [] + + +def setup(app): + app.add_role("gh", github_link) diff --git a/testbed/pallets__flask/docs/config.rst b/testbed/pallets__flask/docs/config.rst new file mode 100644 index 0000000000000000000000000000000000000000..0b86674d88577ab6d8353cc0e2502a1d5d7efde1 --- /dev/null +++ b/testbed/pallets__flask/docs/config.rst @@ -0,0 +1,792 @@ +Configuration Handling +====================== + +Applications need some kind of configuration. 
There are different settings +you might want to change depending on the application environment like +toggling the debug mode, setting the secret key, and other such +environment-specific things. + +The way Flask is designed usually requires the configuration to be +available when the application starts up. You can hard code the +configuration in the code, which for many small applications is not +actually that bad, but there are better ways. + +Independent of how you load your config, there is a config object +available which holds the loaded configuration values: +The :attr:`~flask.Flask.config` attribute of the :class:`~flask.Flask` +object. This is the place where Flask itself puts certain configuration +values and also where extensions can put their configuration values. But +this is also where you can have your own configuration. + + +Configuration Basics +-------------------- + +The :attr:`~flask.Flask.config` is actually a subclass of a dictionary and +can be modified just like any dictionary:: + + app = Flask(__name__) + app.config['TESTING'] = True + +Certain configuration values are also forwarded to the +:attr:`~flask.Flask` object so you can read and write them from there:: + + app.testing = True + +To update multiple keys at once you can use the :meth:`dict.update` +method:: + + app.config.update( + TESTING=True, + SECRET_KEY='192b9bdd22ab9ed4d12e236c78afcb9a393ec15f71bbf5dc987d54727823bcbf' + ) + + +Environment and Debug Features +------------------------------ + +The :data:`ENV` and :data:`DEBUG` config values are special because they +may behave inconsistently if changed after the app has begun setting up. +In order to set the environment and debug mode reliably, Flask uses +environment variables. + +The environment is used to indicate to Flask, extensions, and other +programs, like Sentry, what context Flask is running in. It is +controlled with the :envvar:`FLASK_ENV` environment variable and +defaults to ``production``. 
+ +Setting :envvar:`FLASK_ENV` to ``development`` will enable debug mode. +``flask run`` will use the interactive debugger and reloader by default +in debug mode. To control this separately from the environment, use the +:envvar:`FLASK_DEBUG` flag. + +.. versionchanged:: 1.0 + Added :envvar:`FLASK_ENV` to control the environment separately + from debug mode. The development environment enables debug mode. + +To switch Flask to the development environment and enable debug mode, +set :envvar:`FLASK_ENV`: + +.. tabs:: + + .. group-tab:: Bash + + .. code-block:: text + + $ export FLASK_ENV=development + $ flask run + + .. group-tab:: Fish + + .. code-block:: text + + $ set -x FLASK_ENV development + $ flask run + + .. group-tab:: CMD + + .. code-block:: text + + > set FLASK_ENV=development + > flask run + + .. group-tab:: Powershell + + .. code-block:: text + + > $env:FLASK_ENV = "development" + > flask run + +Using the environment variables as described above is recommended. While +it is possible to set :data:`ENV` and :data:`DEBUG` in your config or +code, this is strongly discouraged. They can't be read early by the +``flask`` command, and some systems or extensions may have already +configured themselves based on a previous value. + + +Builtin Configuration Values +---------------------------- + +The following configuration values are used internally by Flask: + +.. py:data:: ENV + + What environment the app is running in. Flask and extensions may + enable behaviors based on the environment, such as enabling debug + mode. The :attr:`~flask.Flask.env` attribute maps to this config + key. This is set by the :envvar:`FLASK_ENV` environment variable and + may not behave as expected if set in code. + + **Do not enable development when deploying in production.** + + Default: ``'production'`` + + .. versionadded:: 1.0 + +.. py:data:: DEBUG + + Whether debug mode is enabled. 
When using ``flask run`` to start the + development server, an interactive debugger will be shown for + unhandled exceptions, and the server will be reloaded when code + changes. The :attr:`~flask.Flask.debug` attribute maps to this + config key. This is enabled when :data:`ENV` is ``'development'`` + and is overridden by the ``FLASK_DEBUG`` environment variable. It + may not behave as expected if set in code. + + **Do not enable debug mode when deploying in production.** + + Default: ``True`` if :data:`ENV` is ``'development'``, or ``False`` + otherwise. + +.. py:data:: TESTING + + Enable testing mode. Exceptions are propagated rather than handled by the + app's error handlers. Extensions may also change their behavior to + facilitate easier testing. You should enable this in your own tests. + + Default: ``False`` + +.. py:data:: PROPAGATE_EXCEPTIONS + + Exceptions are re-raised rather than being handled by the app's error + handlers. If not set, this is implicitly true if ``TESTING`` or ``DEBUG`` + is enabled. + + Default: ``None`` + +.. py:data:: PRESERVE_CONTEXT_ON_EXCEPTION + + Don't pop the request context when an exception occurs. If not set, this + is true if ``DEBUG`` is true. This allows debuggers to introspect the + request data on errors, and should normally not need to be set directly. + + Default: ``None`` + +.. py:data:: TRAP_HTTP_EXCEPTIONS + + If there is no handler for an ``HTTPException``-type exception, re-raise it + to be handled by the interactive debugger instead of returning it as a + simple error response. + + Default: ``False`` + +.. py:data:: TRAP_BAD_REQUEST_ERRORS + + Trying to access a key that doesn't exist from request dicts like ``args`` + and ``form`` will return a 400 Bad Request error page. Enable this to treat + the error as an unhandled exception instead so that you get the interactive + debugger. This is a more specific version of ``TRAP_HTTP_EXCEPTIONS``. If + unset, it is enabled in debug mode. 
+ + Default: ``None`` + +.. py:data:: SECRET_KEY + + A secret key that will be used for securely signing the session cookie + and can be used for any other security related needs by extensions or your + application. It should be a long random ``bytes`` or ``str``. For + example, copy the output of this to your config:: + + $ python -c 'import secrets; print(secrets.token_hex())' + '192b9bdd22ab9ed4d12e236c78afcb9a393ec15f71bbf5dc987d54727823bcbf' + + **Do not reveal the secret key when posting questions or committing code.** + + Default: ``None`` + +.. py:data:: SESSION_COOKIE_NAME + + The name of the session cookie. Can be changed in case you already have a + cookie with the same name. + + Default: ``'session'`` + +.. py:data:: SESSION_COOKIE_DOMAIN + + The domain match rule that the session cookie will be valid for. If not + set, the cookie will be valid for all subdomains of :data:`SERVER_NAME`. + If ``False``, the cookie's domain will not be set. + + Default: ``None`` + +.. py:data:: SESSION_COOKIE_PATH + + The path that the session cookie will be valid for. If not set, the cookie + will be valid underneath ``APPLICATION_ROOT`` or ``/`` if that is not set. + + Default: ``None`` + +.. py:data:: SESSION_COOKIE_HTTPONLY + + Browsers will not allow JavaScript access to cookies marked as "HTTP only" + for security. + + Default: ``True`` + +.. py:data:: SESSION_COOKIE_SECURE + + Browsers will only send cookies with requests over HTTPS if the cookie is + marked "secure". The application must be served over HTTPS for this to make + sense. + + Default: ``False`` + +.. py:data:: SESSION_COOKIE_SAMESITE + + Restrict how cookies are sent with requests from external sites. Can + be set to ``'Lax'`` (recommended) or ``'Strict'``. + See :ref:`security-cookie`. + + Default: ``None`` + + .. versionadded:: 1.0 + +.. py:data:: PERMANENT_SESSION_LIFETIME + + If ``session.permanent`` is true, the cookie's expiration will be set this + number of seconds in the future. 
Can either be a + :class:`datetime.timedelta` or an ``int``. + + Flask's default cookie implementation validates that the cryptographic + signature is not older than this value. + + Default: ``timedelta(days=31)`` (``2678400`` seconds) + +.. py:data:: SESSION_REFRESH_EACH_REQUEST + + Control whether the cookie is sent with every response when + ``session.permanent`` is true. Sending the cookie every time (the default) + can more reliably keep the session from expiring, but uses more bandwidth. + Non-permanent sessions are not affected. + + Default: ``True`` + +.. py:data:: USE_X_SENDFILE + + When serving files, set the ``X-Sendfile`` header instead of serving the + data with Flask. Some web servers, such as Apache, recognize this and serve + the data more efficiently. This only makes sense when using such a server. + + Default: ``False`` + +.. py:data:: SEND_FILE_MAX_AGE_DEFAULT + + When serving files, set the cache control max age to this number of + seconds. Can be a :class:`datetime.timedelta` or an ``int``. + Override this value on a per-file basis using + :meth:`~flask.Flask.get_send_file_max_age` on the application or + blueprint. + + If ``None``, ``send_file`` tells the browser to use conditional + requests instead of a timed cache, which is usually + preferable. + + Default: ``None`` + +.. py:data:: SERVER_NAME + + Inform the application what host and port it is bound to. Required + for subdomain route matching support. + + If set, will be used for the session cookie domain if + :data:`SESSION_COOKIE_DOMAIN` is not set. Modern web browsers will + not allow setting cookies for domains without a dot. To use a domain + locally, add any names that should route to the app to your + ``hosts`` file. :: + + 127.0.0.1 localhost.dev + + If set, ``url_for`` can generate external URLs with only an application + context instead of a request context. + + Default: ``None`` + +.. 
py:data:: APPLICATION_ROOT + + Inform the application what path it is mounted under by the application / + web server. This is used for generating URLs outside the context of a + request (inside a request, the dispatcher is responsible for setting + ``SCRIPT_NAME`` instead; see :doc:`/patterns/appdispatch` + for examples of dispatch configuration). + + Will be used for the session cookie path if ``SESSION_COOKIE_PATH`` is not + set. + + Default: ``'/'`` + +.. py:data:: PREFERRED_URL_SCHEME + + Use this scheme for generating external URLs when not in a request context. + + Default: ``'http'`` + +.. py:data:: MAX_CONTENT_LENGTH + + Don't read more than this many bytes from the incoming request data. If not + set and the request does not specify a ``CONTENT_LENGTH``, no data will be + read for security. + + Default: ``None`` + +.. py:data:: JSON_AS_ASCII + + Serialize objects to ASCII-encoded JSON. If this is disabled, the + JSON returned from ``jsonify`` will contain Unicode characters. This + has security implications when rendering the JSON into JavaScript in + templates, and should typically remain enabled. + + Default: ``True`` + +.. py:data:: JSON_SORT_KEYS + + Sort the keys of JSON objects alphabetically. This is useful for caching + because it ensures the data is serialized the same way no matter what + Python's hash seed is. While not recommended, you can disable this for a + possible performance improvement at the cost of caching. + + Default: ``True`` + +.. py:data:: JSONIFY_PRETTYPRINT_REGULAR + + ``jsonify`` responses will be output with newlines, spaces, and indentation + for easier reading by humans. Always enabled in debug mode. + + Default: ``False`` + +.. py:data:: JSONIFY_MIMETYPE + + The mimetype of ``jsonify`` responses. + + Default: ``'application/json'`` + +.. py:data:: TEMPLATES_AUTO_RELOAD + + Reload templates when they are changed. If not set, it will be enabled in + debug mode. + + Default: ``None`` + +.. 
py:data:: EXPLAIN_TEMPLATE_LOADING + + Log debugging information tracing how a template file was loaded. This can + be useful to figure out why a template was not loaded or the wrong file + appears to be loaded. + + Default: ``False`` + +.. py:data:: MAX_COOKIE_SIZE + + Warn if cookie headers are larger than this many bytes. Defaults to + ``4093``. Larger cookies may be silently ignored by browsers. Set to + ``0`` to disable the warning. + +.. versionadded:: 0.4 + ``LOGGER_NAME`` + +.. versionadded:: 0.5 + ``SERVER_NAME`` + +.. versionadded:: 0.6 + ``MAX_CONTENT_LENGTH`` + +.. versionadded:: 0.7 + ``PROPAGATE_EXCEPTIONS``, ``PRESERVE_CONTEXT_ON_EXCEPTION`` + +.. versionadded:: 0.8 + ``TRAP_BAD_REQUEST_ERRORS``, ``TRAP_HTTP_EXCEPTIONS``, + ``APPLICATION_ROOT``, ``SESSION_COOKIE_DOMAIN``, + ``SESSION_COOKIE_PATH``, ``SESSION_COOKIE_HTTPONLY``, + ``SESSION_COOKIE_SECURE`` + +.. versionadded:: 0.9 + ``PREFERRED_URL_SCHEME`` + +.. versionadded:: 0.10 + ``JSON_AS_ASCII``, ``JSON_SORT_KEYS``, ``JSONIFY_PRETTYPRINT_REGULAR`` + +.. versionadded:: 0.11 + ``SESSION_REFRESH_EACH_REQUEST``, ``TEMPLATES_AUTO_RELOAD``, + ``LOGGER_HANDLER_POLICY``, ``EXPLAIN_TEMPLATE_LOADING`` + +.. versionchanged:: 1.0 + ``LOGGER_NAME`` and ``LOGGER_HANDLER_POLICY`` were removed. See + :doc:`/logging` for information about configuration. + + Added :data:`ENV` to reflect the :envvar:`FLASK_ENV` environment + variable. + + Added :data:`SESSION_COOKIE_SAMESITE` to control the session + cookie's ``SameSite`` option. + + Added :data:`MAX_COOKIE_SIZE` to control a warning from Werkzeug. + + +Configuring from Python Files +----------------------------- + +Configuration becomes more useful if you can store it in a separate file, +ideally located outside the actual application package. This makes +packaging and distributing your application possible via various package +handling tools (:doc:`/patterns/distribute`) and finally modifying the +configuration file afterwards. 
+ +So a common pattern is this:: + + app = Flask(__name__) + app.config.from_object('yourapplication.default_settings') + app.config.from_envvar('YOURAPPLICATION_SETTINGS') + +This first loads the configuration from the +`yourapplication.default_settings` module and then overrides the values +with the contents of the file the :envvar:`YOURAPPLICATION_SETTINGS` +environment variable points to. This environment variable can be set +in the shell before starting the server: + +.. tabs:: + + .. group-tab:: Bash + + .. code-block:: text + + $ export YOURAPPLICATION_SETTINGS=/path/to/settings.cfg + $ flask run + * Running on http://127.0.0.1:5000/ + + .. group-tab:: Fish + + .. code-block:: text + + $ set -x YOURAPPLICATION_SETTINGS /path/to/settings.cfg + $ flask run + * Running on http://127.0.0.1:5000/ + + .. group-tab:: CMD + + .. code-block:: text + + > set YOURAPPLICATION_SETTINGS=\path\to\settings.cfg + > flask run + * Running on http://127.0.0.1:5000/ + + .. group-tab:: Powershell + + .. code-block:: text + + > $env:YOURAPPLICATION_SETTINGS = "\path\to\settings.cfg" + > flask run + * Running on http://127.0.0.1:5000/ + +The configuration files themselves are actual Python files. Only values +in uppercase are actually stored in the config object later on. So make +sure to use uppercase letters for your config keys. + +Here is an example of a configuration file:: + + # Example configuration + SECRET_KEY = '192b9bdd22ab9ed4d12e236c78afcb9a393ec15f71bbf5dc987d54727823bcbf' + +Make sure to load the configuration very early on, so that extensions have +the ability to access the configuration when starting up. There are other +methods on the config object as well to load from individual files. For a +complete reference, read the :class:`~flask.Config` object's +documentation. + + +Configuring from Data Files +--------------------------- + +It is also possible to load configuration from a file in a format of +your choice using :meth:`~flask.Config.from_file`. 
For example to load +from a TOML file: + +.. code-block:: python + + import toml + app.config.from_file("config.toml", load=toml.load) + +Or from a JSON file: + +.. code-block:: python + + import json + app.config.from_file("config.json", load=json.load) + + +Configuring from Environment Variables +-------------------------------------- + +In addition to pointing to configuration files using environment variables, you +may find it useful (or necessary) to control your configuration values directly +from the environment. + +Environment variables can be set in the shell before starting the server: + +.. tabs:: + + .. group-tab:: Bash + + .. code-block:: text + + $ export SECRET_KEY="5f352379324c22463451387a0aec5d2f" + $ export MAIL_ENABLED=false + $ flask run + * Running on http://127.0.0.1:5000/ + + .. group-tab:: Fish + + .. code-block:: text + + $ set -x SECRET_KEY "5f352379324c22463451387a0aec5d2f" + $ set -x MAIL_ENABLED false + $ flask run + * Running on http://127.0.0.1:5000/ + + .. group-tab:: CMD + + .. code-block:: text + + > set SECRET_KEY="5f352379324c22463451387a0aec5d2f" + > set MAIL_ENABLED=false + > flask run + * Running on http://127.0.0.1:5000/ + + .. group-tab:: Powershell + + .. code-block:: text + + > $env:SECRET_KEY = "5f352379324c22463451387a0aec5d2f" + > $env:MAIL_ENABLED = "false" + > flask run + * Running on http://127.0.0.1:5000/ + +While this approach is straightforward to use, it is important to remember that +environment variables are strings -- they are not automatically deserialized +into Python types. 
+ +Here is an example of a configuration file that uses environment variables:: + + import os + + _mail_enabled = os.environ.get("MAIL_ENABLED", default="true") + MAIL_ENABLED = _mail_enabled.lower() in {"1", "t", "true"} + + SECRET_KEY = os.environ.get("SECRET_KEY") + + if not SECRET_KEY: + raise ValueError("No SECRET_KEY set for Flask application") + + +Notice that any value besides an empty string will be interpreted as a boolean +``True`` value in Python, which requires care if an environment explicitly sets +values intended to be ``False``. + +Make sure to load the configuration very early on, so that extensions have the +ability to access the configuration when starting up. There are other methods +on the config object as well to load from individual files. For a complete +reference, read the :class:`~flask.Config` class documentation. + + +Configuration Best Practices +---------------------------- + +The downside with the approach mentioned earlier is that it makes testing +a little harder. There is no single 100% solution for this problem in +general, but there are a couple of things you can keep in mind to improve +that experience: + +1. Create your application in a function and register blueprints on it. + That way you can create multiple instances of your application with + different configurations attached which makes unit testing a lot + easier. You can use this to pass in configuration as needed. + +2. Do not write code that needs the configuration at import time. If you + limit yourself to request-only accesses to the configuration you can + reconfigure the object later on as needed. + +.. _config-dev-prod: + +Development / Production +------------------------ + +Most applications need more than one configuration. There should be at +least separate configurations for the production server and the one used +during development. 
The easiest way to handle this is to use a default +configuration that is always loaded and part of the version control, and a +separate configuration that overrides the values as necessary as mentioned +in the example above:: + + app = Flask(__name__) + app.config.from_object('yourapplication.default_settings') + app.config.from_envvar('YOURAPPLICATION_SETTINGS') + +Then you just have to add a separate :file:`config.py` file and export +``YOURAPPLICATION_SETTINGS=/path/to/config.py`` and you are done. However +there are alternative ways as well. For example you could use imports or +subclassing. + +What is very popular in the Django world is to make the import explicit in +the config file by adding ``from yourapplication.default_settings +import *`` to the top of the file and then overriding the changes by hand. +You could also inspect an environment variable like +``YOURAPPLICATION_MODE`` and set that to `production`, `development` etc +and import different hard-coded files based on that. + +An interesting pattern is also to use classes and inheritance for +configuration:: + + class Config(object): + TESTING = False + + class ProductionConfig(Config): + DATABASE_URI = 'mysql://user@localhost/foo' + + class DevelopmentConfig(Config): + DATABASE_URI = "sqlite:////tmp/foo.db" + + class TestingConfig(Config): + DATABASE_URI = 'sqlite:///:memory:' + TESTING = True + +To enable such a config you just have to call into +:meth:`~flask.Config.from_object`:: + + app.config.from_object('configmodule.ProductionConfig') + +Note that :meth:`~flask.Config.from_object` does not instantiate the class +object. 
If you need to instantiate the class, such as to access a property, +then you must do so before calling :meth:`~flask.Config.from_object`:: + + from configmodule import ProductionConfig + app.config.from_object(ProductionConfig()) + + # Alternatively, import via string: + from werkzeug.utils import import_string + cfg = import_string('configmodule.ProductionConfig')() + app.config.from_object(cfg) + +Instantiating the configuration object allows you to use ``@property`` in +your configuration classes:: + + class Config(object): + """Base config, uses staging database server.""" + TESTING = False + DB_SERVER = '192.168.1.56' + + @property + def DATABASE_URI(self): # Note: all caps + return f"mysql://user@{self.DB_SERVER}/foo" + + class ProductionConfig(Config): + """Uses production database server.""" + DB_SERVER = '192.168.19.32' + + class DevelopmentConfig(Config): + DB_SERVER = 'localhost' + + class TestingConfig(Config): + DB_SERVER = 'localhost' + DATABASE_URI = 'sqlite:///:memory:' + +There are many different ways and it's up to you how you want to manage +your configuration files. However here a list of good recommendations: + +- Keep a default configuration in version control. Either populate the + config with this default configuration or import it in your own + configuration files before overriding values. +- Use an environment variable to switch between the configurations. + This can be done from outside the Python interpreter and makes + development and deployment much easier because you can quickly and + easily switch between different configs without having to touch the + code at all. If you are working often on different projects you can + even create your own script for sourcing that activates a virtualenv + and exports the development configuration for you. +- Use a tool like `fabric`_ in production to push code and + configurations separately to the production server(s). 
For some + details about how to do that, head over to the + :doc:`/patterns/fabric` pattern. + +.. _fabric: https://www.fabfile.org/ + + +.. _instance-folders: + +Instance Folders +---------------- + +.. versionadded:: 0.8 + +Flask 0.8 introduces instance folders. Flask for a long time made it +possible to refer to paths relative to the application's folder directly +(via :attr:`Flask.root_path`). This was also how many developers loaded +configurations stored next to the application. Unfortunately however this +only works well if applications are not packages in which case the root +path refers to the contents of the package. + +With Flask 0.8 a new attribute was introduced: +:attr:`Flask.instance_path`. It refers to a new concept called the +“instance folder”. The instance folder is designed to not be under +version control and be deployment specific. It's the perfect place to +drop things that either change at runtime or configuration files. + +You can either explicitly provide the path of the instance folder when +creating the Flask application or you can let Flask autodetect the +instance folder. For explicit configuration use the `instance_path` +parameter:: + + app = Flask(__name__, instance_path='/path/to/instance/folder') + +Please keep in mind that this path *must* be absolute when provided. + +If the `instance_path` parameter is not provided the following default +locations are used: + +- Uninstalled module:: + + /myapp.py + /instance + +- Uninstalled package:: + + /myapp + /__init__.py + /instance + +- Installed module or package:: + + $PREFIX/lib/pythonX.Y/site-packages/myapp + $PREFIX/var/myapp-instance + + ``$PREFIX`` is the prefix of your Python installation. This can be + ``/usr`` or the path to your virtualenv. You can print the value of + ``sys.prefix`` to see what the prefix is set to. 
+ +Since the config object provided loading of configuration files from +relative filenames we made it possible to change the loading via filenames +to be relative to the instance path if wanted. The behavior of relative +paths in config files can be flipped between “relative to the application +root” (the default) to “relative to instance folder” via the +`instance_relative_config` switch to the application constructor:: + + app = Flask(__name__, instance_relative_config=True) + +Here is a full example of how to configure Flask to preload the config +from a module and then override the config from a file in the instance +folder if it exists:: + + app = Flask(__name__, instance_relative_config=True) + app.config.from_object('yourapplication.default_settings') + app.config.from_pyfile('application.cfg', silent=True) + +The path to the instance folder can be found via the +:attr:`Flask.instance_path`. Flask also provides a shortcut to open a +file from the instance folder with :meth:`Flask.open_instance_resource`. + +Example usage for both:: + + filename = os.path.join(app.instance_path, 'application.cfg') + with open(filename) as f: + config = f.read() + + # or via open_instance_resource: + with app.open_instance_resource('application.cfg') as f: + config = f.read() diff --git a/testbed/pallets__flask/docs/contributing.rst b/testbed/pallets__flask/docs/contributing.rst new file mode 100644 index 0000000000000000000000000000000000000000..e582053ea018c369be05aae96cf730744f1dc616 --- /dev/null +++ b/testbed/pallets__flask/docs/contributing.rst @@ -0,0 +1 @@ +.. 
include:: ../CONTRIBUTING.rst diff --git a/testbed/pallets__flask/docs/debugging.rst b/testbed/pallets__flask/docs/debugging.rst new file mode 100644 index 0000000000000000000000000000000000000000..cd955312b8b7af6dba13f84961799a926b38a604 --- /dev/null +++ b/testbed/pallets__flask/docs/debugging.rst @@ -0,0 +1,147 @@ +Debugging Application Errors +============================ + + +In Production +------------- + +**Do not run the development server, or enable the built-in debugger, in +a production environment.** The debugger allows executing arbitrary +Python code from the browser. It's protected by a pin, but that should +not be relied on for security. + +Use an error logging tool, such as Sentry, as described in +:ref:`error-logging-tools`, or enable logging and notifications as +described in :doc:`/logging`. + +If you have access to the server, you could add some code to start an +external debugger if ``request.remote_addr`` matches your IP. Some IDE +debuggers also have a remote mode so breakpoints on the server can be +interacted with locally. Only enable a debugger temporarily. + + +The Built-In Debugger +--------------------- + +The built-in Werkzeug development server provides a debugger which shows +an interactive traceback in the browser when an unhandled error occurs +during a request. This debugger should only be used during development. + +.. image:: _static/debugger.png + :align: center + :class: screenshot + :alt: screenshot of debugger in action + +.. warning:: + + The debugger allows executing arbitrary Python code from the + browser. It is protected by a pin, but still represents a major + security risk. Do not run the development server or debugger in a + production environment. + +To enable the debugger, run the development server with the +``FLASK_ENV`` environment variable set to ``development``. This puts +Flask in debug mode, which changes how it handles some errors, and +enables the debugger and reloader. + +.. tabs:: + + .. 
group-tab:: Bash + + .. code-block:: text + + $ export FLASK_ENV=development + $ flask run + + .. group-tab:: Fish + + .. code-block:: text + + $ set -x FLASK_ENV development + $ flask run + + .. group-tab:: CMD + + .. code-block:: text + + > set FLASK_ENV=development + > flask run + + .. group-tab:: Powershell + + .. code-block:: text + + > $env:FLASK_ENV = "development" + > flask run + +``FLASK_ENV`` can only be set as an environment variable. When running +from Python code, passing ``debug=True`` enables debug mode, which is +mostly equivalent. Debug mode can be controlled separately from +``FLASK_ENV`` with the ``FLASK_DEBUG`` environment variable as well. + +.. code-block:: python + + app.run(debug=True) + +:doc:`/server` and :doc:`/cli` have more information about running the +debugger, debug mode, and development mode. More information about the +debugger can be found in the `Werkzeug documentation +`__. + + +External Debuggers +------------------ + +External debuggers, such as those provided by IDEs, can offer a more +powerful debugging experience than the built-in debugger. They can also +be used to step through code during a request before an error is raised, +or if no error is raised. Some even have a remote mode so you can debug +code running on another machine. + +When using an external debugger, the app should still be in debug mode, +but it can be useful to disable the built-in debugger and reloader, +which can interfere. + +When running from the command line: + +.. tabs:: + + .. group-tab:: Bash + + .. code-block:: text + + $ export FLASK_ENV=development + $ flask run --no-debugger --no-reload + + .. group-tab:: Fish + + .. code-block:: text + + $ set -x FLASK_ENV development + $ flask run --no-debugger --no-reload + + .. group-tab:: CMD + + .. code-block:: text + + > set FLASK_ENV=development + > flask run --no-debugger --no-reload + + .. group-tab:: Powershell + + .. 
code-block:: text + + > $env:FLASK_ENV = "development" + > flask run --no-debugger --no-reload + +When running from Python: + +.. code-block:: python + + app.run(debug=True, use_debugger=False, use_reloader=False) + +Disabling these isn't required, an external debugger will continue to +work with the following caveats. If the built-in debugger is not +disabled, it will catch unhandled exceptions before the external +debugger can. If the reloader is not disabled, it could cause an +unexpected reload if code changes during debugging. diff --git a/testbed/pallets__flask/docs/deploying/asgi.rst b/testbed/pallets__flask/docs/deploying/asgi.rst new file mode 100644 index 0000000000000000000000000000000000000000..39cd76b7298b5734ec4a9f09c174e026a1f97d61 --- /dev/null +++ b/testbed/pallets__flask/docs/deploying/asgi.rst @@ -0,0 +1,29 @@ +.. _asgi: + +ASGI +==== + +If you'd like to use an ASGI server you will need to utilise WSGI to +ASGI middleware. The asgiref +`WsgiToAsgi `_ +adapter is recommended as it integrates with the event loop used for +Flask's :ref:`async_await` support. You can use the adapter by +wrapping the Flask app, + +.. code-block:: python + + from asgiref.wsgi import WsgiToAsgi + from flask import Flask + + app = Flask(__name__) + + ... + + asgi_app = WsgiToAsgi(app) + +and then serving the ``asgi_app`` with the ASGI server, e.g. using +`Hypercorn `_, + +.. sourcecode:: text + + $ hypercorn module:asgi_app diff --git a/testbed/pallets__flask/docs/deploying/cgi.rst b/testbed/pallets__flask/docs/deploying/cgi.rst new file mode 100644 index 0000000000000000000000000000000000000000..4c1cdfbf0f3043db7d15212284e3f6c7f1a62586 --- /dev/null +++ b/testbed/pallets__flask/docs/deploying/cgi.rst @@ -0,0 +1,61 @@ +CGI +=== + +If all other deployment methods do not work, CGI will work for sure. +CGI is supported by all major servers but usually has a sub-optimal +performance. 
+ +This is also the way you can use a Flask application on Google's `App +Engine`_, where execution happens in a CGI-like environment. + +.. admonition:: Watch Out + + Please make sure in advance that any ``app.run()`` calls you might + have in your application file are inside an ``if __name__ == + '__main__':`` block or moved to a separate file. Just make sure it's + not called because this will always start a local WSGI server which + we do not want if we deploy that application to CGI / app engine. + + With CGI, you will also have to make sure that your code does not contain + any ``print`` statements, or that ``sys.stdout`` is overridden by something + that doesn't write into the HTTP response. + +Creating a `.cgi` file +---------------------- + +First you need to create the CGI application file. Let's call it +:file:`yourapplication.cgi`:: + + #!/usr/bin/python + from wsgiref.handlers import CGIHandler + from yourapplication import app + + CGIHandler().run(app) + +Server Setup +------------ + +Usually there are two ways to configure the server. Either just copy the +``.cgi`` into a :file:`cgi-bin` (and use `mod_rewrite` or something similar to +rewrite the URL) or let the server point to the file directly. + +In Apache for example you can put something like this into the config: + +.. sourcecode:: apache + + ScriptAlias /app /path/to/the/application.cgi + +On shared webhosting, though, you might not have access to your Apache config. +In this case, a file called ``.htaccess``, sitting in the public directory +you want your app to be available, works too but the ``ScriptAlias`` directive +won't work in that case: + +.. sourcecode:: apache + + RewriteEngine On + RewriteCond %{REQUEST_FILENAME} !-f # Don't interfere with static files + RewriteRule ^(.*)$ /path/to/the/application.cgi/$1 [L] + +For more information consult the documentation of your webserver. + +.. 
_App Engine: https://cloud.google.com/appengine/docs/ diff --git a/testbed/pallets__flask/docs/deploying/fastcgi.rst b/testbed/pallets__flask/docs/deploying/fastcgi.rst new file mode 100644 index 0000000000000000000000000000000000000000..d3614d377df07323e1b6270dbde695a48aff298b --- /dev/null +++ b/testbed/pallets__flask/docs/deploying/fastcgi.rst @@ -0,0 +1,238 @@ +FastCGI +======= + +FastCGI is a deployment option on servers like `nginx`_, `lighttpd`_, and +`cherokee`_; see :doc:`uwsgi` and :doc:`wsgi-standalone` for other options. +To use your WSGI application with any of them you will need a FastCGI +server first. The most popular one is `flup`_ which we will use for +this guide. Make sure to have it installed to follow along. + +.. admonition:: Watch Out + + Please make sure in advance that any ``app.run()`` calls you might + have in your application file are inside an ``if __name__ == + '__main__':`` block or moved to a separate file. Just make sure it's + not called because this will always start a local WSGI server which + we do not want if we deploy that application to FastCGI. + +Creating a `.fcgi` file +----------------------- + +First you need to create the FastCGI server file. Let's call it +`yourapplication.fcgi`:: + + #!/usr/bin/python + from flup.server.fcgi import WSGIServer + from yourapplication import app + + if __name__ == '__main__': + WSGIServer(app).run() + +This is enough for Apache to work, however nginx and older versions of +lighttpd need a socket to be explicitly passed to communicate with the +FastCGI server. For that to work you need to pass the path to the +socket to the :class:`~flup.server.fcgi.WSGIServer`:: + + WSGIServer(application, bindAddress='/path/to/fcgi.sock').run() + +The path has to be the exact same path you define in the server +config. + +Save the :file:`yourapplication.fcgi` file somewhere you will find it again. +It makes sense to have that in :file:`/var/www/yourapplication` or something +similar. 
+ +Make sure to set the executable bit on that file so that the servers +can execute it: + +.. sourcecode:: text + + $ chmod +x /var/www/yourapplication/yourapplication.fcgi + +Configuring Apache +------------------ + +The example above is good enough for a basic Apache deployment but your +`.fcgi` file will appear in your application URL e.g. +``example.com/yourapplication.fcgi/news/``. There are few ways to configure +your application so that yourapplication.fcgi does not appear in the URL. +A preferable way is to use the ScriptAlias and SetHandler configuration +directives to route requests to the FastCGI server. The following example +uses FastCgiServer to start 5 instances of the application which will +handle all incoming requests:: + + LoadModule fastcgi_module /usr/lib64/httpd/modules/mod_fastcgi.so + + FastCgiServer /var/www/html/yourapplication/app.fcgi -idle-timeout 300 -processes 5 + + + ServerName webapp1.mydomain.com + DocumentRoot /var/www/html/yourapplication + + AddHandler fastcgi-script fcgi + ScriptAlias / /var/www/html/yourapplication/app.fcgi/ + + + SetHandler fastcgi-script + + + +These processes will be managed by Apache. If you're using a standalone +FastCGI server, you can use the FastCgiExternalServer directive instead. +Note that in the following the path is not real, it's simply used as an +identifier to other +directives such as AliasMatch:: + + FastCgiServer /var/www/html/yourapplication -host 127.0.0.1:3000 + +If you cannot set ScriptAlias, for example on a shared web host, you can use +WSGI middleware to remove yourapplication.fcgi from the URLs. 
Set .htaccess:: + + + AddHandler fcgid-script .fcgi + + SetHandler fcgid-script + Options +FollowSymLinks +ExecCGI + + + + + Options +FollowSymlinks + RewriteEngine On + RewriteBase / + RewriteCond %{REQUEST_FILENAME} !-f + RewriteRule ^(.*)$ yourapplication.fcgi/$1 [QSA,L] + + +Set yourapplication.fcgi:: + + #!/usr/bin/python + #: optional path to your local python site-packages folder + import sys + sys.path.insert(0, '/lib/python/site-packages') + + from flup.server.fcgi import WSGIServer + from yourapplication import app + + class ScriptNameStripper(object): + def __init__(self, app): + self.app = app + + def __call__(self, environ, start_response): + environ['SCRIPT_NAME'] = '' + return self.app(environ, start_response) + + app = ScriptNameStripper(app) + + if __name__ == '__main__': + WSGIServer(app).run() + +Configuring lighttpd +-------------------- + +A basic FastCGI configuration for lighttpd looks like that:: + + fastcgi.server = ("/yourapplication.fcgi" => + (( + "socket" => "/tmp/yourapplication-fcgi.sock", + "bin-path" => "/var/www/yourapplication/yourapplication.fcgi", + "check-local" => "disable", + "max-procs" => 1 + )) + ) + + alias.url = ( + "/static/" => "/path/to/your/static/" + ) + + url.rewrite-once = ( + "^(/static($|/.*))$" => "$1", + "^(/.*)$" => "/yourapplication.fcgi$1" + ) + +Remember to enable the FastCGI, alias and rewrite modules. This configuration +binds the application to ``/yourapplication``. If you want the application to +work in the URL root you have to work around a lighttpd bug with the +:class:`~werkzeug.contrib.fixers.LighttpdCGIRootFix` middleware. + +Make sure to apply it only if you are mounting the application the URL +root. Also, see the Lighty docs for more information on `FastCGI and Python +`_ (note that +explicitly passing a socket to run() is no longer necessary). 
+ +Configuring nginx +----------------- + +Installing FastCGI applications on nginx is a bit different because by +default no FastCGI parameters are forwarded. + +A basic Flask FastCGI configuration for nginx looks like this:: + + location = /yourapplication { rewrite ^ /yourapplication/ last; } + location /yourapplication { try_files $uri @yourapplication; } + location @yourapplication { + include fastcgi_params; + fastcgi_split_path_info ^(/yourapplication)(.*)$; + fastcgi_param PATH_INFO $fastcgi_path_info; + fastcgi_param SCRIPT_NAME $fastcgi_script_name; + fastcgi_pass unix:/tmp/yourapplication-fcgi.sock; + } + +This configuration binds the application to ``/yourapplication``. If you +want to have it in the URL root it's a bit simpler because you don't +have to figure out how to calculate ``PATH_INFO`` and ``SCRIPT_NAME``:: + + location / { try_files $uri @yourapplication; } + location @yourapplication { + include fastcgi_params; + fastcgi_param PATH_INFO $fastcgi_script_name; + fastcgi_param SCRIPT_NAME ""; + fastcgi_pass unix:/tmp/yourapplication-fcgi.sock; + } + +Running FastCGI Processes +------------------------- + +Since nginx and others do not load FastCGI apps, you have to do it by +yourself. `Supervisor can manage FastCGI processes. +`_ +You can look around for other FastCGI process managers or write a script +to run your `.fcgi` file at boot, e.g. using a SysV ``init.d`` script. +For a temporary solution, you can always run the ``.fcgi`` script inside +GNU screen. See ``man screen`` for details, and note that this is a +manual solution which does not persist across system restart:: + + $ screen + $ /var/www/yourapplication/yourapplication.fcgi + +Debugging +--------- + +FastCGI deployments tend to be hard to debug on most web servers. Very +often the only thing the server log tells you is something along the +lines of "premature end of headers". 
In order to debug the application +the only thing that can really give you ideas why it breaks is switching +to the correct user and executing the application by hand. + +This example assumes your application is called `application.fcgi` and +that your web server user is `www-data`:: + + $ su www-data + $ cd /var/www/yourapplication + $ python application.fcgi + Traceback (most recent call last): + File "yourapplication.fcgi", line 4, in + ImportError: No module named yourapplication + +In this case the error seems to be "yourapplication" not being on the +python path. Common problems are: + +- Relative paths being used. Don't rely on the current working directory. +- The code depending on environment variables that are not set by the + web server. +- Different python interpreters being used. + +.. _nginx: https://nginx.org/ +.. _lighttpd: https://www.lighttpd.net/ +.. _cherokee: https://cherokee-project.com/ +.. _flup: https://pypi.org/project/flup/ diff --git a/testbed/pallets__flask/docs/deploying/index.rst b/testbed/pallets__flask/docs/deploying/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..e1ed92694497d7f014a8d8950c50289950b36ebf --- /dev/null +++ b/testbed/pallets__flask/docs/deploying/index.rst @@ -0,0 +1,35 @@ +Deployment Options +================== + +While lightweight and easy to use, **Flask's built-in server is not suitable +for production** as it doesn't scale well. Some of the options available for +properly running Flask in production are documented here. + +If you want to deploy your Flask application to a WSGI server not listed here, +look up the server documentation about how to use a WSGI app with it. Just +remember that your :class:`Flask` application object is the actual WSGI +application. 
+ + +Hosted options +-------------- + +- `Deploying Flask on Heroku `_ +- `Deploying Flask on Google App Engine `_ +- `Deploying Flask on Google Cloud Run `_ +- `Deploying Flask on AWS Elastic Beanstalk `_ +- `Deploying on Azure (IIS) `_ +- `Deploying on PythonAnywhere `_ + +Self-hosted options +------------------- + +.. toctree:: + :maxdepth: 2 + + wsgi-standalone + uwsgi + mod_wsgi + fastcgi + cgi + asgi diff --git a/testbed/pallets__flask/docs/deploying/mod_wsgi.rst b/testbed/pallets__flask/docs/deploying/mod_wsgi.rst new file mode 100644 index 0000000000000000000000000000000000000000..801dfa5c1f874a81832c67a32b2e5a02cf268c03 --- /dev/null +++ b/testbed/pallets__flask/docs/deploying/mod_wsgi.rst @@ -0,0 +1,216 @@ +mod_wsgi (Apache) +================= + +If you are using the `Apache`_ webserver, consider using `mod_wsgi`_. + +.. admonition:: Watch Out + + Please make sure in advance that any ``app.run()`` calls you might + have in your application file are inside an ``if __name__ == + '__main__':`` block or moved to a separate file. Just make sure it's + not called because this will always start a local WSGI server which + we do not want if we deploy that application to mod_wsgi. + +.. _Apache: https://httpd.apache.org/ + +Installing `mod_wsgi` +--------------------- + +If you don't have `mod_wsgi` installed yet you have to either install it +using a package manager or compile it yourself. The mod_wsgi +`installation instructions`_ cover source installations on UNIX systems. + +If you are using Ubuntu/Debian you can apt-get it and activate it as +follows: + +.. sourcecode:: text + + $ apt-get install libapache2-mod-wsgi-py3 + +If you are using a yum based distribution (Fedora, OpenSUSE, etc..) you +can install it as follows: + +.. sourcecode:: text + + $ yum install mod_wsgi + +On FreeBSD install `mod_wsgi` by compiling the `www/mod_wsgi` port or by +using pkg_add: + +.. 
sourcecode:: text + + $ pkg install ap24-py37-mod_wsgi + +If you are using pkgsrc you can install `mod_wsgi` by compiling the +`www/ap2-wsgi` package. + +If you encounter segfaulting child processes after the first apache +reload you can safely ignore them. Just restart the server. + +Creating a `.wsgi` file +----------------------- + +To run your application you need a :file:`yourapplication.wsgi` file. +This file contains the code `mod_wsgi` is executing on startup +to get the application object. The object called `application` +in that file is then used as application. + +For most applications the following file should be sufficient:: + + from yourapplication import app as application + +If a factory function is used in a :file:`__init__.py` file, then the function should be imported:: + + from yourapplication import create_app + application = create_app() + +If you don't have a factory function for application creation but a singleton +instance you can directly import that one as `application`. + +Store that file somewhere that you will find it again (e.g.: +:file:`/var/www/yourapplication`) and make sure that `yourapplication` and all +the libraries that are in use are on the python load path. If you don't +want to install it system wide consider using a `virtual python`_ +instance. Keep in mind that you will have to actually install your +application into the virtualenv as well. Alternatively there is the +option to just patch the path in the ``.wsgi`` file before the import:: + + import sys + sys.path.insert(0, '/path/to/the/application') + +Configuring Apache +------------------ + +The last thing you have to do is to create an Apache configuration file +for your application. In this example we are telling `mod_wsgi` to +execute the application under a different user for security reasons: + +.. 
sourcecode:: apache + + + ServerName example.com + + WSGIDaemonProcess yourapplication user=user1 group=group1 threads=5 + WSGIScriptAlias / /var/www/yourapplication/yourapplication.wsgi + + + WSGIProcessGroup yourapplication + WSGIApplicationGroup %{GLOBAL} + Order deny,allow + Allow from all + + + +Note: WSGIDaemonProcess isn't implemented in Windows and Apache will +refuse to run with the above configuration. On a Windows system, eliminate those lines: + +.. sourcecode:: apache + + + ServerName example.com + WSGIScriptAlias / C:\yourdir\yourapp.wsgi + + Order deny,allow + Allow from all + + + +Note: There have been some changes in access control configuration +for `Apache 2.4`_. + +.. _Apache 2.4: https://httpd.apache.org/docs/trunk/upgrading.html + +Most notably, the syntax for directory permissions has changed from httpd 2.2 + +.. sourcecode:: apache + + Order allow,deny + Allow from all + +to httpd 2.4 syntax + +.. sourcecode:: apache + + Require all granted + + +For more information consult the `mod_wsgi documentation`_. + +.. _mod_wsgi: https://github.com/GrahamDumpleton/mod_wsgi +.. _installation instructions: https://modwsgi.readthedocs.io/en/develop/installation.html +.. _virtual python: https://pypi.org/project/virtualenv/ +.. _mod_wsgi documentation: https://modwsgi.readthedocs.io/en/develop/index.html + +Troubleshooting +--------------- + +If your application does not run, follow this guide to troubleshoot: + +**Problem:** application does not run, errorlog shows SystemExit ignored + You have an ``app.run()`` call in your application file that is not + guarded by an ``if __name__ == '__main__':`` condition. Either + remove that :meth:`~flask.Flask.run` call from the file and move it + into a separate :file:`run.py` file or put it into such an if block. + +**Problem:** application gives permission errors + Probably caused by your application running as the wrong user. 
Make + sure the folders the application needs access to have the proper + privileges set and the application runs as the correct user + (``user`` and ``group`` parameter to the `WSGIDaemonProcess` + directive) + +**Problem:** application dies with an error on print + Keep in mind that mod_wsgi disallows doing anything with + :data:`sys.stdout` and :data:`sys.stderr`. You can disable this + protection from the config by setting the `WSGIRestrictStdout` to + ``off``: + + .. sourcecode:: apache + + WSGIRestrictStdout Off + + Alternatively you can also replace the standard out in the .wsgi file + with a different stream:: + + import sys + sys.stdout = sys.stderr + +**Problem:** accessing resources gives IO errors + Your application probably is a single .py file you symlinked into + the site-packages folder. Please be aware that this does not work, + instead you either have to put the folder into the pythonpath the + file is stored in, or convert your application into a package. + + The reason for this is that for non-installed packages, the module + filename is used to locate the resources and for symlinks the wrong + filename is picked up. + +Support for Automatic Reloading +------------------------------- + +To help deployment tools you can activate support for automatic +reloading. Whenever something changes the ``.wsgi`` file, `mod_wsgi` will +reload all the daemon processes for us. + +For that, just add the following directive to your `Directory` section: + +.. sourcecode:: apache + + WSGIScriptReloading On + +Working with Virtual Environments +--------------------------------- + +Virtual environments have the advantage that they never install the +required dependencies system wide so you have a better control over what +is used where. If you want to use a virtual environment with mod_wsgi +you have to modify your ``.wsgi`` file slightly. 
+ +Add the following lines to the top of your ``.wsgi`` file:: + + activate_this = '/path/to/env/bin/activate_this.py' + with open(activate_this) as file_: + exec(file_.read(), dict(__file__=activate_this)) + +This sets up the load paths according to the settings of the virtual +environment. Keep in mind that the path has to be absolute. diff --git a/testbed/pallets__flask/docs/deploying/uwsgi.rst b/testbed/pallets__flask/docs/deploying/uwsgi.rst new file mode 100644 index 0000000000000000000000000000000000000000..b6958dc09dccb90e63e6b310cede760e26daf5c7 --- /dev/null +++ b/testbed/pallets__flask/docs/deploying/uwsgi.rst @@ -0,0 +1,71 @@ +uWSGI +===== + +uWSGI is a deployment option on servers like `nginx`_, `lighttpd`_, and +`cherokee`_; see :doc:`fastcgi` and :doc:`wsgi-standalone` for other options. +To use your WSGI application with uWSGI protocol you will need a uWSGI server +first. uWSGI is both a protocol and an application server; the application +server can serve uWSGI, FastCGI, and HTTP protocols. + +The most popular uWSGI server is `uwsgi`_, which we will use for this +guide. Make sure to have it installed to follow along. + +.. admonition:: Watch Out + + Please make sure in advance that any ``app.run()`` calls you might + have in your application file are inside an ``if __name__ == + '__main__':`` block or moved to a separate file. Just make sure it's + not called because this will always start a local WSGI server which + we do not want if we deploy that application to uWSGI. + +Starting your app with uwsgi +---------------------------- + +`uwsgi` is designed to operate on WSGI callables found in python modules. + +Given a flask application in myapp.py, use the following command: + +.. sourcecode:: text + + $ uwsgi -s /tmp/yourapplication.sock --manage-script-name --mount /yourapplication=myapp:app + +The ``--manage-script-name`` will move the handling of ``SCRIPT_NAME`` +to uwsgi, since it is smarter about that. 
+It is used together with the ``--mount`` directive which will make +requests to ``/yourapplication`` be directed to ``myapp:app``. +If your application is accessible at root level, you can use a +single ``/`` instead of ``/yourapplication``. ``myapp`` refers to the name of +the file of your flask application (without extension) or the module which +provides ``app``. ``app`` is the callable inside of your application (usually +the line reads ``app = Flask(__name__)``). + +If you want to deploy your flask application inside of a virtual environment, +you need to also add ``--virtualenv /path/to/virtual/environment``. You might +also need to add ``--plugin python`` or ``--plugin python3`` depending on which +python version you use for your project. + +Configuring nginx +----------------- + +A basic flask nginx configuration looks like this:: + + location = /yourapplication { rewrite ^ /yourapplication/; } + location /yourapplication { try_files $uri @yourapplication; } + location @yourapplication { + include uwsgi_params; + uwsgi_pass unix:/tmp/yourapplication.sock; + } + +This configuration binds the application to ``/yourapplication``. If you want +to have it in the URL root it's a bit simpler:: + + location / { try_files $uri @yourapplication; } + location @yourapplication { + include uwsgi_params; + uwsgi_pass unix:/tmp/yourapplication.sock; + } + +.. _nginx: https://nginx.org/ +.. _lighttpd: https://www.lighttpd.net/ +.. _cherokee: https://cherokee-project.com/ +.. 
_uwsgi: https://uwsgi-docs.readthedocs.io/en/latest/ diff --git a/testbed/pallets__flask/docs/deploying/wsgi-standalone.rst b/testbed/pallets__flask/docs/deploying/wsgi-standalone.rst new file mode 100644 index 0000000000000000000000000000000000000000..823426ac5d2192192efcd6ce3128b2e42cd389a0 --- /dev/null +++ b/testbed/pallets__flask/docs/deploying/wsgi-standalone.rst @@ -0,0 +1,262 @@ +Standalone WSGI Servers +======================= + +Most WSGI servers also provide HTTP servers, so they can run a WSGI +application and make it available externally. + +It may still be a good idea to run the server behind a dedicated HTTP +server such as Apache or Nginx. See :ref:`deploying-proxy-setups` if you +run into issues with that. + + +Gunicorn +-------- + +`Gunicorn`_ is a WSGI and HTTP server for UNIX. To run a Flask +application, tell Gunicorn how to import your Flask app object. + +.. code-block:: text + + $ gunicorn -w 4 -b 0.0.0.0:5000 your_project:app + +The ``-w 4`` option uses 4 workers to handle 4 requests at once. The +``-b 0.0.0.0:5000`` serves the application on all interfaces on port +5000. + +Gunicorn provides many options for configuring the server, either +through a configuration file or with command line options. Use +``gunicorn --help`` or see the docs for more information. + +The command expects the name of your module or package to import and +the application instance within the module. If you use the application +factory pattern, you can pass a call to that. + +.. code-block:: text + + $ gunicorn -w 4 -b 0.0.0.0:5000 "myproject:create_app()" + + +Async with Gevent or Eventlet +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The default sync worker is appropriate for many use cases. If you need +asynchronous support, Gunicorn provides workers using either `gevent`_ +or `eventlet`_. This is not the same as Python's ``async/await``, or the +ASGI server spec. 
+ +When using either gevent or eventlet, greenlet>=1.0 is required, +otherwise context locals such as ``request`` will not work as expected. +When using PyPy, PyPy>=7.3.7 is required. + +To use gevent: + +.. code-block:: text + + $ gunicorn -k gevent -b 0.0.0.0:5000 your_project:app + +To use eventlet: + +.. code-block:: text + + $ gunicorn -k eventlet -b 0.0.0.0:5000 your_project:app + + +.. _Gunicorn: https://gunicorn.org/ +.. _gevent: http://www.gevent.org/ +.. _eventlet: https://eventlet.net/ +.. _greenlet: https://greenlet.readthedocs.io/en/latest/ + + +uWSGI +----- + +`uWSGI`_ is a fast application server written in C. It is very +configurable, which makes it more complicated to set up than Gunicorn. +It also provides many other utilities for writing robust web +applications. To run a Flask application, tell uWSGI how to import +your Flask app object. + +.. code-block:: text + + $ uwsgi --master -p 4 --http 0.0.0.0:5000 -w your_project:app + +The ``-p 4`` option uses 4 workers to handle 4 requests at once. The +``--http 0.0.0.0:5000`` serves the application on all interfaces on port +5000. + +uWSGI has optimized integration with Nginx and Apache instead of using +a standard HTTP proxy. See :doc:`configuring uWSGI and Nginx `. + + +Async with Gevent +~~~~~~~~~~~~~~~~~ + +The default sync worker is appropriate for many use cases. If you need +asynchronous support, uWSGI provides workers using `gevent`_. It also +supports other async modes, see the docs for more information. This is +not the same as Python's ``async/await``, or the ASGI server spec. + +When using gevent, greenlet>=1.0 is required, otherwise context locals +such as ``request`` will not work as expected. When using PyPy, +PyPy>=7.3.7 is required. + +.. code-block:: text + + $ uwsgi --master --gevent 100 --http 0.0.0.0:5000 -w your_project:app + +.. 
_uWSGI: https://uwsgi-docs.readthedocs.io/en/latest/ + + +Gevent +------ + +Prefer using `Gunicorn`_ with Gevent workers rather than using Gevent +directly. Gunicorn provides a much more configurable and +production-tested server. See the section on Gunicorn above. + +`Gevent`_ allows writing asynchronous, coroutine-based code that looks +like standard synchronous Python. It uses `greenlet`_ to enable task +switching without writing ``async/await`` or using ``asyncio``. + +It provides a WSGI server that can handle many connections at once +instead of one per worker process. + +`Eventlet`_, described below, is another library that does the same +thing. Certain dependencies you have, or other considerations, may affect +which of the two you choose to use. + +To use gevent to serve your application, import its ``WSGIServer`` and +use it to run your ``app``. + +.. code-block:: python + + from gevent.pywsgi import WSGIServer + from your_project import app + + http_server = WSGIServer(("", 5000), app) + http_server.serve_forever() + + +Eventlet +-------- + +Prefer using `Gunicorn`_ with Eventlet workers rather than using +Eventlet directly. Gunicorn provides a much more configurable and +production-tested server. See the section on Gunicorn above. + +`Eventlet`_ allows writing asynchronous, coroutine-based code that looks +like standard synchronous Python. It uses `greenlet`_ to enable task +switching without writing ``async/await`` or using ``asyncio``. + +It provides a WSGI server that can handle many connections at once +instead of one per worker process. + +`Gevent`_, described above, is another library that does the same +thing. Certain dependencies you have, or other considerations, may affect +which of the two you choose to use. + +To use eventlet to serve your application, import its ``wsgi.server`` +and use it to run your ``app``. + +.. 
code-block:: python + + import eventlet + from eventlet import wsgi + from your_project import app + + wsgi.server(eventlet.listen(("", 5000)), app) + + +Twisted Web +----------- + +`Twisted Web`_ is the web server shipped with `Twisted`_, a mature, +non-blocking event-driven networking library. Twisted Web comes with a +standard WSGI container which can be controlled from the command line using +the ``twistd`` utility: + +.. code-block:: text + + $ twistd web --wsgi myproject.app + +This example will run a Flask application called ``app`` from a module named +``myproject``. + +Twisted Web supports many flags and options, and the ``twistd`` utility does +as well; see ``twistd -h`` and ``twistd web -h`` for more information. For +example, to run a Twisted Web server in the foreground, on port 8080, with an +application from ``myproject``: + +.. code-block:: text + + $ twistd -n web --port tcp:8080 --wsgi myproject.app + +.. _Twisted: https://twistedmatrix.com/trac/ +.. _Twisted Web: https://twistedmatrix.com/trac/wiki/TwistedWeb + + +.. _deploying-proxy-setups: + +Proxy Setups +------------ + +If you deploy your application using one of these servers behind an HTTP proxy +you will need to rewrite a few headers in order for the application to work. +The two problematic values in the WSGI environment usually are ``REMOTE_ADDR`` +and ``HTTP_HOST``. You can configure your httpd to pass these headers, or you +can fix them in middleware. Werkzeug ships a fixer that will solve some common +setups, but you might want to write your own WSGI middleware for specific +setups. + +Here's a simple nginx configuration which proxies to an application served on +localhost at port 8000, setting appropriate headers: + +.. 
sourcecode:: nginx + + server { + listen 80; + + server_name _; + + access_log /var/log/nginx/access.log; + error_log /var/log/nginx/error.log; + + location / { + proxy_pass http://127.0.0.1:8000/; + proxy_redirect off; + + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } + } + +If your httpd is not providing these headers, the most common setup invokes the +host being set from ``X-Forwarded-Host`` and the remote address from +``X-Forwarded-For``:: + + from werkzeug.middleware.proxy_fix import ProxyFix + app.wsgi_app = ProxyFix(app.wsgi_app, x_proto=1, x_host=1) + +.. admonition:: Trusting Headers + + Please keep in mind that it is a security issue to use such a middleware in + a non-proxy setup because it will blindly trust the incoming headers which + might be forged by malicious clients. + +If you want to rewrite the headers from another header, you might want to +use a fixer like this:: + + class CustomProxyFix(object): + + def __init__(self, app): + self.app = app + + def __call__(self, environ, start_response): + host = environ.get('HTTP_X_FHOST', '') + if host: + environ['HTTP_HOST'] = host + return self.app(environ, start_response) + + app.wsgi_app = CustomProxyFix(app.wsgi_app) diff --git a/testbed/pallets__flask/docs/design.rst b/testbed/pallets__flask/docs/design.rst new file mode 100644 index 0000000000000000000000000000000000000000..5d57063e20456b763f59eaeab23e1e75a46184bc --- /dev/null +++ b/testbed/pallets__flask/docs/design.rst @@ -0,0 +1,208 @@ +Design Decisions in Flask +========================= + +If you are curious why Flask does certain things the way it does and not +differently, this section is for you. This should give you an idea about +some of the design decisions that may appear arbitrary and surprising at +first, especially in direct comparison with other frameworks. 
+ + +The Explicit Application Object +------------------------------- + +A Python web application based on WSGI has to have one central callable +object that implements the actual application. In Flask this is an +instance of the :class:`~flask.Flask` class. Each Flask application has +to create an instance of this class itself and pass it the name of the +module, but why can't Flask do that itself? + +Without such an explicit application object the following code:: + + from flask import Flask + app = Flask(__name__) + + @app.route('/') + def index(): + return 'Hello World!' + +Would look like this instead:: + + from hypothetical_flask import route + + @route('/') + def index(): + return 'Hello World!' + +There are three major reasons for this. The most important one is that +implicit application objects require that there may only be one instance at +the time. There are ways to fake multiple applications with a single +application object, like maintaining a stack of applications, but this +causes some problems I won't outline here in detail. Now the question is: +when does a microframework need more than one application at the same +time? A good example for this is unit testing. When you want to test +something it can be very helpful to create a minimal application to test +specific behavior. When the application object is deleted everything it +allocated will be freed again. + +Another thing that becomes possible when you have an explicit object lying +around in your code is that you can subclass the base class +(:class:`~flask.Flask`) to alter specific behavior. This would not be +possible without hacks if the object were created ahead of time for you +based on a class that is not exposed to you. + +But there is another very important reason why Flask depends on an +explicit instantiation of that class: the package name. Whenever you +create a Flask instance you usually pass it `__name__` as package name. 
+Flask depends on that information to properly load resources relative +to your module. With Python's outstanding support for reflection it can +then access the package to figure out where the templates and static files +are stored (see :meth:`~flask.Flask.open_resource`). Now obviously there +are frameworks around that do not need any configuration and will still be +able to load templates relative to your application module. But they have +to use the current working directory for that, which is a very unreliable +way to determine where the application is. The current working directory +is process-wide and if you are running multiple applications in one +process (which could happen in a webserver without you knowing) the paths +will be off. Worse: many webservers do not set the working directory to +the directory of your application but to the document root which does not +have to be the same folder. + +The third reason is "explicit is better than implicit". That object is +your WSGI application, you don't have to remember anything else. If you +want to apply a WSGI middleware, just wrap it and you're done (though +there are better ways to do that so that you do not lose the reference +to the application object :meth:`~flask.Flask.wsgi_app`). + +Furthermore this design makes it possible to use a factory function to +create the application which is very helpful for unit testing and similar +things (:doc:`/patterns/appfactories`). + +The Routing System +------------------ + +Flask uses the Werkzeug routing system which was designed to +automatically order routes by complexity. This means that you can declare +routes in arbitrary order and they will still work as expected. This is a +requirement if you want to properly implement decorator based routing +since decorators could be fired in undefined order when the application is +split into multiple modules. 
+ +Another design decision with the Werkzeug routing system is that routes +in Werkzeug try to ensure that URLs are unique. Werkzeug will go quite far +with that in that it will automatically redirect to a canonical URL if a route +is ambiguous. + + +One Template Engine +------------------- + +Flask decides on one template engine: Jinja2. Why doesn't Flask have a +pluggable template engine interface? You can obviously use a different +template engine, but Flask will still configure Jinja2 for you. While +that limitation that Jinja2 is *always* configured will probably go away, +the decision to bundle one template engine and use that will not. + +Template engines are like programming languages and each of those engines +has a certain understanding about how things work. On the surface they +all work the same: you tell the engine to evaluate a template with a set +of variables and take the return value as string. + +But that's about where similarities end. Jinja2 for example has an +extensive filter system, a certain way to do template inheritance, +support for reusable blocks (macros) that can be used from inside +templates and also from Python code, supports iterative template +rendering, configurable syntax and more. On the other hand an engine +like Genshi is based on XML stream evaluation, template inheritance by +taking the availability of XPath into account and more. Mako on the +other hand treats templates similar to Python modules. + +When it comes to connecting a template engine with an application or +framework there is more than just rendering templates. For instance, +Flask uses Jinja2's extensive autoescaping support. Also it provides +ways to access macros from Jinja2 templates. + +A template abstraction layer that would not take the unique features of +the template engines away is a science on its own and a too large +undertaking for a microframework like Flask. + +Furthermore extensions can then easily depend on one template language +being present. 
You can easily use your own templating language, but an +extension could still depend on Jinja itself. + + +Micro with Dependencies +----------------------- + +Why does Flask call itself a microframework and yet it depends on two +libraries (namely Werkzeug and Jinja2). Why shouldn't it? If we look +over to the Ruby side of web development there we have a protocol very +similar to WSGI. Just that it's called Rack there, but besides that it +looks very much like a WSGI rendition for Ruby. But nearly all +applications in Ruby land do not work with Rack directly, but on top of a +library with the same name. This Rack library has two equivalents in +Python: WebOb (formerly Paste) and Werkzeug. Paste is still around but +from my understanding it's sort of deprecated in favour of WebOb. The +development of WebOb and Werkzeug started side by side with similar ideas +in mind: be a good implementation of WSGI for other applications to take +advantage. + +Flask is a framework that takes advantage of the work already done by +Werkzeug to properly interface WSGI (which can be a complex task at +times). Thanks to recent developments in the Python package +infrastructure, packages with dependencies are no longer an issue and +there are very few reasons against having libraries that depend on others. + + +Thread Locals +------------- + +Flask uses thread local objects (context local objects in fact, they +support greenlet contexts as well) for request, session and an extra +object you can put your own things on (:data:`~flask.g`). Why is that and +isn't that a bad idea? + +Yes it is usually not such a bright idea to use thread locals. They cause +troubles for servers that are not based on the concept of threads and make +large applications harder to maintain. However Flask is just not designed +for large applications or asynchronous servers. Flask wants to make it +quick and easy to write a traditional web application. 
+ +Also see the :doc:`/becomingbig` section of the documentation for some +inspiration for larger applications based on Flask. + + +Async/await and ASGI support +---------------------------- + +Flask supports ``async`` coroutines for view functions by executing the +coroutine on a separate thread instead of using an event loop on the +main thread as an async-first (ASGI) framework would. This is necessary +for Flask to remain backwards compatible with extensions and code built +before ``async`` was introduced into Python. This compromise introduces +a performance cost compared with the ASGI frameworks, due to the +overhead of the threads. + +Due to how tied to WSGI Flask's code is, it's not clear if it's possible +to make the ``Flask`` class support ASGI and WSGI at the same time. Work +is currently being done in Werkzeug to work with ASGI, which may +eventually enable support in Flask as well. + +See :doc:`/async-await` for more discussion. + + +What Flask is, What Flask is Not +-------------------------------- + +Flask will never have a database layer. It will not have a form library +or anything else in that direction. Flask itself just bridges to Werkzeug +to implement a proper WSGI application and to Jinja2 to handle templating. +It also binds to a few common standard library packages such as logging. +Everything else is up for extensions. + +Why is this the case? Because people have different preferences and +requirements and Flask could not meet those if it would force any of this +into the core. The majority of web applications will need a template +engine in some sort. However not every application needs a SQL database. + +The idea of Flask is to build a good foundation for all applications. +Everything else is up to you or extensions. 
diff --git a/testbed/pallets__flask/docs/errorhandling.rst b/testbed/pallets__flask/docs/errorhandling.rst new file mode 100644 index 0000000000000000000000000000000000000000..4aa512eaad913e165d8048762f056477a3a01f58 --- /dev/null +++ b/testbed/pallets__flask/docs/errorhandling.rst @@ -0,0 +1,524 @@ +Handling Application Errors +=========================== + +Applications fail, servers fail. Sooner or later you will see an exception +in production. Even if your code is 100% correct, you will still see +exceptions from time to time. Why? Because everything else involved will +fail. Here are some situations where perfectly fine code can lead to server +errors: + +- the client terminated the request early and the application was still + reading from the incoming data +- the database server was overloaded and could not handle the query +- a filesystem is full +- a harddrive crashed +- a backend server overloaded +- a programming error in a library you are using +- network connection of the server to another system failed + +And that's just a small sample of issues you could be facing. So how do we +deal with that sort of problem? By default if your application runs in +production mode, and an exception is raised Flask will display a very simple +page for you and log the exception to the :attr:`~flask.Flask.logger`. + +But there is more you can do, and we will cover some better setups to deal +with errors including custom exceptions and 3rd party tools. + + +.. _error-logging-tools: + +Error Logging Tools +------------------- + +Sending error mails, even if just for critical ones, can become +overwhelming if enough users are hitting the error and log files are +typically never looked at. This is why we recommend using `Sentry +`_ for dealing with application errors. It's +available as a source-available project `on GitHub +`_ and is also available as a `hosted version +`_ which you can try for free. 
Sentry +aggregates duplicate errors, captures the full stack trace and local +variables for debugging, and sends you mails based on new errors or +frequency thresholds. + +To use Sentry you need to install the ``sentry-sdk`` client with extra +``flask`` dependencies. + +.. code-block:: text + + $ pip install sentry-sdk[flask] + +And then add this to your Flask app: + +.. code-block:: python + + import sentry_sdk + from sentry_sdk.integrations.flask import FlaskIntegration + + sentry_sdk.init('YOUR_DSN_HERE', integrations=[FlaskIntegration()]) + +The ``YOUR_DSN_HERE`` value needs to be replaced with the DSN value you +get from your Sentry installation. + +After installation, failures leading to an Internal Server Error +are automatically reported to Sentry and from there you can +receive error notifications. + +See also: + +- Sentry also supports catching errors from a worker queue + (RQ, Celery, etc.) in a similar fashion. See the `Python SDK docs + `__ for more information. +- `Getting started with Sentry `__ +- `Flask-specific documentation `__ + + +Error Handlers +-------------- + +When an error occurs in Flask, an appropriate `HTTP status code +`__ will be +returned. 400-499 indicate errors with the client's request data, or +about the data requested. 500-599 indicate errors with the server or +application itself. + +You might want to show custom error pages to the user when an error occurs. +This can be done by registering error handlers. + +An error handler is a function that returns a response when a type of error is +raised, similar to how a view is a function that returns a response when a +request URL is matched. It is passed the instance of the error being handled, +which is most likely a :exc:`~werkzeug.exceptions.HTTPException`. + +The status code of the response will not be set to the handler's code. Make +sure to provide the appropriate HTTP status code when returning a response from +a handler. 
+ + +Registering +``````````` + +Register handlers by decorating a function with +:meth:`~flask.Flask.errorhandler`. Or use +:meth:`~flask.Flask.register_error_handler` to register the function later. +Remember to set the error code when returning the response. + +.. code-block:: python + + @app.errorhandler(werkzeug.exceptions.BadRequest) + def handle_bad_request(e): + return 'bad request!', 400 + + # or, without the decorator + app.register_error_handler(400, handle_bad_request) + +:exc:`werkzeug.exceptions.HTTPException` subclasses like +:exc:`~werkzeug.exceptions.BadRequest` and their HTTP codes are interchangeable +when registering handlers. (``BadRequest.code == 400``) + +Non-standard HTTP codes cannot be registered by code because they are not known +by Werkzeug. Instead, define a subclass of +:class:`~werkzeug.exceptions.HTTPException` with the appropriate code and +register and raise that exception class. + +.. code-block:: python + + class InsufficientStorage(werkzeug.exceptions.HTTPException): + code = 507 + description = 'Not enough storage space.' + + app.register_error_handler(InsufficientStorage, handle_507) + + raise InsufficientStorage() + +Handlers can be registered for any exception class, not just +:exc:`~werkzeug.exceptions.HTTPException` subclasses or HTTP status +codes. Handlers can be registered for a specific class, or for all subclasses +of a parent class. + + +Handling +```````` + +When building a Flask application you *will* run into exceptions. If some part +of your code breaks while handling a request (and you have no error handlers +registered), a "500 Internal Server Error" +(:exc:`~werkzeug.exceptions.InternalServerError`) will be returned by default. +Similarly, "404 Not Found" +(:exc:`~werkzeug.exceptions.NotFound`) error will occur if a request is sent to an unregistered route. +If a route receives an unallowed request method, a "405 Method Not Allowed" +(:exc:`~werkzeug.exceptions.MethodNotAllowed`) will be raised. 
These are all
+subclasses of :class:`~werkzeug.exceptions.HTTPException` and are provided by
+default in Flask.
+
+Flask gives you the ability to raise any HTTP exception registered by
+Werkzeug. However, the default HTTP exceptions return simple exception
+pages. You might want to show custom error pages to the user when an error occurs.
+This can be done by registering error handlers.
+
+When Flask catches an exception while handling a request, it is first looked up by code.
+If no handler is registered for the code, Flask looks up the error by its class hierarchy; the most specific handler is chosen.
+If no handler is registered, :class:`~werkzeug.exceptions.HTTPException` subclasses show a
+generic message about their code, while other exceptions are converted to a
+generic "500 Internal Server Error".
+
+For example, if an instance of :exc:`ConnectionRefusedError` is raised,
+and a handler is registered for :exc:`ConnectionError` and
+:exc:`ConnectionRefusedError`, the more specific :exc:`ConnectionRefusedError`
+handler is called with the exception instance to generate the response.
+
+Handlers registered on the blueprint take precedence over those registered
+globally on the application, assuming a blueprint is handling the request that
+raises the exception. However, the blueprint cannot handle 404 routing errors
+because the 404 occurs at the routing level before the blueprint can be
+determined.
+
+
+Generic Exception Handlers
+``````````````````````````
+
+It is possible to register error handlers for very generic base classes
+such as ``HTTPException`` or even ``Exception``. However, be aware that
+these will catch more than you might expect.
+
+For example, an error handler for ``HTTPException`` might be useful for turning
+the default HTML error pages into JSON. However, this
+handler will trigger for things you don't cause directly, such as 404
+and 405 errors during routing.
Be sure to craft your handler carefully
+so you don't lose information about the HTTP error.
+
+.. code-block:: python
+
+    from flask import json
+    from werkzeug.exceptions import HTTPException
+
+    @app.errorhandler(HTTPException)
+    def handle_exception(e):
+        """Return JSON instead of HTML for HTTP errors."""
+        # start with the correct headers and status code from the error
+        response = e.get_response()
+        # replace the body with JSON
+        response.data = json.dumps({
+            "code": e.code,
+            "name": e.name,
+            "description": e.description,
+        })
+        response.content_type = "application/json"
+        return response
+
+An error handler for ``Exception`` might seem useful for changing how
+all errors, even unhandled ones, are presented to the user. However,
+this is similar to doing ``except Exception:`` in Python, it will
+capture *all* otherwise unhandled errors, including all HTTP status
+codes.
+
+In most cases it will be safer to register handlers for more
+specific exceptions. Since ``HTTPException`` instances are valid WSGI
+responses, you could also pass them through directly.
+
+.. code-block:: python
+
+    from werkzeug.exceptions import HTTPException
+
+    @app.errorhandler(Exception)
+    def handle_exception(e):
+        # pass through HTTP errors
+        if isinstance(e, HTTPException):
+            return e
+
+        # now you're handling non-HTTP exceptions only
+        return render_template("500_generic.html", e=e), 500
+
+Error handlers still respect the exception class hierarchy. If you
+register handlers for both ``HTTPException`` and ``Exception``, the
+``Exception`` handler will not handle ``HTTPException`` subclasses
+because the ``HTTPException`` handler is more specific.
+
+
+Unhandled Exceptions
+````````````````````
+
+When there is no error handler registered for an exception, a 500
+Internal Server Error will be returned instead. See
+:meth:`flask.Flask.handle_exception` for information about this
+behavior.
+ +If there is an error handler registered for ``InternalServerError``, +this will be invoked. As of Flask 1.1.0, this error handler will always +be passed an instance of ``InternalServerError``, not the original +unhandled error. + +The original error is available as ``e.original_exception``. + +An error handler for "500 Internal Server Error" will be passed uncaught +exceptions in addition to explicit 500 errors. In debug mode, a handler +for "500 Internal Server Error" will not be used. Instead, the +interactive debugger will be shown. + + +Custom Error Pages +------------------ + +Sometimes when building a Flask application, you might want to raise a +:exc:`~werkzeug.exceptions.HTTPException` to signal to the user that +something is wrong with the request. Fortunately, Flask comes with a handy +:func:`~flask.abort` function that aborts a request with a HTTP error from +werkzeug as desired. It will also provide a plain black and white error page +for you with a basic description, but nothing fancy. + +Depending on the error code it is less or more likely for the user to +actually see such an error. + +Consider the code below, we might have a user profile route, and if the user +fails to pass a username we can raise a "400 Bad Request". If the user passes a +username and we can't find it, we raise a "404 Not Found". + +.. 
code-block:: python
+
+    from flask import abort, render_template, request
+
+    # a username needs to be supplied in the query args
+    # a successful request would be like /profile?username=jack
+    @app.route("/profile")
+    def user_profile():
+        username = request.args.get("username")
+        # if a username isn't supplied in the request, return a 400 bad request
+        if username is None:
+            abort(400)
+
+        user = get_user(username=username)
+        # if a user can't be found by their username, return 404 not found
+        if user is None:
+            abort(404)
+
+        return render_template("profile.html", user=user)
+
+Here is another example implementation for a "404 Page Not Found" exception:
+
+.. code-block:: python
+
+    from flask import render_template
+
+    @app.errorhandler(404)
+    def page_not_found(e):
+        # note that we set the 404 status explicitly
+        return render_template('404.html'), 404
+
+When using :doc:`/patterns/appfactories`:
+
+.. code-block:: python
+
+    from flask import Flask, render_template
+
+    def page_not_found(e):
+        return render_template('404.html'), 404
+
+    def create_app(config_filename):
+        app = Flask(__name__)
+        app.register_error_handler(404, page_not_found)
+        return app
+
+An example template might be this:
+
+.. code-block:: html+jinja
+
+    {% extends "layout.html" %}
+    {% block title %}Page Not Found{% endblock %}
+    {% block body %}
+

Page Not Found

+

What you were looking for is just not there. +

go somewhere nice + {% endblock %} + + +Further Examples +```````````````` + +The above examples wouldn't actually be an improvement on the default +exception pages. We can create a custom 500.html template like this: + +.. code-block:: html+jinja + + {% extends "layout.html" %} + {% block title %}Internal Server Error{% endblock %} + {% block body %} +

Internal Server Error

+

Oops... we seem to have made a mistake, sorry!

+

Go somewhere nice instead + {% endblock %} + +It can be implemented by rendering the template on "500 Internal Server Error": + +.. code-block:: python + + from flask import render_template + + @app.errorhandler(500) + def internal_server_error(e): + # note that we set the 500 status explicitly + return render_template('500.html'), 500 + +When using :doc:`/patterns/appfactories`: + +.. code-block:: python + + from flask import Flask, render_template + + def internal_server_error(e): + return render_template('500.html'), 500 + + def create_app(): + app = Flask(__name__) + app.register_error_handler(500, internal_server_error) + return app + +When using :doc:`/blueprints`: + +.. code-block:: python + + from flask import Blueprint + + blog = Blueprint('blog', __name__) + + # as a decorator + @blog.errorhandler(500) + def internal_server_error(e): + return render_template('500.html'), 500 + + # or with register_error_handler + blog.register_error_handler(500, internal_server_error) + + +Blueprint Error Handlers +------------------------ + +In :doc:`/blueprints`, most error handlers will work as expected. +However, there is a caveat concerning handlers for 404 and 405 +exceptions. These error handlers are only invoked from an appropriate +``raise`` statement or a call to ``abort`` in another of the blueprint's +view functions; they are not invoked by, e.g., an invalid URL access. + +This is because the blueprint does not "own" a certain URL space, so +the application instance has no way of knowing which blueprint error +handler it should run if given an invalid URL. If you would like to +execute different handling strategies for these errors based on URL +prefixes, they may be defined at the application level using the +``request`` proxy object. + +.. 
code-block:: python
+
+    from flask import jsonify, render_template, request
+
+    # at the application level
+    # not the blueprint level
+    @app.errorhandler(404)
+    def page_not_found(e):
+        # if a request is in our blog URL space
+        if request.path.startswith('/blog/'):
+            # we return a custom blog 404 page
+            return render_template("blog/404.html"), 404
+        else:
+            # otherwise we return our generic site-wide 404 page
+            return render_template("404.html"), 404
+
+    @app.errorhandler(405)
+    def method_not_allowed(e):
+        # if a request has the wrong method to our API
+        if request.path.startswith('/api/'):
+            # we return a json saying so
+            return jsonify(message="Method Not Allowed"), 405
+        else:
+            # otherwise we return a generic site-wide 405 page
+            return render_template("405.html"), 405
+
+
+Returning API Errors as JSON
+----------------------------
+
+When building APIs in Flask, some developers realise that the built-in
+exceptions are not expressive enough for APIs and that the content type of
+:mimetype:`text/html` they are emitting is not very useful for API consumers.
+
+Using the same techniques as above and :func:`~flask.json.jsonify` we can return JSON
+responses to API errors. :func:`~flask.abort` is called
+with a ``description`` parameter. The error handler will
+use that as the JSON error message, and set the status code to 404.
+
+.. code-block:: python
+
+    from flask import abort, jsonify
+
+    @app.errorhandler(404)
+    def resource_not_found(e):
+        return jsonify(error=str(e)), 404
+
+    @app.route("/cheese")
+    def get_one_cheese():
+        resource = get_resource()
+
+        if resource is None:
+            abort(404, description="Resource not found")
+
+        return jsonify(resource)
+
+We can also create custom exception classes. For instance, we can
+introduce a new custom exception for an API that can take a proper human readable message,
+a status code for the error and some optional payload to give more context
+for the error.
+
+This is a simple example:
+
+..
code-block:: python
+
+    from flask import jsonify, request
+
+    class InvalidAPIUsage(Exception):
+        status_code = 400
+
+        def __init__(self, message, status_code=None, payload=None):
+            super().__init__()
+            self.message = message
+            if status_code is not None:
+                self.status_code = status_code
+            self.payload = payload
+
+        def to_dict(self):
+            rv = dict(self.payload or ())
+            rv['message'] = self.message
+            return rv
+
+    @app.errorhandler(InvalidAPIUsage)
+    def invalid_api_usage(e):
+        return jsonify(e.to_dict())
+
+    # an API app route for getting user information
+    # a correct request might be /api/user?user_id=420
+    @app.route("/api/user")
+    def user_api():
+        user_id = request.args.get("user_id")
+        if not user_id:
+            raise InvalidAPIUsage("No user id provided!")
+
+        user = get_user(user_id=user_id)
+        if not user:
+            raise InvalidAPIUsage("No such user!", status_code=404)
+
+        return jsonify(user.to_dict())
+
+A view can now raise that exception with an error message. Additionally
+some extra payload can be provided as a dictionary through the `payload`
+parameter.
+
+
+Logging
+-------
+
+See :doc:`/logging` for information about how to log exceptions, such as
+by emailing them to admins.
+
+
+Debugging
+---------
+
+See :doc:`/debugging` for information about how to debug errors in
+development and production.
diff --git a/testbed/pallets__flask/docs/extensiondev.rst b/testbed/pallets__flask/docs/extensiondev.rst
new file mode 100644
index 0000000000000000000000000000000000000000..dbaf62cb1a93b012f5063cec2774249b3cbea9f1
--- /dev/null
+++ b/testbed/pallets__flask/docs/extensiondev.rst
@@ -0,0 +1,332 @@
+Flask Extension Development
+===========================
+
+Flask, being a microframework, often requires some repetitive steps to get
+a third party library working. Many such extensions are already available
+on `PyPI`_.
+ +If you want to create your own Flask extension for something that does not +exist yet, this guide to extension development will help you get your +extension running in no time and to feel like users would expect your +extension to behave. + +Anatomy of an Extension +----------------------- + +Extensions are all located in a package called ``flask_something`` +where "something" is the name of the library you want to bridge. So for +example if you plan to add support for a library named `simplexml` to +Flask, you would name your extension's package ``flask_simplexml``. + +The name of the actual extension (the human readable name) however would +be something like "Flask-SimpleXML". Make sure to include the name +"Flask" somewhere in that name and that you check the capitalization. +This is how users can then register dependencies to your extension in +their :file:`setup.py` files. + +But what do extensions look like themselves? An extension has to ensure +that it works with multiple Flask application instances at once. This is +a requirement because many people will use patterns like the +:doc:`/patterns/appfactories` pattern to create their application as +needed to aid unittests and to support multiple configurations. Because +of that it is crucial that your application supports that kind of +behavior. + +Most importantly the extension must be shipped with a :file:`setup.py` file and +registered on PyPI. Also the development checkout link should work so +that people can easily install the development version into their +virtualenv without having to download the library by hand. + +Flask extensions must be licensed under a BSD, MIT or more liberal license +in order to be listed in the Flask Extension Registry. Keep in mind +that the Flask Extension Registry is a moderated place and libraries will +be reviewed upfront if they behave as required. + +"Hello Flaskext!" +----------------- + +So let's get started with creating such a Flask extension. 
The extension +we want to create here will provide very basic support for SQLite3. + +First we create the following folder structure:: + + flask-sqlite3/ + flask_sqlite3.py + LICENSE + README + +Here's the contents of the most important files: + +setup.py +```````` + +The next file that is absolutely required is the :file:`setup.py` file which is +used to install your Flask extension. The following contents are +something you can work with:: + + """ + Flask-SQLite3 + ------------- + + This is the description for that library + """ + from setuptools import setup + + + setup( + name='Flask-SQLite3', + version='1.0', + url='http://example.com/flask-sqlite3/', + license='BSD', + author='Your Name', + author_email='your-email@example.com', + description='Very short description', + long_description=__doc__, + py_modules=['flask_sqlite3'], + # if you would be using a package instead use packages instead + # of py_modules: + # packages=['flask_sqlite3'], + zip_safe=False, + include_package_data=True, + platforms='any', + install_requires=[ + 'Flask' + ], + classifiers=[ + 'Environment :: Web Environment', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: BSD License', + 'Operating System :: OS Independent', + 'Programming Language :: Python', + 'Topic :: Internet :: WWW/HTTP :: Dynamic Content', + 'Topic :: Software Development :: Libraries :: Python Modules' + ] + ) + +That's a lot of code but you can really just copy/paste that from existing +extensions and adapt. + +flask_sqlite3.py +```````````````` + +Now this is where your extension code goes. But how exactly should such +an extension look like? What are the best practices? Continue reading +for some insight. + +Initializing Extensions +----------------------- + +Many extensions will need some kind of initialization step. For example, +consider an application that's currently connecting to SQLite like the +documentation suggests (:doc:`/patterns/sqlite3`). 
So how does the +extension know the name of the application object? + +Quite simple: you pass it to it. + +There are two recommended ways for an extension to initialize: + +initialization functions: + + If your extension is called `helloworld` you might have a function + called ``init_helloworld(app[, extra_args])`` that initializes the + extension for that application. It could attach before / after + handlers etc. + +classes: + + Classes work mostly like initialization functions but can later be + used to further change the behavior. + +What to use depends on what you have in mind. For the SQLite 3 extension +we will use the class-based approach because it will provide users with an +object that handles opening and closing database connections. + +When designing your classes, it's important to make them easily reusable +at the module level. This means the object itself must not under any +circumstances store any application specific state and must be shareable +between different applications. + +The Extension Code +------------------ + +Here's the contents of the `flask_sqlite3.py` for copy/paste:: + + import sqlite3 + from flask import current_app, _app_ctx_stack + + + class SQLite3(object): + def __init__(self, app=None): + self.app = app + if app is not None: + self.init_app(app) + + def init_app(self, app): + app.config.setdefault('SQLITE3_DATABASE', ':memory:') + app.teardown_appcontext(self.teardown) + + def connect(self): + return sqlite3.connect(current_app.config['SQLITE3_DATABASE']) + + def teardown(self, exception): + ctx = _app_ctx_stack.top + if hasattr(ctx, 'sqlite3_db'): + ctx.sqlite3_db.close() + + @property + def connection(self): + ctx = _app_ctx_stack.top + if ctx is not None: + if not hasattr(ctx, 'sqlite3_db'): + ctx.sqlite3_db = self.connect() + return ctx.sqlite3_db + + +So here's what these lines of code do: + +1. The ``__init__`` method takes an optional app object and, if supplied, will + call ``init_app``. +2. 
The ``init_app`` method exists so that the ``SQLite3`` object can be
+   instantiated without requiring an app object. This method supports the
+   factory pattern for creating applications. The ``init_app`` will set the
+   configuration for the database, defaulting to an in memory database if
+   no configuration is supplied. In addition, the ``init_app`` method
+   attaches the ``teardown`` handler.
+3. Next, we define a ``connect`` method that opens a database connection.
+4. Finally, we add a ``connection`` property that on first access opens
+   the database connection and stores it on the context. This is also
+   the recommended way to handle resources: fetch resources lazily the
+   first time they are used.
+
+   Note here that we're attaching our database connection to the top
+   application context via ``_app_ctx_stack.top``. Extensions should use
+   the top context for storing their own information with a sufficiently
+   complex name.
+
+So why did we decide on a class-based approach here? Because using our
+extension looks something like this::
+
+    from flask import Flask
+    from flask_sqlite3 import SQLite3
+
+    app = Flask(__name__)
+    app.config.from_pyfile('the-config.cfg')
+    db = SQLite3(app)
+
+You can then use the database from views like this::
+
+    @app.route('/')
+    def show_all():
+        cur = db.connection.cursor()
+        cur.execute(...)
+
+Likewise if you are outside of a request you can use the database by
+pushing an app context::
+
+    with app.app_context():
+        cur = db.connection.cursor()
+        cur.execute(...)
+
+At the end of the ``with`` block the teardown handlers will be executed
+automatically.
+
+Additionally, the ``init_app`` method is used to support the factory pattern
+for creating apps::
+
+    db = SQLite3()
+    # Then later on.
+    app = create_app('the-config.cfg')
+    db.init_app(app)
+
+Keep in mind that supporting this factory pattern for creating apps is required
+for approved flask extensions (described below).
+
+..
admonition:: Note on ``init_app`` + + As you noticed, ``init_app`` does not assign ``app`` to ``self``. This + is intentional! Class based Flask extensions must only store the + application on the object when the application was passed to the + constructor. This tells the extension: I am not interested in using + multiple applications. + + When the extension needs to find the current application and it does + not have a reference to it, it must either use the + :data:`~flask.current_app` context local or change the API in a way + that you can pass the application explicitly. + + +Using _app_ctx_stack +-------------------- + +In the example above, before every request, a ``sqlite3_db`` variable is +assigned to ``_app_ctx_stack.top``. In a view function, this variable is +accessible using the ``connection`` property of ``SQLite3``. During the +teardown of a request, the ``sqlite3_db`` connection is closed. By using +this pattern, the *same* connection to the sqlite3 database is accessible +to anything that needs it for the duration of the request. + + +Learn from Others +----------------- + +This documentation only touches the bare minimum for extension development. +If you want to learn more, it's a very good idea to check out existing extensions +on the `PyPI`_. If you feel lost there is still the `mailinglist`_ and the +`Discord server`_ to get some ideas for nice looking APIs. Especially if you do +something nobody before you did, it might be a very good idea to get some more +input. This not only generates useful feedback on what people might want from +an extension, but also avoids having multiple developers working in isolation +on pretty much the same problem. + +Remember: good API design is hard, so introduce your project on the +mailing list, and let other developers give you a helping hand with +designing the API. + +The best Flask extensions are extensions that share common idioms for the +API. And this can only work if collaboration happens early. 
+ +Approved Extensions +------------------- + +Flask previously had the concept of approved extensions. These came with +some vetting of support and compatibility. While this list became too +difficult to maintain over time, the guidelines are still relevant to +all extensions maintained and developed today, as they help the Flask +ecosystem remain consistent and compatible. + +0. An approved Flask extension requires a maintainer. In the event an + extension author would like to move beyond the project, the project + should find a new maintainer and transfer access to the repository, + documentation, PyPI, and any other services. If no maintainer + is available, give access to the Pallets core team. +1. The naming scheme is *Flask-ExtensionName* or *ExtensionName-Flask*. + It must provide exactly one package or module named + ``flask_extension_name``. +2. The extension must be BSD or MIT licensed. It must be open source + and publicly available. +3. The extension's API must have the following characteristics: + + - It must support multiple applications running in the same Python + process. Use ``current_app`` instead of ``self.app``, store + configuration and state per application instance. + - It must be possible to use the factory pattern for creating + applications. Use the ``ext.init_app()`` pattern. + +4. From a clone of the repository, an extension with its dependencies + must be installable with ``pip install -e .``. +5. It must ship a testing suite that can be invoked with ``tox -e py`` + or ``pytest``. If not using ``tox``, the test dependencies should be + specified in a ``requirements.txt`` file. The tests must be part of + the sdist distribution. +6. The documentation must use the ``flask`` theme from the + `Official Pallets Themes`_. A link to the documentation or project + website must be in the PyPI metadata or the readme. +7. For maximum compatibility, the extension should support the same + versions of Python that Flask supports. 
3.7+ is recommended as of + December 2021. Use ``python_requires=">= 3.7"`` in ``setup.py`` to + indicate supported versions. + +.. _PyPI: https://pypi.org/search/?c=Framework+%3A%3A+Flask +.. _mailinglist: https://mail.python.org/mailman/listinfo/flask +.. _Discord server: https://discord.gg/pallets +.. _Official Pallets Themes: https://pypi.org/project/Pallets-Sphinx-Themes/ diff --git a/testbed/pallets__flask/docs/extensions.rst b/testbed/pallets__flask/docs/extensions.rst new file mode 100644 index 0000000000000000000000000000000000000000..784fd807a756a4b79a1069cd2ce761f0b5a761b8 --- /dev/null +++ b/testbed/pallets__flask/docs/extensions.rst @@ -0,0 +1,48 @@ +Extensions +========== + +Extensions are extra packages that add functionality to a Flask +application. For example, an extension might add support for sending +email or connecting to a database. Some extensions add entire new +frameworks to help build certain types of applications, like a REST API. + + +Finding Extensions +------------------ + +Flask extensions are usually named "Flask-Foo" or "Foo-Flask". You can +search PyPI for packages tagged with `Framework :: Flask `_. + + +Using Extensions +---------------- + +Consult each extension's documentation for installation, configuration, +and usage instructions. Generally, extensions pull their own +configuration from :attr:`app.config ` and are +passed an application instance during initialization. For example, +an extension called "Flask-Foo" might be used like this:: + + from flask_foo import Foo + + foo = Foo() + + app = Flask(__name__) + app.config.update( + FOO_BAR='baz', + FOO_SPAM='eggs', + ) + + foo.init_app(app) + + +Building Extensions +------------------- + +While the `PyPI `_ contains many Flask extensions, you may +not find an extension that fits your need. If this is the case, you can +create your own. Read :doc:`/extensiondev` to develop your own Flask +extension. + + +.. 
_pypi: https://pypi.org/search/?c=Framework+%3A%3A+Flask diff --git a/testbed/pallets__flask/docs/foreword.rst b/testbed/pallets__flask/docs/foreword.rst new file mode 100644 index 0000000000000000000000000000000000000000..6a6d17f974a6cff755e7e8583ff69ed815cb3f7c --- /dev/null +++ b/testbed/pallets__flask/docs/foreword.rst @@ -0,0 +1,53 @@ +Foreword +======== + +Read this before you get started with Flask. This hopefully answers some +questions about the purpose and goals of the project, and when you +should or should not be using it. + +What does "micro" mean? +----------------------- + +“Micro” does not mean that your whole web application has to fit into a single +Python file (although it certainly can), nor does it mean that Flask is lacking +in functionality. The "micro" in microframework means Flask aims to keep the +core simple but extensible. Flask won't make many decisions for you, such as +what database to use. Those decisions that it does make, such as what +templating engine to use, are easy to change. Everything else is up to you, so +that Flask can be everything you need and nothing you don't. + +By default, Flask does not include a database abstraction layer, form +validation or anything else where different libraries already exist that can +handle that. Instead, Flask supports extensions to add such functionality to +your application as if it was implemented in Flask itself. Numerous extensions +provide database integration, form validation, upload handling, various open +authentication technologies, and more. Flask may be "micro", but it's ready for +production use on a variety of needs. + +Configuration and Conventions +----------------------------- + +Flask has many configuration values, with sensible defaults, and a few +conventions when getting started. By convention, templates and static +files are stored in subdirectories within the application's Python +source tree, with the names :file:`templates` and :file:`static` +respectively. 
While this can be changed, you usually don't have to, +especially when getting started. + +Growing with Flask +------------------ + +Once you have Flask up and running, you'll find a variety of extensions +available in the community to integrate your project for production. + +As your codebase grows, you are free to make the design decisions appropriate +for your project. Flask will continue to provide a very simple glue layer to +the best that Python has to offer. You can implement advanced patterns in +SQLAlchemy or another database tool, introduce non-relational data persistence +as appropriate, and take advantage of framework-agnostic tools built for WSGI, +the Python web interface. + +Flask includes many hooks to customize its behavior. Should you need more +customization, the Flask class is built for subclassing. If you are interested +in that, check out the :doc:`becomingbig` chapter. If you are curious about +the Flask design principles, head over to the section about :doc:`design`. diff --git a/testbed/pallets__flask/docs/htmlfaq.rst b/testbed/pallets__flask/docs/htmlfaq.rst new file mode 100644 index 0000000000000000000000000000000000000000..4807c2662ddd349ea8c387a0609c9ed5dada26d1 --- /dev/null +++ b/testbed/pallets__flask/docs/htmlfaq.rst @@ -0,0 +1,206 @@ +HTML/XHTML FAQ +============== + +The Flask documentation and example applications are using HTML5. You +may notice that in many situations, when end tags are optional they are +not used, so that the HTML is cleaner and faster to load. Because there +is much confusion about HTML and XHTML among developers, this document tries +to answer some of the major questions. + + +History of XHTML +---------------- + +For a while, it appeared that HTML was about to be replaced by XHTML. +However, barely any websites on the Internet are actual XHTML (which is +HTML processed using XML rules). There are a couple of major reasons +why this is the case. 
One of them is Internet Explorer's lack of proper +XHTML support. The XHTML spec states that XHTML must be served with the MIME +type :mimetype:`application/xhtml+xml`, but Internet Explorer refuses +to read files with that MIME type. +While it is relatively easy to configure Web servers to serve XHTML properly, +few people do. This is likely because properly using XHTML can be quite +painful. + +One of the most important causes of pain is XML's draconian (strict and +ruthless) error handling. When an XML parsing error is encountered, +the browser is supposed to show the user an ugly error message, instead +of attempting to recover from the error and display what it can. Most of +the (X)HTML generation on the web is based on non-XML template engines +(such as Jinja, the one used in Flask) which do not protect you from +accidentally creating invalid XHTML. There are XML based template engines, +such as Kid and the popular Genshi, but they often come with a larger +runtime overhead and are not as straightforward to use because they have +to obey XML rules. + +The majority of users, however, assumed they were properly using XHTML. +They wrote an XHTML doctype at the top of the document and self-closed all +the necessary tags (``
<br>`` becomes ``<br/>`` or ``<br></br>
`` in XHTML). +However, even if the document properly validates as XHTML, what really +determines XHTML/HTML processing in browsers is the MIME type, which as +said before is often not set properly. So the valid XHTML was being treated +as invalid HTML. + +XHTML also changed the way JavaScript is used. To properly work with XHTML, +programmers have to use the namespaced DOM interface with the XHTML +namespace to query for HTML elements. + +History of HTML5 +---------------- + +Development of the HTML5 specification was started in 2004 under the name +"Web Applications 1.0" by the Web Hypertext Application Technology Working +Group, or WHATWG (which was formed by the major browser vendors Apple, +Mozilla, and Opera) with the goal of writing a new and improved HTML +specification, based on existing browser behavior instead of unrealistic +and backwards-incompatible specifications. + +For example, in HTML4 ``Hello``. However, since people were using +XHTML-like tags along the lines of ````, browser vendors implemented +the XHTML syntax over the syntax defined by the specification. + +In 2007, the specification was adopted as the basis of a new HTML +specification under the umbrella of the W3C, known as HTML5. Currently, +it appears that XHTML is losing traction, as the XHTML 2 working group has +been disbanded and HTML5 is being implemented by all major browser vendors. + +HTML versus XHTML +----------------- + +The following table gives you a quick overview of features available in +HTML 4.01, XHTML 1.1 and HTML5. (XHTML 1.0 is not included, as it was +superseded by XHTML 1.1 and the barely-used XHTML5.) + +.. tabularcolumns:: |p{9cm}|p{2cm}|p{2cm}|p{2cm}| + ++-----------------------------------------+----------+----------+----------+ +| | HTML4.01 | XHTML1.1 | HTML5 | ++=========================================+==========+==========+==========+ +| ``value`` | |Y| [1]_ | |N| | |N| | ++-----------------------------------------+----------+----------+----------+ +| ``
`` supported | |N| | |Y| | |Y| [2]_ | ++-----------------------------------------+----------+----------+----------+ +| `` + +Another method is using Google's `AJAX Libraries API +`_ to load jQuery: + +.. sourcecode:: html + + + + +In this case you have to put jQuery into your static folder as a fallback, but it will +first try to load it directly from Google. This has the advantage that your +website will probably load faster for users if they went to at least one +other website before using the same jQuery version from Google because it +will already be in the browser cache. + +Where is My Site? +----------------- + +Do you know where your application is? If you are developing the answer +is quite simple: it's on localhost port something and directly on the root +of that server. But what if you later decide to move your application to +a different location? For example to ``http://example.com/myapp``? On +the server side this never was a problem because we were using the handy +:func:`~flask.url_for` function that could answer that question for +us, but if we are using jQuery we should not hardcode the path to +the application but make that dynamic, so how can we do that? + +A simple method would be to add a script tag to our page that sets a +global variable to the prefix to the root of the application. Something +like this: + +.. sourcecode:: html+jinja + + + + +JSON View Functions +------------------- + +Now let's create a server side function that accepts two URL arguments of +numbers which should be added together and then sent back to the +application in a JSON object. 
This is a really ridiculous example and is +something you usually would do on the client side alone, but a simple +example that shows how you would use jQuery and Flask nonetheless:: + + from flask import Flask, jsonify, render_template, request + app = Flask(__name__) + + @app.route('/_add_numbers') + def add_numbers(): + a = request.args.get('a', 0, type=int) + b = request.args.get('b', 0, type=int) + return jsonify(result=a + b) + + @app.route('/') + def index(): + return render_template('index.html') + +As you can see I also added an `index` method here that renders a +template. This template will load jQuery as above and have a little form where +we can add two numbers and a link to trigger the function on the server +side. + +Note that we are using the :meth:`~werkzeug.datastructures.MultiDict.get` method here +which will never fail. If the key is missing a default value (here ``0``) +is returned. Furthermore it can convert values to a specific type (like +in our case `int`). This is especially handy for code that is +triggered by a script (APIs, JavaScript etc.) because you don't need +special error reporting in that case. + +The HTML +-------- + +Your index.html template either has to extend a :file:`layout.html` template with +jQuery loaded and the `$SCRIPT_ROOT` variable set, or do that on the top. +Here's the HTML code needed for our little application (:file:`index.html`). +Notice that we also drop the script directly into the HTML here. It is +usually a better idea to have that in a separate script file: + +.. sourcecode:: html + + +

jQuery Example

+

+ + = + ? +

calculate server side + +I won't go into detail here about how jQuery works, just a very quick +explanation of the little bit of code above: + +1. ``$(function() { ... })`` specifies code that should run once the + browser is done loading the basic parts of the page. +2. ``$('selector')`` selects an element and lets you operate on it. +3. ``element.bind('event', func)`` specifies a function that should run + when the user clicked on the element. If that function returns + `false`, the default behavior will not kick in (in this case, navigate + to the `#` URL). +4. ``$.getJSON(url, data, func)`` sends a ``GET`` request to `url` and will + send the contents of the `data` object as query parameters. Once the + data arrived, it will call the given function with the return value as + argument. Note that we can use the `$SCRIPT_ROOT` variable here that + we set earlier. + +Check out the :gh:`example source ` for a full +application demonstrating the code on this page, as well as the same +thing using ``XMLHttpRequest`` and ``fetch``. diff --git a/testbed/pallets__flask/docs/patterns/lazyloading.rst b/testbed/pallets__flask/docs/patterns/lazyloading.rst new file mode 100644 index 0000000000000000000000000000000000000000..658a1cd43c890251140d69db7b938368c245ca26 --- /dev/null +++ b/testbed/pallets__flask/docs/patterns/lazyloading.rst @@ -0,0 +1,109 @@ +Lazily Loading Views +==================== + +Flask is usually used with the decorators. Decorators are simple and you +have the URL right next to the function that is called for that specific +URL. However there is a downside to this approach: it means all your code +that uses decorators has to be imported upfront or Flask will never +actually find your function. + +This can be a problem if your application has to import quick. It might +have to do that on systems like Google's App Engine or other systems. 
So +if you suddenly notice that your application outgrows this approach you +can fall back to a centralized URL mapping. + +The system that enables having a central URL map is the +:meth:`~flask.Flask.add_url_rule` function. Instead of using decorators, +you have a file that sets up the application with all URLs. + +Converting to Centralized URL Map +--------------------------------- + +Imagine the current application looks somewhat like this:: + + from flask import Flask + app = Flask(__name__) + + @app.route('/') + def index(): + pass + + @app.route('/user/') + def user(username): + pass + +Then, with the centralized approach you would have one file with the views +(:file:`views.py`) but without any decorator:: + + def index(): + pass + + def user(username): + pass + +And then a file that sets up an application which maps the functions to +URLs:: + + from flask import Flask + from yourapplication import views + app = Flask(__name__) + app.add_url_rule('/', view_func=views.index) + app.add_url_rule('/user/', view_func=views.user) + +Loading Late +------------ + +So far we only split up the views and the routing, but the module is still +loaded upfront. The trick is to actually load the view function as needed. +This can be accomplished with a helper class that behaves just like a +function but internally imports the real function on first use:: + + from werkzeug.utils import import_string, cached_property + + class LazyView(object): + + def __init__(self, import_name): + self.__module__, self.__name__ = import_name.rsplit('.', 1) + self.import_name = import_name + + @cached_property + def view(self): + return import_string(self.import_name) + + def __call__(self, *args, **kwargs): + return self.view(*args, **kwargs) + +What's important here is that `__module__` and `__name__` are properly +set. This is used by Flask internally to figure out how to name the +URL rules in case you don't provide a name for the rule yourself. 
+ +Then you can define your central place to combine the views like this:: + + from flask import Flask + from yourapplication.helpers import LazyView + app = Flask(__name__) + app.add_url_rule('/', + view_func=LazyView('yourapplication.views.index')) + app.add_url_rule('/user/', + view_func=LazyView('yourapplication.views.user')) + +You can further optimize this in terms of amount of keystrokes needed to +write this by having a function that calls into +:meth:`~flask.Flask.add_url_rule` by prefixing a string with the project +name and a dot, and by wrapping `view_func` in a `LazyView` as needed. :: + + def url(import_name, url_rules=[], **options): + view = LazyView(f"yourapplication.{import_name}") + for url_rule in url_rules: + app.add_url_rule(url_rule, view_func=view, **options) + + # add a single route to the index view + url('views.index', ['/']) + + # add two routes to a single function endpoint + url_rules = ['/user/','/user/'] + url('views.user', url_rules) + +One thing to keep in mind is that before and after request handlers have +to be in a file that is imported upfront to work properly on the first +request. The same goes for any kind of remaining decorator. diff --git a/testbed/pallets__flask/docs/patterns/methodoverrides.rst b/testbed/pallets__flask/docs/patterns/methodoverrides.rst new file mode 100644 index 0000000000000000000000000000000000000000..45dbb87e20ba5e0361a12bec97e11aae9d5ebb3a --- /dev/null +++ b/testbed/pallets__flask/docs/patterns/methodoverrides.rst @@ -0,0 +1,42 @@ +Adding HTTP Method Overrides +============================ + +Some HTTP proxies do not support arbitrary HTTP methods or newer HTTP +methods (such as PATCH). In that case it's possible to "proxy" HTTP +methods through another HTTP method in total violation of the protocol. + +The way this works is by letting the client do an HTTP POST request and +set the ``X-HTTP-Method-Override`` header. Then the method is replaced +with the header value before being passed to Flask. 
+ +This can be accomplished with an HTTP middleware:: + + class HTTPMethodOverrideMiddleware(object): + allowed_methods = frozenset([ + 'GET', + 'HEAD', + 'POST', + 'DELETE', + 'PUT', + 'PATCH', + 'OPTIONS' + ]) + bodyless_methods = frozenset(['GET', 'HEAD', 'OPTIONS', 'DELETE']) + + def __init__(self, app): + self.app = app + + def __call__(self, environ, start_response): + method = environ.get('HTTP_X_HTTP_METHOD_OVERRIDE', '').upper() + if method in self.allowed_methods: + environ['REQUEST_METHOD'] = method + if method in self.bodyless_methods: + environ['CONTENT_LENGTH'] = '0' + return self.app(environ, start_response) + +To use this with Flask, wrap the app object with the middleware:: + + from flask import Flask + + app = Flask(__name__) + app.wsgi_app = HTTPMethodOverrideMiddleware(app.wsgi_app) diff --git a/testbed/pallets__flask/docs/patterns/mongoengine.rst b/testbed/pallets__flask/docs/patterns/mongoengine.rst new file mode 100644 index 0000000000000000000000000000000000000000..015e7b613b49d8aa15db4bd0295ce654f1c765bd --- /dev/null +++ b/testbed/pallets__flask/docs/patterns/mongoengine.rst @@ -0,0 +1,103 @@ +MongoDB with MongoEngine +======================== + +Using a document database like MongoDB is a common alternative to +relational SQL databases. This pattern shows how to use +`MongoEngine`_, a document mapper library, to integrate with MongoDB. + +A running MongoDB server and `Flask-MongoEngine`_ are required. :: + + pip install flask-mongoengine + +.. _MongoEngine: http://mongoengine.org +.. _Flask-MongoEngine: https://flask-mongoengine.readthedocs.io + + +Configuration +------------- + +Basic setup can be done by defining ``MONGODB_SETTINGS`` on +``app.config`` and creating a ``MongoEngine`` instance. 
:: + + from flask import Flask + from flask_mongoengine import MongoEngine + + app = Flask(__name__) + app.config['MONGODB_SETTINGS'] = { + "db": "myapp", + } + db = MongoEngine(app) + + +Mapping Documents +----------------- + +To declare a model that represents a Mongo document, create a class that +inherits from ``Document`` and declare each of the fields. :: + + import mongoengine as me + + class Movie(me.Document): + title = me.StringField(required=True) + year = me.IntField() + rated = me.StringField() + director = me.StringField() + actors = me.ListField() + +If the document has nested fields, use ``EmbeddedDocument`` to +define the fields of the embedded document and +``EmbeddedDocumentField`` to declare it on the parent document. :: + + class Imdb(me.EmbeddedDocument): + imdb_id = me.StringField() + rating = me.DecimalField() + votes = me.IntField() + + class Movie(me.Document): + ... + imdb = me.EmbeddedDocumentField(Imdb) + + +Creating Data +------------- + +Instantiate your document class with keyword arguments for the fields. +You can also assign values to the field attributes after instantiation. +Then call ``doc.save()``. :: + + bttf = Movie(title="Back To The Future", year=1985) + bttf.actors = [ + "Michael J. Fox", + "Christopher Lloyd" + ] + bttf.imdb = Imdb(imdb_id="tt0088763", rating=8.5) + bttf.save() + + +Queries +------- + +Use the class ``objects`` attribute to make queries. A keyword argument +looks for an equal value on the field. :: + + bttf = Movie.objects(title="Back To The Future").get_or_404() + +Query operators may be used by concatenating them with the field name +using a double-underscore. ``objects``, and queries returned by +calling it, are iterable. :: + + some_theron_movie = Movie.objects(actors__in=["Charlize Theron"]).first() + + for recents in Movie.objects(year__gte=2017): + print(recents.title) + + +Documentation +------------- + +There are many more ways to define and query documents with MongoEngine. 
+For more information, check out the `official documentation +`_. + +Flask-MongoEngine adds helpful utilities on top of MongoEngine. Check +out their `documentation `_ as well. diff --git a/testbed/pallets__flask/docs/patterns/packages.rst b/testbed/pallets__flask/docs/patterns/packages.rst new file mode 100644 index 0000000000000000000000000000000000000000..7c3a34cf7c5622fef09b2fd04b2e94ca65391298 --- /dev/null +++ b/testbed/pallets__flask/docs/patterns/packages.rst @@ -0,0 +1,187 @@ +Large Applications as Packages +============================== + +Imagine a simple flask application structure that looks like this:: + + /yourapplication + yourapplication.py + /static + style.css + /templates + layout.html + index.html + login.html + ... + +While this is fine for small applications, for larger applications +it's a good idea to use a package instead of a module. +The :doc:`/tutorial/index` is structured to use the package pattern, +see the :gh:`example code `. + +Simple Packages +--------------- + +To convert that into a larger one, just create a new folder +:file:`yourapplication` inside the existing one and move everything below it. +Then rename :file:`yourapplication.py` to :file:`__init__.py`. (Make sure to delete +all ``.pyc`` files first, otherwise things would most likely break) + +You should then end up with something like that:: + + /yourapplication + /yourapplication + __init__.py + /static + style.css + /templates + layout.html + index.html + login.html + ... + +But how do you run your application now? The naive ``python +yourapplication/__init__.py`` will not work. Let's just say that Python +does not want modules in packages to be the startup file. 
But that is not +a big problem, just add a new file called :file:`setup.py` next to the inner +:file:`yourapplication` folder with the following contents:: + + from setuptools import setup + + setup( + name='yourapplication', + packages=['yourapplication'], + include_package_data=True, + install_requires=[ + 'flask', + ], + ) + +In order to run the application you need to export an environment variable +that tells Flask where to find the application instance: + +.. tabs:: + + .. group-tab:: Bash + + .. code-block:: text + + $ export FLASK_APP=yourapplication + + .. group-tab:: Fish + + .. code-block:: text + + $ set -x FLASK_APP yourapplication + + .. group-tab:: CMD + + .. code-block:: text + + > set FLASK_APP=yourapplication + + .. group-tab:: Powershell + + .. code-block:: text + + > $env:FLASK_APP = "yourapplication" + +If you are outside of the project directory make sure to provide the exact +path to your application directory. Similarly you can turn on the +development features like this: + +.. tabs:: + + .. group-tab:: Bash + + .. code-block:: text + + $ export FLASK_ENV=development + + .. group-tab:: Fish + + .. code-block:: text + + $ set -x FLASK_ENV development + + .. group-tab:: CMD + + .. code-block:: text + + > set FLASK_ENV=development + + .. group-tab:: Powershell + + .. code-block:: text + + > $env:FLASK_ENV = "development" + +In order to install and run the application you need to issue the following +commands:: + + $ pip install -e . + $ flask run + +What did we gain from this? Now we can restructure the application a bit +into multiple modules. The only thing you have to remember is the +following quick checklist: + +1. the `Flask` application object creation has to be in the + :file:`__init__.py` file. That way each module can import it safely and the + `__name__` variable will resolve to the correct package. +2. 
all the view functions (the ones with a :meth:`~flask.Flask.route` + decorator on top) have to be imported in the :file:`__init__.py` file. + Not the object itself, but the module it is in. Import the view module + **after the application object is created**. + +Here's an example :file:`__init__.py`:: + + from flask import Flask + app = Flask(__name__) + + import yourapplication.views + +And this is what :file:`views.py` would look like:: + + from yourapplication import app + + @app.route('/') + def index(): + return 'Hello World!' + +You should then end up with something like that:: + + /yourapplication + setup.py + /yourapplication + __init__.py + views.py + /static + style.css + /templates + layout.html + index.html + login.html + ... + +.. admonition:: Circular Imports + + Every Python programmer hates them, and yet we just added some: + circular imports (That's when two modules depend on each other. In this + case :file:`views.py` depends on :file:`__init__.py`). Be advised that this is a + bad idea in general but here it is actually fine. The reason for this is + that we are not actually using the views in :file:`__init__.py` and just + ensuring the module is imported and we are doing that at the bottom of + the file. + + There are still some problems with that approach but if you want to use + decorators there is no way around that. Check out the + :doc:`/becomingbig` section for some inspiration how to deal with that. + + +Working with Blueprints +----------------------- + +If you have larger applications it's recommended to divide them into +smaller groups where each group is implemented with the help of a +blueprint. For a gentle introduction into this topic refer to the +:doc:`/blueprints` chapter of the documentation. 
diff --git a/testbed/pallets__flask/docs/patterns/requestchecksum.rst b/testbed/pallets__flask/docs/patterns/requestchecksum.rst new file mode 100644 index 0000000000000000000000000000000000000000..25bc38b2a43b220597ca8c2e4a3b87730173a95a --- /dev/null +++ b/testbed/pallets__flask/docs/patterns/requestchecksum.rst @@ -0,0 +1,55 @@ +Request Content Checksums +========================= + +Various pieces of code can consume the request data and preprocess it. +For instance JSON data ends up on the request object already read and +processed, form data ends up there as well but goes through a different +code path. This seems inconvenient when you want to calculate the +checksum of the incoming request data. This is necessary sometimes for +some APIs. + +Fortunately this is however very simple to change by wrapping the input +stream. + +The following example calculates the SHA1 checksum of the incoming data as +it gets read and stores it in the WSGI environment:: + + import hashlib + + class ChecksumCalcStream(object): + + def __init__(self, stream): + self._stream = stream + self._hash = hashlib.sha1() + + def read(self, bytes): + rv = self._stream.read(bytes) + self._hash.update(rv) + return rv + + def readline(self, size_hint): + rv = self._stream.readline(size_hint) + self._hash.update(rv) + return rv + + def generate_checksum(request): + env = request.environ + stream = ChecksumCalcStream(env['wsgi.input']) + env['wsgi.input'] = stream + return stream._hash + +To use this, all you need to do is to hook the calculating stream in +before the request starts consuming data. (Eg: be careful accessing +``request.form`` or anything of that nature. ``before_request_handlers`` +for instance should be careful not to access it). + +Example usage:: + + @app.route('/special-api', methods=['POST']) + def special_api(): + hash = generate_checksum(request) + # Accessing this parses the input stream + files = request.files + # At this point the hash is fully constructed. 
+ checksum = hash.hexdigest() + return f"Hash was: {checksum}" diff --git a/testbed/pallets__flask/docs/patterns/singlepageapplications.rst b/testbed/pallets__flask/docs/patterns/singlepageapplications.rst new file mode 100644 index 0000000000000000000000000000000000000000..1cb779b33bd145c2341772f9676f88d8779dd502 --- /dev/null +++ b/testbed/pallets__flask/docs/patterns/singlepageapplications.rst @@ -0,0 +1,24 @@ +Single-Page Applications +======================== + +Flask can be used to serve Single-Page Applications (SPA) by placing static +files produced by your frontend framework in a subfolder inside of your +project. You will also need to create a catch-all endpoint that routes all +requests to your SPA. + +The following example demonstrates how to serve an SPA along with an API:: + + from flask import Flask, jsonify + + app = Flask(__name__, static_folder='app', static_url_path="/app") + + + @app.route("/heartbeat") + def heartbeat(): + return jsonify({"status": "healthy"}) + + + @app.route('/', defaults={'path': ''}) + @app.route('/') + def catch_all(path): + return app.send_static_file("index.html") diff --git a/testbed/pallets__flask/docs/patterns/sqlalchemy.rst b/testbed/pallets__flask/docs/patterns/sqlalchemy.rst new file mode 100644 index 0000000000000000000000000000000000000000..734d550c2bd4a7575cde4f4468fc6520ac6c9122 --- /dev/null +++ b/testbed/pallets__flask/docs/patterns/sqlalchemy.rst @@ -0,0 +1,215 @@ +SQLAlchemy in Flask +=================== + +Many people prefer `SQLAlchemy`_ for database access. In this case it's +encouraged to use a package instead of a module for your flask application +and drop the models into a separate module (:doc:`packages`). While that +is not necessary, it makes a lot of sense. + +There are four very common ways to use SQLAlchemy. 
I will outline each +of them here: + +Flask-SQLAlchemy Extension +-------------------------- + +Because SQLAlchemy is a common database abstraction layer and object +relational mapper that requires a little bit of configuration effort, +there is a Flask extension that handles that for you. This is recommended +if you want to get started quickly. + +You can download `Flask-SQLAlchemy`_ from `PyPI +`_. + +.. _Flask-SQLAlchemy: https://flask-sqlalchemy.palletsprojects.com/ + + +Declarative +----------- + +The declarative extension in SQLAlchemy is the most recent method of using +SQLAlchemy. It allows you to define tables and models in one go, similar +to how Django works. In addition to the following text I recommend the +official documentation on the `declarative`_ extension. + +Here's the example :file:`database.py` module for your application:: + + from sqlalchemy import create_engine + from sqlalchemy.orm import scoped_session, sessionmaker + from sqlalchemy.ext.declarative import declarative_base + + engine = create_engine('sqlite:////tmp/test.db') + db_session = scoped_session(sessionmaker(autocommit=False, + autoflush=False, + bind=engine)) + Base = declarative_base() + Base.query = db_session.query_property() + + def init_db(): + # import all modules here that might define models so that + # they will be registered properly on the metadata. Otherwise + # you will have to import them first before calling init_db() + import yourapplication.models + Base.metadata.create_all(bind=engine) + +To define your models, just subclass the `Base` class that was created by +the code above. If you are wondering why we don't have to care about +threads here (like we did in the SQLite3 example above with the +:data:`~flask.g` object): that's because SQLAlchemy does that for us +already with the :class:`~sqlalchemy.orm.scoped_session`. + +To use SQLAlchemy in a declarative way with your application, you just +have to put the following code into your application module. 
Flask will +automatically remove database sessions at the end of the request or +when the application shuts down:: + + from yourapplication.database import db_session + + @app.teardown_appcontext + def shutdown_session(exception=None): + db_session.remove() + +Here is an example model (put this into :file:`models.py`, e.g.):: + + from sqlalchemy import Column, Integer, String + from yourapplication.database import Base + + class User(Base): + __tablename__ = 'users' + id = Column(Integer, primary_key=True) + name = Column(String(50), unique=True) + email = Column(String(120), unique=True) + + def __init__(self, name=None, email=None): + self.name = name + self.email = email + + def __repr__(self): + return f'' + +To create the database you can use the `init_db` function: + +>>> from yourapplication.database import init_db +>>> init_db() + +You can insert entries into the database like this: + +>>> from yourapplication.database import db_session +>>> from yourapplication.models import User +>>> u = User('admin', 'admin@localhost') +>>> db_session.add(u) +>>> db_session.commit() + +Querying is simple as well: + +>>> User.query.all() +[] +>>> User.query.filter(User.name == 'admin').first() + + +.. _SQLAlchemy: https://www.sqlalchemy.org/ +.. _declarative: https://docs.sqlalchemy.org/en/latest/orm/extensions/declarative/ + +Manual Object Relational Mapping +-------------------------------- + +Manual object relational mapping has a few upsides and a few downsides +versus the declarative approach from above. The main difference is that +you define tables and classes separately and map them together. It's more +flexible but a little more to type. In general it works like the +declarative approach, so make sure to also split up your application into +multiple modules in a package. 
+ +Here is an example :file:`database.py` module for your application:: + + from sqlalchemy import create_engine, MetaData + from sqlalchemy.orm import scoped_session, sessionmaker + + engine = create_engine('sqlite:////tmp/test.db') + metadata = MetaData() + db_session = scoped_session(sessionmaker(autocommit=False, + autoflush=False, + bind=engine)) + def init_db(): + metadata.create_all(bind=engine) + +As in the declarative approach, you need to close the session after +each request or application context shutdown. Put this into your +application module:: + + from yourapplication.database import db_session + + @app.teardown_appcontext + def shutdown_session(exception=None): + db_session.remove() + +Here is an example table and model (put this into :file:`models.py`):: + + from sqlalchemy import Table, Column, Integer, String + from sqlalchemy.orm import mapper + from yourapplication.database import metadata, db_session + + class User(object): + query = db_session.query_property() + + def __init__(self, name=None, email=None): + self.name = name + self.email = email + + def __repr__(self): + return f'' + + users = Table('users', metadata, + Column('id', Integer, primary_key=True), + Column('name', String(50), unique=True), + Column('email', String(120), unique=True) + ) + mapper(User, users) + +Querying and inserting works exactly the same as in the example above. + + +SQL Abstraction Layer +--------------------- + +If you just want to use the database system (and SQL) abstraction layer +you basically only need the engine:: + + from sqlalchemy import create_engine, MetaData, Table + + engine = create_engine('sqlite:////tmp/test.db') + metadata = MetaData(bind=engine) + +Then you can either declare the tables in your code like in the examples +above, or automatically load them:: + + from sqlalchemy import Table + + users = Table('users', metadata, autoload=True) + +To insert data you can use the `insert` method. 
We have to get a +connection first so that we can use a transaction: + +>>> con = engine.connect() +>>> con.execute(users.insert(), name='admin', email='admin@localhost') + +SQLAlchemy will automatically commit for us. + +To query your database, you use the engine directly or use a connection: + +>>> users.select(users.c.id == 1).execute().first() +(1, 'admin', 'admin@localhost') + +These results are also dict-like tuples: + +>>> r = users.select(users.c.id == 1).execute().first() +>>> r['name'] +'admin' + +You can also pass strings of SQL statements to the +:meth:`~sqlalchemy.engine.base.Connection.execute` method: + +>>> engine.execute('select * from users where id = :1', [1]).first() +(1, 'admin', 'admin@localhost') + +For more information about SQLAlchemy, head over to the +`website `_. diff --git a/testbed/pallets__flask/docs/patterns/sqlite3.rst b/testbed/pallets__flask/docs/patterns/sqlite3.rst new file mode 100644 index 0000000000000000000000000000000000000000..12336fb1b4546540f3a7881c095139262e51fc07 --- /dev/null +++ b/testbed/pallets__flask/docs/patterns/sqlite3.rst @@ -0,0 +1,151 @@ +Using SQLite 3 with Flask +========================= + +In Flask you can easily implement the opening of database connections on +demand and closing them when the context dies (usually at the end of the +request). + +Here is a simple example of how you can use SQLite 3 with Flask:: + + import sqlite3 + from flask import g + + DATABASE = '/path/to/database.db' + + def get_db(): + db = getattr(g, '_database', None) + if db is None: + db = g._database = sqlite3.connect(DATABASE) + return db + + @app.teardown_appcontext + def close_connection(exception): + db = getattr(g, '_database', None) + if db is not None: + db.close() + +Now, to use the database, the application must either have an active +application context (which is always true if there is a request in flight) +or create an application context itself. 
At that point the ``get_db`` +function can be used to get the current database connection. Whenever the +context is destroyed the database connection will be terminated. + +Note: if you use Flask 0.9 or older you need to use +``flask._app_ctx_stack.top`` instead of ``g`` as the :data:`flask.g` +object was bound to the request and not application context. + +Example:: + + @app.route('/') + def index(): + cur = get_db().cursor() + ... + + +.. note:: + + Please keep in mind that the teardown request and appcontext functions + are always executed, even if a before-request handler failed or was + never executed. Because of this we have to make sure here that the + database is there before we close it. + +Connect on Demand +----------------- + +The upside of this approach (connecting on first use) is that this will +only open the connection if truly necessary. If you want to use this +code outside a request context you can use it in a Python shell by opening +the application context by hand:: + + with app.app_context(): + # now you can use get_db() + + +Easy Querying +------------- + +Now in each request handling function you can access `get_db()` to get the +current open database connection. To simplify working with SQLite, a +row factory function is useful. It is executed for every result returned +from the database to convert the result. For instance, in order to get +dictionaries instead of tuples, this could be inserted into the ``get_db`` +function we created above:: + + def make_dicts(cursor, row): + return dict((cursor.description[idx][0], value) + for idx, value in enumerate(row)) + + db.row_factory = make_dicts + +This will make the sqlite3 module return dicts for this database connection, which are much nicer to deal with. Even more simply, we could place this in ``get_db`` instead:: + + db.row_factory = sqlite3.Row + +This would use Row objects rather than dicts to return the results of queries. 
These are ``namedtuple`` s, so we can access them either by index or by key. For example, assuming we have a ``sqlite3.Row`` called ``r`` for the rows ``id``, ``FirstName``, ``LastName``, and ``MiddleInitial``:: + + >>> # You can get values based on the row's name + >>> r['FirstName'] + John + >>> # Or, you can get them based on index + >>> r[1] + John + # Row objects are also iterable: + >>> for value in r: + ... print(value) + 1 + John + Doe + M + +Additionally, it is a good idea to provide a query function that combines +getting the cursor, executing and fetching the results:: + + def query_db(query, args=(), one=False): + cur = get_db().execute(query, args) + rv = cur.fetchall() + cur.close() + return (rv[0] if rv else None) if one else rv + +This handy little function, in combination with a row factory, makes +working with the database much more pleasant than it is by just using the +raw cursor and connection objects. + +Here is how you can use it:: + + for user in query_db('select * from users'): + print(user['username'], 'has the id', user['user_id']) + +Or if you just want a single result:: + + user = query_db('select * from users where username = ?', + [the_username], one=True) + if user is None: + print('No such user') + else: + print(the_username, 'has the id', user['user_id']) + +To pass variable parts to the SQL statement, use a question mark in the +statement and pass in the arguments as a list. Never directly add them to +the SQL statement with string formatting because this makes it possible +to attack the application using `SQL Injections +`_. + +Initial Schemas +--------------- + +Relational databases need schemas, so applications often ship a +`schema.sql` file that creates the database. It's a good idea to provide +a function that creates the database based on that schema. 
This function +can do that for you:: + + def init_db(): + with app.app_context(): + db = get_db() + with app.open_resource('schema.sql', mode='r') as f: + db.cursor().executescript(f.read()) + db.commit() + +You can then create such a database from the Python shell: + +>>> from yourapplication import init_db +>>> init_db() diff --git a/testbed/pallets__flask/docs/patterns/streaming.rst b/testbed/pallets__flask/docs/patterns/streaming.rst new file mode 100644 index 0000000000000000000000000000000000000000..e8571ffdadff2511d7bcf1a69b55bf911a62f79a --- /dev/null +++ b/testbed/pallets__flask/docs/patterns/streaming.rst @@ -0,0 +1,80 @@ +Streaming Contents +================== + +Sometimes you want to send an enormous amount of data to the client, much +more than you want to keep in memory. When you are generating the data on +the fly though, how do you send that back to the client without the +roundtrip to the filesystem? + +The answer is by using generators and direct responses. + +Basic Usage +----------- + +This is a basic view function that generates a lot of CSV data on the fly. +The trick is to have an inner function that uses a generator to generate +data and to then invoke that function and pass it to a response object:: + + @app.route('/large.csv') + def generate_large_csv(): + def generate(): + for row in iter_all_rows(): + yield f"{','.join(row)}\n" + return app.response_class(generate(), mimetype='text/csv') + +Each ``yield`` expression is directly sent to the browser. Note though +that some WSGI middlewares might break streaming, so be careful there in +debug environments with profilers and other things you might have enabled. + +Streaming from Templates +------------------------ + +The Jinja2 template engine also supports rendering templates piece by +piece. 
This functionality is not directly exposed by Flask because it is +quite uncommon, but you can easily do it yourself:: + + def stream_template(template_name, **context): + app.update_template_context(context) + t = app.jinja_env.get_template(template_name) + rv = t.stream(context) + rv.enable_buffering(5) + return rv + + @app.route('/my-large-page.html') + def render_large_template(): + rows = iter_all_rows() + return app.response_class(stream_template('the_template.html', rows=rows)) + +The trick here is to get the template object from the Jinja2 environment +on the application and to call :meth:`~jinja2.Template.stream` instead of +:meth:`~jinja2.Template.render` which returns a stream object instead of a +string. Since we're bypassing the Flask template render functions and +using the template object itself we have to make sure to update the render +context ourselves by calling :meth:`~flask.Flask.update_template_context`. +The template is then evaluated as the stream is iterated over. Since each +time you do a yield the server will flush the content to the client you +might want to buffer up a few items in the template which you can do with +``rv.enable_buffering(size)``. ``5`` is a sane default. + +Streaming with Context +---------------------- + +.. versionadded:: 0.9 + +Note that when you stream data, the request context is already gone the +moment the function executes. Flask 0.9 provides you with a helper that +can keep the request context around during the execution of the +generator:: + + from flask import stream_with_context, request + + @app.route('/stream') + def streamed_response(): + def generate(): + yield 'Hello ' + yield request.args['name'] + yield '!' + return app.response_class(stream_with_context(generate())) + +Without the :func:`~flask.stream_with_context` function you would get a +:class:`RuntimeError` at that point. 
diff --git a/testbed/pallets__flask/docs/patterns/subclassing.rst b/testbed/pallets__flask/docs/patterns/subclassing.rst new file mode 100644 index 0000000000000000000000000000000000000000..d8de2335925f44e079eff7f0558ee13911e2cc64 --- /dev/null +++ b/testbed/pallets__flask/docs/patterns/subclassing.rst @@ -0,0 +1,17 @@ +Subclassing Flask +================= + +The :class:`~flask.Flask` class is designed for subclassing. + +For example, you may want to override how request parameters are handled to preserve their order:: + + from flask import Flask, Request + from werkzeug.datastructures import ImmutableOrderedMultiDict + class MyRequest(Request): + """Request subclass to override request parameter storage""" + parameter_storage_class = ImmutableOrderedMultiDict + class MyFlask(Flask): + """Flask subclass using the custom request class""" + request_class = MyRequest + +This is the recommended approach for overriding or augmenting Flask's internal functionality. diff --git a/testbed/pallets__flask/docs/patterns/templateinheritance.rst b/testbed/pallets__flask/docs/patterns/templateinheritance.rst new file mode 100644 index 0000000000000000000000000000000000000000..bb5cba270617fdc9070c7c39c27ef6866c5d516f --- /dev/null +++ b/testbed/pallets__flask/docs/patterns/templateinheritance.rst @@ -0,0 +1,68 @@ +Template Inheritance +==================== + +The most powerful part of Jinja is template inheritance. Template inheritance +allows you to build a base "skeleton" template that contains all the common +elements of your site and defines **blocks** that child templates can override. + +Sounds complicated but is very basic. It's easiest to understand it by starting +with an example. + + +Base Template +------------- + +This template, which we'll call :file:`layout.html`, defines a simple HTML skeleton +document that you might use for a simple two-column page. It's the job of +"child" templates to fill the empty blocks with content: + +.. 
sourcecode:: html+jinja

    <!doctype html>
    <html>
      <head>
        {% block head %}
        <link rel="stylesheet" href="{{ url_for('static', filename='style.css') }}">
        <title>{% block title %}{% endblock %} - My Webpage</title>
        {% endblock %}
      </head>
      <body>
        <div id="content">{% block content %}{% endblock %}</div>
        <div id="footer">
          {% block footer %}
          &copy; Copyright 2010 by <a href="http://domain.invalid/">you</a>.
          {% endblock %}
        </div>
      </body>
    </html>
+ + + + +In this example, the ``{% block %}`` tags define four blocks that child templates +can fill in. All the `block` tag does is tell the template engine that a +child template may override those portions of the template. + +Child Template +-------------- + +A child template might look like this: + +.. sourcecode:: html+jinja + + {% extends "layout.html" %} + {% block title %}Index{% endblock %} + {% block head %} + {{ super() }} + + {% endblock %} + {% block content %} +

  <h1>Index</h1>
  <p class="important">
+ Welcome on my awesome homepage. + {% endblock %} + +The ``{% extends %}`` tag is the key here. It tells the template engine that +this template "extends" another template. When the template system evaluates +this template, first it locates the parent. The extends tag must be the +first tag in the template. To render the contents of a block defined in +the parent template, use ``{{ super() }}``. diff --git a/testbed/pallets__flask/docs/patterns/urlprocessors.rst b/testbed/pallets__flask/docs/patterns/urlprocessors.rst new file mode 100644 index 0000000000000000000000000000000000000000..0d743205fd28c0a821fac05cb561cc73ef6cd94d --- /dev/null +++ b/testbed/pallets__flask/docs/patterns/urlprocessors.rst @@ -0,0 +1,126 @@ +Using URL Processors +==================== + +.. versionadded:: 0.7 + +Flask 0.7 introduces the concept of URL processors. The idea is that you +might have a bunch of resources with common parts in the URL that you +don't always explicitly want to provide. For instance you might have a +bunch of URLs that have the language code in it but you don't want to have +to handle it in every single function yourself. + +URL processors are especially helpful when combined with blueprints. We +will handle both application specific URL processors here as well as +blueprint specifics. + +Internationalized Application URLs +---------------------------------- + +Consider an application like this:: + + from flask import Flask, g + + app = Flask(__name__) + + @app.route('//') + def index(lang_code): + g.lang_code = lang_code + ... + + @app.route('//about') + def about(lang_code): + g.lang_code = lang_code + ... + +This is an awful lot of repetition as you have to handle the language code +setting on the :data:`~flask.g` object yourself in every single function. +Sure, a decorator could be used to simplify this, but if you want to +generate URLs from one function to another you would have to still provide +the language code explicitly which can be annoying. 
+ +For the latter, this is where :func:`~flask.Flask.url_defaults` functions +come in. They can automatically inject values into a call to +:func:`~flask.url_for`. The code below checks if the +language code is not yet in the dictionary of URL values and if the +endpoint wants a value named ``'lang_code'``:: + + @app.url_defaults + def add_language_code(endpoint, values): + if 'lang_code' in values or not g.lang_code: + return + if app.url_map.is_endpoint_expecting(endpoint, 'lang_code'): + values['lang_code'] = g.lang_code + +The method :meth:`~werkzeug.routing.Map.is_endpoint_expecting` of the URL +map can be used to figure out if it would make sense to provide a language +code for the given endpoint. + +The reverse of that function are +:meth:`~flask.Flask.url_value_preprocessor`\s. They are executed right +after the request was matched and can execute code based on the URL +values. The idea is that they pull information out of the values +dictionary and put it somewhere else:: + + @app.url_value_preprocessor + def pull_lang_code(endpoint, values): + g.lang_code = values.pop('lang_code', None) + +That way you no longer have to do the `lang_code` assignment to +:data:`~flask.g` in every function. You can further improve that by +writing your own decorator that prefixes URLs with the language code, but +the more beautiful solution is using a blueprint. Once the +``'lang_code'`` is popped from the values dictionary and it will no longer +be forwarded to the view function reducing the code to this:: + + from flask import Flask, g + + app = Flask(__name__) + + @app.url_defaults + def add_language_code(endpoint, values): + if 'lang_code' in values or not g.lang_code: + return + if app.url_map.is_endpoint_expecting(endpoint, 'lang_code'): + values['lang_code'] = g.lang_code + + @app.url_value_preprocessor + def pull_lang_code(endpoint, values): + g.lang_code = values.pop('lang_code', None) + + @app.route('//') + def index(): + ... 
+ + @app.route('//about') + def about(): + ... + +Internationalized Blueprint URLs +-------------------------------- + +Because blueprints can automatically prefix all URLs with a common string +it's easy to automatically do that for every function. Furthermore +blueprints can have per-blueprint URL processors which removes a whole lot +of logic from the :meth:`~flask.Flask.url_defaults` function because it no +longer has to check if the URL is really interested in a ``'lang_code'`` +parameter:: + + from flask import Blueprint, g + + bp = Blueprint('frontend', __name__, url_prefix='/') + + @bp.url_defaults + def add_language_code(endpoint, values): + values.setdefault('lang_code', g.lang_code) + + @bp.url_value_preprocessor + def pull_lang_code(endpoint, values): + g.lang_code = values.pop('lang_code') + + @bp.route('/') + def index(): + ... + + @bp.route('/about') + def about(): + ... diff --git a/testbed/pallets__flask/docs/patterns/viewdecorators.rst b/testbed/pallets__flask/docs/patterns/viewdecorators.rst new file mode 100644 index 0000000000000000000000000000000000000000..0b0479ef8141f76e3fe83d6452021e4c5be3e55c --- /dev/null +++ b/testbed/pallets__flask/docs/patterns/viewdecorators.rst @@ -0,0 +1,171 @@ +View Decorators +=============== + +Python has a really interesting feature called function decorators. This +allows some really neat things for web applications. Because each view in +Flask is a function, decorators can be used to inject additional +functionality to one or more functions. The :meth:`~flask.Flask.route` +decorator is the one you probably used already. But there are use cases +for implementing your own decorator. For instance, imagine you have a +view that should only be used by people that are logged in. If a user +goes to the site and is not logged in, they should be redirected to the +login page. This is a good example of a use case where a decorator is an +excellent solution. 
+ +Login Required Decorator +------------------------ + +So let's implement such a decorator. A decorator is a function that +wraps and replaces another function. Since the original function is +replaced, you need to remember to copy the original function's information +to the new function. Use :func:`functools.wraps` to handle this for you. + +This example assumes that the login page is called ``'login'`` and that +the current user is stored in ``g.user`` and is ``None`` if there is no-one +logged in. :: + + from functools import wraps + from flask import g, request, redirect, url_for + + def login_required(f): + @wraps(f) + def decorated_function(*args, **kwargs): + if g.user is None: + return redirect(url_for('login', next=request.url)) + return f(*args, **kwargs) + return decorated_function + +To use the decorator, apply it as innermost decorator to a view function. +When applying further decorators, always remember +that the :meth:`~flask.Flask.route` decorator is the outermost. :: + + @app.route('/secret_page') + @login_required + def secret_page(): + pass + +.. note:: + The ``next`` value will exist in ``request.args`` after a ``GET`` request for + the login page. You'll have to pass it along when sending the ``POST`` request + from the login form. You can do this with a hidden input tag, then retrieve it + from ``request.form`` when logging the user in. :: + + + + +Caching Decorator +----------------- + +Imagine you have a view function that does an expensive calculation and +because of that you would like to cache the generated results for a +certain amount of time. A decorator would be nice for that. We're +assuming you have set up a cache like mentioned in :doc:`caching`. + +Here is an example cache function. It generates the cache key from a +specific prefix (actually a format string) and the current path of the +request. Notice that we are using a function that first creates the +decorator that then decorates the function. Sounds awful? 
Unfortunately +it is a little bit more complex, but the code should still be +straightforward to read. + +The decorated function will then work as follows + +1. get the unique cache key for the current request based on the current + path. +2. get the value for that key from the cache. If the cache returned + something we will return that value. +3. otherwise the original function is called and the return value is + stored in the cache for the timeout provided (by default 5 minutes). + +Here the code:: + + from functools import wraps + from flask import request + + def cached(timeout=5 * 60, key='view/{}'): + def decorator(f): + @wraps(f) + def decorated_function(*args, **kwargs): + cache_key = key.format(request.path) + rv = cache.get(cache_key) + if rv is not None: + return rv + rv = f(*args, **kwargs) + cache.set(cache_key, rv, timeout=timeout) + return rv + return decorated_function + return decorator + +Notice that this assumes an instantiated ``cache`` object is available, see +:doc:`caching`. + + +Templating Decorator +-------------------- + +A common pattern invented by the TurboGears guys a while back is a +templating decorator. The idea of that decorator is that you return a +dictionary with the values passed to the template from the view function +and the template is automatically rendered. With that, the following +three examples do exactly the same:: + + @app.route('/') + def index(): + return render_template('index.html', value=42) + + @app.route('/') + @templated('index.html') + def index(): + return dict(value=42) + + @app.route('/') + @templated() + def index(): + return dict(value=42) + +As you can see, if no template name is provided it will use the endpoint +of the URL map with dots converted to slashes + ``'.html'``. Otherwise +the provided template name is used. When the decorated function returns, +the dictionary returned is passed to the template rendering function. 
If +``None`` is returned, an empty dictionary is assumed, if something else than +a dictionary is returned we return it from the function unchanged. That +way you can still use the redirect function or return simple strings. + +Here is the code for that decorator:: + + from functools import wraps + from flask import request, render_template + + def templated(template=None): + def decorator(f): + @wraps(f) + def decorated_function(*args, **kwargs): + template_name = template + if template_name is None: + template_name = f"{request.endpoint.replace('.', '/')}.html" + ctx = f(*args, **kwargs) + if ctx is None: + ctx = {} + elif not isinstance(ctx, dict): + return ctx + return render_template(template_name, **ctx) + return decorated_function + return decorator + + +Endpoint Decorator +------------------ + +When you want to use the werkzeug routing system for more flexibility you +need to map the endpoint as defined in the :class:`~werkzeug.routing.Rule` +to a view function. This is possible with this decorator. For example:: + + from flask import Flask + from werkzeug.routing import Rule + + app = Flask(__name__) + app.url_map.add(Rule('/', endpoint='index')) + + @app.endpoint('index') + def my_index(): + return "Hello world" diff --git a/testbed/pallets__flask/docs/patterns/wtforms.rst b/testbed/pallets__flask/docs/patterns/wtforms.rst new file mode 100644 index 0000000000000000000000000000000000000000..3d626f5084f9a5ea5209f12ddab4a5d0ca12e8d2 --- /dev/null +++ b/testbed/pallets__flask/docs/patterns/wtforms.rst @@ -0,0 +1,126 @@ +Form Validation with WTForms +============================ + +When you have to work with form data submitted by a browser view, code +quickly becomes very hard to read. There are libraries out there designed +to make this process easier to manage. One of them is `WTForms`_ which we +will handle here. If you find yourself in the situation of having many +forms, you might want to give it a try. 
+ +When you are working with WTForms you have to define your forms as classes +first. I recommend breaking up the application into multiple modules +(:doc:`packages`) for that and adding a separate module for the +forms. + +.. admonition:: Getting the most out of WTForms with an Extension + + The `Flask-WTF`_ extension expands on this pattern and adds a + few little helpers that make working with forms and Flask more + fun. You can get it from `PyPI + `_. + +.. _Flask-WTF: https://flask-wtf.readthedocs.io/ + +The Forms +--------- + +This is an example form for a typical registration page:: + + from wtforms import Form, BooleanField, StringField, PasswordField, validators + + class RegistrationForm(Form): + username = StringField('Username', [validators.Length(min=4, max=25)]) + email = StringField('Email Address', [validators.Length(min=6, max=35)]) + password = PasswordField('New Password', [ + validators.DataRequired(), + validators.EqualTo('confirm', message='Passwords must match') + ]) + confirm = PasswordField('Repeat Password') + accept_tos = BooleanField('I accept the TOS', [validators.DataRequired()]) + +In the View +----------- + +In the view function, the usage of this form looks like this:: + + @app.route('/register', methods=['GET', 'POST']) + def register(): + form = RegistrationForm(request.form) + if request.method == 'POST' and form.validate(): + user = User(form.username.data, form.email.data, + form.password.data) + db_session.add(user) + flash('Thanks for registering') + return redirect(url_for('login')) + return render_template('register.html', form=form) + +Notice we're implying that the view is using SQLAlchemy here +(:doc:`sqlalchemy`), but that's not a requirement, of course. Adapt +the code as necessary. + +Things to remember: + +1. create the form from the request :attr:`~flask.request.form` value if + the data is submitted via the HTTP ``POST`` method and + :attr:`~flask.request.args` if the data is submitted as ``GET``. +2. 
to validate the data, call the :func:`~wtforms.form.Form.validate` + method, which will return ``True`` if the data validates, ``False`` + otherwise. +3. to access individual values from the form, access `form..data`. + +Forms in Templates +------------------ + +Now to the template side. When you pass the form to the templates, you can +easily render them there. Look at the following example template to see +how easy this is. WTForms does half the form generation for us already. +To make it even nicer, we can write a macro that renders a field with +label and a list of errors if there are any. + +Here's an example :file:`_formhelpers.html` template with such a macro: + +.. sourcecode:: html+jinja + + {% macro render_field(field) %} +

<dt>{{ field.label }}
<dd>{{ field(**kwargs)|safe }}
{% if field.errors %}
  <ul class=errors>
  {% for error in field.errors %}
    <li>{{ error }}</li>
  {% endfor %}
  </ul>
{% endif %}
</dd>
+ {% endmacro %} + +This macro accepts a couple of keyword arguments that are forwarded to +WTForm's field function, which renders the field for us. The keyword +arguments will be inserted as HTML attributes. So, for example, you can +call ``render_field(form.username, class='username')`` to add a class to +the input element. Note that WTForms returns standard Python strings, +so we have to tell Jinja2 that this data is already HTML-escaped with +the ``|safe`` filter. + +Here is the :file:`register.html` template for the function we used above, which +takes advantage of the :file:`_formhelpers.html` template: + +.. sourcecode:: html+jinja + + {% from "_formhelpers.html" import render_field %} +
<form method=post>
  <dl>
+ {{ render_field(form.username) }} + {{ render_field(form.email) }} + {{ render_field(form.password) }} + {{ render_field(form.confirm) }} + {{ render_field(form.accept_tos) }} +
  </dl>
  <p><input type=submit value=Register>
</form>
+ +For more information about WTForms, head over to the `WTForms +website`_. + +.. _WTForms: https://wtforms.readthedocs.io/ +.. _WTForms website: https://wtforms.readthedocs.io/ diff --git a/testbed/pallets__flask/docs/quickstart.rst b/testbed/pallets__flask/docs/quickstart.rst new file mode 100644 index 0000000000000000000000000000000000000000..a6956c3201c2c927ae7076cbe6bda2a2ef7145b1 --- /dev/null +++ b/testbed/pallets__flask/docs/quickstart.rst @@ -0,0 +1,958 @@ +Quickstart +========== + +Eager to get started? This page gives a good introduction to Flask. +Follow :doc:`installation` to set up a project and install Flask first. + + +A Minimal Application +--------------------- + +A minimal Flask application looks something like this: + +.. code-block:: python + + from flask import Flask + + app = Flask(__name__) + + @app.route("/") + def hello_world(): + return "
<p>Hello, World!</p>
" + +So what did that code do? + +1. First we imported the :class:`~flask.Flask` class. An instance of + this class will be our WSGI application. +2. Next we create an instance of this class. The first argument is the + name of the application's module or package. ``__name__`` is a + convenient shortcut for this that is appropriate for most cases. + This is needed so that Flask knows where to look for resources such + as templates and static files. +3. We then use the :meth:`~flask.Flask.route` decorator to tell Flask + what URL should trigger our function. +4. The function returns the message we want to display in the user's + browser. The default content type is HTML, so HTML in the string + will be rendered by the browser. + +Save it as :file:`hello.py` or something similar. Make sure to not call +your application :file:`flask.py` because this would conflict with Flask +itself. + +To run the application, use the :command:`flask` command or +:command:`python -m flask`. Before you can do that you need +to tell your terminal the application to work with by exporting the +``FLASK_APP`` environment variable: + +.. tabs:: + + .. group-tab:: Bash + + .. code-block:: text + + $ export FLASK_APP=hello + $ flask run + * Running on http://127.0.0.1:5000/ + + .. group-tab:: Fish + + .. code-block:: text + + $ set -x FLASK_APP hello + $ flask run + * Running on http://127.0.0.1:5000/ + + .. group-tab:: CMD + + .. code-block:: text + + > set FLASK_APP=hello + > flask run + * Running on http://127.0.0.1:5000/ + + .. group-tab:: Powershell + + .. code-block:: text + + > $env:FLASK_APP = "hello" + > flask run + * Running on http://127.0.0.1:5000/ + +.. admonition:: Application Discovery Behavior + + As a shortcut, if the file is named ``app.py`` or ``wsgi.py``, you + don't have to set the ``FLASK_APP`` environment variable. See + :doc:`/cli` for more details. 
+ +This launches a very simple builtin server, which is good enough for +testing but probably not what you want to use in production. For +deployment options see :doc:`deploying/index`. + +Now head over to http://127.0.0.1:5000/, and you should see your hello +world greeting. + +If another program is already using port 5000, you'll see +``OSError: [Errno 98]`` or ``OSError: [WinError 10013]`` when the +server tries to start. See :ref:`address-already-in-use` for how to +handle that. + +.. _public-server: + +.. admonition:: Externally Visible Server + + If you run the server you will notice that the server is only accessible + from your own computer, not from any other in the network. This is the + default because in debugging mode a user of the application can execute + arbitrary Python code on your computer. + + If you have the debugger disabled or trust the users on your network, + you can make the server publicly available simply by adding + ``--host=0.0.0.0`` to the command line:: + + $ flask run --host=0.0.0.0 + + This tells your operating system to listen on all public IPs. + + +What to do if the Server does not Start +--------------------------------------- + +In case the :command:`python -m flask` fails or :command:`flask` +does not exist, there are multiple reasons this might be the case. +First of all you need to look at the error message. + +Old Version of Flask +```````````````````` + +Versions of Flask older than 0.11 used to have different ways to start the +application. In short, the :command:`flask` command did not exist, and +neither did :command:`python -m flask`. In that case you have two options: +either upgrade to newer Flask versions or have a look at :doc:`/server` +to see the alternative method for running a server. + +Invalid Import Name +``````````````````` + +The ``FLASK_APP`` environment variable is the name of the module to import at +:command:`flask run`. 
In case that module is incorrectly named you will get an +import error upon start (or if debug is enabled when you navigate to the +application). It will tell you what it tried to import and why it failed. + +The most common reason is a typo or because you did not actually create an +``app`` object. + + +Debug Mode +---------- + +The ``flask run`` command can do more than just start the development +server. By enabling debug mode, the server will automatically reload if +code changes, and will show an interactive debugger in the browser if an +error occurs during a request. + +.. image:: _static/debugger.png + :align: center + :class: screenshot + :alt: The interactive debugger in action. + +.. warning:: + + The debugger allows executing arbitrary Python code from the + browser. It is protected by a pin, but still represents a major + security risk. Do not run the development server or debugger in a + production environment. + +To enable all development features, set the ``FLASK_ENV`` environment +variable to ``development`` before calling ``flask run``. + +.. tabs:: + + .. group-tab:: Bash + + .. code-block:: text + + $ export FLASK_ENV=development + $ flask run + + .. group-tab:: Fish + + .. code-block:: text + + $ set -x FLASK_ENV development + $ flask run + + .. group-tab:: CMD + + .. code-block:: text + + > set FLASK_ENV=development + > flask run + + .. group-tab:: Powershell + + .. code-block:: text + + > $env:FLASK_ENV = "development" + > flask run + +See also: + +- :doc:`/server` and :doc:`/cli` for information about running in + development mode. +- :doc:`/debugging` for information about using the built-in debugger + and other debuggers. +- :doc:`/logging` and :doc:`/errorhandling` to log errors and display + nice error pages. + + +HTML Escaping +------------- + +When returning HTML (the default response type in Flask), any +user-provided values rendered in the output must be escaped to protect +from injection attacks. 
HTML templates rendered with Jinja, introduced +later, will do this automatically. + +:func:`~markupsafe.escape`, shown here, can be used manually. It is +omitted in most examples for brevity, but you should always be aware of +how you're using untrusted data. + +.. code-block:: python + + from markupsafe import escape + + @app.route("/") + def hello(name): + return f"Hello, {escape(name)}!" + +If a user managed to submit the name ````, +escaping causes it to be rendered as text, rather than running the +script in the user's browser. + +```` in the route captures a value from the URL and passes it to +the view function. These variable rules are explained below. + + +Routing +------- + +Modern web applications use meaningful URLs to help users. Users are more +likely to like a page and come back if the page uses a meaningful URL they can +remember and use to directly visit a page. + +Use the :meth:`~flask.Flask.route` decorator to bind a function to a URL. :: + + @app.route('/') + def index(): + return 'Index Page' + + @app.route('/hello') + def hello(): + return 'Hello, World' + +You can do more! You can make parts of the URL dynamic and attach multiple +rules to a function. + +Variable Rules +`````````````` + +You can add variable sections to a URL by marking sections with +````. Your function then receives the ```` +as a keyword argument. Optionally, you can use a converter to specify the type +of the argument like ````. 
:: + + from markupsafe import escape + + @app.route('/user/') + def show_user_profile(username): + # show the user profile for that user + return f'User {escape(username)}' + + @app.route('/post/') + def show_post(post_id): + # show the post with the given id, the id is an integer + return f'Post {post_id}' + + @app.route('/path/') + def show_subpath(subpath): + # show the subpath after /path/ + return f'Subpath {escape(subpath)}' + +Converter types: + +========== ========================================== +``string`` (default) accepts any text without a slash +``int`` accepts positive integers +``float`` accepts positive floating point values +``path`` like ``string`` but also accepts slashes +``uuid`` accepts UUID strings +========== ========================================== + + +Unique URLs / Redirection Behavior +`````````````````````````````````` + +The following two rules differ in their use of a trailing slash. :: + + @app.route('/projects/') + def projects(): + return 'The project page' + + @app.route('/about') + def about(): + return 'The about page' + +The canonical URL for the ``projects`` endpoint has a trailing slash. +It's similar to a folder in a file system. If you access the URL without +a trailing slash (``/projects``), Flask redirects you to the canonical URL +with the trailing slash (``/projects/``). + +The canonical URL for the ``about`` endpoint does not have a trailing +slash. It's similar to the pathname of a file. Accessing the URL with a +trailing slash (``/about/``) produces a 404 "Not Found" error. This helps +keep URLs unique for these resources, which helps search engines avoid +indexing the same page twice. + + +.. _url-building: + +URL Building +```````````` + +To build a URL to a specific function, use the :func:`~flask.url_for` function. +It accepts the name of the function as its first argument and any number of +keyword arguments, each corresponding to a variable part of the URL rule. 
+Unknown variable parts are appended to the URL as query parameters. + +Why would you want to build URLs using the URL reversing function +:func:`~flask.url_for` instead of hard-coding them into your templates? + +1. Reversing is often more descriptive than hard-coding the URLs. +2. You can change your URLs in one go instead of needing to remember to + manually change hard-coded URLs. +3. URL building handles escaping of special characters transparently. +4. The generated paths are always absolute, avoiding unexpected behavior + of relative paths in browsers. +5. If your application is placed outside the URL root, for example, in + ``/myapplication`` instead of ``/``, :func:`~flask.url_for` properly + handles that for you. + +For example, here we use the :meth:`~flask.Flask.test_request_context` method +to try out :func:`~flask.url_for`. :meth:`~flask.Flask.test_request_context` +tells Flask to behave as though it's handling a request even while we use a +Python shell. See :ref:`context-locals`. + +.. code-block:: python + + from flask import url_for + + @app.route('/') + def index(): + return 'index' + + @app.route('/login') + def login(): + return 'login' + + @app.route('/user/') + def profile(username): + return f'{username}\'s profile' + + with app.test_request_context(): + print(url_for('index')) + print(url_for('login')) + print(url_for('login', next='/')) + print(url_for('profile', username='John Doe')) + +.. code-block:: text + + / + /login + /login?next=/ + /user/John%20Doe + + +HTTP Methods +```````````` + +Web applications use different HTTP methods when accessing URLs. You should +familiarize yourself with the HTTP methods as you work with Flask. By default, +a route only answers to ``GET`` requests. You can use the ``methods`` argument +of the :meth:`~flask.Flask.route` decorator to handle different HTTP methods. 
+:: + + from flask import request + + @app.route('/login', methods=['GET', 'POST']) + def login(): + if request.method == 'POST': + return do_the_login() + else: + return show_the_login_form() + +If ``GET`` is present, Flask automatically adds support for the ``HEAD`` method +and handles ``HEAD`` requests according to the `HTTP RFC`_. Likewise, +``OPTIONS`` is automatically implemented for you. + +.. _HTTP RFC: https://www.ietf.org/rfc/rfc2068.txt + +Static Files +------------ + +Dynamic web applications also need static files. That's usually where +the CSS and JavaScript files are coming from. Ideally your web server is +configured to serve them for you, but during development Flask can do that +as well. Just create a folder called :file:`static` in your package or next to +your module and it will be available at ``/static`` on the application. + +To generate URLs for static files, use the special ``'static'`` endpoint name:: + + url_for('static', filename='style.css') + +The file has to be stored on the filesystem as :file:`static/style.css`. + +Rendering Templates +------------------- + +Generating HTML from within Python is not fun, and actually pretty +cumbersome because you have to do the HTML escaping on your own to keep +the application secure. Because of that Flask configures the `Jinja2 +`_ template engine for you automatically. + +To render a template you can use the :func:`~flask.render_template` +method. All you have to do is provide the name of the template and the +variables you want to pass to the template engine as keyword arguments. +Here's a simple example of how to render a template:: + + from flask import render_template + + @app.route('/hello/') + @app.route('/hello/') + def hello(name=None): + return render_template('hello.html', name=name) + +Flask will look for templates in the :file:`templates` folder. 
So if your +application is a module, this folder is next to that module, if it's a +package it's actually inside your package: + +**Case 1**: a module:: + + /application.py + /templates + /hello.html + +**Case 2**: a package:: + + /application + /__init__.py + /templates + /hello.html + +For templates you can use the full power of Jinja2 templates. Head over +to the official `Jinja2 Template Documentation +`_ for more information. + +Here is an example template: + +.. sourcecode:: html+jinja + + + Hello from Flask + {% if name %} +
  <h1>Hello {{ name }}!</h1>
+ {% else %} +
  <h1>Hello, World!</h1>
+ {% endif %} + +Inside templates you also have access to the :data:`~flask.Flask.config`, +:class:`~flask.request`, :class:`~flask.session` and :class:`~flask.g` [#]_ objects +as well as the :func:`~flask.url_for` and :func:`~flask.get_flashed_messages` functions. + +Templates are especially useful if inheritance is used. If you want to +know how that works, see :doc:`patterns/templateinheritance`. Basically +template inheritance makes it possible to keep certain elements on each +page (like header, navigation and footer). + +Automatic escaping is enabled, so if ``name`` contains HTML it will be escaped +automatically. If you can trust a variable and you know that it will be +safe HTML (for example because it came from a module that converts wiki +markup to HTML) you can mark it as safe by using the +:class:`~markupsafe.Markup` class or by using the ``|safe`` filter in the +template. Head over to the Jinja 2 documentation for more examples. + +Here is a basic introduction to how the :class:`~markupsafe.Markup` class works:: + + >>> from markupsafe import Markup + >>> Markup('Hello %s!') % 'hacker' + Markup('Hello <blink>hacker</blink>!') + >>> Markup.escape('hacker') + Markup('<blink>hacker</blink>') + >>> Markup('Marked up » HTML').striptags() + 'Marked up » HTML' + +.. versionchanged:: 0.5 + + Autoescaping is no longer enabled for all templates. The following + extensions for templates trigger autoescaping: ``.html``, ``.htm``, + ``.xml``, ``.xhtml``. Templates loaded from a string will have + autoescaping disabled. + +.. [#] Unsure what that :class:`~flask.g` object is? It's something in which + you can store information for your own needs. See the documentation + for :class:`flask.g` and :doc:`patterns/sqlite3`. + + +Accessing Request Data +---------------------- + +For web applications it's crucial to react to the data a client sends to +the server. In Flask this information is provided by the global +:class:`~flask.request` object. 
If you have some experience with Python +you might be wondering how that object can be global and how Flask +manages to still be threadsafe. The answer is context locals: + + +.. _context-locals: + +Context Locals +`````````````` + +.. admonition:: Insider Information + + If you want to understand how that works and how you can implement + tests with context locals, read this section, otherwise just skip it. + +Certain objects in Flask are global objects, but not of the usual kind. +These objects are actually proxies to objects that are local to a specific +context. What a mouthful. But that is actually quite easy to understand. + +Imagine the context being the handling thread. A request comes in and the +web server decides to spawn a new thread (or something else, the +underlying object is capable of dealing with concurrency systems other +than threads). When Flask starts its internal request handling it +figures out that the current thread is the active context and binds the +current application and the WSGI environments to that context (thread). +It does that in an intelligent way so that one application can invoke another +application without breaking. + +So what does this mean to you? Basically you can completely ignore that +this is the case unless you are doing something like unit testing. You +will notice that code which depends on a request object will suddenly break +because there is no request object. The solution is creating a request +object yourself and binding it to the context. The easiest solution for +unit testing is to use the :meth:`~flask.Flask.test_request_context` +context manager. In combination with the ``with`` statement it will bind a +test request so that you can interact with it. 
Here is an example:: + + from flask import request + + with app.test_request_context('/hello', method='POST'): + # now you can do something with the request until the + # end of the with block, such as basic assertions: + assert request.path == '/hello' + assert request.method == 'POST' + +The other possibility is passing a whole WSGI environment to the +:meth:`~flask.Flask.request_context` method:: + + with app.request_context(environ): + assert request.method == 'POST' + +The Request Object +`````````````````` + +The request object is documented in the API section and we will not cover +it here in detail (see :class:`~flask.Request`). Here is a broad overview of +some of the most common operations. First of all you have to import it from +the ``flask`` module:: + + from flask import request + +The current request method is available by using the +:attr:`~flask.Request.method` attribute. To access form data (data +transmitted in a ``POST`` or ``PUT`` request) you can use the +:attr:`~flask.Request.form` attribute. Here is a full example of the two +attributes mentioned above:: + + @app.route('/login', methods=['POST', 'GET']) + def login(): + error = None + if request.method == 'POST': + if valid_login(request.form['username'], + request.form['password']): + return log_the_user_in(request.form['username']) + else: + error = 'Invalid username/password' + # the code below is executed if the request method + # was GET or the credentials were invalid + return render_template('login.html', error=error) + +What happens if the key does not exist in the ``form`` attribute? In that +case a special :exc:`KeyError` is raised. You can catch it like a +standard :exc:`KeyError` but if you don't do that, a HTTP 400 Bad Request +error page is shown instead. So for many situations you don't have to +deal with that problem. 
+ +To access parameters submitted in the URL (``?key=value``) you can use the +:attr:`~flask.Request.args` attribute:: + + searchword = request.args.get('key', '') + +We recommend accessing URL parameters with `get` or by catching the +:exc:`KeyError` because users might change the URL and presenting them a 400 +bad request page in that case is not user friendly. + +For a full list of methods and attributes of the request object, head over +to the :class:`~flask.Request` documentation. + + +File Uploads +```````````` + +You can handle uploaded files with Flask easily. Just make sure not to +forget to set the ``enctype="multipart/form-data"`` attribute on your HTML +form, otherwise the browser will not transmit your files at all. + +Uploaded files are stored in memory or at a temporary location on the +filesystem. You can access those files by looking at the +:attr:`~flask.request.files` attribute on the request object. Each +uploaded file is stored in that dictionary. It behaves just like a +standard Python :class:`file` object, but it also has a +:meth:`~werkzeug.datastructures.FileStorage.save` method that +allows you to store that file on the filesystem of the server. +Here is a simple example showing how that works:: + + from flask import request + + @app.route('/upload', methods=['GET', 'POST']) + def upload_file(): + if request.method == 'POST': + f = request.files['the_file'] + f.save('/var/www/uploads/uploaded_file.txt') + ... + +If you want to know how the file was named on the client before it was +uploaded to your application, you can access the +:attr:`~werkzeug.datastructures.FileStorage.filename` attribute. +However please keep in mind that this value can be forged +so never ever trust that value. 
If you want to use the filename +of the client to store the file on the server, pass it through the +:func:`~werkzeug.utils.secure_filename` function that +Werkzeug provides for you:: + + from werkzeug.utils import secure_filename + + @app.route('/upload', methods=['GET', 'POST']) + def upload_file(): + if request.method == 'POST': + file = request.files['the_file'] + file.save(f"/var/www/uploads/{secure_filename(file.filename)}") + ... + +For some better examples, see :doc:`patterns/fileuploads`. + +Cookies +``````` + +To access cookies you can use the :attr:`~flask.Request.cookies` +attribute. To set cookies you can use the +:attr:`~flask.Response.set_cookie` method of response objects. The +:attr:`~flask.Request.cookies` attribute of request objects is a +dictionary with all the cookies the client transmits. If you want to use +sessions, do not use the cookies directly but instead use the +:ref:`sessions` in Flask that add some security on top of cookies for you. + +Reading cookies:: + + from flask import request + + @app.route('/') + def index(): + username = request.cookies.get('username') + # use cookies.get(key) instead of cookies[key] to not get a + # KeyError if the cookie is missing. + +Storing cookies:: + + from flask import make_response + + @app.route('/') + def index(): + resp = make_response(render_template(...)) + resp.set_cookie('username', 'the username') + return resp + +Note that cookies are set on response objects. Since you normally +just return strings from the view functions Flask will convert them into +response objects for you. If you explicitly want to do that you can use +the :meth:`~flask.make_response` function and then modify it. + +Sometimes you might want to set a cookie at a point where the response +object does not exist yet. This is possible by utilizing the +:doc:`patterns/deferredcallbacks` pattern. + +For this also see :ref:`about-responses`. 
+ +Redirects and Errors +-------------------- + +To redirect a user to another endpoint, use the :func:`~flask.redirect` +function; to abort a request early with an error code, use the +:func:`~flask.abort` function:: + + from flask import abort, redirect, url_for + + @app.route('/') + def index(): + return redirect(url_for('login')) + + @app.route('/login') + def login(): + abort(401) + this_is_never_executed() + +This is a rather pointless example because a user will be redirected from +the index to a page they cannot access (401 means access denied) but it +shows how that works. + +By default a black and white error page is shown for each error code. If +you want to customize the error page, you can use the +:meth:`~flask.Flask.errorhandler` decorator:: + + from flask import render_template + + @app.errorhandler(404) + def page_not_found(error): + return render_template('page_not_found.html'), 404 + +Note the ``404`` after the :func:`~flask.render_template` call. This +tells Flask that the status code of that page should be 404 which means +not found. By default 200 is assumed which translates to: all went well. + +See :doc:`errorhandling` for more details. + +.. _about-responses: + +About Responses +--------------- + +The return value from a view function is automatically converted into +a response object for you. If the return value is a string it's +converted into a response object with the string as response body, a +``200 OK`` status code and a :mimetype:`text/html` mimetype. If the +return value is a dict, :func:`jsonify` is called to produce a response. +The logic that Flask applies to converting return values into response +objects is as follows: + +1. If a response object of the correct type is returned it's directly + returned from the view. +2. If it's a string, a response object is created with that data and + the default parameters. +3. If it's a dict, a response object is created using ``jsonify``. +4. 
If a tuple is returned the items in the tuple can provide extra + information. Such tuples have to be in the form + ``(response, status)``, ``(response, headers)``, or + ``(response, status, headers)``. The ``status`` value will override + the status code and ``headers`` can be a list or dictionary of + additional header values. +5. If none of that works, Flask will assume the return value is a + valid WSGI application and convert that into a response object. + +If you want to get hold of the resulting response object inside the view +you can use the :func:`~flask.make_response` function. + +Imagine you have a view like this:: + + from flask import render_template + + @app.errorhandler(404) + def not_found(error): + return render_template('error.html'), 404 + +You just need to wrap the return expression with +:func:`~flask.make_response` and get the response object to modify it, then +return it:: + + from flask import make_response + + @app.errorhandler(404) + def not_found(error): + resp = make_response(render_template('error.html'), 404) + resp.headers['X-Something'] = 'A value' + return resp + + +APIs with JSON +`````````````` + +A common response format when writing an API is JSON. It's easy to get +started writing such an API with Flask. If you return a ``dict`` from a +view, it will be converted to a JSON response. + +.. code-block:: python + + @app.route("/me") + def me_api(): + user = get_current_user() + return { + "username": user.username, + "theme": user.theme, + "image": url_for("user_image", filename=user.image), + } + +Depending on your API design, you may want to create JSON responses for +types other than ``dict``. In that case, use the +:func:`~flask.json.jsonify` function, which will serialize any supported +JSON data type. Or look into Flask community extensions that support +more complex applications. + +.. 
code-block:: python + + from flask import jsonify + + @app.route("/users") + def users_api(): + users = get_all_users() + return jsonify([user.to_json() for user in users]) + + +.. _sessions: + +Sessions +-------- + +In addition to the request object there is also a second object called +:class:`~flask.session` which allows you to store information specific to a +user from one request to the next. This is implemented on top of cookies +for you and signs the cookies cryptographically. What this means is that +the user could look at the contents of your cookie but not modify it, +unless they know the secret key used for signing. + +In order to use sessions you have to set a secret key. Here is how +sessions work:: + + from flask import session + + # Set the secret key to some random bytes. Keep this really secret! + app.secret_key = b'_5#y2L"F4Q8z\n\xec]/' + + @app.route('/') + def index(): + if 'username' in session: + return f'Logged in as {session["username"]}' + return 'You are not logged in' + + @app.route('/login', methods=['GET', 'POST']) + def login(): + if request.method == 'POST': + session['username'] = request.form['username'] + return redirect(url_for('index')) + return ''' +
<form method="post">
    <p><input type=text name=username>
    <p><input type=submit value=Login>
</form>
+ ''' + + @app.route('/logout') + def logout(): + # remove the username from the session if it's there + session.pop('username', None) + return redirect(url_for('index')) + +.. admonition:: How to generate good secret keys + + A secret key should be as random as possible. Your operating system has + ways to generate pretty random data based on a cryptographic random + generator. Use the following command to quickly generate a value for + :attr:`Flask.secret_key` (or :data:`SECRET_KEY`):: + + $ python -c 'import secrets; print(secrets.token_hex())' + '192b9bdd22ab9ed4d12e236c78afcb9a393ec15f71bbf5dc987d54727823bcbf' + +A note on cookie-based sessions: Flask will take the values you put into the +session object and serialize them into a cookie. If you are finding some +values do not persist across requests, cookies are indeed enabled, and you are +not getting a clear error message, check the size of the cookie in your page +responses compared to the size supported by web browsers. + +Besides the default client-side based sessions, if you want to handle +sessions on the server-side instead, there are several +Flask extensions that support this. + +Message Flashing +---------------- + +Good applications and user interfaces are all about feedback. If the user +does not get enough feedback they will probably end up hating the +application. Flask provides a really simple way to give feedback to a +user with the flashing system. The flashing system basically makes it +possible to record a message at the end of a request and access it on the next +(and only the next) request. This is usually combined with a layout +template to expose the message. + +To flash a message use the :func:`~flask.flash` method, to get hold of the +messages you can use :func:`~flask.get_flashed_messages` which is also +available in the templates. See :doc:`patterns/flashing` for a full +example. + +Logging +------- + +.. 
versionadded:: 0.3 + +Sometimes you might be in a situation where you deal with data that +should be correct, but actually is not. For example you may have +some client-side code that sends an HTTP request to the server +but it's obviously malformed. This might be caused by a user tampering +with the data, or the client code failing. Most of the time it's okay +to reply with ``400 Bad Request`` in that situation, but sometimes +that won't do and the code has to continue working. + +You may still want to log that something fishy happened. This is where +loggers come in handy. As of Flask 0.3 a logger is preconfigured for you +to use. + +Here are some example log calls:: + + app.logger.debug('A value for debugging') + app.logger.warning('A warning occurred (%d apples)', 42) + app.logger.error('An error occurred') + +The attached :attr:`~flask.Flask.logger` is a standard logging +:class:`~logging.Logger`, so head over to the official :mod:`logging` +docs for more information. + +See :doc:`errorhandling`. + + +Hooking in WSGI Middleware +-------------------------- + +To add WSGI middleware to your Flask application, wrap the application's +``wsgi_app`` attribute. For example, to apply Werkzeug's +:class:`~werkzeug.middleware.proxy_fix.ProxyFix` middleware for running +behind Nginx: + +.. code-block:: python + + from werkzeug.middleware.proxy_fix import ProxyFix + app.wsgi_app = ProxyFix(app.wsgi_app) + +Wrapping ``app.wsgi_app`` instead of ``app`` means that ``app`` still +points at your Flask application, not at the middleware, so you can +continue to use and configure ``app`` directly. + +Using Flask Extensions +---------------------- + +Extensions are packages that help you accomplish common tasks. For +example, Flask-SQLAlchemy provides SQLAlchemy support that makes it simple +and easy to use with Flask. + +For more on Flask extensions, see :doc:`extensions`. + +Deploying to a Web Server +------------------------- + +Ready to deploy your new Flask app? 
See :doc:`deploying/index`. diff --git a/testbed/pallets__flask/docs/reqcontext.rst b/testbed/pallets__flask/docs/reqcontext.rst new file mode 100644 index 0000000000000000000000000000000000000000..b67745edbe3ec2d641de6d31914b705f63f75068 --- /dev/null +++ b/testbed/pallets__flask/docs/reqcontext.rst @@ -0,0 +1,265 @@ +.. currentmodule:: flask + +The Request Context +=================== + +The request context keeps track of the request-level data during a +request. Rather than passing the request object to each function that +runs during a request, the :data:`request` and :data:`session` proxies +are accessed instead. + +This is similar to :doc:`/appcontext`, which keeps track of the +application-level data independent of a request. A corresponding +application context is pushed when a request context is pushed. + + +Purpose of the Context +---------------------- + +When the :class:`Flask` application handles a request, it creates a +:class:`Request` object based on the environment it received from the +WSGI server. Because a *worker* (thread, process, or coroutine depending +on the server) handles only one request at a time, the request data can +be considered global to that worker during that request. Flask uses the +term *context local* for this. + +Flask automatically *pushes* a request context when handling a request. +View functions, error handlers, and other functions that run during a +request will have access to the :data:`request` proxy, which points to +the request object for the current request. + + +Lifetime of the Context +----------------------- + +When a Flask application begins handling a request, it pushes a request +context, which also pushes an :doc:`app context `. When the +request ends it pops the request context then the application context. + +The context is unique to each thread (or other worker type). 
+:data:`request` cannot be passed to another thread, the other thread +will have a different context stack and will not know about the request +the parent thread was pointing to. + +Context locals are implemented in Werkzeug. See :doc:`werkzeug:local` +for more information on how this works internally. + + +Manually Push a Context +----------------------- + +If you try to access :data:`request`, or anything that uses it, outside +a request context, you'll get this error message: + +.. code-block:: pytb + + RuntimeError: Working outside of request context. + + This typically means that you attempted to use functionality that + needed an active HTTP request. Consult the documentation on testing + for information about how to avoid this problem. + +This should typically only happen when testing code that expects an +active request. One option is to use the +:meth:`test client ` to simulate a full request. Or +you can use :meth:`~Flask.test_request_context` in a ``with`` block, and +everything that runs in the block will have access to :data:`request`, +populated with your test data. :: + + def generate_report(year): + format = request.args.get('format') + ... + + with app.test_request_context( + '/make_report/2017', data={'format': 'short'}): + generate_report() + +If you see that error somewhere else in your code not related to +testing, it most likely indicates that you should move that code into a +view function. + +For information on how to use the request context from the interactive +Python shell, see :doc:`/shell`. + + +How the Context Works +--------------------- + +The :meth:`Flask.wsgi_app` method is called to handle each request. It +manages the contexts during the request. Internally, the request and +application contexts work as stacks, :data:`_request_ctx_stack` and +:data:`_app_ctx_stack`. When contexts are pushed onto the stack, the +proxies that depend on them are available and point at information from +the top context on the stack. 
+ +When the request starts, a :class:`~ctx.RequestContext` is created and +pushed, which creates and pushes an :class:`~ctx.AppContext` first if +a context for that application is not already the top context. While +these contexts are pushed, the :data:`current_app`, :data:`g`, +:data:`request`, and :data:`session` proxies are available to the +original thread handling the request. + +Because the contexts are stacks, other contexts may be pushed to change +the proxies during a request. While this is not a common pattern, it +can be used in advanced applications to, for example, do internal +redirects or chain different applications together. + +After the request is dispatched and a response is generated and sent, +the request context is popped, which then pops the application context. +Immediately before they are popped, the :meth:`~Flask.teardown_request` +and :meth:`~Flask.teardown_appcontext` functions are executed. These +execute even if an unhandled exception occurred during dispatch. + + +.. _callbacks-and-errors: + +Callbacks and Errors +-------------------- + +Flask dispatches a request in multiple stages which can affect the +request, response, and how errors are handled. The contexts are active +during all of these stages. + +A :class:`Blueprint` can add handlers for these events that are specific +to the blueprint. The handlers for a blueprint will run if the blueprint +owns the route that matches the request. + +#. Before each request, :meth:`~Flask.before_request` functions are + called. If one of these functions return a value, the other + functions are skipped. The return value is treated as the response + and the view function is not called. + +#. If the :meth:`~Flask.before_request` functions did not return a + response, the view function for the matched route is called and + returns a response. + +#. The return value of the view is converted into an actual response + object and passed to the :meth:`~Flask.after_request` + functions. 
Each function returns a modified or new response object. + +#. After the response is returned, the contexts are popped, which calls + the :meth:`~Flask.teardown_request` and + :meth:`~Flask.teardown_appcontext` functions. These functions are + called even if an unhandled exception was raised at any point above. + +If an exception is raised before the teardown functions, Flask tries to +match it with an :meth:`~Flask.errorhandler` function to handle the +exception and return a response. If no error handler is found, or the +handler itself raises an exception, Flask returns a generic +``500 Internal Server Error`` response. The teardown functions are still +called, and are passed the exception object. + +If debug mode is enabled, unhandled exceptions are not converted to a +``500`` response and instead are propagated to the WSGI server. This +allows the development server to present the interactive debugger with +the traceback. + + +Teardown Callbacks +~~~~~~~~~~~~~~~~~~ + +The teardown callbacks are independent of the request dispatch, and are +instead called by the contexts when they are popped. The functions are +called even if there is an unhandled exception during dispatch, and for +manually pushed contexts. This means there is no guarantee that any +other parts of the request dispatch have run first. Be sure to write +these functions in a way that does not depend on other callbacks and +will not fail. + +During testing, it can be useful to defer popping the contexts after the +request ends, so that their data can be accessed in the test function. +Use the :meth:`~Flask.test_client` as a ``with`` block to preserve the +contexts until the ``with`` block exits. + +.. code-block:: python + + from flask import Flask, request + + app = Flask(__name__) + + @app.route('/') + def hello(): + print('during view') + return 'Hello, World!' 
+ + @app.teardown_request + def show_teardown(exception): + print('after with block') + + with app.test_request_context(): + print('during with block') + + # teardown functions are called after the context with block exits + + with app.test_client() as client: + client.get('/') + # the contexts are not popped even though the request ended + print(request.path) + + # the contexts are popped and teardown functions are called after + # the client with block exits + +Signals +~~~~~~~ + +If :data:`~signals.signals_available` is true, the following signals are +sent: + +#. :data:`request_started` is sent before the + :meth:`~Flask.before_request` functions are called. + +#. :data:`request_finished` is sent after the + :meth:`~Flask.after_request` functions are called. + +#. :data:`got_request_exception` is sent when an exception begins to + be handled, but before an :meth:`~Flask.errorhandler` is looked up or + called. + +#. :data:`request_tearing_down` is sent after the + :meth:`~Flask.teardown_request` functions are called. + + +Context Preservation on Error +----------------------------- + +At the end of a request, the request context is popped and all data +associated with it is destroyed. If an error occurs during development, +it is useful to delay destroying the data for debugging purposes. + +When the development server is running in development mode (the +``FLASK_ENV`` environment variable is set to ``'development'``), the +error and data will be preserved and shown in the interactive debugger. + +This behavior can be controlled with the +:data:`PRESERVE_CONTEXT_ON_EXCEPTION` config. As described above, it +defaults to ``True`` in the development environment. + +Do not enable :data:`PRESERVE_CONTEXT_ON_EXCEPTION` in production, as it +will cause your application to leak memory on exceptions. + + +.. _notes-on-proxies: + +Notes On Proxies +---------------- + +Some of the objects provided by Flask are proxies to other objects. 
The +proxies are accessed in the same way for each worker thread, but +point to the unique object bound to each worker behind the scenes as +described on this page. + +Most of the time you don't have to care about that, but there are some +exceptions where it is good to know that this object is actually a proxy: + +- The proxy objects cannot fake their type as the actual object types. + If you want to perform instance checks, you have to do that on the + object being proxied. +- The reference to the proxied object is needed in some situations, + such as sending :doc:`signals` or passing data to a background + thread. + +If you need to access the underlying object that is proxied, use the +:meth:`~werkzeug.local.LocalProxy._get_current_object` method:: + + app = current_app._get_current_object() + my_signal.send(app) diff --git a/testbed/pallets__flask/docs/security.rst b/testbed/pallets__flask/docs/security.rst new file mode 100644 index 0000000000000000000000000000000000000000..777e51125b982ab2f906dbca3c4262a8c5668308 --- /dev/null +++ b/testbed/pallets__flask/docs/security.rst @@ -0,0 +1,274 @@ +Security Considerations +======================= + +Web applications usually face all kinds of security problems and it's very +hard to get everything right. Flask tries to solve a few of these things +for you, but there are a couple more you have to take care of yourself. + +.. _security-xss: + +Cross-Site Scripting (XSS) +-------------------------- + +Cross site scripting is the concept of injecting arbitrary HTML (and with +it JavaScript) into the context of a website. To remedy this, developers +have to properly escape text so that it cannot include arbitrary HTML +tags. For more information on that have a look at the Wikipedia article +on `Cross-Site Scripting +`_. + +Flask configures Jinja2 to automatically escape all values unless +explicitly told otherwise. 
This should rule out all XSS problems caused +in templates, but there are still other places where you have to be +careful: + +- generating HTML without the help of Jinja2 +- calling :class:`~flask.Markup` on data submitted by users +- sending out HTML from uploaded files, never do that, use the + ``Content-Disposition: attachment`` header to prevent that problem. +- sending out textfiles from uploaded files. Some browsers are using + content-type guessing based on the first few bytes so users could + trick a browser to execute HTML. + +Another thing that is very important are unquoted attributes. While +Jinja2 can protect you from XSS issues by escaping HTML, there is one +thing it cannot protect you from: XSS by attribute injection. To counter +this possible attack vector, be sure to always quote your attributes with +either double or single quotes when using Jinja expressions in them: + +.. sourcecode:: html+jinja + + + +Why is this necessary? Because if you would not be doing that, an +attacker could easily inject custom JavaScript handlers. For example an +attacker could inject this piece of HTML+JavaScript: + +.. sourcecode:: html + + onmouseover=alert(document.cookie) + +When the user would then move with the mouse over the input, the cookie +would be presented to the user in an alert window. But instead of showing +the cookie to the user, a good attacker might also execute any other +JavaScript code. In combination with CSS injections the attacker might +even make the element fill out the entire page so that the user would +just have to have the mouse anywhere on the page to trigger the attack. + +There is one class of XSS issues that Jinja's escaping does not protect +against. The ``a`` tag's ``href`` attribute can contain a `javascript:` URI, +which the browser will execute when clicked if not secured properly. + +.. sourcecode:: html + + click here + click here + +To prevent this, you'll need to set the :ref:`security-csp` response header. 
+ +Cross-Site Request Forgery (CSRF) +--------------------------------- + +Another big problem is CSRF. This is a very complex topic and I won't +outline it here in detail just mention what it is and how to theoretically +prevent it. + +If your authentication information is stored in cookies, you have implicit +state management. The state of "being logged in" is controlled by a +cookie, and that cookie is sent with each request to a page. +Unfortunately that includes requests triggered by 3rd party sites. If you +don't keep that in mind, some people might be able to trick your +application's users with social engineering to do stupid things without +them knowing. + +Say you have a specific URL that, when you sent ``POST`` requests to will +delete a user's profile (say ``http://example.com/user/delete``). If an +attacker now creates a page that sends a post request to that page with +some JavaScript they just have to trick some users to load that page and +their profiles will end up being deleted. + +Imagine you were to run Facebook with millions of concurrent users and +someone would send out links to images of little kittens. When users +would go to that page, their profiles would get deleted while they are +looking at images of fluffy cats. + +How can you prevent that? Basically for each request that modifies +content on the server you would have to either use a one-time token and +store that in the cookie **and** also transmit it with the form data. +After receiving the data on the server again, you would then have to +compare the two tokens and ensure they are equal. + +Why does Flask not do that for you? The ideal place for this to happen is +the form validation framework, which does not exist in Flask. + +.. _security-json: + +JSON Security +------------- + +In Flask 0.10 and lower, :func:`~flask.jsonify` did not serialize top-level +arrays to JSON. This was because of a security vulnerability in ECMAScript 4. 
+ +ECMAScript 5 closed this vulnerability, so only extremely old browsers are +still vulnerable. All of these browsers have `other more serious +vulnerabilities +`_, so +this behavior was changed and :func:`~flask.jsonify` now supports serializing +arrays. + +Security Headers +---------------- + +Browsers recognize various response headers in order to control security. We +recommend reviewing each of the headers below for use in your application. +The `Flask-Talisman`_ extension can be used to manage HTTPS and the security +headers for you. + +.. _Flask-Talisman: https://github.com/GoogleCloudPlatform/flask-talisman + +HTTP Strict Transport Security (HSTS) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Tells the browser to convert all HTTP requests to HTTPS, preventing +man-in-the-middle (MITM) attacks. :: + + response.headers['Strict-Transport-Security'] = 'max-age=31536000; includeSubDomains' + +- https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security + +.. _security-csp: + +Content Security Policy (CSP) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Tell the browser where it can load various types of resource from. This header +should be used whenever possible, but requires some work to define the correct +policy for your site. A very strict policy would be:: + + response.headers['Content-Security-Policy'] = "default-src 'self'" + +- https://csp.withgoogle.com/docs/index.html +- https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy + +X-Content-Type-Options +~~~~~~~~~~~~~~~~~~~~~~ + +Forces the browser to honor the response content type instead of trying to +detect it, which can be abused to generate a cross-site scripting (XSS) +attack. :: + + response.headers['X-Content-Type-Options'] = 'nosniff' + +- https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Content-Type-Options + +X-Frame-Options +~~~~~~~~~~~~~~~ + +Prevents external sites from embedding your site in an ``iframe``. 
This +prevents a class of attacks where clicks in the outer frame can be translated +invisibly to clicks on your page's elements. This is also known as +"clickjacking". :: + + response.headers['X-Frame-Options'] = 'SAMEORIGIN' + +- https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options + +.. _security-cookie: + +Set-Cookie options +~~~~~~~~~~~~~~~~~~ + +These options can be added to a ``Set-Cookie`` header to improve their +security. Flask has configuration options to set these on the session cookie. +They can be set on other cookies too. + +- ``Secure`` limits cookies to HTTPS traffic only. +- ``HttpOnly`` protects the contents of cookies from being read with + JavaScript. +- ``SameSite`` restricts how cookies are sent with requests from + external sites. Can be set to ``'Lax'`` (recommended) or ``'Strict'``. + ``Lax`` prevents sending cookies with CSRF-prone requests from + external sites, such as submitting a form. ``Strict`` prevents sending + cookies with all external requests, including following regular links. + +:: + + app.config.update( + SESSION_COOKIE_SECURE=True, + SESSION_COOKIE_HTTPONLY=True, + SESSION_COOKIE_SAMESITE='Lax', + ) + + response.set_cookie('username', 'flask', secure=True, httponly=True, samesite='Lax') + +Specifying ``Expires`` or ``Max-Age`` options, will remove the cookie after +the given time, or the current time plus the age, respectively. If neither +option is set, the cookie will be removed when the browser is closed. :: + + # cookie expires after 10 minutes + response.set_cookie('snakes', '3', max_age=600) + +For the session cookie, if :attr:`session.permanent ` +is set, then :data:`PERMANENT_SESSION_LIFETIME` is used to set the expiration. +Flask's default cookie implementation validates that the cryptographic +signature is not older than this value. Lowering this value may help mitigate +replay attacks, where intercepted cookies can be sent at a later time. 
:: + + app.config.update( + PERMANENT_SESSION_LIFETIME=600 + ) + + @app.route('/login', methods=['POST']) + def login(): + ... + session.clear() + session['user_id'] = user.id + session.permanent = True + ... + +Use :class:`itsdangerous.TimedSerializer` to sign and validate other cookie +values (or any values that need secure signatures). + +- https://developer.mozilla.org/en-US/docs/Web/HTTP/Cookies +- https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie + +.. _samesite_support: https://caniuse.com/#feat=same-site-cookie-attribute + + +HTTP Public Key Pinning (HPKP) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This tells the browser to authenticate with the server using only the specific +certificate key to prevent MITM attacks. + +.. warning:: + Be careful when enabling this, as it is very difficult to undo if you set up + or upgrade your key incorrectly. + +- https://developer.mozilla.org/en-US/docs/Web/HTTP/Public_Key_Pinning + + +Copy/Paste to Terminal +---------------------- + +Hidden characters such as the backspace character (``\b``, ``^H``) can +cause text to render differently in HTML than how it is interpreted if +`pasted into a terminal `__. + +For example, ``import y\bose\bm\bi\bt\be\b`` renders as +``import yosemite`` in HTML, but the backspaces are applied when pasted +into a terminal, and it becomes ``import os``. + +If you expect users to copy and paste untrusted code from your site, +such as from comments posted by users on a technical blog, consider +applying extra filtering, such as replacing all ``\b`` characters. + +.. code-block:: python + + body = body.replace("\b", "") + +Most modern terminals will warn about and remove hidden characters when +pasting, so this isn't strictly necessary. It's also possible to craft +dangerous commands in other ways that aren't possible to filter. +Depending on your site's use case, it may be good to show a warning +about copying code in general. 
diff --git a/testbed/pallets__flask/docs/server.rst b/testbed/pallets__flask/docs/server.rst new file mode 100644 index 0000000000000000000000000000000000000000..f674bcd70c931008561c608eae4f220bd6506415 --- /dev/null +++ b/testbed/pallets__flask/docs/server.rst @@ -0,0 +1,165 @@ +.. currentmodule:: flask + +Development Server +================== + +Flask provides a ``run`` command to run the application with a +development server. In development mode, this server provides an +interactive debugger and will reload when code is changed. + +.. warning:: + + Do not use the development server when deploying to production. It + is intended for use only during local development. It is not + designed to be particularly efficient, stable, or secure. + + See :doc:`/deploying/index` for deployment options. + +Command Line +------------ + +The ``flask run`` command line script is the recommended way to run the +development server. It requires setting the ``FLASK_APP`` environment +variable to point to your application, and ``FLASK_ENV=development`` to +fully enable development mode. + +.. tabs:: + + .. group-tab:: Bash + + .. code-block:: text + + $ export FLASK_APP=hello + $ export FLASK_ENV=development + $ flask run + + .. group-tab:: Fish + + .. code-block:: text + + $ set -x FLASK_APP hello + $ export FLASK_ENV=development + $ flask run + + .. group-tab:: CMD + + .. code-block:: text + + > set FLASK_APP=hello + > set FLASK_ENV=development + > flask run + + .. group-tab:: Powershell + + .. code-block:: text + + > $env:FLASK_APP = "hello" + > $env:FLASK_ENV = "development" + > flask run + +This enables the development environment, including the interactive +debugger and reloader, and then starts the server on +http://localhost:5000/. Use ``flask run --help`` to see the available +options, and :doc:`/cli` for detailed instructions about configuring +and using the CLI. + +.. 
note:: + + Prior to Flask 1.0 the ``FLASK_ENV`` environment variable was not + supported and you needed to enable debug mode by exporting + ``FLASK_DEBUG=1``. This can still be used to control debug mode, but + you should prefer setting the development environment as shown + above. + + +.. _address-already-in-use: + +Address already in use +~~~~~~~~~~~~~~~~~~~~~~ + +If another program is already using port 5000, you'll see an ``OSError`` +when the server tries to start. It may have one of the following +messages: + +- ``OSError: [Errno 98] Address already in use`` +- ``OSError: [WinError 10013] An attempt was made to access a socket + in a way forbidden by its access permissions`` + +Either identify and stop the other program, or use +``flask run --port 5001`` to pick a different port. + +You can use ``netstat`` or ``lsof`` to identify what process id is using +a port, then use other operating system tools stop that process. The +following example shows that process id 6847 is using port 5000. + +.. tabs:: + + .. tab:: ``netstat`` (Linux) + + .. code-block:: text + + $ netstat -nlp | grep 5000 + tcp 0 0 127.0.0.1:5000 0.0.0.0:* LISTEN 6847/python + + .. tab:: ``lsof`` (macOS / Linux) + + .. code-block:: text + + $ lsof -P -i :5000 + Python 6847 IPv4 TCP localhost:5000 (LISTEN) + + .. tab:: ``netstat`` (Windows) + + .. code-block:: text + + > netstat -ano | findstr 5000 + TCP 127.0.0.1:5000 0.0.0.0:0 LISTENING 6847 + +macOS Monterey and later automatically starts a service that uses port +5000. To disable the service, go to System Preferences, Sharing, and +disable "AirPlay Receiver". + + +Lazy or Eager Loading +~~~~~~~~~~~~~~~~~~~~~ + +When using the ``flask run`` command with the reloader, the server will +continue to run even if you introduce syntax errors or other +initialization errors into the code. Accessing the site will show the +interactive debugger for the error, rather than crashing the server. +This feature is called "lazy loading". 
+ +If a syntax error is already present when calling ``flask run``, it will +fail immediately and show the traceback rather than waiting until the +site is accessed. This is intended to make errors more visible initially +while still allowing the server to handle errors on reload. + +To override this behavior and always fail immediately, even on reload, +pass the ``--eager-loading`` option. To always keep the server running, +even on the initial call, pass ``--lazy-loading``. + + +In Code +------- + +As an alternative to the ``flask run`` command, the development server +can also be started from Python with the :meth:`Flask.run` method. This +method takes arguments similar to the CLI options to control the server. +The main difference from the CLI command is that the server will crash +if there are errors when reloading. + +``debug=True`` can be passed to enable the debugger and reloader, but +the ``FLASK_ENV=development`` environment variable is still required to +fully enable development mode. + +Place the call in a main block, otherwise it will interfere when trying +to import and run the application with a production server later. + +.. code-block:: python + + if __name__ == "__main__": + app.run(debug=True) + +.. code-block:: text + + $ python hello.py diff --git a/testbed/pallets__flask/docs/shell.rst b/testbed/pallets__flask/docs/shell.rst new file mode 100644 index 0000000000000000000000000000000000000000..7e42e28515e828f8daca8cbf466111ce61235d7f --- /dev/null +++ b/testbed/pallets__flask/docs/shell.rst @@ -0,0 +1,100 @@ +Working with the Shell +====================== + +.. versionadded:: 0.3 + +One of the reasons everybody loves Python is the interactive shell. It +basically allows you to execute Python commands in real time and +immediately get results back. Flask itself does not come with an +interactive shell, because it does not require any specific setup upfront, +just import your application and start playing around. 
+ +There are however some handy helpers to make playing around in the shell a +more pleasant experience. The main issue with interactive console +sessions is that you're not triggering a request like a browser does which +means that :data:`~flask.g`, :data:`~flask.request` and others are not +available. But the code you want to test might depend on them, so what +can you do? + +This is where some helper functions come in handy. Keep in mind however +that these functions are not only there for interactive shell usage, but +also for unit testing and other situations that require a faked request +context. + +Generally it's recommended that you read :doc:`reqcontext` first. + +Command Line Interface +---------------------- + +Starting with Flask 0.11 the recommended way to work with the shell is the +``flask shell`` command which does a lot of this automatically for you. +For instance the shell is automatically initialized with a loaded +application context. + +For more information see :doc:`/cli`. + +Creating a Request Context +-------------------------- + +The easiest way to create a proper request context from the shell is by +using the :attr:`~flask.Flask.test_request_context` method which creates +us a :class:`~flask.ctx.RequestContext`: + +>>> ctx = app.test_request_context() + +Normally you would use the ``with`` statement to make this request object +active, but in the shell it's easier to use the +:meth:`~flask.ctx.RequestContext.push` and +:meth:`~flask.ctx.RequestContext.pop` methods by hand: + +>>> ctx.push() + +From that point onwards you can work with the request object until you +call `pop`: + +>>> ctx.pop() + +Firing Before/After Request +--------------------------- + +By just creating a request context, you still don't have run the code that +is normally run before a request. 
This might result in your database +being unavailable if you are connecting to the database in a +before-request callback or the current user not being stored on the +:data:`~flask.g` object etc. + +This however can easily be done yourself. Just call +:meth:`~flask.Flask.preprocess_request`: + +>>> ctx = app.test_request_context() +>>> ctx.push() +>>> app.preprocess_request() + +Keep in mind that the :meth:`~flask.Flask.preprocess_request` function +might return a response object, in that case just ignore it. + +To shutdown a request, you need to trick a bit before the after request +functions (triggered by :meth:`~flask.Flask.process_response`) operate on +a response object: + +>>> app.process_response(app.response_class()) + +>>> ctx.pop() + +The functions registered as :meth:`~flask.Flask.teardown_request` are +automatically called when the context is popped. So this is the perfect +place to automatically tear down resources that were needed by the request +context (such as database connections). + + +Further Improving the Shell Experience +-------------------------------------- + +If you like the idea of experimenting in a shell, create yourself a module +with stuff you want to star import into your interactive session. There +you could also define some more helper methods for common things such as +initializing the database, dropping tables etc. + +Just put them into a module (like `shelltools`) and import from there: + +>>> from shelltools import * diff --git a/testbed/pallets__flask/docs/signals.rst b/testbed/pallets__flask/docs/signals.rst new file mode 100644 index 0000000000000000000000000000000000000000..27630de681732680a1bbbabd1c54b5b7d496e2d2 --- /dev/null +++ b/testbed/pallets__flask/docs/signals.rst @@ -0,0 +1,188 @@ +Signals +======= + +.. versionadded:: 0.6 + +Starting with Flask 0.6, there is integrated support for signalling in +Flask. 
This support is provided by the excellent `blinker`_ library and +will gracefully fall back if it is not available. + +What are signals? Signals help you decouple applications by sending +notifications when actions occur elsewhere in the core framework or +another Flask extensions. In short, signals allow certain senders to +notify subscribers that something happened. + +Flask comes with a couple of signals and other extensions might provide +more. Also keep in mind that signals are intended to notify subscribers +and should not encourage subscribers to modify data. You will notice that +there are signals that appear to do the same thing like some of the +builtin decorators do (eg: :data:`~flask.request_started` is very similar +to :meth:`~flask.Flask.before_request`). However, there are differences in +how they work. The core :meth:`~flask.Flask.before_request` handler, for +example, is executed in a specific order and is able to abort the request +early by returning a response. In contrast all signal handlers are +executed in undefined order and do not modify any data. + +The big advantage of signals over handlers is that you can safely +subscribe to them for just a split second. These temporary +subscriptions are helpful for unit testing for example. Say you want to +know what templates were rendered as part of a request: signals allow you +to do exactly that. + +Subscribing to Signals +---------------------- + +To subscribe to a signal, you can use the +:meth:`~blinker.base.Signal.connect` method of a signal. The first +argument is the function that should be called when the signal is emitted, +the optional second argument specifies a sender. To unsubscribe from a +signal, you can use the :meth:`~blinker.base.Signal.disconnect` method. + +For all core Flask signals, the sender is the application that issued the +signal. When you subscribe to a signal, be sure to also provide a sender +unless you really want to listen for signals from all applications. 
This is +especially true if you are developing an extension. + +For example, here is a helper context manager that can be used in a unit test +to determine which templates were rendered and what variables were passed +to the template:: + + from flask import template_rendered + from contextlib import contextmanager + + @contextmanager + def captured_templates(app): + recorded = [] + def record(sender, template, context, **extra): + recorded.append((template, context)) + template_rendered.connect(record, app) + try: + yield recorded + finally: + template_rendered.disconnect(record, app) + +This can now easily be paired with a test client:: + + with captured_templates(app) as templates: + rv = app.test_client().get('/') + assert rv.status_code == 200 + assert len(templates) == 1 + template, context = templates[0] + assert template.name == 'index.html' + assert len(context['items']) == 10 + +Make sure to subscribe with an extra ``**extra`` argument so that your +calls don't fail if Flask introduces new arguments to the signals. + +All the template rendering in the code issued by the application `app` +in the body of the ``with`` block will now be recorded in the `templates` +variable. Whenever a template is rendered, the template object as well as +context are appended to it. + +Additionally there is a convenient helper method +(:meth:`~blinker.base.Signal.connected_to`) that allows you to +temporarily subscribe a function to a signal with a context manager on +its own. Because the return value of the context manager cannot be +specified that way, you have to pass the list in as an argument:: + + from flask import template_rendered + + def captured_templates(app, recorded, **extra): + def record(sender, template, context): + recorded.append((template, context)) + return template_rendered.connected_to(record, app) + +The example above would then look like this:: + + templates = [] + with captured_templates(app, templates, **extra): + ... 
+ template, context = templates[0] + +.. admonition:: Blinker API Changes + + The :meth:`~blinker.base.Signal.connected_to` method arrived in Blinker + with version 1.1. + +Creating Signals +---------------- + +If you want to use signals in your own application, you can use the +blinker library directly. The most common use case are named signals in a +custom :class:`~blinker.base.Namespace`.. This is what is recommended +most of the time:: + + from blinker import Namespace + my_signals = Namespace() + +Now you can create new signals like this:: + + model_saved = my_signals.signal('model-saved') + +The name for the signal here makes it unique and also simplifies +debugging. You can access the name of the signal with the +:attr:`~blinker.base.NamedSignal.name` attribute. + +.. admonition:: For Extension Developers + + If you are writing a Flask extension and you want to gracefully degrade for + missing blinker installations, you can do so by using the + :class:`flask.signals.Namespace` class. + +.. _signals-sending: + +Sending Signals +--------------- + +If you want to emit a signal, you can do so by calling the +:meth:`~blinker.base.Signal.send` method. It accepts a sender as first +argument and optionally some keyword arguments that are forwarded to the +signal subscribers:: + + class Model(object): + ... + + def save(self): + model_saved.send(self) + +Try to always pick a good sender. If you have a class that is emitting a +signal, pass ``self`` as sender. If you are emitting a signal from a random +function, you can pass ``current_app._get_current_object()`` as sender. + +.. admonition:: Passing Proxies as Senders + + Never pass :data:`~flask.current_app` as sender to a signal. Use + ``current_app._get_current_object()`` instead. The reason for this is + that :data:`~flask.current_app` is a proxy and not the real application + object. 
+ + +Signals and Flask's Request Context +----------------------------------- + +Signals fully support :doc:`reqcontext` when receiving signals. +Context-local variables are consistently available between +:data:`~flask.request_started` and :data:`~flask.request_finished`, so you can +rely on :class:`flask.g` and others as needed. Note the limitations described +in :ref:`signals-sending` and the :data:`~flask.request_tearing_down` signal. + + +Decorator Based Signal Subscriptions +------------------------------------ + +With Blinker 1.1 you can also easily subscribe to signals by using the new +:meth:`~blinker.base.NamedSignal.connect_via` decorator:: + + from flask import template_rendered + + @template_rendered.connect_via(app) + def when_template_rendered(sender, template, context, **extra): + print(f'Template {template.name} is rendered with {context}') + +Core Signals +------------ + +Take a look at :ref:`core-signals-list` for a list of all builtin signals. + + +.. _blinker: https://pypi.org/project/blinker/ diff --git a/testbed/pallets__flask/docs/templating.rst b/testbed/pallets__flask/docs/templating.rst new file mode 100644 index 0000000000000000000000000000000000000000..dcc757c381c6759b547ec98f19eb4149a4173d7e --- /dev/null +++ b/testbed/pallets__flask/docs/templating.rst @@ -0,0 +1,203 @@ +Templates +========= + +Flask leverages Jinja2 as its template engine. You are obviously free to use +a different template engine, but you still have to install Jinja2 to run +Flask itself. This requirement is necessary to enable rich extensions. +An extension can depend on Jinja2 being present. + +This section only gives a very quick introduction into how Jinja2 +is integrated into Flask. If you want information on the template +engine's syntax itself, head over to the official `Jinja2 Template +Documentation `_ for +more information. 
+ +Jinja Setup +----------- + +Unless customized, Jinja2 is configured by Flask as follows: + +- autoescaping is enabled for all templates ending in ``.html``, + ``.htm``, ``.xml`` as well as ``.xhtml`` when using + :func:`~flask.templating.render_template`. +- autoescaping is enabled for all strings when using + :func:`~flask.templating.render_template_string`. +- a template has the ability to opt in/out autoescaping with the + ``{% autoescape %}`` tag. +- Flask inserts a couple of global functions and helpers into the + Jinja2 context, additionally to the values that are present by + default. + +Standard Context +---------------- + +The following global variables are available within Jinja2 templates +by default: + +.. data:: config + :noindex: + + The current configuration object (:data:`flask.Flask.config`) + + .. versionadded:: 0.6 + + .. versionchanged:: 0.10 + This is now always available, even in imported templates. + +.. data:: request + :noindex: + + The current request object (:class:`flask.request`). This variable is + unavailable if the template was rendered without an active request + context. + +.. data:: session + :noindex: + + The current session object (:class:`flask.session`). This variable + is unavailable if the template was rendered without an active request + context. + +.. data:: g + :noindex: + + The request-bound object for global variables (:data:`flask.g`). This + variable is unavailable if the template was rendered without an active + request context. + +.. function:: url_for + :noindex: + + The :func:`flask.url_for` function. + +.. function:: get_flashed_messages + :noindex: + + The :func:`flask.get_flashed_messages` function. + +.. admonition:: The Jinja Context Behavior + + These variables are added to the context of variables, they are not + global variables. The difference is that by default these will not + show up in the context of imported templates. 
This is partially caused + by performance considerations, partially to keep things explicit. + + What does this mean for you? If you have a macro you want to import, + that needs to access the request object you have two possibilities: + + 1. you explicitly pass the request to the macro as parameter, or + the attribute of the request object you are interested in. + 2. you import the macro "with context". + + Importing with context looks like this: + + .. sourcecode:: jinja + + {% from '_helpers.html' import my_macro with context %} + + +Controlling Autoescaping +------------------------ + +Autoescaping is the concept of automatically escaping special characters +for you. Special characters in the sense of HTML (or XML, and thus XHTML) +are ``&``, ``>``, ``<``, ``"`` as well as ``'``. Because these characters +carry specific meanings in documents on their own you have to replace them +by so called "entities" if you want to use them for text. Not doing so +would not only cause user frustration by the inability to use these +characters in text, but can also lead to security problems. (see +:ref:`security-xss`) + +Sometimes however you will need to disable autoescaping in templates. +This can be the case if you want to explicitly inject HTML into pages, for +example if they come from a system that generates secure HTML like a +markdown to HTML converter. + +There are three ways to accomplish that: + +- In the Python code, wrap the HTML string in a :class:`~flask.Markup` + object before passing it to the template. This is in general the + recommended way. +- Inside the template, use the ``|safe`` filter to explicitly mark a + string as safe HTML (``{{ myvariable|safe }}``) +- Temporarily disable the autoescape system altogether. + +To disable the autoescape system in templates, you can use the ``{% +autoescape %}`` block: + +.. sourcecode:: html+jinja + + {% autoescape false %} +

    <p>autoescaping is disabled here

    <p>{{ will_not_be_escaped }}
+ +Variables are not limited to values; a context processor can also make +functions available to templates (since Python allows passing around +functions):: + + @app.context_processor + def utility_processor(): + def format_price(amount, currency="€"): + return f"{amount:.2f}{currency}" + return dict(format_price=format_price) + +The context processor above makes the `format_price` function available to all +templates:: + + {{ format_price(0.33) }} + +You could also build `format_price` as a template filter (see +:ref:`registering-filters`), but this demonstrates how to pass functions in a +context processor. diff --git a/testbed/pallets__flask/docs/testing.rst b/testbed/pallets__flask/docs/testing.rst new file mode 100644 index 0000000000000000000000000000000000000000..d68cb5338b1f095ff3c73251b5eb14c3aee734f3 --- /dev/null +++ b/testbed/pallets__flask/docs/testing.rst @@ -0,0 +1,319 @@ +Testing Flask Applications +========================== + +Flask provides utilities for testing an application. This documentation +goes over techniques for working with different parts of the application +in tests. + +We will use the `pytest`_ framework to set up and run our tests. + +.. code-block:: text + + $ pip install pytest + +.. _pytest: https://docs.pytest.org/ + +The :doc:`tutorial ` goes over how to write tests for +100% coverage of the sample Flaskr blog application. See +:doc:`the tutorial on tests ` for a detailed +explanation of specific tests for an application. + + +Identifying Tests +----------------- + +Tests are typically located in the ``tests`` folder. Tests are functions +that start with ``test_``, in Python modules that start with ``test_``. +Tests can also be further grouped in classes that start with ``Test``. + +It can be difficult to know what to test. Generally, try to test the +code that you write, not the code of libraries that you use, since they +are already tested. Try to extract complex behaviors as separate +functions to test individually. 
+ + +Fixtures +-------- + +Pytest *fixtures* allow writing pieces of code that are reusable across +tests. A simple fixture returns a value, but a fixture can also do +setup, yield a value, then do teardown. Fixtures for the application, +test client, and CLI runner are shown below, they can be placed in +``tests/conftest.py``. + +If you're using an +:doc:`application factory `, define an ``app`` +fixture to create and configure an app instance. You can add code before +and after the ``yield`` to set up and tear down other resources, such as +creating and clearing a database. + +If you're not using a factory, you already have an app object you can +import and configure directly. You can still use an ``app`` fixture to +set up and tear down resources. + +.. code-block:: python + + import pytest + from my_project import create_app + + @pytest.fixture() + def app(): + app = create_app() + app.config.update({ + "TESTING": True, + }) + + # other setup can go here + + yield app + + # clean up / reset resources here + + + @pytest.fixture() + def client(app): + return app.test_client() + + + @pytest.fixture() + def runner(app): + return app.test_cli_runner() + + +Sending Requests with the Test Client +------------------------------------- + +The test client makes requests to the application without running a live +server. Flask's client extends +:doc:`Werkzeug's client `, see those docs for additional +information. + +The ``client`` has methods that match the common HTTP request methods, +such as ``client.get()`` and ``client.post()``. They take many arguments +for building the request; you can find the full documentation in +:class:`~werkzeug.test.EnvironBuilder`. Typically you'll use ``path``, +``query``, ``headers``, and ``data`` or ``json``. + +To make a request, call the method the request should use with the path +to the route to test. A :class:`~werkzeug.test.TestResponse` is returned +to examine the response data. 
It has all the usual properties of a +response object. You'll usually look at ``response.data``, which is the +bytes returned by the view. If you want to use text, Werkzeug 2.1 +provides ``response.text``, or use ``response.get_data(as_text=True)``. + +.. code-block:: python + + def test_request_example(client): + response = client.get("/posts") + assert b"

<h2>Hello, World!</h2>

" in response.data + + +Pass a dict ``query={"key": "value", ...}`` to set arguments in the +query string (after the ``?`` in the URL). Pass a dict ``headers={}`` +to set request headers. + +To send a request body in a POST or PUT request, pass a value to +``data``. If raw bytes are passed, that exact body is used. Usually, +you'll pass a dict to set form data. + + +Form Data +~~~~~~~~~ + +To send form data, pass a dict to ``data``. The ``Content-Type`` header +will be set to ``multipart/form-data`` or +``application/x-www-form-urlencoded`` automatically. + +If a value is a file object opened for reading bytes (``"rb"`` mode), it +will be treated as an uploaded file. To change the detected filename and +content type, pass a ``(file, filename, content_type)`` tuple. File +objects will be closed after making the request, so they do not need to +use the usual ``with open() as f:`` pattern. + +It can be useful to store files in a ``tests/resources`` folder, then +use ``pathlib.Path`` to get files relative to the current test file. + +.. code-block:: python + + from pathlib import Path + + # get the resources folder in the tests folder + resources = Path(__file__).parent / "resources" + + def test_edit_user(client): + response = client.post("/user/2/edit", data={ + name="Flask", + theme="dark", + picture=(resources / "picture.png").open("rb"), + }) + assert response.status_code == 200 + + +JSON Data +~~~~~~~~~ + +To send JSON data, pass an object to ``json``. The ``Content-Type`` +header will be set to ``application/json`` automatically. + +Similarly, if the response contains JSON data, the ``response.json`` +attribute will contain the deserialized object. + +.. code-block:: python + + def test_json_data(client): + response = client.post("/graphql", json={ + "query": """ + query User($id: String!) 
{ + user(id: $id) { + name + theme + picture_url + } + } + """, + variables={"id": 2}, + }) + assert response.json["data"]["user"]["name"] == "Flask" + + +Following Redirects +------------------- + +By default, the client does not make additional requests if the response +is a redirect. By passing ``follow_redirects=True`` to a request method, +the client will continue to make requests until a non-redirect response +is returned. + +:attr:`TestResponse.history ` is +a tuple of the responses that led up to the final response. Each +response has a :attr:`~werkzeug.test.TestResponse.request` attribute +which records the request that produced that response. + +.. code-block:: python + + def test_logout_redirect(client): + response = client.get("/logout") + # Check that there was one redirect response. + assert len(response.history) == 1 + # Check that the second request was to the index page. + assert response.request.path == "/index" + + +Accessing and Modifying the Session +----------------------------------- + +To access Flask's context variables, mainly +:data:`~flask.session`, use the client in a ``with`` statement. +The app and request context will remain active *after* making a request, +until the ``with`` block ends. + +.. code-block:: python + + from flask import session + + def test_access_session(client): + with client: + client.post("/auth/login", data={"username": "flask"}) + # session is still accessible + assert session["user_id"] == 1 + + # session is no longer accessible + +If you want to access or set a value in the session *before* making a +request, use the client's +:meth:`~flask.testing.FlaskClient.session_transaction` method in a +``with`` statement. It returns a session object, and will save the +session once the block ends. + +.. 
code-block:: python + + from flask import session + + def test_modify_session(client): + with client.session_transaction() as session: + # set a user id without going through the login route + session["user_id"] = 1 + + # session is saved now + + response = client.get("/users/me") + assert response.json["username"] == "flask" + + +.. _testing-cli: + +Running Commands with the CLI Runner +------------------------------------ + +Flask provides :meth:`~flask.Flask.test_cli_runner` to create a +:class:`~flask.testing.FlaskCliRunner`, which runs CLI commands in +isolation and captures the output in a :class:`~click.testing.Result` +object. Flask's runner extends :doc:`Click's runner `, +see those docs for additional information. + +Use the runner's :meth:`~flask.testing.FlaskCliRunner.invoke` method to +call commands in the same way they would be called with the ``flask`` +command from the command line. + +.. code-block:: python + + import click + + @app.cli.command("hello") + @click.option("--name", default="World") + def hello_command(name): + click.echo(f"Hello, {name}!") + + def test_hello_command(runner): + result = runner.invoke(["hello"]) + assert "World" in result.output + + result = runner.invoke(["hello", "--name", "Flask"]) + assert "Flask" in result.output + + +Tests that depend on an Active Context +-------------------------------------- + +You may have functions that are called from views or commands, that +expect an active :doc:`application context ` or +:doc:`request context ` because they access ``request``, +``session``, or ``current_app``. Rather than testing them by making a +request or invoking the command, you can create and activate a context +directly. + +Use ``with app.app_context()`` to push an application context. For +example, database extensions usually require an active app context to +make queries. + +.. 
code-block:: python + + def test_db_post_model(app): + with app.app_context(): + post = db.session.query(Post).get(1) + +Use ``with app.test_request_context()`` to push a request context. It +takes the same arguments as the test client's request methods. + +.. code-block:: python + + def test_validate_user_edit(app): + with app.test_request_context( + "/user/2/edit", method="POST", data={"name": ""} + ): + # call a function that accesses `request` + messages = validate_edit_user() + + assert messages["name"][0] == "Name cannot be empty." + +Creating a test request context doesn't run any of the Flask dispatching +code, so ``before_request`` functions are not called. If you need to +call these, usually it's better to make a full request instead. However, +it's possible to call them manually. + +.. code-block:: python + + def test_auth_token(app): + with app.test_request_context("/user/2/edit", headers={"X-Auth-Token": "1"}): + app.preprocess_request() + assert g.user.name == "Flask" diff --git a/testbed/pallets__flask/docs/tutorial/blog.rst b/testbed/pallets__flask/docs/tutorial/blog.rst new file mode 100644 index 0000000000000000000000000000000000000000..b06329eaae8360ccc7a10105370361bb451c9008 --- /dev/null +++ b/testbed/pallets__flask/docs/tutorial/blog.rst @@ -0,0 +1,336 @@ +.. currentmodule:: flask + +Blog Blueprint +============== + +You'll use the same techniques you learned about when writing the +authentication blueprint to write the blog blueprint. The blog should +list all posts, allow logged in users to create posts, and allow the +author of a post to edit or delete it. + +As you implement each view, keep the development server running. As you +save your changes, try going to the URL in your browser and testing them +out. + +The Blueprint +------------- + +Define the blueprint and register it in the application factory. + +.. 
code-block:: python + :caption: ``flaskr/blog.py`` + + from flask import ( + Blueprint, flash, g, redirect, render_template, request, url_for + ) + from werkzeug.exceptions import abort + + from flaskr.auth import login_required + from flaskr.db import get_db + + bp = Blueprint('blog', __name__) + +Import and register the blueprint from the factory using +:meth:`app.register_blueprint() `. Place the +new code at the end of the factory function before returning the app. + +.. code-block:: python + :caption: ``flaskr/__init__.py`` + + def create_app(): + app = ... + # existing code omitted + + from . import blog + app.register_blueprint(blog.bp) + app.add_url_rule('/', endpoint='index') + + return app + + +Unlike the auth blueprint, the blog blueprint does not have a +``url_prefix``. So the ``index`` view will be at ``/``, the ``create`` +view at ``/create``, and so on. The blog is the main feature of Flaskr, +so it makes sense that the blog index will be the main index. + +However, the endpoint for the ``index`` view defined below will be +``blog.index``. Some of the authentication views referred to a plain +``index`` endpoint. :meth:`app.add_url_rule() ` +associates the endpoint name ``'index'`` with the ``/`` url so that +``url_for('index')`` or ``url_for('blog.index')`` will both work, +generating the same ``/`` URL either way. + +In another application you might give the blog blueprint a +``url_prefix`` and define a separate ``index`` view in the application +factory, similar to the ``hello`` view. Then the ``index`` and +``blog.index`` endpoints and URLs would be different. + + +Index +----- + +The index will show all of the posts, most recent first. A ``JOIN`` is +used so that the author information from the ``user`` table is +available in the result. + +.. 
code-block:: python + :caption: ``flaskr/blog.py`` + + @bp.route('/') + def index(): + db = get_db() + posts = db.execute( + 'SELECT p.id, title, body, created, author_id, username' + ' FROM post p JOIN user u ON p.author_id = u.id' + ' ORDER BY created DESC' + ).fetchall() + return render_template('blog/index.html', posts=posts) + +.. code-block:: html+jinja + :caption: ``flaskr/templates/blog/index.html`` + + {% extends 'base.html' %} + + {% block header %} +

    <h1>{% block title %}Posts{% endblock %}</h1>

+ {% if g.user %} + New + {% endif %} + {% endblock %} + + {% block content %} + {% for post in posts %} +
+
+
+

            <h1>{{ post['title'] }}</h1>

+
            <div class="about">by {{ post['username'] }} on {{ post['created'].strftime('%Y-%m-%d') }}</div>
+
+ {% if g.user['id'] == post['author_id'] %} + Edit + {% endif %} +
+

        <p class="body">{{ post['body'] }}</p>

+
+ {% if not loop.last %} +
+ {% endif %} + {% endfor %} + {% endblock %} + +When a user is logged in, the ``header`` block adds a link to the +``create`` view. When the user is the author of a post, they'll see an +"Edit" link to the ``update`` view for that post. ``loop.last`` is a +special variable available inside `Jinja for loops`_. It's used to +display a line after each post except the last one, to visually separate +them. + +.. _Jinja for loops: https://jinja.palletsprojects.com/templates/#for + + +Create +------ + +The ``create`` view works the same as the auth ``register`` view. Either +the form is displayed, or the posted data is validated and the post is +added to the database or an error is shown. + +The ``login_required`` decorator you wrote earlier is used on the blog +views. A user must be logged in to visit these views, otherwise they +will be redirected to the login page. + +.. code-block:: python + :caption: ``flaskr/blog.py`` + + @bp.route('/create', methods=('GET', 'POST')) + @login_required + def create(): + if request.method == 'POST': + title = request.form['title'] + body = request.form['body'] + error = None + + if not title: + error = 'Title is required.' + + if error is not None: + flash(error) + else: + db = get_db() + db.execute( + 'INSERT INTO post (title, body, author_id)' + ' VALUES (?, ?, ?)', + (title, body, g.user['id']) + ) + db.commit() + return redirect(url_for('blog.index')) + + return render_template('blog/create.html') + +.. code-block:: html+jinja + :caption: ``flaskr/templates/blog/create.html`` + + {% extends 'base.html' %} + + {% block header %} +

    <h1>{% block title %}New Post{% endblock %}</h1>

+ {% endblock %} + + {% block content %} +
+ + + + + +
+ {% endblock %} + + +Update +------ + +Both the ``update`` and ``delete`` views will need to fetch a ``post`` +by ``id`` and check if the author matches the logged in user. To avoid +duplicating code, you can write a function to get the ``post`` and call +it from each view. + +.. code-block:: python + :caption: ``flaskr/blog.py`` + + def get_post(id, check_author=True): + post = get_db().execute( + 'SELECT p.id, title, body, created, author_id, username' + ' FROM post p JOIN user u ON p.author_id = u.id' + ' WHERE p.id = ?', + (id,) + ).fetchone() + + if post is None: + abort(404, f"Post id {id} doesn't exist.") + + if check_author and post['author_id'] != g.user['id']: + abort(403) + + return post + +:func:`abort` will raise a special exception that returns an HTTP status +code. It takes an optional message to show with the error, otherwise a +default message is used. ``404`` means "Not Found", and ``403`` means +"Forbidden". (``401`` means "Unauthorized", but you redirect to the +login page instead of returning that status.) + +The ``check_author`` argument is defined so that the function can be +used to get a ``post`` without checking the author. This would be useful +if you wrote a view to show an individual post on a page, where the user +doesn't matter because they're not modifying the post. + +.. code-block:: python + :caption: ``flaskr/blog.py`` + + @bp.route('//update', methods=('GET', 'POST')) + @login_required + def update(id): + post = get_post(id) + + if request.method == 'POST': + title = request.form['title'] + body = request.form['body'] + error = None + + if not title: + error = 'Title is required.' + + if error is not None: + flash(error) + else: + db = get_db() + db.execute( + 'UPDATE post SET title = ?, body = ?' 
+ ' WHERE id = ?', + (title, body, id) + ) + db.commit() + return redirect(url_for('blog.index')) + + return render_template('blog/update.html', post=post) + +Unlike the views you've written so far, the ``update`` function takes +an argument, ``id``. That corresponds to the ```` in the route. +A real URL will look like ``/1/update``. Flask will capture the ``1``, +ensure it's an :class:`int`, and pass it as the ``id`` argument. If you +don't specify ``int:`` and instead do ````, it will be a string. +To generate a URL to the update page, :func:`url_for` needs to be passed +the ``id`` so it knows what to fill in: +``url_for('blog.update', id=post['id'])``. This is also in the +``index.html`` file above. + +The ``create`` and ``update`` views look very similar. The main +difference is that the ``update`` view uses a ``post`` object and an +``UPDATE`` query instead of an ``INSERT``. With some clever refactoring, +you could use one view and template for both actions, but for the +tutorial it's clearer to keep them separate. + +.. code-block:: html+jinja + :caption: ``flaskr/templates/blog/update.html`` + + {% extends 'base.html' %} + + {% block header %} +
+      <h1>{% block title %}Edit "{{ post['title'] }}"{% endblock %}</h1>
+    {% endblock %}
+
+    {% block content %}
+      <form method="post">
+        <label for="title">Title</label>
+        <input name="title" id="title"
+          value="{{ request.form['title'] or post['title'] }}" required>
+        <label for="body">Body</label>
+        <textarea name="body" id="body">{{ request.form['body'] or post['body'] }}</textarea>
+        <input type="submit" value="Save">
+      </form>
+      <hr>
+      <form action="{{ url_for('blog.delete', id=post['id']) }}" method="post">
+        <input class="danger" type="submit" value="Delete"
+          onclick="return confirm('Are you sure?');">
+      </form>
+    {% endblock %}
+
+This template has two forms. The first posts the edited data to the
+current page (``/<id>/update``). The other form contains only a button
+and specifies an ``action`` attribute that posts to the delete view
+instead. The button uses some JavaScript to show a confirmation dialog
+before submitting.
+
+The pattern ``{{ request.form['title'] or post['title'] }}`` is used to
+choose what data appears in the form. When the form hasn't been
+submitted, the original ``post`` data appears, but if invalid form data
+was posted you want to display that so the user can fix the error, so
+``request.form`` is used instead. :data:`request` is another variable
+that's automatically available in templates.
+
+
+Delete
+------
+
+The delete view doesn't have its own template, the delete button is part
+of ``update.html`` and posts to the ``/<id>/delete`` URL. Since there
+is no template, it will only handle the ``POST`` method and then redirect
+to the ``index`` view.
+
+.. code-block:: python
+    :caption: ``flaskr/blog.py``
+
+    @bp.route('/<int:id>/delete', methods=('POST',))
+    @login_required
+    def delete(id):
+        get_post(id)
+        db = get_db()
+        db.execute('DELETE FROM post WHERE id = ?', (id,))
+        db.commit()
+        return redirect(url_for('blog.index'))
+
+Congratulations, you've now finished writing your application! Take some
+time to try out everything in the browser. However, there's still more
+to do before the project is complete.
+
+Continue to :doc:`install`.
diff --git a/testbed/pallets__flask/docs/tutorial/database.rst b/testbed/pallets__flask/docs/tutorial/database.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b094909eca8445a471d7fc9957e7f8aba8902529
--- /dev/null
+++ b/testbed/pallets__flask/docs/tutorial/database.rst
@@ -0,0 +1,213 @@
+.. currentmodule:: flask
+
+Define and Access the Database
+==============================
+
+The application will use a `SQLite`_ database to store users and posts.
+Python comes with built-in support for SQLite in the :mod:`sqlite3` +module. + +SQLite is convenient because it doesn't require setting up a separate +database server and is built-in to Python. However, if concurrent +requests try to write to the database at the same time, they will slow +down as each write happens sequentially. Small applications won't notice +this. Once you become big, you may want to switch to a different +database. + +The tutorial doesn't go into detail about SQL. If you are not familiar +with it, the SQLite docs describe the `language`_. + +.. _SQLite: https://sqlite.org/about.html +.. _language: https://sqlite.org/lang.html + + +Connect to the Database +----------------------- + +The first thing to do when working with a SQLite database (and most +other Python database libraries) is to create a connection to it. Any +queries and operations are performed using the connection, which is +closed after the work is finished. + +In web applications this connection is typically tied to the request. It +is created at some point when handling a request, and closed before the +response is sent. + +.. code-block:: python + :caption: ``flaskr/db.py`` + + import sqlite3 + + import click + from flask import current_app, g + from flask.cli import with_appcontext + + + def get_db(): + if 'db' not in g: + g.db = sqlite3.connect( + current_app.config['DATABASE'], + detect_types=sqlite3.PARSE_DECLTYPES + ) + g.db.row_factory = sqlite3.Row + + return g.db + + + def close_db(e=None): + db = g.pop('db', None) + + if db is not None: + db.close() + +:data:`g` is a special object that is unique for each request. It is +used to store data that might be accessed by multiple functions during +the request. The connection is stored and reused instead of creating a +new connection if ``get_db`` is called a second time in the same +request. + +:data:`current_app` is another special object that points to the Flask +application handling the request. 
Since you used an application factory, +there is no application object when writing the rest of your code. +``get_db`` will be called when the application has been created and is +handling a request, so :data:`current_app` can be used. + +:func:`sqlite3.connect` establishes a connection to the file pointed at +by the ``DATABASE`` configuration key. This file doesn't have to exist +yet, and won't until you initialize the database later. + +:class:`sqlite3.Row` tells the connection to return rows that behave +like dicts. This allows accessing the columns by name. + +``close_db`` checks if a connection was created by checking if ``g.db`` +was set. If the connection exists, it is closed. Further down you will +tell your application about the ``close_db`` function in the application +factory so that it is called after each request. + + +Create the Tables +----------------- + +In SQLite, data is stored in *tables* and *columns*. These need to be +created before you can store and retrieve data. Flaskr will store users +in the ``user`` table, and posts in the ``post`` table. Create a file +with the SQL commands needed to create empty tables: + +.. code-block:: sql + :caption: ``flaskr/schema.sql`` + + DROP TABLE IF EXISTS user; + DROP TABLE IF EXISTS post; + + CREATE TABLE user ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + username TEXT UNIQUE NOT NULL, + password TEXT NOT NULL + ); + + CREATE TABLE post ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + author_id INTEGER NOT NULL, + created TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + title TEXT NOT NULL, + body TEXT NOT NULL, + FOREIGN KEY (author_id) REFERENCES user (id) + ); + +Add the Python functions that will run these SQL commands to the +``db.py`` file: + +.. 
code-block:: python + :caption: ``flaskr/db.py`` + + def init_db(): + db = get_db() + + with current_app.open_resource('schema.sql') as f: + db.executescript(f.read().decode('utf8')) + + + @click.command('init-db') + @with_appcontext + def init_db_command(): + """Clear the existing data and create new tables.""" + init_db() + click.echo('Initialized the database.') + +:meth:`open_resource() ` opens a file relative to +the ``flaskr`` package, which is useful since you won't necessarily know +where that location is when deploying the application later. ``get_db`` +returns a database connection, which is used to execute the commands +read from the file. + +:func:`click.command` defines a command line command called ``init-db`` +that calls the ``init_db`` function and shows a success message to the +user. You can read :doc:`/cli` to learn more about writing commands. + + +Register with the Application +----------------------------- + +The ``close_db`` and ``init_db_command`` functions need to be registered +with the application instance; otherwise, they won't be used by the +application. However, since you're using a factory function, that +instance isn't available when writing the functions. Instead, write a +function that takes an application and does the registration. + +.. code-block:: python + :caption: ``flaskr/db.py`` + + def init_app(app): + app.teardown_appcontext(close_db) + app.cli.add_command(init_db_command) + +:meth:`app.teardown_appcontext() ` tells +Flask to call that function when cleaning up after returning the +response. + +:meth:`app.cli.add_command() ` adds a new +command that can be called with the ``flask`` command. + +Import and call this function from the factory. Place the new code at +the end of the factory function before returning the app. + +.. code-block:: python + :caption: ``flaskr/__init__.py`` + + def create_app(): + app = ... + # existing code omitted + + from . 
import db + db.init_app(app) + + return app + + +Initialize the Database File +---------------------------- + +Now that ``init-db`` has been registered with the app, it can be called +using the ``flask`` command, similar to the ``run`` command from the +previous page. + +.. note:: + + If you're still running the server from the previous page, you can + either stop the server, or run this command in a new terminal. If + you use a new terminal, remember to change to your project directory + and activate the env as described in :doc:`/installation`. You'll + also need to set ``FLASK_APP`` and ``FLASK_ENV`` as shown on the + previous page. + +Run the ``init-db`` command: + +.. code-block:: none + + $ flask init-db + Initialized the database. + +There will now be a ``flaskr.sqlite`` file in the ``instance`` folder in +your project. + +Continue to :doc:`views`. diff --git a/testbed/pallets__flask/docs/tutorial/deploy.rst b/testbed/pallets__flask/docs/tutorial/deploy.rst new file mode 100644 index 0000000000000000000000000000000000000000..269402407a32ba97c40f1109ace6004c1cd3894c --- /dev/null +++ b/testbed/pallets__flask/docs/tutorial/deploy.rst @@ -0,0 +1,146 @@ +Deploy to Production +==================== + +This part of the tutorial assumes you have a server that you want to +deploy your application to. It gives an overview of how to create the +distribution file and install it, but won't go into specifics about +what server or software to use. You can set up a new environment on your +development computer to try out the instructions below, but probably +shouldn't use it for hosting a real public application. See +:doc:`/deploying/index` for a list of many different ways to host your +application. + + +Build and Install +----------------- + +When you want to deploy your application elsewhere, you build a +distribution file. The current standard for Python distribution is the +*wheel* format, with the ``.whl`` extension. 
Make sure the wheel library +is installed first: + +.. code-block:: none + + $ pip install wheel + +Running ``setup.py`` with Python gives you a command line tool to issue +build-related commands. The ``bdist_wheel`` command will build a wheel +distribution file. + +.. code-block:: none + + $ python setup.py bdist_wheel + +You can find the file in ``dist/flaskr-1.0.0-py3-none-any.whl``. The +file name is in the format of {project name}-{version}-{python tag} +-{abi tag}-{platform tag}. + +Copy this file to another machine, +:ref:`set up a new virtualenv `, then install the +file with ``pip``. + +.. code-block:: none + + $ pip install flaskr-1.0.0-py3-none-any.whl + +Pip will install your project along with its dependencies. + +Since this is a different machine, you need to run ``init-db`` again to +create the database in the instance folder. + +.. tabs:: + + .. group-tab:: Bash + + .. code-block:: text + + $ export FLASK_APP=flaskr + $ flask init-db + + .. group-tab:: Fish + + .. code-block:: text + + $ set -x FLASK_APP flaskr + $ flask init-db + + .. group-tab:: CMD + + .. code-block:: text + + > set FLASK_APP=flaskr + > flask init-db + + .. group-tab:: Powershell + + .. code-block:: text + + > $env:FLASK_APP = "flaskr" + > flask init-db + +When Flask detects that it's installed (not in editable mode), it uses +a different directory for the instance folder. You can find it at +``venv/var/flaskr-instance`` instead. + + +Configure the Secret Key +------------------------ + +In the beginning of the tutorial that you gave a default value for +:data:`SECRET_KEY`. This should be changed to some random bytes in +production. Otherwise, attackers could use the public ``'dev'`` key to +modify the session cookie, or anything else that uses the secret key. + +You can use the following command to output a random secret key: + +.. 
code-block:: none + + $ python -c 'import secrets; print(secrets.token_hex())' + + '192b9bdd22ab9ed4d12e236c78afcb9a393ec15f71bbf5dc987d54727823bcbf' + +Create the ``config.py`` file in the instance folder, which the factory +will read from if it exists. Copy the generated value into it. + +.. code-block:: python + :caption: ``venv/var/flaskr-instance/config.py`` + + SECRET_KEY = '192b9bdd22ab9ed4d12e236c78afcb9a393ec15f71bbf5dc987d54727823bcbf' + +You can also set any other necessary configuration here, although +``SECRET_KEY`` is the only one needed for Flaskr. + + +Run with a Production Server +---------------------------- + +When running publicly rather than in development, you should not use the +built-in development server (``flask run``). The development server is +provided by Werkzeug for convenience, but is not designed to be +particularly efficient, stable, or secure. + +Instead, use a production WSGI server. For example, to use `Waitress`_, +first install it in the virtual environment: + +.. code-block:: none + + $ pip install waitress + +You need to tell Waitress about your application, but it doesn't use +``FLASK_APP`` like ``flask run`` does. You need to tell it to import and +call the application factory to get an application object. + +.. code-block:: none + + $ waitress-serve --call 'flaskr:create_app' + + Serving on http://0.0.0.0:8080 + +See :doc:`/deploying/index` for a list of many different ways to host +your application. Waitress is just an example, chosen for the tutorial +because it supports both Windows and Linux. There are many more WSGI +servers and deployment options that you may choose for your project. + +.. _Waitress: https://docs.pylonsproject.org/projects/waitress/en/stable/ + +Continue to :doc:`next`. 
diff --git a/testbed/pallets__flask/docs/tutorial/factory.rst b/testbed/pallets__flask/docs/tutorial/factory.rst new file mode 100644 index 0000000000000000000000000000000000000000..730818743b5efa0f481a279101976fb1bfa51812 --- /dev/null +++ b/testbed/pallets__flask/docs/tutorial/factory.rst @@ -0,0 +1,193 @@ +.. currentmodule:: flask + +Application Setup +================= + +A Flask application is an instance of the :class:`Flask` class. +Everything about the application, such as configuration and URLs, will +be registered with this class. + +The most straightforward way to create a Flask application is to create +a global :class:`Flask` instance directly at the top of your code, like +how the "Hello, World!" example did on the previous page. While this is +simple and useful in some cases, it can cause some tricky issues as the +project grows. + +Instead of creating a :class:`Flask` instance globally, you will create +it inside a function. This function is known as the *application +factory*. Any configuration, registration, and other setup the +application needs will happen inside the function, then the application +will be returned. + + +The Application Factory +----------------------- + +It's time to start coding! Create the ``flaskr`` directory and add the +``__init__.py`` file. The ``__init__.py`` serves double duty: it will +contain the application factory, and it tells Python that the ``flaskr`` +directory should be treated as a package. + +.. code-block:: none + + $ mkdir flaskr + +.. 
code-block:: python + :caption: ``flaskr/__init__.py`` + + import os + + from flask import Flask + + + def create_app(test_config=None): + # create and configure the app + app = Flask(__name__, instance_relative_config=True) + app.config.from_mapping( + SECRET_KEY='dev', + DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'), + ) + + if test_config is None: + # load the instance config, if it exists, when not testing + app.config.from_pyfile('config.py', silent=True) + else: + # load the test config if passed in + app.config.from_mapping(test_config) + + # ensure the instance folder exists + try: + os.makedirs(app.instance_path) + except OSError: + pass + + # a simple page that says hello + @app.route('/hello') + def hello(): + return 'Hello, World!' + + return app + +``create_app`` is the application factory function. You'll add to it +later in the tutorial, but it already does a lot. + +#. ``app = Flask(__name__, instance_relative_config=True)`` creates the + :class:`Flask` instance. + + * ``__name__`` is the name of the current Python module. The app + needs to know where it's located to set up some paths, and + ``__name__`` is a convenient way to tell it that. + + * ``instance_relative_config=True`` tells the app that + configuration files are relative to the + :ref:`instance folder `. The instance folder + is located outside the ``flaskr`` package and can hold local + data that shouldn't be committed to version control, such as + configuration secrets and the database file. + +#. :meth:`app.config.from_mapping() ` sets + some default configuration that the app will use: + + * :data:`SECRET_KEY` is used by Flask and extensions to keep data + safe. It's set to ``'dev'`` to provide a convenient value + during development, but it should be overridden with a random + value when deploying. + + * ``DATABASE`` is the path where the SQLite database file will be + saved. 
It's under + :attr:`app.instance_path `, which is the + path that Flask has chosen for the instance folder. You'll learn + more about the database in the next section. + +#. :meth:`app.config.from_pyfile() ` overrides + the default configuration with values taken from the ``config.py`` + file in the instance folder if it exists. For example, when + deploying, this can be used to set a real ``SECRET_KEY``. + + * ``test_config`` can also be passed to the factory, and will be + used instead of the instance configuration. This is so the tests + you'll write later in the tutorial can be configured + independently of any development values you have configured. + +#. :func:`os.makedirs` ensures that + :attr:`app.instance_path ` exists. Flask + doesn't create the instance folder automatically, but it needs to be + created because your project will create the SQLite database file + there. + +#. :meth:`@app.route() ` creates a simple route so you can + see the application working before getting into the rest of the + tutorial. It creates a connection between the URL ``/hello`` and a + function that returns a response, the string ``'Hello, World!'`` in + this case. + + +Run The Application +------------------- + +Now you can run your application using the ``flask`` command. From the +terminal, tell Flask where to find your application, then run it in +development mode. Remember, you should still be in the top-level +``flask-tutorial`` directory, not the ``flaskr`` package. + +Development mode shows an interactive debugger whenever a page raises an +exception, and restarts the server whenever you make changes to the +code. You can leave it running and just reload the browser page as you +follow the tutorial. + +.. tabs:: + + .. group-tab:: Bash + + .. code-block:: text + + $ export FLASK_APP=flaskr + $ export FLASK_ENV=development + $ flask run + + .. group-tab:: Fish + + .. code-block:: text + + $ set -x FLASK_APP flaskr + $ set -x FLASK_ENV development + $ flask run + + .. 
group-tab:: CMD + + .. code-block:: text + + > set FLASK_APP=flaskr + > set FLASK_ENV=development + > flask run + + .. group-tab:: Powershell + + .. code-block:: text + + > $env:FLASK_APP = "flaskr" + > $env:FLASK_ENV = "development" + > flask run + +You'll see output similar to this: + +.. code-block:: none + + * Serving Flask app "flaskr" + * Environment: development + * Debug mode: on + * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit) + * Restarting with stat + * Debugger is active! + * Debugger PIN: 855-212-761 + +Visit http://127.0.0.1:5000/hello in a browser and you should see the +"Hello, World!" message. Congratulations, you're now running your Flask +web application! + +If another program is already using port 5000, you'll see +``OSError: [Errno 98]`` or ``OSError: [WinError 10013]`` when the +server tries to start. See :ref:`address-already-in-use` for how to +handle that. + +Continue to :doc:`database`. diff --git a/testbed/pallets__flask/docs/tutorial/index.rst b/testbed/pallets__flask/docs/tutorial/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..d5dc5b3c3956ed4c1464d41fbb584c52b20434c9 --- /dev/null +++ b/testbed/pallets__flask/docs/tutorial/index.rst @@ -0,0 +1,64 @@ +Tutorial +======== + +.. toctree:: + :caption: Contents: + :maxdepth: 1 + + layout + factory + database + views + templates + static + blog + install + tests + deploy + next + +This tutorial will walk you through creating a basic blog application +called Flaskr. Users will be able to register, log in, create posts, +and edit or delete their own posts. You will be able to package and +install the application on other computers. + +.. image:: flaskr_index.png + :align: center + :class: screenshot + :alt: screenshot of index page + +It's assumed that you're already familiar with Python. The `official +tutorial`_ in the Python docs is a great way to learn or review first. + +.. 
_official tutorial: https://docs.python.org/3/tutorial/ + +While it's designed to give a good starting point, the tutorial doesn't +cover all of Flask's features. Check out the :doc:`/quickstart` for an +overview of what Flask can do, then dive into the docs to find out more. +The tutorial only uses what's provided by Flask and Python. In another +project, you might decide to use :doc:`/extensions` or other libraries +to make some tasks simpler. + +.. image:: flaskr_login.png + :align: center + :class: screenshot + :alt: screenshot of login page + +Flask is flexible. It doesn't require you to use any particular project +or code layout. However, when first starting, it's helpful to use a more +structured approach. This means that the tutorial will require a bit of +boilerplate up front, but it's done to avoid many common pitfalls that +new developers encounter, and it creates a project that's easy to expand +on. Once you become more comfortable with Flask, you can step out of +this structure and take full advantage of Flask's flexibility. + +.. image:: flaskr_edit.png + :align: center + :class: screenshot + :alt: screenshot of edit page + +:gh:`The tutorial project is available as an example in the Flask +repository `, if you want to compare your project +with the final product as you follow the tutorial. + +Continue to :doc:`layout`. diff --git a/testbed/pallets__flask/docs/tutorial/install.rst b/testbed/pallets__flask/docs/tutorial/install.rst new file mode 100644 index 0000000000000000000000000000000000000000..3d7d7c602b3986a5e95f0d1d435a2e4b51ac29bb --- /dev/null +++ b/testbed/pallets__flask/docs/tutorial/install.rst @@ -0,0 +1,114 @@ +Make the Project Installable +============================ + +Making your project installable means that you can build a +*distribution* file and install that in another environment, just like +you installed Flask in your project's environment. 
This makes deploying +your project the same as installing any other library, so you're using +all the standard Python tools to manage everything. + +Installing also comes with other benefits that might not be obvious from +the tutorial or as a new Python user, including: + +* Currently, Python and Flask understand how to use the ``flaskr`` + package only because you're running from your project's directory. + Installing means you can import it no matter where you run from. + +* You can manage your project's dependencies just like other packages + do, so ``pip install yourproject.whl`` installs them. + +* Test tools can isolate your test environment from your development + environment. + +.. note:: + This is being introduced late in the tutorial, but in your future + projects you should always start with this. + + +Describe the Project +-------------------- + +The ``setup.py`` file describes your project and the files that belong +to it. + +.. code-block:: python + :caption: ``setup.py`` + + from setuptools import find_packages, setup + + setup( + name='flaskr', + version='1.0.0', + packages=find_packages(), + include_package_data=True, + zip_safe=False, + install_requires=[ + 'flask', + ], + ) + + +``packages`` tells Python what package directories (and the Python files +they contain) to include. ``find_packages()`` finds these directories +automatically so you don't have to type them out. To include other +files, such as the static and templates directories, +``include_package_data`` is set. Python needs another file named +``MANIFEST.in`` to tell what this other data is. + +.. code-block:: none + :caption: ``MANIFEST.in`` + + include flaskr/schema.sql + graft flaskr/static + graft flaskr/templates + global-exclude *.pyc + +This tells Python to copy everything in the ``static`` and ``templates`` +directories, and the ``schema.sql`` file, but to exclude all bytecode +files. 
+ +See the `official packaging guide`_ for another explanation of the files +and options used. + +.. _official packaging guide: https://packaging.python.org/tutorials/packaging-projects/ + + +Install the Project +------------------- + +Use ``pip`` to install your project in the virtual environment. + +.. code-block:: none + + $ pip install -e . + +This tells pip to find ``setup.py`` in the current directory and install +it in *editable* or *development* mode. Editable mode means that as you +make changes to your local code, you'll only need to re-install if you +change the metadata about the project, such as its dependencies. + +You can observe that the project is now installed with ``pip list``. + +.. code-block:: none + + $ pip list + + Package Version Location + -------------- --------- ---------------------------------- + click 6.7 + Flask 1.0 + flaskr 1.0.0 /home/user/Projects/flask-tutorial + itsdangerous 0.24 + Jinja2 2.10 + MarkupSafe 1.0 + pip 9.0.3 + setuptools 39.0.1 + Werkzeug 0.14.1 + wheel 0.30.0 + +Nothing changes from how you've been running your project so far. +``FLASK_APP`` is still set to ``flaskr`` and ``flask run`` still runs +the application, but you can call it from anywhere, not just the +``flask-tutorial`` directory. + +Continue to :doc:`tests`. diff --git a/testbed/pallets__flask/docs/tutorial/layout.rst b/testbed/pallets__flask/docs/tutorial/layout.rst new file mode 100644 index 0000000000000000000000000000000000000000..b6a09f0377e75c63d8ec52271010a57dc3158746 --- /dev/null +++ b/testbed/pallets__flask/docs/tutorial/layout.rst @@ -0,0 +1,110 @@ +Project Layout +============== + +Create a project directory and enter it: + +.. code-block:: none + + $ mkdir flask-tutorial + $ cd flask-tutorial + +Then follow the :doc:`installation instructions ` to set +up a Python virtual environment and install Flask for your project. + +The tutorial will assume you're working from the ``flask-tutorial`` +directory from now on. 
The file names at the top of each code block are +relative to this directory. + +---- + +A Flask application can be as simple as a single file. + +.. code-block:: python + :caption: ``hello.py`` + + from flask import Flask + + app = Flask(__name__) + + + @app.route('/') + def hello(): + return 'Hello, World!' + +However, as a project gets bigger, it becomes overwhelming to keep all +the code in one file. Python projects use *packages* to organize code +into multiple modules that can be imported where needed, and the +tutorial will do this as well. + +The project directory will contain: + +* ``flaskr/``, a Python package containing your application code and + files. +* ``tests/``, a directory containing test modules. +* ``venv/``, a Python virtual environment where Flask and other + dependencies are installed. +* Installation files telling Python how to install your project. +* Version control config, such as `git`_. You should make a habit of + using some type of version control for all your projects, no matter + the size. +* Any other project files you might add in the future. + +.. _git: https://git-scm.com/ + +By the end, your project layout will look like this: + +.. code-block:: none + + /home/user/Projects/flask-tutorial + ├── flaskr/ + │ ├── __init__.py + │ ├── db.py + │ ├── schema.sql + │ ├── auth.py + │ ├── blog.py + │ ├── templates/ + │ │ ├── base.html + │ │ ├── auth/ + │ │ │ ├── login.html + │ │ │ └── register.html + │ │ └── blog/ + │ │ ├── create.html + │ │ ├── index.html + │ │ └── update.html + │ └── static/ + │ └── style.css + ├── tests/ + │ ├── conftest.py + │ ├── data.sql + │ ├── test_factory.py + │ ├── test_db.py + │ ├── test_auth.py + │ └── test_blog.py + ├── venv/ + ├── setup.py + └── MANIFEST.in + +If you're using version control, the following files that are generated +while running your project should be ignored. There may be other files +based on the editor you use. In general, ignore files that you didn't +write. For example, with git: + +.. 
code-block:: none + :caption: ``.gitignore`` + + venv/ + + *.pyc + __pycache__/ + + instance/ + + .pytest_cache/ + .coverage + htmlcov/ + + dist/ + build/ + *.egg-info/ + +Continue to :doc:`factory`. diff --git a/testbed/pallets__flask/docs/tutorial/next.rst b/testbed/pallets__flask/docs/tutorial/next.rst new file mode 100644 index 0000000000000000000000000000000000000000..d41e8ef21f87cb3d1a561de7d4277e891f858ff4 --- /dev/null +++ b/testbed/pallets__flask/docs/tutorial/next.rst @@ -0,0 +1,38 @@ +Keep Developing! +================ + +You've learned about quite a few Flask and Python concepts throughout +the tutorial. Go back and review the tutorial and compare your code with +the steps you took to get there. Compare your project to the +:gh:`example project `, which might look a bit +different due to the step-by-step nature of the tutorial. + +There's a lot more to Flask than what you've seen so far. Even so, +you're now equipped to start developing your own web applications. Check +out the :doc:`/quickstart` for an overview of what Flask can do, then +dive into the docs to keep learning. Flask uses `Jinja`_, `Click`_, +`Werkzeug`_, and `ItsDangerous`_ behind the scenes, and they all have +their own documentation too. You'll also be interested in +:doc:`/extensions` which make tasks like working with the database or +validating form data easier and more powerful. + +If you want to keep developing your Flaskr project, here are some ideas +for what to try next: + +* A detail view to show a single post. Click a post's title to go to + its page. +* Like / unlike a post. +* Comments. +* Tags. Clicking a tag shows all the posts with that tag. +* A search box that filters the index page by name. +* Paged display. Only show 5 posts per page. +* Upload an image to go along with a post. +* Format posts using Markdown. +* An RSS feed of new posts. + +Have fun and make awesome applications! + +.. _Jinja: https://palletsprojects.com/p/jinja/ +.. 
_Click: https://palletsprojects.com/p/click/ +.. _Werkzeug: https://palletsprojects.com/p/werkzeug/ +.. _ItsDangerous: https://palletsprojects.com/p/itsdangerous/ diff --git a/testbed/pallets__flask/docs/tutorial/static.rst b/testbed/pallets__flask/docs/tutorial/static.rst new file mode 100644 index 0000000000000000000000000000000000000000..8e76c409b51014a30d1652e1369dd259d6e72be5 --- /dev/null +++ b/testbed/pallets__flask/docs/tutorial/static.rst @@ -0,0 +1,72 @@ +Static Files +============ + +The authentication views and templates work, but they look very plain +right now. Some `CSS`_ can be added to add style to the HTML layout you +constructed. The style won't change, so it's a *static* file rather than +a template. + +Flask automatically adds a ``static`` view that takes a path relative +to the ``flaskr/static`` directory and serves it. The ``base.html`` +template already has a link to the ``style.css`` file: + +.. code-block:: html+jinja + + {{ url_for('static', filename='style.css') }} + +Besides CSS, other types of static files might be files with JavaScript +functions, or a logo image. They are all placed under the +``flaskr/static`` directory and referenced with +``url_for('static', filename='...')``. + +This tutorial isn't focused on how to write CSS, so you can just copy +the following into the ``flaskr/static/style.css`` file: + +.. 
code-block:: css + :caption: ``flaskr/static/style.css`` + + html { font-family: sans-serif; background: #eee; padding: 1rem; } + body { max-width: 960px; margin: 0 auto; background: white; } + h1 { font-family: serif; color: #377ba8; margin: 1rem 0; } + a { color: #377ba8; } + hr { border: none; border-top: 1px solid lightgray; } + nav { background: lightgray; display: flex; align-items: center; padding: 0 0.5rem; } + nav h1 { flex: auto; margin: 0; } + nav h1 a { text-decoration: none; padding: 0.25rem 0.5rem; } + nav ul { display: flex; list-style: none; margin: 0; padding: 0; } + nav ul li a, nav ul li span, header .action { display: block; padding: 0.5rem; } + .content { padding: 0 1rem 1rem; } + .content > header { border-bottom: 1px solid lightgray; display: flex; align-items: flex-end; } + .content > header h1 { flex: auto; margin: 1rem 0 0.25rem 0; } + .flash { margin: 1em 0; padding: 1em; background: #cae6f6; border: 1px solid #377ba8; } + .post > header { display: flex; align-items: flex-end; font-size: 0.85em; } + .post > header > div:first-of-type { flex: auto; } + .post > header h1 { font-size: 1.5em; margin-bottom: 0; } + .post .about { color: slategray; font-style: italic; } + .post .body { white-space: pre-line; } + .content:last-child { margin-bottom: 0; } + .content form { margin: 1em 0; display: flex; flex-direction: column; } + .content label { font-weight: bold; margin-bottom: 0.5em; } + .content input, .content textarea { margin-bottom: 1em; } + .content textarea { min-height: 12em; resize: vertical; } + input.danger { color: #cc2f2e; } + input[type=submit] { align-self: start; min-width: 10em; } + +You can find a less compact version of ``style.css`` in the +:gh:`example code `. + +Go to http://127.0.0.1:5000/auth/login and the page should look like the +screenshot below. + +.. image:: flaskr_login.png + :align: center + :class: screenshot + :alt: screenshot of login page + +You can read more about CSS from `Mozilla's documentation `_. 
If +you change a static file, refresh the browser page. If the change +doesn't show up, try clearing your browser's cache. + +.. _CSS: https://developer.mozilla.org/docs/Web/CSS + +Continue to :doc:`blog`. diff --git a/testbed/pallets__flask/docs/tutorial/templates.rst b/testbed/pallets__flask/docs/tutorial/templates.rst new file mode 100644 index 0000000000000000000000000000000000000000..1a5535cc4d39833d8053fef0c958a01ac8c51a27 --- /dev/null +++ b/testbed/pallets__flask/docs/tutorial/templates.rst @@ -0,0 +1,187 @@ +.. currentmodule:: flask + +Templates +========= + +You've written the authentication views for your application, but if +you're running the server and try to go to any of the URLs, you'll see a +``TemplateNotFound`` error. That's because the views are calling +:func:`render_template`, but you haven't written the templates yet. +The template files will be stored in the ``templates`` directory inside +the ``flaskr`` package. + +Templates are files that contain static data as well as placeholders +for dynamic data. A template is rendered with specific data to produce a +final document. Flask uses the `Jinja`_ template library to render +templates. + +In your application, you will use templates to render `HTML`_ which +will display in the user's browser. In Flask, Jinja is configured to +*autoescape* any data that is rendered in HTML templates. This means +that it's safe to render user input; any characters they've entered that +could mess with the HTML, such as ``<`` and ``>`` will be *escaped* with +*safe* values that look the same in the browser but don't cause unwanted +effects. + +Jinja looks and behaves mostly like Python. Special delimiters are used +to distinguish Jinja syntax from the static data in the template. +Anything between ``{{`` and ``}}`` is an expression that will be output +to the final document. ``{%`` and ``%}`` denotes a control flow +statement like ``if`` and ``for``. 
Unlike Python, blocks are denoted +by start and end tags rather than indentation since static text within +a block could change indentation. + +.. _Jinja: https://jinja.palletsprojects.com/templates/ +.. _HTML: https://developer.mozilla.org/docs/Web/HTML + + +The Base Layout +--------------- + +Each page in the application will have the same basic layout around a +different body. Instead of writing the entire HTML structure in each +template, each template will *extend* a base template and override +specific sections. + +.. code-block:: html+jinja + :caption: ``flaskr/templates/base.html`` + + + {% block title %}{% endblock %} - Flaskr + + +
+
+ {% block header %}{% endblock %} +
+ {% for message in get_flashed_messages() %} +
<div class="flash">{{ message }}</div>
+ {% endfor %} + {% block content %}{% endblock %} +
+ +:data:`g` is automatically available in templates. Based on if +``g.user`` is set (from ``load_logged_in_user``), either the username +and a log out link are displayed, or links to register and log in +are displayed. :func:`url_for` is also automatically available, and is +used to generate URLs to views instead of writing them out manually. + +After the page title, and before the content, the template loops over +each message returned by :func:`get_flashed_messages`. You used +:func:`flash` in the views to show error messages, and this is the code +that will display them. + +There are three blocks defined here that will be overridden in the other +templates: + +#. ``{% block title %}`` will change the title displayed in the + browser's tab and window title. + +#. ``{% block header %}`` is similar to ``title`` but will change the + title displayed on the page. + +#. ``{% block content %}`` is where the content of each page goes, such + as the login form or a blog post. + +The base template is directly in the ``templates`` directory. To keep +the others organized, the templates for a blueprint will be placed in a +directory with the same name as the blueprint. + + +Register +-------- + +.. code-block:: html+jinja + :caption: ``flaskr/templates/auth/register.html`` + + {% extends 'base.html' %} + + {% block header %} +

<h1>{% block title %}Register{% endblock %}</h1>

+ {% endblock %} + + {% block content %} +
+    <form method="post">
+      <label for="username">Username</label>
+      <input name="username" id="username" required>
+      <label for="password">Password</label>
+      <input type="password" name="password" id="password" required>
+      <input type="submit" value="Register">
+    </form>
+ {% endblock %} + +``{% extends 'base.html' %}`` tells Jinja that this template should +replace the blocks from the base template. All the rendered content must +appear inside ``{% block %}`` tags that override blocks from the base +template. + +A useful pattern used here is to place ``{% block title %}`` inside +``{% block header %}``. This will set the title block and then output +the value of it into the header block, so that both the window and page +share the same title without writing it twice. + +The ``input`` tags are using the ``required`` attribute here. This tells +the browser not to submit the form until those fields are filled in. If +the user is using an older browser that doesn't support that attribute, +or if they are using something besides a browser to make requests, you +still want to validate the data in the Flask view. It's important to +always fully validate the data on the server, even if the client does +some validation as well. + + +Log In +------ + +This is identical to the register template except for the title and +submit button. + +.. code-block:: html+jinja + :caption: ``flaskr/templates/auth/login.html`` + + {% extends 'base.html' %} + + {% block header %} +

<h1>{% block title %}Log In{% endblock %}</h1>

+ {% endblock %} + + {% block content %} +
+    <form method="post">
+      <label for="username">Username</label>
+      <input name="username" id="username" required>
+      <label for="password">Password</label>
+      <input type="password" name="password" id="password" required>
+      <input type="submit" value="Log In">
+    </form>
+ {% endblock %} + + +Register A User +--------------- + +Now that the authentication templates are written, you can register a +user. Make sure the server is still running (``flask run`` if it's not), +then go to http://127.0.0.1:5000/auth/register. + +Try clicking the "Register" button without filling out the form and see +that the browser shows an error message. Try removing the ``required`` +attributes from the ``register.html`` template and click "Register" +again. Instead of the browser showing an error, the page will reload and +the error from :func:`flash` in the view will be shown. + +Fill out a username and password and you'll be redirected to the login +page. Try entering an incorrect username, or the correct username and +incorrect password. If you log in you'll get an error because there's +no ``index`` view to redirect to yet. + +Continue to :doc:`static`. diff --git a/testbed/pallets__flask/docs/tutorial/tests.rst b/testbed/pallets__flask/docs/tutorial/tests.rst new file mode 100644 index 0000000000000000000000000000000000000000..cb60790cf59160a678e1f3cc06d7d99d8cf8fa54 --- /dev/null +++ b/testbed/pallets__flask/docs/tutorial/tests.rst @@ -0,0 +1,561 @@ +.. currentmodule:: flask + +Test Coverage +============= + +Writing unit tests for your application lets you check that the code +you wrote works the way you expect. Flask provides a test client that +simulates requests to the application and returns the response data. + +You should test as much of your code as possible. Code in functions only +runs when the function is called, and code in branches, such as ``if`` +blocks, only runs when the condition is met. You want to make sure that +each function is tested with data that covers each branch. + +The closer you get to 100% coverage, the more comfortable you can be +that making a change won't unexpectedly change other behavior. However, +100% coverage doesn't guarantee that your application doesn't have bugs. 
+In particular, it doesn't test how the user interacts with the +application in the browser. Despite this, test coverage is an important +tool to use during development. + +.. note:: + This is being introduced late in the tutorial, but in your future + projects you should test as you develop. + +You'll use `pytest`_ and `coverage`_ to test and measure your code. +Install them both: + +.. code-block:: none + + $ pip install pytest coverage + +.. _pytest: https://pytest.readthedocs.io/ +.. _coverage: https://coverage.readthedocs.io/ + + +Setup and Fixtures +------------------ + +The test code is located in the ``tests`` directory. This directory is +*next to* the ``flaskr`` package, not inside it. The +``tests/conftest.py`` file contains setup functions called *fixtures* +that each test will use. Tests are in Python modules that start with +``test_``, and each test function in those modules also starts with +``test_``. + +Each test will create a new temporary database file and populate some +data that will be used in the tests. Write a SQL file to insert that +data. + +.. code-block:: sql + :caption: ``tests/data.sql`` + + INSERT INTO user (username, password) + VALUES + ('test', 'pbkdf2:sha256:50000$TCI4GzcX$0de171a4f4dac32e3364c7ddc7c14f3e2fa61f2d17574483f7ffbb431b4acb2f'), + ('other', 'pbkdf2:sha256:50000$kJPKsz6N$d2d4784f1b030a9761f5ccaeeaca413f27f2ecb76d6168407af962ddce849f79'); + + INSERT INTO post (title, body, author_id, created) + VALUES + ('test title', 'test' || x'0a' || 'body', 1, '2018-01-01 00:00:00'); + +The ``app`` fixture will call the factory and pass ``test_config`` to +configure the application and database for testing instead of using your +local development configuration. + +.. 
code-block:: python + :caption: ``tests/conftest.py`` + + import os + import tempfile + + import pytest + from flaskr import create_app + from flaskr.db import get_db, init_db + + with open(os.path.join(os.path.dirname(__file__), 'data.sql'), 'rb') as f: + _data_sql = f.read().decode('utf8') + + + @pytest.fixture + def app(): + db_fd, db_path = tempfile.mkstemp() + + app = create_app({ + 'TESTING': True, + 'DATABASE': db_path, + }) + + with app.app_context(): + init_db() + get_db().executescript(_data_sql) + + yield app + + os.close(db_fd) + os.unlink(db_path) + + + @pytest.fixture + def client(app): + return app.test_client() + + + @pytest.fixture + def runner(app): + return app.test_cli_runner() + +:func:`tempfile.mkstemp` creates and opens a temporary file, returning +the file descriptor and the path to it. The ``DATABASE`` path is +overridden so it points to this temporary path instead of the instance +folder. After setting the path, the database tables are created and the +test data is inserted. After the test is over, the temporary file is +closed and removed. + +:data:`TESTING` tells Flask that the app is in test mode. Flask changes +some internal behavior so it's easier to test, and other extensions can +also use the flag to make testing them easier. + +The ``client`` fixture calls +:meth:`app.test_client() ` with the application +object created by the ``app`` fixture. Tests will use the client to make +requests to the application without running the server. + +The ``runner`` fixture is similar to ``client``. +:meth:`app.test_cli_runner() ` creates a runner +that can call the Click commands registered with the application. + +Pytest uses fixtures by matching their function names with the names +of arguments in the test functions. For example, the ``test_hello`` +function you'll write next takes a ``client`` argument. Pytest matches +that with the ``client`` fixture function, calls it, and passes the +returned value to the test function. 
+ + +Factory +------- + +There's not much to test about the factory itself. Most of the code will +be executed for each test already, so if something fails the other tests +will notice. + +The only behavior that can change is passing test config. If config is +not passed, there should be some default configuration, otherwise the +configuration should be overridden. + +.. code-block:: python + :caption: ``tests/test_factory.py`` + + from flaskr import create_app + + + def test_config(): + assert not create_app().testing + assert create_app({'TESTING': True}).testing + + + def test_hello(client): + response = client.get('/hello') + assert response.data == b'Hello, World!' + +You added the ``hello`` route as an example when writing the factory at +the beginning of the tutorial. It returns "Hello, World!", so the test +checks that the response data matches. + + +Database +-------- + +Within an application context, ``get_db`` should return the same +connection each time it's called. After the context, the connection +should be closed. + +.. code-block:: python + :caption: ``tests/test_db.py`` + + import sqlite3 + + import pytest + from flaskr.db import get_db + + + def test_get_close_db(app): + with app.app_context(): + db = get_db() + assert db is get_db() + + with pytest.raises(sqlite3.ProgrammingError) as e: + db.execute('SELECT 1') + + assert 'closed' in str(e.value) + +The ``init-db`` command should call the ``init_db`` function and output +a message. + +.. code-block:: python + :caption: ``tests/test_db.py`` + + def test_init_db_command(runner, monkeypatch): + class Recorder(object): + called = False + + def fake_init_db(): + Recorder.called = True + + monkeypatch.setattr('flaskr.db.init_db', fake_init_db) + result = runner.invoke(args=['init-db']) + assert 'Initialized' in result.output + assert Recorder.called + +This test uses Pytest's ``monkeypatch`` fixture to replace the +``init_db`` function with one that records that it's been called. 
The +``runner`` fixture you wrote above is used to call the ``init-db`` +command by name. + + +Authentication +-------------- + +For most of the views, a user needs to be logged in. The easiest way to +do this in tests is to make a ``POST`` request to the ``login`` view +with the client. Rather than writing that out every time, you can write +a class with methods to do that, and use a fixture to pass it the client +for each test. + +.. code-block:: python + :caption: ``tests/conftest.py`` + + class AuthActions(object): + def __init__(self, client): + self._client = client + + def login(self, username='test', password='test'): + return self._client.post( + '/auth/login', + data={'username': username, 'password': password} + ) + + def logout(self): + return self._client.get('/auth/logout') + + + @pytest.fixture + def auth(client): + return AuthActions(client) + +With the ``auth`` fixture, you can call ``auth.login()`` in a test to +log in as the ``test`` user, which was inserted as part of the test +data in the ``app`` fixture. + +The ``register`` view should render successfully on ``GET``. On ``POST`` +with valid form data, it should redirect to the login URL and the user's +data should be in the database. Invalid data should display error +messages. + +.. 
code-block:: python + :caption: ``tests/test_auth.py`` + + import pytest + from flask import g, session + from flaskr.db import get_db + + + def test_register(client, app): + assert client.get('/auth/register').status_code == 200 + response = client.post( + '/auth/register', data={'username': 'a', 'password': 'a'} + ) + assert response.headers["Location"] == "/auth/login" + + with app.app_context(): + assert get_db().execute( + "SELECT * FROM user WHERE username = 'a'", + ).fetchone() is not None + + + @pytest.mark.parametrize(('username', 'password', 'message'), ( + ('', '', b'Username is required.'), + ('a', '', b'Password is required.'), + ('test', 'test', b'already registered'), + )) + def test_register_validate_input(client, username, password, message): + response = client.post( + '/auth/register', + data={'username': username, 'password': password} + ) + assert message in response.data + +:meth:`client.get() ` makes a ``GET`` request +and returns the :class:`Response` object returned by Flask. Similarly, +:meth:`client.post() ` makes a ``POST`` +request, converting the ``data`` dict into form data. + +To test that the page renders successfully, a simple request is made and +checked for a ``200 OK`` :attr:`~Response.status_code`. If +rendering failed, Flask would return a ``500 Internal Server Error`` +code. + +:attr:`~Response.headers` will have a ``Location`` header with the login +URL when the register view redirects to the login view. + +:attr:`~Response.data` contains the body of the response as bytes. If +you expect a certain value to render on the page, check that it's in +``data``. Bytes must be compared to bytes. If you want to compare text, +use :meth:`get_data(as_text=True) ` +instead. + +``pytest.mark.parametrize`` tells Pytest to run the same test function +with different arguments. You use it here to test different invalid +input and error messages without writing the same code three times. 
+ +The tests for the ``login`` view are very similar to those for +``register``. Rather than testing the data in the database, +:data:`session` should have ``user_id`` set after logging in. + +.. code-block:: python + :caption: ``tests/test_auth.py`` + + def test_login(client, auth): + assert client.get('/auth/login').status_code == 200 + response = auth.login() + assert response.headers["Location"] == "/" + + with client: + client.get('/') + assert session['user_id'] == 1 + assert g.user['username'] == 'test' + + + @pytest.mark.parametrize(('username', 'password', 'message'), ( + ('a', 'test', b'Incorrect username.'), + ('test', 'a', b'Incorrect password.'), + )) + def test_login_validate_input(auth, username, password, message): + response = auth.login(username, password) + assert message in response.data + +Using ``client`` in a ``with`` block allows accessing context variables +such as :data:`session` after the response is returned. Normally, +accessing ``session`` outside of a request would raise an error. + +Testing ``logout`` is the opposite of ``login``. :data:`session` should +not contain ``user_id`` after logging out. + +.. code-block:: python + :caption: ``tests/test_auth.py`` + + def test_logout(client, auth): + auth.login() + + with client: + auth.logout() + assert 'user_id' not in session + + +Blog +---- + +All the blog views use the ``auth`` fixture you wrote earlier. Call +``auth.login()`` and subsequent requests from the client will be logged +in as the ``test`` user. + +The ``index`` view should display information about the post that was +added with the test data. When logged in as the author, there should be +a link to edit the post. + +You can also test some more authentication behavior while testing the +``index`` view. When not logged in, each page shows links to log in or +register. When logged in, there's a link to log out. + +.. 
code-block:: python + :caption: ``tests/test_blog.py`` + + import pytest + from flaskr.db import get_db + + + def test_index(client, auth): + response = client.get('/') + assert b"Log In" in response.data + assert b"Register" in response.data + + auth.login() + response = client.get('/') + assert b'Log Out' in response.data + assert b'test title' in response.data + assert b'by test on 2018-01-01' in response.data + assert b'test\nbody' in response.data + assert b'href="/1/update"' in response.data + +A user must be logged in to access the ``create``, ``update``, and +``delete`` views. The logged in user must be the author of the post to +access ``update`` and ``delete``, otherwise a ``403 Forbidden`` status +is returned. If a ``post`` with the given ``id`` doesn't exist, +``update`` and ``delete`` should return ``404 Not Found``. + +.. code-block:: python + :caption: ``tests/test_blog.py`` + + @pytest.mark.parametrize('path', ( + '/create', + '/1/update', + '/1/delete', + )) + def test_login_required(client, path): + response = client.post(path) + assert response.headers["Location"] == "/auth/login" + + + def test_author_required(app, client, auth): + # change the post author to another user + with app.app_context(): + db = get_db() + db.execute('UPDATE post SET author_id = 2 WHERE id = 1') + db.commit() + + auth.login() + # current user can't modify other user's post + assert client.post('/1/update').status_code == 403 + assert client.post('/1/delete').status_code == 403 + # current user doesn't see edit link + assert b'href="/1/update"' not in client.get('/').data + + + @pytest.mark.parametrize('path', ( + '/2/update', + '/2/delete', + )) + def test_exists_required(client, auth, path): + auth.login() + assert client.post(path).status_code == 404 + +The ``create`` and ``update`` views should render and return a +``200 OK`` status for a ``GET`` request. 
When valid data is sent in a +``POST`` request, ``create`` should insert the new post data into the +database, and ``update`` should modify the existing data. Both pages +should show an error message on invalid data. + +.. code-block:: python + :caption: ``tests/test_blog.py`` + + def test_create(client, auth, app): + auth.login() + assert client.get('/create').status_code == 200 + client.post('/create', data={'title': 'created', 'body': ''}) + + with app.app_context(): + db = get_db() + count = db.execute('SELECT COUNT(id) FROM post').fetchone()[0] + assert count == 2 + + + def test_update(client, auth, app): + auth.login() + assert client.get('/1/update').status_code == 200 + client.post('/1/update', data={'title': 'updated', 'body': ''}) + + with app.app_context(): + db = get_db() + post = db.execute('SELECT * FROM post WHERE id = 1').fetchone() + assert post['title'] == 'updated' + + + @pytest.mark.parametrize('path', ( + '/create', + '/1/update', + )) + def test_create_update_validate(client, auth, path): + auth.login() + response = client.post(path, data={'title': '', 'body': ''}) + assert b'Title is required.' in response.data + +The ``delete`` view should redirect to the index URL and the post should +no longer exist in the database. + +.. code-block:: python + :caption: ``tests/test_blog.py`` + + def test_delete(client, auth, app): + auth.login() + response = client.post('/1/delete') + assert response.headers["Location"] == "/" + + with app.app_context(): + db = get_db() + post = db.execute('SELECT * FROM post WHERE id = 1').fetchone() + assert post is None + + +Running the Tests +----------------- + +Some extra configuration, which is not required but makes running +tests with coverage less verbose, can be added to the project's +``setup.cfg`` file. + +.. code-block:: none + :caption: ``setup.cfg`` + + [tool:pytest] + testpaths = tests + + [coverage:run] + branch = True + source = + flaskr + +To run the tests, use the ``pytest`` command. 
It will find and run all
+the test functions you've written.
+
+.. code-block:: none
+
+    $ pytest
+
+    ========================= test session starts ==========================
+    platform linux -- Python 3.6.4, pytest-3.5.0, py-1.5.3, pluggy-0.6.0
+    rootdir: /home/user/Projects/flask-tutorial, inifile: setup.cfg
+    collected 24 items
+
+    tests/test_auth.py ........ [ 33%]
+    tests/test_blog.py ............ [ 83%]
+    tests/test_db.py .. [ 91%]
+    tests/test_factory.py .. [100%]
+
+    ====================== 24 passed in 0.64 seconds =======================
+
+If any tests fail, pytest will show the error that was raised. You can
+run ``pytest -v`` to get a list of each test function rather than dots.
+
+To measure the code coverage of your tests, use the ``coverage`` command
+to run pytest instead of running it directly.
+
+.. code-block:: none
+
+    $ coverage run -m pytest
+
+You can either view a simple coverage report in the terminal:
+
+.. code-block:: none
+
+    $ coverage report
+
+    Name                 Stmts   Miss Branch BrPart  Cover
+    ------------------------------------------------------
+    flaskr/__init__.py      21      0      2      0   100%
+    flaskr/auth.py          54      0     22      0   100%
+    flaskr/blog.py          54      0     16      0   100%
+    flaskr/db.py            24      0      4      0   100%
+    ------------------------------------------------------
+    TOTAL                  153      0     44      0   100%
+
+An HTML report allows you to see which lines were covered in each file:
+
+.. code-block:: none
+
+    $ coverage html
+
+This generates files in the ``htmlcov`` directory. Open
+``htmlcov/index.html`` in your browser to see the report.
+
+Continue to :doc:`deploy`. diff --git a/testbed/pallets__flask/docs/tutorial/views.rst b/testbed/pallets__flask/docs/tutorial/views.rst new file mode 100644 index 0000000000000000000000000000000000000000..7092dbc28fa5eb2ff9795f68fcd598d1ac6e552e --- /dev/null +++ b/testbed/pallets__flask/docs/tutorial/views.rst @@ -0,0 +1,305 @@ +.. 
currentmodule:: flask + +Blueprints and Views +==================== + +A view function is the code you write to respond to requests to your +application. Flask uses patterns to match the incoming request URL to +the view that should handle it. The view returns data that Flask turns +into an outgoing response. Flask can also go the other direction and +generate a URL to a view based on its name and arguments. + + +Create a Blueprint +------------------ + +A :class:`Blueprint` is a way to organize a group of related views and +other code. Rather than registering views and other code directly with +an application, they are registered with a blueprint. Then the blueprint +is registered with the application when it is available in the factory +function. + +Flaskr will have two blueprints, one for authentication functions and +one for the blog posts functions. The code for each blueprint will go +in a separate module. Since the blog needs to know about authentication, +you'll write the authentication one first. + +.. code-block:: python + :caption: ``flaskr/auth.py`` + + import functools + + from flask import ( + Blueprint, flash, g, redirect, render_template, request, session, url_for + ) + from werkzeug.security import check_password_hash, generate_password_hash + + from flaskr.db import get_db + + bp = Blueprint('auth', __name__, url_prefix='/auth') + +This creates a :class:`Blueprint` named ``'auth'``. Like the application +object, the blueprint needs to know where it's defined, so ``__name__`` +is passed as the second argument. The ``url_prefix`` will be prepended +to all the URLs associated with the blueprint. + +Import and register the blueprint from the factory using +:meth:`app.register_blueprint() `. Place the +new code at the end of the factory function before returning the app. + +.. code-block:: python + :caption: ``flaskr/__init__.py`` + + def create_app(): + app = ... + # existing code omitted + + from . 
import auth + app.register_blueprint(auth.bp) + + return app + +The authentication blueprint will have views to register new users and +to log in and log out. + + +The First View: Register +------------------------ + +When the user visits the ``/auth/register`` URL, the ``register`` view +will return `HTML`_ with a form for them to fill out. When they submit +the form, it will validate their input and either show the form again +with an error message or create the new user and go to the login page. + +.. _HTML: https://developer.mozilla.org/docs/Web/HTML + +For now you will just write the view code. On the next page, you'll +write templates to generate the HTML form. + +.. code-block:: python + :caption: ``flaskr/auth.py`` + + @bp.route('/register', methods=('GET', 'POST')) + def register(): + if request.method == 'POST': + username = request.form['username'] + password = request.form['password'] + db = get_db() + error = None + + if not username: + error = 'Username is required.' + elif not password: + error = 'Password is required.' + + if error is None: + try: + db.execute( + "INSERT INTO user (username, password) VALUES (?, ?)", + (username, generate_password_hash(password)), + ) + db.commit() + except db.IntegrityError: + error = f"User {username} is already registered." + else: + return redirect(url_for("auth.login")) + + flash(error) + + return render_template('auth/register.html') + +Here's what the ``register`` view function is doing: + +#. :meth:`@bp.route ` associates the URL ``/register`` + with the ``register`` view function. When Flask receives a request + to ``/auth/register``, it will call the ``register`` view and use + the return value as the response. + +#. If the user submitted the form, + :attr:`request.method ` will be ``'POST'``. In this + case, start validating the input. + +#. :attr:`request.form ` is a special type of + :class:`dict` mapping submitted form keys and values. The user will + input their ``username`` and ``password``. + +#. 
Validate that ``username`` and ``password`` are not empty. + +#. If validation succeeds, insert the new user data into the database. + + - :meth:`db.execute ` takes a SQL + query with ``?`` placeholders for any user input, and a tuple of + values to replace the placeholders with. The database library + will take care of escaping the values so you are not vulnerable + to a *SQL injection attack*. + + - For security, passwords should never be stored in the database + directly. Instead, + :func:`~werkzeug.security.generate_password_hash` is used to + securely hash the password, and that hash is stored. Since this + query modifies data, + :meth:`db.commit() ` needs to be + called afterwards to save the changes. + + - An :exc:`sqlite3.IntegrityError` will occur if the username + already exists, which should be shown to the user as another + validation error. + +#. After storing the user, they are redirected to the login page. + :func:`url_for` generates the URL for the login view based on its + name. This is preferable to writing the URL directly as it allows + you to change the URL later without changing all code that links to + it. :func:`redirect` generates a redirect response to the generated + URL. + +#. If validation fails, the error is shown to the user. :func:`flash` + stores messages that can be retrieved when rendering the template. + +#. When the user initially navigates to ``auth/register``, or + there was a validation error, an HTML page with the registration + form should be shown. :func:`render_template` will render a template + containing the HTML, which you'll write in the next step of the + tutorial. + + +Login +----- + +This view follows the same pattern as the ``register`` view above. + +.. 
code-block:: python + :caption: ``flaskr/auth.py`` + + @bp.route('/login', methods=('GET', 'POST')) + def login(): + if request.method == 'POST': + username = request.form['username'] + password = request.form['password'] + db = get_db() + error = None + user = db.execute( + 'SELECT * FROM user WHERE username = ?', (username,) + ).fetchone() + + if user is None: + error = 'Incorrect username.' + elif not check_password_hash(user['password'], password): + error = 'Incorrect password.' + + if error is None: + session.clear() + session['user_id'] = user['id'] + return redirect(url_for('index')) + + flash(error) + + return render_template('auth/login.html') + +There are a few differences from the ``register`` view: + +#. The user is queried first and stored in a variable for later use. + + :meth:`~sqlite3.Cursor.fetchone` returns one row from the query. + If the query returned no results, it returns ``None``. Later, + :meth:`~sqlite3.Cursor.fetchall` will be used, which returns a list + of all results. + +#. :func:`~werkzeug.security.check_password_hash` hashes the submitted + password in the same way as the stored hash and securely compares + them. If they match, the password is valid. + +#. :data:`session` is a :class:`dict` that stores data across requests. + When validation succeeds, the user's ``id`` is stored in a new + session. The data is stored in a *cookie* that is sent to the + browser, and the browser then sends it back with subsequent requests. + Flask securely *signs* the data so that it can't be tampered with. + +Now that the user's ``id`` is stored in the :data:`session`, it will be +available on subsequent requests. At the beginning of each request, if +a user is logged in their information should be loaded and made +available to other views. + +.. 
code-block:: python + :caption: ``flaskr/auth.py`` + + @bp.before_app_request + def load_logged_in_user(): + user_id = session.get('user_id') + + if user_id is None: + g.user = None + else: + g.user = get_db().execute( + 'SELECT * FROM user WHERE id = ?', (user_id,) + ).fetchone() + +:meth:`bp.before_app_request() ` registers +a function that runs before the view function, no matter what URL is +requested. ``load_logged_in_user`` checks if a user id is stored in the +:data:`session` and gets that user's data from the database, storing it +on :data:`g.user `, which lasts for the length of the request. If +there is no user id, or if the id doesn't exist, ``g.user`` will be +``None``. + + +Logout +------ + +To log out, you need to remove the user id from the :data:`session`. +Then ``load_logged_in_user`` won't load a user on subsequent requests. + +.. code-block:: python + :caption: ``flaskr/auth.py`` + + @bp.route('/logout') + def logout(): + session.clear() + return redirect(url_for('index')) + + +Require Authentication in Other Views +------------------------------------- + +Creating, editing, and deleting blog posts will require a user to be +logged in. A *decorator* can be used to check this for each view it's +applied to. + +.. code-block:: python + :caption: ``flaskr/auth.py`` + + def login_required(view): + @functools.wraps(view) + def wrapped_view(**kwargs): + if g.user is None: + return redirect(url_for('auth.login')) + + return view(**kwargs) + + return wrapped_view + +This decorator returns a new view function that wraps the original view +it's applied to. The new function checks if a user is loaded and +redirects to the login page otherwise. If a user is loaded the original +view is called and continues normally. You'll use this decorator when +writing the blog views. + +Endpoints and URLs +------------------ + +The :func:`url_for` function generates the URL to a view based on a name +and arguments. 
The name associated with a view is also called the +*endpoint*, and by default it's the same as the name of the view +function. + +For example, the ``hello()`` view that was added to the app +factory earlier in the tutorial has the name ``'hello'`` and can be +linked to with ``url_for('hello')``. If it took an argument, which +you'll see later, it would be linked to using +``url_for('hello', who='World')``. + +When using a blueprint, the name of the blueprint is prepended to the +name of the function, so the endpoint for the ``login`` function you +wrote above is ``'auth.login'`` because you added it to the ``'auth'`` +blueprint. + +Continue to :doc:`templates`. diff --git a/testbed/pallets__flask/docs/views.rst b/testbed/pallets__flask/docs/views.rst new file mode 100644 index 0000000000000000000000000000000000000000..63d26c5c312fed9a89ab27e7220a604dcc6861f5 --- /dev/null +++ b/testbed/pallets__flask/docs/views.rst @@ -0,0 +1,235 @@ +Pluggable Views +=============== + +.. versionadded:: 0.7 + +Flask 0.7 introduces pluggable views inspired by the generic views from +Django which are based on classes instead of functions. The main +intention is that you can replace parts of the implementations and this +way have customizable pluggable views. + +Basic Principle +--------------- + +Consider you have a function that loads a list of objects from the +database and renders into a template:: + + @app.route('/users/') + def show_users(page): + users = User.query.all() + return render_template('users.html', users=users) + +This is simple and flexible, but if you want to provide this view in a +generic fashion that can be adapted to other models and templates as well +you might want more flexibility. This is where pluggable class-based +views come into place. 
As the first step to convert this into a class +based view you would do this:: + + + from flask.views import View + + class ShowUsers(View): + + def dispatch_request(self): + users = User.query.all() + return render_template('users.html', objects=users) + + app.add_url_rule('/users/', view_func=ShowUsers.as_view('show_users')) + +As you can see what you have to do is to create a subclass of +:class:`flask.views.View` and implement +:meth:`~flask.views.View.dispatch_request`. Then we have to convert that +class into an actual view function by using the +:meth:`~flask.views.View.as_view` class method. The string you pass to +that function is the name of the endpoint that view will then have. But +this by itself is not helpful, so let's refactor the code a bit:: + + + from flask.views import View + + class ListView(View): + + def get_template_name(self): + raise NotImplementedError() + + def render_template(self, context): + return render_template(self.get_template_name(), **context) + + def dispatch_request(self): + context = {'objects': self.get_objects()} + return self.render_template(context) + + class UserView(ListView): + + def get_template_name(self): + return 'users.html' + + def get_objects(self): + return User.query.all() + +This of course is not that helpful for such a small example, but it's good +enough to explain the basic principle. When you have a class-based view +the question comes up what ``self`` points to. The way this works is that +whenever the request is dispatched a new instance of the class is created +and the :meth:`~flask.views.View.dispatch_request` method is called with +the parameters from the URL rule. The class itself is instantiated with +the parameters passed to the :meth:`~flask.views.View.as_view` function. 
+For instance you can write a class like this:: + + class RenderTemplateView(View): + def __init__(self, template_name): + self.template_name = template_name + def dispatch_request(self): + return render_template(self.template_name) + +And then you can register it like this:: + + app.add_url_rule('/about', view_func=RenderTemplateView.as_view( + 'about_page', template_name='about.html')) + +Method Hints +------------ + +Pluggable views are attached to the application like a regular function by +either using :func:`~flask.Flask.route` or better +:meth:`~flask.Flask.add_url_rule`. That however also means that you would +have to provide the names of the HTTP methods the view supports when you +attach this. In order to move that information to the class you can +provide a :attr:`~flask.views.View.methods` attribute that has this +information:: + + class MyView(View): + methods = ['GET', 'POST'] + + def dispatch_request(self): + if request.method == 'POST': + ... + ... + + app.add_url_rule('/myview', view_func=MyView.as_view('myview')) + +Method Based Dispatching +------------------------ + +For RESTful APIs it's especially helpful to execute a different function +for each HTTP method. With the :class:`flask.views.MethodView` you can +easily do that. Each HTTP method maps to a method of the class with the +same name (just in lowercase):: + + from flask.views import MethodView + + class UserAPI(MethodView): + + def get(self): + users = User.query.all() + ... + + def post(self): + user = User.from_form_data(request.form) + ... + + app.add_url_rule('/users/', view_func=UserAPI.as_view('users')) + +That way you also don't have to provide the +:attr:`~flask.views.View.methods` attribute. It's automatically set based +on the methods defined in the class. + +Decorating Views +---------------- + +Since the view class itself is not the view function that is added to the +routing system it does not make much sense to decorate the class itself. 
+Instead you either have to decorate the return value of +:meth:`~flask.views.View.as_view` by hand:: + + def user_required(f): + """Checks whether user is logged in or raises error 401.""" + def decorator(*args, **kwargs): + if not g.user: + abort(401) + return f(*args, **kwargs) + return decorator + + view = user_required(UserAPI.as_view('users')) + app.add_url_rule('/users/', view_func=view) + +Starting with Flask 0.8 there is also an alternative way where you can +specify a list of decorators to apply in the class declaration:: + + class UserAPI(MethodView): + decorators = [user_required] + +Due to the implicit self from the caller's perspective you cannot use +regular view decorators on the individual methods of the view however, +keep this in mind. + +Method Views for APIs +--------------------- + +Web APIs are often working very closely with HTTP verbs so it makes a lot +of sense to implement such an API based on the +:class:`~flask.views.MethodView`. That said, you will notice that the API +will require different URL rules that go to the same method view most of +the time. For instance consider that you are exposing a user object on +the web: + +=============== =============== ====================================== +URL Method Description +--------------- --------------- -------------------------------------- +``/users/`` ``GET`` Gives a list of all users +``/users/`` ``POST`` Creates a new user +``/users/`` ``GET`` Shows a single user +``/users/`` ``PUT`` Updates a single user +``/users/`` ``DELETE`` Deletes a single user +=============== =============== ====================================== + +So how would you go about doing that with the +:class:`~flask.views.MethodView`? The trick is to take advantage of the +fact that you can provide multiple rules to the same view. 
+ +Let's assume for the moment the view would look like this:: + + class UserAPI(MethodView): + + def get(self, user_id): + if user_id is None: + # return a list of users + pass + else: + # expose a single user + pass + + def post(self): + # create a new user + pass + + def delete(self, user_id): + # delete a single user + pass + + def put(self, user_id): + # update a single user + pass + +So how do we hook this up with the routing system? By adding two rules +and explicitly mentioning the methods for each:: + + user_view = UserAPI.as_view('user_api') + app.add_url_rule('/users/', defaults={'user_id': None}, + view_func=user_view, methods=['GET',]) + app.add_url_rule('/users/', view_func=user_view, methods=['POST',]) + app.add_url_rule('/users/', view_func=user_view, + methods=['GET', 'PUT', 'DELETE']) + +If you have a lot of APIs that look similar you can refactor that +registration code:: + + def register_api(view, endpoint, url, pk='id', pk_type='int'): + view_func = view.as_view(endpoint) + app.add_url_rule(url, defaults={pk: None}, + view_func=view_func, methods=['GET',]) + app.add_url_rule(url, view_func=view_func, methods=['POST',]) + app.add_url_rule(f'{url}<{pk_type}:{pk}>', view_func=view_func, + methods=['GET', 'PUT', 'DELETE']) + + register_api(UserAPI, 'user_api', '/users/', pk='user_id') diff --git a/testbed/pallets__flask/examples/javascript/.gitignore b/testbed/pallets__flask/examples/javascript/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..85a35845adb3903826fa5d8a6409796dc576c327 --- /dev/null +++ b/testbed/pallets__flask/examples/javascript/.gitignore @@ -0,0 +1,14 @@ +venv/ +*.pyc +__pycache__/ +instance/ +.cache/ +.pytest_cache/ +.coverage +htmlcov/ +dist/ +build/ +*.egg-info/ +.idea/ +*.swp +*~ diff --git a/testbed/pallets__flask/examples/javascript/LICENSE.rst b/testbed/pallets__flask/examples/javascript/LICENSE.rst new file mode 100644 index 
0000000000000000000000000000000000000000..9d227a0cc43c3268d15722b763bd94ad298645a1 --- /dev/null +++ b/testbed/pallets__flask/examples/javascript/LICENSE.rst @@ -0,0 +1,28 @@ +Copyright 2010 Pallets + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/testbed/pallets__flask/examples/javascript/MANIFEST.in b/testbed/pallets__flask/examples/javascript/MANIFEST.in new file mode 100644 index 0000000000000000000000000000000000000000..c730a34e1a7d2428f5f2360a5a1f949be869ccd0 --- /dev/null +++ b/testbed/pallets__flask/examples/javascript/MANIFEST.in @@ -0,0 +1,4 @@ +include LICENSE.rst +graft js_example/templates +graft tests +global-exclude *.pyc diff --git a/testbed/pallets__flask/examples/javascript/README.rst b/testbed/pallets__flask/examples/javascript/README.rst new file mode 100644 index 0000000000000000000000000000000000000000..b25bdb4e41b33210affd54d28ee433c26be78ff6 --- /dev/null +++ b/testbed/pallets__flask/examples/javascript/README.rst @@ -0,0 +1,49 @@ +JavaScript Ajax Example +======================= + +Demonstrates how to post form data and process a JSON response using +JavaScript. This allows making requests without navigating away from the +page. Demonstrates using |XMLHttpRequest|_, |fetch|_, and +|jQuery.ajax|_. See the `Flask docs`_ about jQuery and Ajax. + +.. |XMLHttpRequest| replace:: ``XMLHttpRequest`` +.. _XMLHttpRequest: https://developer.mozilla.org/en-US/docs/Web/API/XMLHttpRequest + +.. |fetch| replace:: ``fetch`` +.. _fetch: https://developer.mozilla.org/en-US/docs/Web/API/WindowOrWorkerGlobalScope/fetch + +.. |jQuery.ajax| replace:: ``jQuery.ajax`` +.. _jQuery.ajax: https://api.jquery.com/jQuery.ajax/ + +.. _Flask docs: https://flask.palletsprojects.com/patterns/jquery/ + + +Install +------- + +:: + + $ python3 -m venv venv + $ . venv/bin/activate + $ pip install -e . + + +Run +--- + +:: + + $ export FLASK_APP=js_example + $ flask run + +Open http://127.0.0.1:5000 in a browser. 
+ + +Test +---- + +:: + + $ pip install -e '.[test]' + $ coverage run -m pytest + $ coverage report diff --git a/testbed/pallets__flask/examples/javascript/js_example/__init__.py b/testbed/pallets__flask/examples/javascript/js_example/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..068b2d98ed479b952eded5de98abd18398a9c651 --- /dev/null +++ b/testbed/pallets__flask/examples/javascript/js_example/__init__.py @@ -0,0 +1,5 @@ +from flask import Flask + +app = Flask(__name__) + +from js_example import views # noqa: F401 diff --git a/testbed/pallets__flask/examples/javascript/js_example/templates/base.html b/testbed/pallets__flask/examples/javascript/js_example/templates/base.html new file mode 100644 index 0000000000000000000000000000000000000000..50ce0e9c4b8bb9618d2efc28a33c5741adeb259a --- /dev/null +++ b/testbed/pallets__flask/examples/javascript/js_example/templates/base.html @@ -0,0 +1,33 @@ + +JavaScript Example + + + + +
+

{% block intro %}{% endblock %}

+
+
+ + + + + +
+= +{% block script %}{% endblock %} diff --git a/testbed/pallets__flask/examples/javascript/js_example/templates/fetch.html b/testbed/pallets__flask/examples/javascript/js_example/templates/fetch.html new file mode 100644 index 0000000000000000000000000000000000000000..780ecec5057d558bc5a0fee269ea09d73e22d769 --- /dev/null +++ b/testbed/pallets__flask/examples/javascript/js_example/templates/fetch.html @@ -0,0 +1,36 @@ +{% extends 'base.html' %} + +{% block intro %} + fetch + is the new plain JavaScript way to make requests. It's + supported in all modern browsers except IE, which requires a + polyfill. +{% endblock %} + +{% block script %} + + + +{% endblock %} diff --git a/testbed/pallets__flask/examples/javascript/js_example/templates/jquery.html b/testbed/pallets__flask/examples/javascript/js_example/templates/jquery.html new file mode 100644 index 0000000000000000000000000000000000000000..48f0c11ca2be74166b3467b6546aec96db039dcb --- /dev/null +++ b/testbed/pallets__flask/examples/javascript/js_example/templates/jquery.html @@ -0,0 +1,27 @@ +{% extends 'base.html' %} + +{% block intro %} + jQuery is a popular library that + adds cross browser APIs for common tasks. However, it requires loading + an extra library. +{% endblock %} + +{% block script %} + + +{% endblock %} diff --git a/testbed/pallets__flask/examples/javascript/js_example/templates/plain.html b/testbed/pallets__flask/examples/javascript/js_example/templates/plain.html new file mode 100644 index 0000000000000000000000000000000000000000..59a7dd95221a9080ba3dd882a11b8a2824662a33 --- /dev/null +++ b/testbed/pallets__flask/examples/javascript/js_example/templates/plain.html @@ -0,0 +1,28 @@ +{% extends 'base.html' %} + +{% block intro %} + XMLHttpRequest + is the plain JavaScript way to make requests. It's natively supported + by all browsers. 
+{% endblock %} + +{% block script %} + +{% endblock %} diff --git a/testbed/pallets__flask/examples/javascript/js_example/views.py b/testbed/pallets__flask/examples/javascript/js_example/views.py new file mode 100644 index 0000000000000000000000000000000000000000..6c601a9043f080a6cfcd1b9dfb6801052e8114a8 --- /dev/null +++ b/testbed/pallets__flask/examples/javascript/js_example/views.py @@ -0,0 +1,18 @@ +from flask import jsonify +from flask import render_template +from flask import request + +from js_example import app + + +@app.route("/", defaults={"js": "plain"}) +@app.route("/") +def index(js): + return render_template(f"{js}.html", js=js) + + +@app.route("/add", methods=["POST"]) +def add(): + a = request.form.get("a", 0, type=float) + b = request.form.get("b", 0, type=float) + return jsonify(result=a + b) diff --git a/testbed/pallets__flask/examples/javascript/setup.cfg b/testbed/pallets__flask/examples/javascript/setup.cfg new file mode 100644 index 0000000000000000000000000000000000000000..c4a3efad9f02bf78774c2aa7c17cbc37183e90f6 --- /dev/null +++ b/testbed/pallets__flask/examples/javascript/setup.cfg @@ -0,0 +1,29 @@ +[metadata] +name = js_example +version = 1.0.0 +url = https://flask.palletsprojects.com/patterns/jquery/ +license = BSD-3-Clause +maintainer = Pallets +maintainer_email = contact@palletsprojects.com +description = Demonstrates making AJAX requests to Flask. 
+long_description = file: README.rst +long_description_content_type = text/x-rst + +[options] +packages = find: +include_package_data = true +install_requires = + Flask + +[options.extras_require] +test = + pytest + blinker + +[tool:pytest] +testpaths = tests + +[coverage:run] +branch = True +source = + js_example diff --git a/testbed/pallets__flask/examples/javascript/setup.py b/testbed/pallets__flask/examples/javascript/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..606849326a4002007fd42060b51e69a19c18675c --- /dev/null +++ b/testbed/pallets__flask/examples/javascript/setup.py @@ -0,0 +1,3 @@ +from setuptools import setup + +setup() diff --git a/testbed/pallets__flask/examples/javascript/tests/conftest.py b/testbed/pallets__flask/examples/javascript/tests/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..e0cabbfd1d5773f6d1c8c32177cbbde9b19d5d2a --- /dev/null +++ b/testbed/pallets__flask/examples/javascript/tests/conftest.py @@ -0,0 +1,15 @@ +import pytest + +from js_example import app + + +@pytest.fixture(name="app") +def fixture_app(): + app.testing = True + yield app + app.testing = False + + +@pytest.fixture +def client(app): + return app.test_client() diff --git a/testbed/pallets__flask/examples/javascript/tests/test_js_example.py b/testbed/pallets__flask/examples/javascript/tests/test_js_example.py new file mode 100644 index 0000000000000000000000000000000000000000..aa0215f133ff620cb1ad063aa394b259183b85a3 --- /dev/null +++ b/testbed/pallets__flask/examples/javascript/tests/test_js_example.py @@ -0,0 +1,27 @@ +import pytest +from flask import template_rendered + + +@pytest.mark.parametrize( + ("path", "template_name"), + ( + ("/", "plain.html"), + ("/plain", "plain.html"), + ("/fetch", "fetch.html"), + ("/jquery", "jquery.html"), + ), +) +def test_index(app, client, path, template_name): + def check(sender, template, context): + assert template.name == template_name + + with 
@pytest.mark.parametrize(
    ("a", "b", "result"),
    [
        (2, 3, 5),
        (2.5, 3, 5.5),
        (2, None, 2),  # missing operand falls back to 0
        (2, "b", 2),  # non-numeric operand falls back to 0
    ],
)
def test_add(client, a, b, result):
    """POST two operands to /add and check the JSON sum."""
    payload = {"a": a, "b": b}
    response = client.post("/add", data=payload)
    assert response.get_json()["result"] == result
def login_required(view):
    """View decorator that redirects anonymous users to the login page."""

    @functools.wraps(view)
    def wrapped_view(**kwargs):
        if g.user is None:
            return redirect(url_for("auth.login"))
        return view(**kwargs)

    return wrapped_view


@bp.before_app_request
def load_logged_in_user():
    """Before each request, load the session's user row into ``g.user``.

    ``g.user`` is ``None`` when no ``user_id`` is stored in the session.
    """
    user_id = session.get("user_id")
    g.user = (
        None
        if user_id is None
        else get_db().execute("SELECT * FROM user WHERE id = ?", (user_id,)).fetchone()
    )


@bp.route("/register", methods=("GET", "POST"))
def register():
    """Register a new user.

    Validates that the username is not already taken. Hashes the
    password for security.
    """
    if request.method == "POST":
        username = request.form["username"]
        password = request.form["password"]
        db = get_db()

        if not username:
            error = "Username is required."
        elif not password:
            error = "Password is required."
        else:
            error = None

        if error is None:
            try:
                db.execute(
                    "INSERT INTO user (username, password) VALUES (?, ?)",
                    (username, generate_password_hash(password)),
                )
                db.commit()
            except db.IntegrityError:
                # The username was already taken, which made the insert
                # fail; surface it as a validation error.
                error = f"User {username} is already registered."
            else:
                # Success, go to the login page.
                return redirect(url_for("auth.login"))

        flash(error)

    return render_template("auth/register.html")


@bp.route("/login", methods=("GET", "POST"))
def login():
    """Log in a registered user by adding the user id to the session."""
    if request.method == "POST":
        username = request.form["username"]
        password = request.form["password"]
        db = get_db()
        user = db.execute(
            "SELECT * FROM user WHERE username = ?", (username,)
        ).fetchone()

        if user is None:
            error = "Incorrect username."
        elif not check_password_hash(user["password"], password):
            error = "Incorrect password."
        else:
            error = None

        if error is None:
            # Start a fresh session that holds only the user id.
            session.clear()
            session["user_id"] = user["id"]
            return redirect(url_for("index"))

        flash(error)

    return render_template("auth/login.html")


@bp.route("/logout")
def logout():
    """Clear the current session, including the stored user id."""
    session.clear()
    return redirect(url_for("index"))
@bp.route("/create", methods=("GET", "POST"))
@login_required
def create():
    """Create a new post for the current user."""
    if request.method == "POST":
        title = request.form["title"]
        body = request.form["body"]
        error = None

        if not title:
            error = "Title is required."

        if error is not None:
            flash(error)
        else:
            db = get_db()
            db.execute(
                "INSERT INTO post (title, body, author_id) VALUES (?, ?, ?)",
                (title, body, g.user["id"]),
            )
            db.commit()
            return redirect(url_for("blog.index"))

    return render_template("blog/create.html")


# NOTE(review): the extracted source read "//update" and "//delete" — the
# "<int:id>" URL converter was stripped as markup.  Both view functions take
# an ``id`` argument, so the converter is restored here.
@bp.route("/<int:id>/update", methods=("GET", "POST"))
@login_required
def update(id):
    """Update a post if the current user is the author.

    :param id: id of the post to update; 404/403 handling is done by
        :func:`get_post`.
    """
    post = get_post(id)

    if request.method == "POST":
        title = request.form["title"]
        body = request.form["body"]
        error = None

        if not title:
            error = "Title is required."

        if error is not None:
            flash(error)
        else:
            db = get_db()
            db.execute(
                "UPDATE post SET title = ?, body = ? WHERE id = ?", (title, body, id)
            )
            db.commit()
            return redirect(url_for("blog.index"))

    return render_template("blog/update.html", post=post)


@bp.route("/<int:id>/delete", methods=("POST",))
@login_required
def delete(id):
    """Delete a post.

    Ensures that the post exists and that the logged in user is the
    author of the post (via :func:`get_post`) before deleting.
    """
    get_post(id)
    db = get_db()
    db.execute("DELETE FROM post WHERE id = ?", (id,))
    db.commit()
    return redirect(url_for("blog.index"))
font-weight: bold; + margin-bottom: 0.5em; +} + +.content input, .content textarea { + margin-bottom: 1em; +} + +.content textarea { + min-height: 12em; + resize: vertical; +} + +input.danger { + color: #cc2f2e; +} + +input[type=submit] { + align-self: start; + min-width: 10em; +} diff --git a/testbed/pallets__flask/examples/tutorial/flaskr/templates/auth/login.html b/testbed/pallets__flask/examples/tutorial/flaskr/templates/auth/login.html new file mode 100644 index 0000000000000000000000000000000000000000..b326b5a6b8e469710b20be8487956757ffc8ac9c --- /dev/null +++ b/testbed/pallets__flask/examples/tutorial/flaskr/templates/auth/login.html @@ -0,0 +1,15 @@ +{% extends 'base.html' %} + +{% block header %} +

{% block title %}Log In{% endblock %}

+{% endblock %} + +{% block content %} +
+ + + + + +
+{% endblock %} diff --git a/testbed/pallets__flask/examples/tutorial/flaskr/templates/auth/register.html b/testbed/pallets__flask/examples/tutorial/flaskr/templates/auth/register.html new file mode 100644 index 0000000000000000000000000000000000000000..4320e17e8499856b22b22d5cfb830ab0378a378b --- /dev/null +++ b/testbed/pallets__flask/examples/tutorial/flaskr/templates/auth/register.html @@ -0,0 +1,15 @@ +{% extends 'base.html' %} + +{% block header %} +

{% block title %}Register{% endblock %}

+{% endblock %} + +{% block content %} +
+ + + + + +
+{% endblock %} diff --git a/testbed/pallets__flask/examples/tutorial/flaskr/templates/base.html b/testbed/pallets__flask/examples/tutorial/flaskr/templates/base.html new file mode 100644 index 0000000000000000000000000000000000000000..f09e92687c7acf29f0068ae485859dca43bfb5c3 --- /dev/null +++ b/testbed/pallets__flask/examples/tutorial/flaskr/templates/base.html @@ -0,0 +1,24 @@ + +{% block title %}{% endblock %} - Flaskr + + +
+
+ {% block header %}{% endblock %} +
+ {% for message in get_flashed_messages() %} +
{{ message }}
+ {% endfor %} + {% block content %}{% endblock %} +
diff --git a/testbed/pallets__flask/examples/tutorial/flaskr/templates/blog/create.html b/testbed/pallets__flask/examples/tutorial/flaskr/templates/blog/create.html new file mode 100644 index 0000000000000000000000000000000000000000..88e31e44bdc255d905e2e6b4a040c979d7e48221 --- /dev/null +++ b/testbed/pallets__flask/examples/tutorial/flaskr/templates/blog/create.html @@ -0,0 +1,15 @@ +{% extends 'base.html' %} + +{% block header %} +

{% block title %}New Post{% endblock %}

+{% endblock %} + +{% block content %} +
+ + + + + +
+{% endblock %} diff --git a/testbed/pallets__flask/examples/tutorial/flaskr/templates/blog/index.html b/testbed/pallets__flask/examples/tutorial/flaskr/templates/blog/index.html new file mode 100644 index 0000000000000000000000000000000000000000..3481b8e18d6022f3c0c633c0c721824cc398f79e --- /dev/null +++ b/testbed/pallets__flask/examples/tutorial/flaskr/templates/blog/index.html @@ -0,0 +1,28 @@ +{% extends 'base.html' %} + +{% block header %} +

{% block title %}Posts{% endblock %}

+ {% if g.user %} + New + {% endif %} +{% endblock %} + +{% block content %} + {% for post in posts %} +
+
+
+

{{ post['title'] }}

+
by {{ post['username'] }} on {{ post['created'].strftime('%Y-%m-%d') }}
+
+ {% if g.user['id'] == post['author_id'] %} + Edit + {% endif %} +
+

{{ post['body'] }}

+
+ {% if not loop.last %} +
+ {% endif %} + {% endfor %} +{% endblock %} diff --git a/testbed/pallets__flask/examples/tutorial/flaskr/templates/blog/update.html b/testbed/pallets__flask/examples/tutorial/flaskr/templates/blog/update.html new file mode 100644 index 0000000000000000000000000000000000000000..2c405e63038ec6693ca7fcbe8af8a53150c0d3e9 --- /dev/null +++ b/testbed/pallets__flask/examples/tutorial/flaskr/templates/blog/update.html @@ -0,0 +1,19 @@ +{% extends 'base.html' %} + +{% block header %} +

{% block title %}Edit "{{ post['title'] }}"{% endblock %}

+{% endblock %} + +{% block content %} +
+ + + + + +
+
+
+ +
class AuthActions:
    """Convenience wrapper that drives the auth endpoints with a test client."""

    def __init__(self, client):
        # Keep the client private; tests log in/out through the methods below.
        self._client = client

    def login(self, username="test", password="test"):
        """POST credentials to the login view (defaults match the seeded user)."""
        credentials = {"username": username, "password": password}
        return self._client.post("/auth/login", data=credentials)

    def logout(self):
        """GET the logout view, clearing the session."""
        return self._client.get("/auth/logout")
def test_register(client, app):
    """Register renders, redirects on success, and persists the new user."""
    # Page renders without template errors.
    assert client.get("/auth/register").status_code == 200

    # A successful registration redirects to the login page.
    response = client.post("/auth/register", data={"username": "a", "password": "a"})
    assert response.headers["Location"] == "/auth/login"

    # The new user row exists in the database.
    with app.app_context():
        row = get_db().execute("SELECT * FROM user WHERE username = 'a'").fetchone()
        assert row is not None


@pytest.mark.parametrize(
    ("username", "password", "message"),
    [
        ("", "", b"Username is required."),
        ("a", "", b"Password is required."),
        ("test", "test", b"already registered"),
    ],
)
def test_register_validate_input(client, username, password, message):
    """Invalid registration input flashes the matching error message."""
    data = {"username": username, "password": password}
    response = client.post("/auth/register", data=data)
    assert message in response.data
def test_get_close_db(app):
    """get_db returns one connection per app context and closes it afterwards."""
    with app.app_context():
        db = get_db()
        # Repeated calls inside one context hand back the same connection.
        assert db is get_db()

    # Outside the context the connection has been closed; sqlite raises
    # ProgrammingError mentioning "closed" on further use.
    with pytest.raises(sqlite3.ProgrammingError, match="closed"):
        db.execute("SELECT 1")


def test_init_db_command(runner, monkeypatch):
    """The init-db CLI command reports success and calls init_db."""

    class Recorder:
        called = False

    def fake_init_db():
        Recorder.called = True

    monkeypatch.setattr("flaskr.db.init_db", fake_init_db)
    result = runner.invoke(args=["init-db"])
    assert "Initialized" in result.output
    assert Recorder.called
@@ -0,0 +1,45 @@ +from markupsafe import escape +from markupsafe import Markup +from werkzeug.exceptions import abort as abort +from werkzeug.utils import redirect as redirect + +from . import json as json +from .app import Flask as Flask +from .app import Request as Request +from .app import Response as Response +from .blueprints import Blueprint as Blueprint +from .config import Config as Config +from .ctx import after_this_request as after_this_request +from .ctx import copy_current_request_context as copy_current_request_context +from .ctx import has_app_context as has_app_context +from .ctx import has_request_context as has_request_context +from .globals import _app_ctx_stack as _app_ctx_stack +from .globals import _request_ctx_stack as _request_ctx_stack +from .globals import current_app as current_app +from .globals import g as g +from .globals import request as request +from .globals import session as session +from .helpers import flash as flash +from .helpers import get_flashed_messages as get_flashed_messages +from .helpers import get_template_attribute as get_template_attribute +from .helpers import make_response as make_response +from .helpers import send_file as send_file +from .helpers import send_from_directory as send_from_directory +from .helpers import stream_with_context as stream_with_context +from .helpers import url_for as url_for +from .json import jsonify as jsonify +from .signals import appcontext_popped as appcontext_popped +from .signals import appcontext_pushed as appcontext_pushed +from .signals import appcontext_tearing_down as appcontext_tearing_down +from .signals import before_render_template as before_render_template +from .signals import got_request_exception as got_request_exception +from .signals import message_flashed as message_flashed +from .signals import request_finished as request_finished +from .signals import request_started as request_started +from .signals import request_tearing_down as request_tearing_down +from 
.signals import signals_available as signals_available +from .signals import template_rendered as template_rendered +from .templating import render_template as render_template +from .templating import render_template_string as render_template_string + +__version__ = "2.1.0.dev0" diff --git a/testbed/pallets__flask/src/flask/__main__.py b/testbed/pallets__flask/src/flask/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..4e28416e104515e90fca4b69cc60d0c61fd15d61 --- /dev/null +++ b/testbed/pallets__flask/src/flask/__main__.py @@ -0,0 +1,3 @@ +from .cli import main + +main() diff --git a/testbed/pallets__flask/src/flask/json/tag.py b/testbed/pallets__flask/src/flask/json/tag.py new file mode 100644 index 0000000000000000000000000000000000000000..97f365a9b090e2ca1779445c022be134583547d6 --- /dev/null +++ b/testbed/pallets__flask/src/flask/json/tag.py @@ -0,0 +1,312 @@ +""" +Tagged JSON +~~~~~~~~~~~ + +A compact representation for lossless serialization of non-standard JSON +types. :class:`~flask.sessions.SecureCookieSessionInterface` uses this +to serialize the session data, but it may be useful in other places. It +can be extended to support other types. + +.. autoclass:: TaggedJSONSerializer + :members: + +.. autoclass:: JSONTag + :members: + +Let's see an example that adds support for +:class:`~collections.OrderedDict`. Dicts don't have an order in JSON, so +to handle this we will dump the items as a list of ``[key, value]`` +pairs. Subclass :class:`JSONTag` and give it the new key ``' od'`` to +identify the type. The session serializer processes dicts first, so +insert the new tag at the front of the order since ``OrderedDict`` must +be processed before ``dict``. + +.. 
class JSONTag:
    """Base class for defining type tags for :class:`TaggedJSONSerializer`.

    Subclasses implement :meth:`check`, :meth:`to_json` and
    :meth:`to_python`; :meth:`tag` combines the first two into the
    ``{key: json_value}`` wire format.
    """

    __slots__ = ("serializer",)

    #: The tag to mark the serialized object with. If ``None``, this tag is
    #: only used as an intermediate step during tagging.
    key: t.Optional[str] = None

    def __init__(self, serializer: "TaggedJSONSerializer") -> None:
        """Create a tagger for the given serializer."""
        self.serializer = serializer

    def check(self, value: t.Any) -> bool:
        """Return ``True`` if ``value`` should be handled by this tag."""
        raise NotImplementedError

    def to_json(self, value: t.Any) -> t.Any:
        """Convert ``value`` to a plain JSON-compatible object; the tag
        key is added later by :meth:`tag`."""
        raise NotImplementedError

    def to_python(self, value: t.Any) -> t.Any:
        """Reconstruct the original type from its JSON form; the tag key
        has already been stripped."""
        raise NotImplementedError

    def tag(self, value: t.Any) -> t.Any:
        """Wrap ``value``'s JSON form in the ``{key: ...}`` tag structure."""
        tagged = self.to_json(value)
        return {self.key: tagged}
class PassDict(JSONTag):
    """Recursively tag dict values; the dict itself needs no tag key."""

    __slots__ = ()

    def check(self, value: t.Any) -> bool:
        return isinstance(value, dict)

    def to_json(self, value: t.Any) -> t.Any:
        # JSON objects may only have string keys, so the keys pass through
        # untagged; only the values are tagged recursively.
        return {name: self.serializer.tag(item) for name, item in value.items()}

    # A plain dict is already a valid JSON object, so tagging is just the
    # recursive conversion.
    tag = to_json


class TagTuple(JSONTag):
    """Represent tuples as tagged JSON lists so they round-trip as tuples."""

    __slots__ = ()
    key = " t"

    def check(self, value: t.Any) -> bool:
        return isinstance(value, tuple)

    def to_json(self, value: t.Any) -> t.Any:
        return [self.serializer.tag(element) for element in value]

    def to_python(self, value: t.Any) -> t.Any:
        return tuple(value)


class PassList(JSONTag):
    """Recursively tag list items; the list itself needs no tag key."""

    __slots__ = ()

    def check(self, value: t.Any) -> bool:
        return isinstance(value, list)

    def to_json(self, value: t.Any) -> t.Any:
        return [self.serializer.tag(element) for element in value]

    tag = to_json


class TagBytes(JSONTag):
    """Represent bytes as base64 ASCII text under the ``" b"`` tag."""

    __slots__ = ()
    key = " b"

    def check(self, value: t.Any) -> bool:
        return isinstance(value, bytes)

    def to_json(self, value: t.Any) -> t.Any:
        return b64encode(value).decode("ascii")

    def to_python(self, value: t.Any) -> t.Any:
        return b64decode(value)
Always + deserializes to an instance of :class:`~markupsafe.Markup`.""" + + __slots__ = () + key = " m" + + def check(self, value: t.Any) -> bool: + return callable(getattr(value, "__html__", None)) + + def to_json(self, value: t.Any) -> t.Any: + return str(value.__html__()) + + def to_python(self, value: t.Any) -> t.Any: + return Markup(value) + + +class TagUUID(JSONTag): + __slots__ = () + key = " u" + + def check(self, value: t.Any) -> bool: + return isinstance(value, UUID) + + def to_json(self, value: t.Any) -> t.Any: + return value.hex + + def to_python(self, value: t.Any) -> t.Any: + return UUID(value) + + +class TagDateTime(JSONTag): + __slots__ = () + key = " d" + + def check(self, value: t.Any) -> bool: + return isinstance(value, datetime) + + def to_json(self, value: t.Any) -> t.Any: + return http_date(value) + + def to_python(self, value: t.Any) -> t.Any: + return parse_date(value) + + +class TaggedJSONSerializer: + """Serializer that uses a tag system to compactly represent objects that + are not JSON types. Passed as the intermediate serializer to + :class:`itsdangerous.Serializer`. + + The following extra types are supported: + + * :class:`dict` + * :class:`tuple` + * :class:`bytes` + * :class:`~markupsafe.Markup` + * :class:`~uuid.UUID` + * :class:`~datetime.datetime` + """ + + __slots__ = ("tags", "order") + + #: Tag classes to bind when creating the serializer. Other tags can be + #: added later using :meth:`~register`. + default_tags = [ + TagDict, + PassDict, + TagTuple, + PassList, + TagBytes, + TagMarkup, + TagUUID, + TagDateTime, + ] + + def __init__(self) -> None: + self.tags: t.Dict[str, JSONTag] = {} + self.order: t.List[JSONTag] = [] + + for cls in self.default_tags: + self.register(cls) + + def register( + self, + tag_class: t.Type[JSONTag], + force: bool = False, + index: t.Optional[int] = None, + ) -> None: + """Register a new tag with this serializer. + + :param tag_class: tag class to register. 
Will be instantiated with this + serializer instance. + :param force: overwrite an existing tag. If false (default), a + :exc:`KeyError` is raised. + :param index: index to insert the new tag in the tag order. Useful when + the new tag is a special case of an existing tag. If ``None`` + (default), the tag is appended to the end of the order. + + :raise KeyError: if the tag key is already registered and ``force`` is + not true. + """ + tag = tag_class(self) + key = tag.key + + if key is not None: + if not force and key in self.tags: + raise KeyError(f"Tag '{key}' is already registered.") + + self.tags[key] = tag + + if index is None: + self.order.append(tag) + else: + self.order.insert(index, tag) + + def tag(self, value: t.Any) -> t.Dict[str, t.Any]: + """Convert a value to a tagged representation if necessary.""" + for tag in self.order: + if tag.check(value): + return tag.tag(value) + + return value + + def untag(self, value: t.Dict[str, t.Any]) -> t.Any: + """Convert a tagged representation back to the original type.""" + if len(value) != 1: + return value + + key = next(iter(value)) + + if key not in self.tags: + return value + + return self.tags[key].to_python(value[key]) + + def dumps(self, value: t.Any) -> str: + """Tag the value and dump it to a compact JSON string.""" + return dumps(self.tag(value), separators=(",", ":")) + + def loads(self, value: str) -> t.Any: + """Load data from a JSON string and deserialized any tagged objects.""" + return loads(value, object_hook=self.untag)