Dataset schema (⌀ = column may be null):
| hexsha: string (len 40) | size: int64 (3 - 1.03M) | ext: string (10 classes) | lang: string (1 class) |
| max_stars_repo_path: string (3-972) | max_stars_repo_name: string (6-130) | max_stars_repo_head_hexsha: string (40-78) | max_stars_repo_licenses: list (1-10) | max_stars_count: int64 (1-191k) ⌀ | max_stars_repo_stars_event_min_datetime: string (24) ⌀ | max_stars_repo_stars_event_max_datetime: string (24) ⌀ |
| max_issues_repo_path: string (3-972) | max_issues_repo_name: string (6-130) | max_issues_repo_head_hexsha: string (40-78) | max_issues_repo_licenses: list (1-10) | max_issues_count: int64 (1-116k) ⌀ | max_issues_repo_issues_event_min_datetime: string (24) ⌀ | max_issues_repo_issues_event_max_datetime: string (24) ⌀ |
| max_forks_repo_path: string (3-972) | max_forks_repo_name: string (6-130) | max_forks_repo_head_hexsha: string (40-78) | max_forks_repo_licenses: list (1-10) | max_forks_count: int64 (1-105k) ⌀ | max_forks_repo_forks_event_min_datetime: string (24) ⌀ | max_forks_repo_forks_event_max_datetime: string (24) ⌀ |
| content: string (3 - 1.03M) | avg_line_length: float64 (1.13 - 941k) | max_line_length: int64 (2 - 941k) | alphanum_fraction: float64 (0-1) |
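The records below follow this schema: per-file Git metadata, the raw file content, then three derived statistics. A minimal sketch of how a dump with this column layout is typically consumed (assumptions: the Hugging Face `datasets` library is installed, and the dataset name and data_dir below are placeholders, not necessarily the actual source of this dump):

from datasets import load_dataset

# Stream the split so the large `content` column is never fully materialized.
ds = load_dataset("bigcode/the-stack", data_dir="data/python", split="train", streaming=True)
for row in ds.take(3):
    # Each row exposes the columns listed in the schema header above.
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"], row["alphanum_fraction"])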
| 5b0b8d51773c98d175378fac706b8fb44b0e22c0 | 4,396 | py | Python |
| max_stars: dpctl/tests/test_sycl_device.py | vlad-perevezentsev/dpctl | f4a281f821c238ce1f6d1cef083b1f51a95b0ead | ["Apache-2.0"] | null | null | null |
| max_issues: dpctl/tests/test_sycl_device.py | vlad-perevezentsev/dpctl | f4a281f821c238ce1f6d1cef083b1f51a95b0ead | ["Apache-2.0"] | null | null | null |
| max_forks: dpctl/tests/test_sycl_device.py | vlad-perevezentsev/dpctl | f4a281f821c238ce1f6d1cef083b1f51a95b0ead | ["Apache-2.0"] | null | null | null |
# ===------------- test_sycl_device.py - dpctl -------*- Python -*---------===#
#
# Data Parallel Control (dpctl)
#
# Copyright 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ===-----------------------------------------------------------------------===#
#
# \file
# Defines unit test cases for the SyclDevice classes defined in sycl_core.pyx.
# ===-----------------------------------------------------------------------===#
import dpctl
import unittest
@unittest.skipIf(not dpctl.has_sycl_platforms(), "No SYCL platforms available")
class TestSyclDevice(unittest.TestCase):
def test_get_max_compute_units(self):
try:
q = dpctl.get_current_queue()
except Exception:
self.fail("Encountered an exception inside get_current_queue().")
try:
max_compute_units = q.get_sycl_device().get_max_compute_units()
except Exception:
self.fail("Encountered an exception inside get_max_compute_units().")
self.assertTrue(max_compute_units > 0)
def test_get_max_work_item_dims(self):
try:
q = dpctl.get_current_queue()
except Exception:
self.fail("Encountered an exception inside get_current_queue().")
try:
max_work_item_dims = q.get_sycl_device().get_max_work_item_dims()
except Exception:
self.fail("Encountered an exception inside get_max_work_item_dims().")
self.assertTrue(max_work_item_dims > 0)
def test_get_max_work_item_sizes(self):
try:
q = dpctl.get_current_queue()
except Exception:
self.fail("Encountered an exception inside get_current_queue().")
try:
max_work_item_sizes = q.get_sycl_device().get_max_work_item_sizes()
except Exception:
self.fail("Encountered an exception inside get_max_work_item_sizes().")
self.assertNotEqual(max_work_item_sizes, (None, None, None))
def test_get_max_work_group_size(self):
try:
q = dpctl.get_current_queue()
except Exception:
self.fail("Encountered an exception inside get_current_queue().")
try:
max_work_group_size = q.get_sycl_device().get_max_work_group_size()
except Exception:
self.fail("Encountered an exception inside get_max_work_group_size().")
self.assertTrue(max_work_group_size > 0)
def test_get_max_num_sub_groups(self):
try:
q = dpctl.get_current_queue()
except Exception:
self.fail("Encountered an exception inside get_current_queue().")
try:
max_num_sub_groups = q.get_sycl_device().get_max_num_sub_groups()
except Exception:
self.fail("Encountered an exception inside get_max_num_sub_groups().")
self.assertTrue(max_num_sub_groups > 0)
def test_has_int64_base_atomics(self):
try:
q = dpctl.get_current_queue()
except Exception:
self.fail("Encountered an exception inside get_current_queue().")
try:
aspects_base_atomics = q.get_sycl_device().has_int64_base_atomics()
except Exception:
self.fail("Encountered an exception inside has_int64_base_atomics().")
self.assertNotEqual(aspects_base_atomics, False)
def test_has_int64_extended_atomics(self):
try:
q = dpctl.get_current_queue()
except Exception:
self.fail("Encountered an exception inside get_current_queue().")
try:
aspects_extended_atomics = q.get_sycl_device().has_int64_extended_atomics()
except Exception:
self.fail("Encountered an exception inside has_int64_extended_atomics().")
self.assertNotEqual(aspects_extended_atomics, False)
if __name__ == "__main__":
unittest.main()
| avg_line_length: 39.603604 | max_line_length: 87 | alphanum_fraction: 0.644904 |
| ba2ad62a8619225536a28a50b9cd4e79706e688b | 11,492 | py | Python |
| max_stars: tools/openblas_support.py | LaurynasMiksys/scipy | bfc49e336c102269521eed6a6c3c48963ac33897 | ["BSD-3-Clause"] | 1 | 2020-07-22T17:29:25.000Z | 2020-07-22T17:29:25.000Z |
| max_issues: tools/openblas_support.py | LaurynasMiksys/scipy | bfc49e336c102269521eed6a6c3c48963ac33897 | ["BSD-3-Clause"] | null | null | null |
| max_forks: tools/openblas_support.py | LaurynasMiksys/scipy | bfc49e336c102269521eed6a6c3c48963ac33897 | ["BSD-3-Clause"] | null | null | null |
import os
import sys
import glob
import shutil
import textwrap
import platform
import hashlib
from tempfile import mkstemp, gettempdir
from urllib.request import urlopen, Request
from urllib.error import HTTPError
import zipfile
import tarfile
OPENBLAS_V = 'v0.3.9'
OPENBLAS_LONG = 'v0.3.9'
BASE_LOC = ''
ANACONDA = 'https://anaconda.org/multibuild-wheels-staging/openblas-libs'
ARCHITECTURES = ['', 'windows', 'darwin', 'aarch64', 'x86', 'ppc64le', 's390x']
sha256_vals = {
'openblas64_-v0.3.9-macosx_10_9_x86_64-gf_1becaaa.tar.gz':
'53f606a7da75d390287f1c51b2af7866b8fe7553a26d2474f827daf0e5c8a886',
'openblas64_-v0.3.9-manylinux1_x86_64.tar.gz':
'6fe5b1e2a4baa16833724bcc94a80b22e9c99fc1b9a2ddbce4f1f82a8002d906',
'openblas64_-v0.3.9-win_amd64-gcc_7_1_0.zip':
'15d24a66c5b22cc7b3120e831658f491c7a063804c33813235044a6f8b56686d',
'openblas-v0.3.9-macosx_10_9_x86_64-gf_1becaaa.tar.gz':
'8221397b9cfb8cb22f3efb7f228ef901e13f9fd89c7d7d0cb7b8a79b0610bf33',
'openblas-v0.3.9-manylinux1_i686.tar.gz':
'31abf8eccb697a320a998ce0f59045edc964602f815d78690c5a23839819261c',
'openblas-v0.3.9-manylinux1_x86_64.tar.gz':
'd9c39acbafae9b1daef19c2738ec938109a59e9322f93eb9a3c50869d220deff',
'openblas-v0.3.9-win32-gcc_7_1_0.zip':
'69a7dc265e8a8e45b358637d11cb1710ce88c4456634c7ce37d429b1d9bc9aaa',
'openblas-v0.3.9-win_amd64-gcc_7_1_0.zip':
'0cea06f4a2afebaa6255854f73f237802fc6b58eaeb1a8b1c22d87cc399e0d48'
}
IS_32BIT = sys.maxsize < 2**32
def get_arch():
if platform.system() == 'Windows':
ret = 'windows'
elif platform.system() == 'Darwin':
ret = 'darwin'
else:
ret = platform.uname().machine
# What do 32 bit machines report?
# If they are a docker, they report x86_64 or i686
if 'x86' in ret or ret == 'i686':
ret = 'x86'
assert ret in ARCHITECTURES
return ret
def get_ilp64():
if os.environ.get("NPY_USE_BLAS_ILP64", "0") == "0":
return None
if IS_32BIT:
raise RuntimeError("NPY_USE_BLAS_ILP64 set on 32-bit arch")
return "64_"
def download_openblas(target, arch, ilp64):
fnsuffix = {None: "", "64_": "64_"}[ilp64]
filename = ''
if arch in ('aarch64', 'ppc64le', 's390x'):
suffix = f'manylinux2014_{arch}.tar.gz'
filename = f'{ANACONDA}/{OPENBLAS_LONG}/download/openblas{fnsuffix}-{OPENBLAS_LONG}-{suffix}'
        typ = 'tar.gz'
elif arch == 'darwin':
suffix = 'macosx_10_9_x86_64-gf_1becaaa.tar.gz'
filename = f'{ANACONDA}/{OPENBLAS_LONG}/download/openblas{fnsuffix}-{OPENBLAS_LONG}-{suffix}'
typ = 'tar.gz'
elif arch == 'windows':
if IS_32BIT:
suffix = 'win32-gcc_7_1_0.zip'
else:
suffix = 'win_amd64-gcc_7_1_0.zip'
filename = f'{ANACONDA}/{OPENBLAS_LONG}/download/openblas{fnsuffix}-{OPENBLAS_LONG}-{suffix}'
typ = 'zip'
elif 'x86' in arch:
if IS_32BIT:
suffix = 'manylinux1_i686.tar.gz'
else:
suffix = 'manylinux1_x86_64.tar.gz'
filename = f'{ANACONDA}/{OPENBLAS_LONG}/download/openblas{fnsuffix}-{OPENBLAS_LONG}-{suffix}'
typ = 'tar.gz'
if not filename:
return None
print("Downloading:", filename, file=sys.stderr)
try:
with open(target, 'wb') as fid:
# anaconda.org download location guards against
# scraping so trick it with a fake browser header
# see: https://medium.com/@speedforcerun/python-crawler-http-error-403-forbidden-1623ae9ba0f
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.3'}
req = Request(url=filename, headers=headers)
fid.write(urlopen(req).read())
with open(target, 'rb') as binary_to_check:
data = binary_to_check.read()
sha256_returned = hashlib.sha256(data).hexdigest()
sha256_expected = sha256_vals[os.path.basename(filename)]
if sha256_returned != sha256_expected:
raise ValueError('sha256 hash mismatch for downloaded OpenBLAS')
except HTTPError as e:
print(f'Could not download "{filename}"')
print(f'Error message: {e}')
return None
return typ
def setup_openblas(arch=get_arch(), ilp64=get_ilp64()):
'''
Download and setup an openblas library for building. If successful,
the configuration script will find it automatically.
Returns
-------
msg : str
path to extracted files on success, otherwise indicates what went wrong
To determine success, do ``os.path.exists(msg)``
'''
_, tmp = mkstemp()
if not arch:
raise ValueError('unknown architecture')
typ = download_openblas(tmp, arch, ilp64)
if not typ:
return ''
if arch == 'windows':
if not typ == 'zip':
return 'expecting to download zipfile on windows, not %s' % str(typ)
return unpack_windows_zip(tmp)
else:
if not typ == 'tar.gz':
return 'expecting to download tar.gz, not %s' % str(typ)
return unpack_targz(tmp)
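# Minimal usage sketch (illustrative addition, not part of the original file).
# Per the docstring above, success is detected by calling os.path.exists() on
# the returned message:
#
#   location = setup_openblas()
#   if os.path.exists(location):
#       print('OpenBLAS unpacked to', location)
#   else:
#       print('Setup failed:', location)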
def unpack_windows_zip(fname):
with zipfile.ZipFile(fname, 'r') as zf:
# Get the openblas.a file, but not openblas.dll.a nor openblas.dev.a
lib = [x for x in zf.namelist() if OPENBLAS_LONG in x and
x.endswith('a') and not x.endswith('dll.a') and
not x.endswith('dev.a')]
if not lib:
return 'could not find libopenblas_%s*.a ' \
'in downloaded zipfile' % OPENBLAS_LONG
target = os.path.join(gettempdir(), 'openblas.a')
with open(target, 'wb') as fid:
fid.write(zf.read(lib[0]))
return target
def unpack_targz(fname):
target = os.path.join(gettempdir(), 'openblas')
if not os.path.exists(target):
os.mkdir(target)
with tarfile.open(fname, 'r') as zf:
# Strip common prefix from paths when unpacking
prefix = os.path.commonpath(zf.getnames())
extract_tarfile_to(zf, target, prefix)
return target
def extract_tarfile_to(tarfileobj, target_path, archive_path):
"""Extract TarFile contents under archive_path/ to target_path/"""
target_path = os.path.abspath(target_path)
def get_members():
for member in tarfileobj.getmembers():
if archive_path:
norm_path = os.path.normpath(member.name)
if norm_path.startswith(archive_path + os.path.sep):
member.name = norm_path[len(archive_path)+1:]
else:
continue
dst_path = os.path.abspath(os.path.join(target_path, member.name))
if os.path.commonpath([target_path, dst_path]) != target_path:
# Path not under target_path, probably contains ../
continue
yield member
tarfileobj.extractall(target_path, members=get_members())
def make_init(dirname):
'''
Create a _distributor_init.py file for OpenBlas
'''
with open(os.path.join(dirname, '_distributor_init.py'), 'wt') as fid:
fid.write(textwrap.dedent("""
'''
Helper to preload windows dlls to prevent dll not found errors.
Once a DLL is preloaded, its namespace is made available to any
subsequent DLL. This file originated in the numpy-wheels repo,
and is created as part of the scripts that build the wheel.
'''
import os
from ctypes import WinDLL
import glob
if os.name == 'nt':
# convention for storing / loading the DLL from
# numpy/.libs/, if present
try:
basedir = os.path.dirname(__file__)
except:
pass
else:
libs_dir = os.path.abspath(os.path.join(basedir, '.libs'))
DLL_filenames = []
if os.path.isdir(libs_dir):
for filename in glob.glob(os.path.join(libs_dir,
'*openblas*dll')):
# NOTE: would it change behavior to load ALL
# DLLs at this path vs. the name restriction?
WinDLL(os.path.abspath(filename))
DLL_filenames.append(filename)
if len(DLL_filenames) > 1:
import warnings
warnings.warn("loaded more than 1 DLL from .libs:\\n%s" %
"\\n".join(DLL_filenames),
stacklevel=1)
"""))
def test_setup(arches):
'''
Make sure all the downloadable files exist and can be opened
'''
def items():
for arch in arches:
yield arch, None
if arch in ('x86', 'darwin', 'windows'):
yield arch, '64_'
for arch, ilp64 in items():
if arch == '':
continue
target = None
try:
try:
target = setup_openblas(arch, ilp64)
except:
print(f'Could not setup {arch}')
raise
if not target:
raise RuntimeError(f'Could not setup {arch}')
print(target)
if arch == 'windows':
if not target.endswith('.a'):
raise RuntimeError("Not .a extracted!")
else:
files = glob.glob(os.path.join(target, "lib", "*.a"))
if not files:
raise RuntimeError("No lib/*.a unpacked!")
finally:
if target is not None:
if os.path.isfile(target):
os.unlink(target)
else:
shutil.rmtree(target)
def test_version(expected_version, ilp64=get_ilp64()):
"""
Assert that expected OpenBLAS version is
actually available via SciPy
"""
import scipy
import scipy.linalg
import ctypes
dll = ctypes.CDLL(scipy.linalg.cython_blas.__file__)
if ilp64 == "64_":
get_config = dll.openblas_get_config64_
else:
get_config = dll.openblas_get_config
get_config.restype=ctypes.c_char_p
res = get_config()
print('OpenBLAS get_config returned', str(res))
check_str = b'OpenBLAS %s' % expected_version[0].encode()
assert check_str in res
if 'dev' not in expected_version[0]:
assert b'dev' not in res
if ilp64:
assert b"USE64BITINT" in res
else:
assert b"USE64BITINT" not in res
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Download and expand an OpenBLAS archive for this ' \
'architecture')
parser.add_argument('--test', nargs='*', default=None,
help='Test different architectures. "all", or any of %s' % ARCHITECTURES)
parser.add_argument('--check_version', nargs=1, default=None,
help='Check provided OpenBLAS version string against available OpenBLAS')
args = parser.parse_args()
if args.check_version is not None:
test_version(args.check_version)
elif args.test is None:
print(setup_openblas())
else:
if len(args.test) == 0 or 'all' in args.test:
test_setup(ARCHITECTURES)
else:
test_setup(args.test)
| avg_line_length: 37.311688 | max_line_length: 139 | alphanum_fraction: 0.60503 |
| 8d624edeb8316c35d38d092c13c00547fd069259 | 1,844 | py | Python |
| max_stars: tests/gmprocess/subcommands/assemble_test.py | meramossepu1/groundmotion-processing | 5cc19023b94e5b5b718590ce8cd05a22a4088a67 | ["Unlicense"] | 54 | 2019-01-12T02:05:38.000Z | 2022-03-29T19:43:56.000Z |
| max_issues: tests/gmprocess/subcommands/assemble_test.py | meramossepu1/groundmotion-processing | 5cc19023b94e5b5b718590ce8cd05a22a4088a67 | ["Unlicense"] | 700 | 2018-12-18T19:44:31.000Z | 2022-03-30T20:54:28.000Z |
| max_forks: tests/gmprocess/subcommands/assemble_test.py | meramossepu1/groundmotion-processing | 5cc19023b94e5b5b718590ce8cd05a22a4088a67 | ["Unlicense"] | 41 | 2018-11-29T23:17:56.000Z | 2022-03-31T04:04:23.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
import shutil
import pkg_resources
from gmprocess.utils import constants
def test_assemble(script_runner):
try:
# Need to create profile first.
cdir = constants.PROJECTS_PATH_TEST
ddir = pkg_resources.resource_filename(
"gmprocess", os.path.join("data", "testdata", "demo")
)
setup_inputs = io.StringIO(
"2\ntest\n%s\n%s\nname\ntest@email.com\n" % (cdir, ddir)
)
ret = script_runner.run("gmrecords", "projects", "-c", stdin=setup_inputs)
setup_inputs.close()
assert ret.success
ret = script_runner.run("gmrecords", "assemble", "-h")
assert ret.success
ret = script_runner.run("gmrecords", "assemble")
assert ret.success
ret = script_runner.run("gmrecords", "assemble", "-e", "ci38457511", "-o")
assert ret.success
ret = script_runner.run("gmrecords", "assemble", "-n", "2", "-o")
assert ret.success
events = ["ci38457511", "ci38038071"]
out_names = ["workspace.h5"]
for event in events:
for outname in out_names:
dfile = os.path.join(ddir, event, outname)
print(dfile)
assert os.path.isfile(dfile)
except Exception as ex:
raise ex
finally:
shutil.rmtree(constants.PROJECTS_PATH_TEST)
# Remove workspace and image files
pattern = ["workspace.h5", ".png"]
for root, _, files in os.walk(ddir):
for file in files:
if any(file.endswith(ext) for ext in pattern):
os.remove(os.path.join(root, file))
# rmdir = os.path.join(ddir, 'usp000a1b0')
# shutil.rmtree(rmdir)
if __name__ == "__main__":
test_assemble()
| avg_line_length: 29.741935 | max_line_length: 82 | alphanum_fraction: 0.583514 |
| 862c40c526a8bc2ad1faa1286ab55e03c40634d7 | 29,936 | py | Python |
| max_stars: astropy/coordinates/sky_coordinate_parsers.py | ycopin/astropy | 4c29ecd4239fc2e8f62780a9bbbeacec8e592461 | ["BSD-3-Clause"] | null | null | null |
| max_issues: astropy/coordinates/sky_coordinate_parsers.py | ycopin/astropy | 4c29ecd4239fc2e8f62780a9bbbeacec8e592461 | ["BSD-3-Clause"] | 1 | 2018-11-14T14:18:55.000Z | 2020-01-21T10:36:05.000Z |
| max_forks: astropy/coordinates/sky_coordinate_parsers.py | ycopin/astropy | 4c29ecd4239fc2e8f62780a9bbbeacec8e592461 | ["BSD-3-Clause"] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import re
from collections.abc import Sequence
import inspect
import numpy as np
from ..units import Unit, IrreducibleUnit
from .. import units as u
from .baseframe import (BaseCoordinateFrame, frame_transform_graph,
_get_repr_cls, _get_diff_cls,
_normalize_representation_type)
from .builtin_frames import ICRS
from .representation import (BaseRepresentation, SphericalRepresentation,
UnitSphericalRepresentation)
"""
This module contains utility functions to make the SkyCoord initializer more modular
and maintainable. No functionality here should be in the public API, but rather used as
part of creating SkyCoord objects.
"""
PLUS_MINUS_RE = re.compile(r'(\+|\-)')
J_PREFIXED_RA_DEC_RE = re.compile(
r"""J # J prefix
([0-9]{6,7}\.?[0-9]{0,2}) # RA as HHMMSS.ss or DDDMMSS.ss, optional decimal digits
([\+\-][0-9]{6}\.?[0-9]{0,2})\s*$ # Dec as DDMMSS.ss, optional decimal digits
""", re.VERBOSE)
def _get_frame_class(frame):
"""
Get a frame class from the input `frame`, which could be a frame name
string, or frame class.
"""
if isinstance(frame, str):
frame_names = frame_transform_graph.get_names()
if frame not in frame_names:
raise ValueError('Coordinate frame name "{0}" is not a known '
'coordinate frame ({1})'
.format(frame, sorted(frame_names)))
frame_cls = frame_transform_graph.lookup_name(frame)
elif inspect.isclass(frame) and issubclass(frame, BaseCoordinateFrame):
frame_cls = frame
else:
raise ValueError("Coordinate frame must be a frame name or frame "
"class, not a '{0}'".format(frame.__class__.__name__))
return frame_cls
_conflict_err_msg = ("Coordinate attribute '{0}'={1!r} conflicts with keyword "
"argument '{0}'={2!r}. This usually means an attribute "
"was set on one of the input objects and also in the "
"keyword arguments to {3}")
def _get_frame_without_data(args, kwargs):
"""
Determines the coordinate frame from input SkyCoord args and kwargs.
This function extracts (removes) all frame attributes from the kwargs and
determines the frame class either using the kwargs, or using the first
element in the args (if a single frame object is passed in, for example).
This function allows a frame to be specified as a string like 'icrs' or a
frame class like ICRS, or an instance ICRS(), as long as the instance frame
attributes don't conflict with kwargs passed in (which could require a
three-way merge with the coordinate data possibly specified via the args).
"""
from .sky_coordinate import SkyCoord
# We eventually (hopefully) fill and return these by extracting the frame
# and frame attributes from the input:
frame_cls = None
frame_cls_kwargs = {}
# The first place to check: the frame could be specified explicitly
frame = kwargs.pop('frame', None)
if frame is not None:
# Here the frame was explicitly passed in as a keyword argument.
# If the frame is an instance or SkyCoord, we extract the attributes
# and split the instance into the frame class and an attributes dict
if isinstance(frame, SkyCoord):
# If the frame was passed as a SkyCoord, we also want to preserve
# any extra attributes (e.g., obstime) if they are not already
# specified in the kwargs. We preserve these extra attributes by
# adding them to the kwargs dict:
for attr in frame._extra_frameattr_names:
if (attr in kwargs and
np.any(getattr(frame, attr) != kwargs[attr])):
# This SkyCoord attribute passed in with the frame= object
# conflicts with an attribute passed in directly to the
# SkyCoord initializer as a kwarg:
raise ValueError(_conflict_err_msg
.format(attr, getattr(frame, attr),
kwargs[attr], 'SkyCoord'))
else:
kwargs[attr] = getattr(frame, attr)
frame = frame.frame
if isinstance(frame, BaseCoordinateFrame):
# Extract any frame attributes
for attr in frame.get_frame_attr_names():
# If the frame was specified as an instance, we have to make
# sure that no frame attributes were specified as kwargs - this
# would require a potential three-way merge:
if attr in kwargs:
raise ValueError("Cannot specify frame attribute '{0}' "
"directly as an argument to SkyCoord "
"because a frame instance was passed in. "
"Either pass a frame class, or modify the "
"frame attributes of the input frame "
"instance.".format(attr))
elif not frame.is_frame_attr_default(attr):
kwargs[attr] = getattr(frame, attr)
frame_cls = frame.__class__
# Make sure we propagate representation/differential _type choices,
# unless these are specified directly in the kwargs:
kwargs.setdefault('representation_type', frame.representation_type)
kwargs.setdefault('differential_type', frame.differential_type)
if frame_cls is None: # frame probably a string
frame_cls = _get_frame_class(frame)
# Check that the new frame doesn't conflict with existing coordinate frame
# if a coordinate is supplied in the args list. If the frame still had not
# been set by this point and a coordinate was supplied, then use that frame.
for arg in args:
# this catches the "single list passed in" case. For that case we want
# to allow the first argument to set the class. That's OK because
# _parse_coordinate_arg goes and checks that the frames match between
# the first and all the others
if (isinstance(arg, (Sequence, np.ndarray)) and
len(args) == 1 and len(arg) > 0):
arg = arg[0]
coord_frame_obj = coord_frame_cls = None
if isinstance(arg, BaseCoordinateFrame):
coord_frame_obj = arg
elif isinstance(arg, SkyCoord):
coord_frame_obj = arg.frame
if coord_frame_obj is not None:
coord_frame_cls = coord_frame_obj.__class__
frame_diff = coord_frame_obj.get_representation_cls('s')
if frame_diff is not None:
# we do this check because otherwise if there's no default
# differential (i.e. it is None), the code below chokes. but
# None still gets through if the user *requests* it
kwargs.setdefault('differential_type', frame_diff)
for attr in coord_frame_obj.get_frame_attr_names():
if (attr in kwargs and
not coord_frame_obj.is_frame_attr_default(attr) and
np.any(kwargs[attr] != getattr(coord_frame_obj, attr))):
raise ValueError("Frame attribute '{0}' has conflicting "
"values between the input coordinate data "
"and either keyword arguments or the "
"frame specification (frame=...): "
"{1} =/= {2}"
.format(attr,
getattr(coord_frame_obj, attr),
kwargs[attr]))
elif (attr not in kwargs and
not coord_frame_obj.is_frame_attr_default(attr)):
kwargs[attr] = getattr(coord_frame_obj, attr)
if coord_frame_cls is not None:
if frame_cls is None:
frame_cls = coord_frame_cls
elif frame_cls is not coord_frame_cls:
raise ValueError("Cannot override frame='{0}' of input "
"coordinate with new frame='{1}'. Instead, "
"transform the coordinate."
.format(coord_frame_cls.__name__,
frame_cls.__name__))
if frame_cls is None:
frame_cls = ICRS
# By now, frame_cls should be set - if it's not, something went wrong
if not issubclass(frame_cls, BaseCoordinateFrame):
# We should hopefully never get here...
raise ValueError('Frame class has unexpected type: {0}'
.format(frame_cls.__name__))
for attr in frame_cls.frame_attributes:
if attr in kwargs:
frame_cls_kwargs[attr] = kwargs.pop(attr)
# TODO: deprecate representation, remove this in future
_normalize_representation_type(kwargs)
if 'representation_type' in kwargs:
frame_cls_kwargs['representation_type'] = _get_repr_cls(
kwargs.pop('representation_type'))
differential_type = kwargs.pop('differential_type', None)
if differential_type is not None:
frame_cls_kwargs['differential_type'] = _get_diff_cls(
differential_type)
return frame_cls, frame_cls_kwargs
def _parse_coordinate_data(frame, args, kwargs):
"""
Extract coordinate data from the args and kwargs passed to SkyCoord.
By this point, we assume that all of the frame attributes have been
extracted from kwargs (see _get_frame_without_data()), so all that are left
are (1) extra SkyCoord attributes, and (2) the coordinate data, specified in
any of the valid ways.
"""
valid_skycoord_kwargs = {}
valid_components = {}
info = None
# Look through the remaining kwargs to see if any are valid attribute names
# by asking the frame transform graph:
attr_names = list(kwargs.keys())
for attr in attr_names:
if attr in frame_transform_graph.frame_attributes:
valid_skycoord_kwargs[attr] = kwargs.pop(attr)
# By this point in parsing the arguments, anything left in the args and
# kwargs should be data. Either as individual components, or a list of
# objects, or a representation, etc.
# Get units of components
units = _get_representation_component_units(args, kwargs)
# Grab any frame-specific attr names like `ra` or `l` or `distance` from
# kwargs and move them to valid_components.
valid_components.update(_get_representation_attrs(frame, units, kwargs))
# Error if anything is still left in kwargs
if kwargs:
# The next few lines add a more user-friendly error message to a
# common and confusing situation when the user specifies, e.g.,
# `pm_ra` when they really should be passing `pm_ra_cosdec`. The
# extra error should only turn on when the positional representation
# is spherical, and when the component 'pm_<lon>' is passed.
pm_message = ''
if frame.representation_type == SphericalRepresentation:
frame_names = list(frame.get_representation_component_names().keys())
lon_name = frame_names[0]
lat_name = frame_names[1]
if 'pm_{0}'.format(lon_name) in list(kwargs.keys()):
pm_message = ('\n\n By default, most frame classes expect '
'the longitudinal proper motion to include '
'the cos(latitude) term, named '
'`pm_{0}_cos{1}`. Did you mean to pass in '
'this component?'
.format(lon_name, lat_name))
raise ValueError('Unrecognized keyword argument(s) {0}{1}'
.format(', '.join("'{0}'".format(key)
for key in kwargs),
pm_message))
# Finally deal with the unnamed args. This figures out what the arg[0]
# is and returns a dict with appropriate key/values for initializing
# frame class. Note that differentials are *never* valid args, only
# kwargs. So they are not accounted for here (unless they're in a frame
# or SkyCoord object)
if args:
if len(args) == 1:
# One arg which must be a coordinate. In this case coord_kwargs
# will contain keys like 'ra', 'dec', 'distance' along with any
# frame attributes like equinox or obstime which were explicitly
# specified in the coordinate object (i.e. non-default).
_skycoord_kwargs, _components = _parse_coordinate_arg(
args[0], frame, units, kwargs)
# Copy other 'info' attr only if it has actually been defined.
if 'info' in getattr(args[0], '__dict__', ()):
info = args[0].info
elif len(args) <= 3:
_skycoord_kwargs = {}
_components = {}
frame_attr_names = frame.representation_component_names.keys()
repr_attr_names = frame.representation_component_names.values()
for arg, frame_attr_name, repr_attr_name, unit in zip(args, frame_attr_names,
repr_attr_names, units):
attr_class = frame.representation.attr_classes[repr_attr_name]
_components[frame_attr_name] = attr_class(arg, unit=unit)
else:
raise ValueError('Must supply no more than three positional arguments, got {}'
.format(len(args)))
# The next two loops copy the component and skycoord attribute data into
# their final, respective "valid_" dictionaries. For each, we check that
# there are no relevant conflicts with values specified by the user
# through other means:
# First validate the component data
for attr, coord_value in _components.items():
if attr in valid_components:
raise ValueError(_conflict_err_msg
.format(attr, coord_value,
valid_components[attr], 'SkyCoord'))
valid_components[attr] = coord_value
# Now validate the custom SkyCoord attributes
for attr, value in _skycoord_kwargs.items():
if (attr in valid_skycoord_kwargs and
np.any(valid_skycoord_kwargs[attr] != value)):
raise ValueError(_conflict_err_msg
.format(attr, value,
valid_skycoord_kwargs[attr],
'SkyCoord'))
valid_skycoord_kwargs[attr] = value
return valid_skycoord_kwargs, valid_components, info
def _get_representation_component_units(args, kwargs):
"""
Get the unit from kwargs for the *representation* components (not the
differentials).
"""
if 'unit' not in kwargs:
units = [None, None, None]
else:
units = kwargs.pop('unit')
if isinstance(units, str):
units = [x.strip() for x in units.split(',')]
# Allow for input like unit='deg' or unit='m'
if len(units) == 1:
units = [units[0], units[0], units[0]]
elif isinstance(units, (Unit, IrreducibleUnit)):
units = [units, units, units]
try:
units = [(Unit(x) if x else None) for x in units]
units.extend(None for x in range(3 - len(units)))
if len(units) > 3:
raise ValueError()
except Exception:
raise ValueError('Unit keyword must have one to three unit values as '
'tuple or comma-separated string')
return units
def _parse_coordinate_arg(coords, frame, units, init_kwargs):
"""
Single unnamed arg supplied. This must be:
- Coordinate frame with data
- Representation
- SkyCoord
- List or tuple of:
- String which splits into two values
- Iterable with two values
- SkyCoord, frame, or representation objects.
Returns a dict mapping coordinate attribute names to values (or lists of
values)
"""
from .sky_coordinate import SkyCoord
is_scalar = False # Differentiate between scalar and list input
# valid_kwargs = {} # Returned dict of lon, lat, and distance (optional)
components = {}
skycoord_kwargs = {}
frame_attr_names = list(frame.representation_component_names.keys())
repr_attr_names = list(frame.representation_component_names.values())
repr_attr_classes = list(frame.representation.attr_classes.values())
n_attr_names = len(repr_attr_names)
# Turn a single string into a list of strings for convenience
if isinstance(coords, str):
is_scalar = True
coords = [coords]
if isinstance(coords, (SkyCoord, BaseCoordinateFrame)):
# Note that during parsing of `frame` it is checked that any coordinate
# args have the same frame as explicitly supplied, so don't worry here.
if not coords.has_data:
raise ValueError('Cannot initialize from a frame without coordinate data')
data = coords.data.represent_as(frame.representation_type)
values = [] # List of values corresponding to representation attrs
repr_attr_name_to_drop = []
for repr_attr_name in repr_attr_names:
# If coords did not have an explicit distance then don't include in initializers.
if (isinstance(coords.data, UnitSphericalRepresentation) and
repr_attr_name == 'distance'):
repr_attr_name_to_drop.append(repr_attr_name)
continue
# Get the value from `data` in the eventual representation
values.append(getattr(data, repr_attr_name))
# drop the ones that were skipped because they were distances
for nametodrop in repr_attr_name_to_drop:
nameidx = repr_attr_names.index(nametodrop)
del repr_attr_names[nameidx]
del units[nameidx]
del frame_attr_names[nameidx]
del repr_attr_classes[nameidx]
if coords.data.differentials and 's' in coords.data.differentials:
orig_vel = coords.data.differentials['s']
vel = coords.data.represent_as(frame.representation, frame.get_representation_cls('s')).differentials['s']
for frname, reprname in frame.get_representation_component_names('s').items():
if (reprname == 'd_distance' and not hasattr(orig_vel, reprname) and
'unit' in orig_vel.get_name()):
continue
values.append(getattr(vel, reprname))
units.append(None)
frame_attr_names.append(frname)
repr_attr_names.append(reprname)
repr_attr_classes.append(vel.attr_classes[reprname])
for attr in frame_transform_graph.frame_attributes:
value = getattr(coords, attr, None)
use_value = (isinstance(coords, SkyCoord)
or attr not in coords._attr_names_with_defaults)
if use_value and value is not None:
skycoord_kwargs[attr] = value
elif isinstance(coords, BaseRepresentation):
if coords.differentials and 's' in coords.differentials:
diffs = frame.get_representation_cls('s')
data = coords.represent_as(frame.representation_type, diffs)
values = [getattr(data, repr_attr_name) for repr_attr_name in repr_attr_names]
for frname, reprname in frame.get_representation_component_names('s').items():
values.append(getattr(data.differentials['s'], reprname))
units.append(None)
frame_attr_names.append(frname)
repr_attr_names.append(reprname)
repr_attr_classes.append(data.differentials['s'].attr_classes[reprname])
else:
data = coords.represent_as(frame.representation)
values = [getattr(data, repr_attr_name) for repr_attr_name in repr_attr_names]
elif (isinstance(coords, np.ndarray) and coords.dtype.kind in 'if'
and coords.ndim == 2 and coords.shape[1] <= 3):
# 2-d array of coordinate values. Handle specially for efficiency.
values = coords.transpose() # Iterates over repr attrs
elif isinstance(coords, (Sequence, np.ndarray)):
# Handles list-like input.
vals = []
is_ra_dec_representation = ('ra' in frame.representation_component_names and
'dec' in frame.representation_component_names)
coord_types = (SkyCoord, BaseCoordinateFrame, BaseRepresentation)
if any(isinstance(coord, coord_types) for coord in coords):
# this parsing path is used when there are coordinate-like objects
# in the list - instead of creating lists of values, we create
# SkyCoords from the list elements and then combine them.
scs = [SkyCoord(coord, **init_kwargs) for coord in coords]
# Check that all frames are equivalent
for sc in scs[1:]:
if not sc.is_equivalent_frame(scs[0]):
raise ValueError("List of inputs don't have equivalent "
"frames: {0} != {1}".format(sc, scs[0]))
# Now use the first to determine if they are all UnitSpherical
allunitsphrepr = isinstance(scs[0].data, UnitSphericalRepresentation)
# get the frame attributes from the first coord in the list, because
# from the above we know it matches all the others. First copy over
# the attributes that are in the frame itself, then copy over any
# extras in the SkyCoord
for fattrnm in scs[0].frame.frame_attributes:
skycoord_kwargs[fattrnm] = getattr(scs[0].frame, fattrnm)
for fattrnm in scs[0]._extra_frameattr_names:
skycoord_kwargs[fattrnm] = getattr(scs[0], fattrnm)
# Now combine the values, to be used below
values = []
for data_attr_name, repr_attr_name in zip(frame_attr_names, repr_attr_names):
if allunitsphrepr and repr_attr_name == 'distance':
# if they are *all* UnitSpherical, don't give a distance
continue
data_vals = []
for sc in scs:
data_val = getattr(sc, data_attr_name)
data_vals.append(data_val.reshape(1,) if sc.isscalar else data_val)
concat_vals = np.concatenate(data_vals)
# Hack because np.concatenate doesn't fully work with Quantity
if isinstance(concat_vals, u.Quantity):
concat_vals._unit = data_val.unit
values.append(concat_vals)
else:
# none of the elements are "frame-like"
# turn into a list of lists like [[v1_0, v2_0, v3_0], ... [v1_N, v2_N, v3_N]]
for coord in coords:
if isinstance(coord, str):
coord1 = coord.split()
if len(coord1) == 6:
coord = (' '.join(coord1[:3]), ' '.join(coord1[3:]))
elif is_ra_dec_representation:
coord = _parse_ra_dec(coord)
else:
coord = coord1
vals.append(coord) # Assumes coord is a sequence at this point
# Do some basic validation of the list elements: all have a length and all
# lengths the same
try:
n_coords = sorted(set(len(x) for x in vals))
except Exception:
raise ValueError('One or more elements of input sequence does not have a length')
if len(n_coords) > 1:
raise ValueError('Input coordinate values must have same number of elements, found {0}'
.format(n_coords))
n_coords = n_coords[0]
# Must have no more coord inputs than representation attributes
if n_coords > n_attr_names:
raise ValueError('Input coordinates have {0} values but '
'representation {1} only accepts {2}'
.format(n_coords,
frame.representation_type.get_name(),
n_attr_names))
# Now transpose vals to get [(v1_0 .. v1_N), (v2_0 .. v2_N), (v3_0 .. v3_N)]
# (ok since we know it is exactly rectangular). (Note: can't just use zip(*values)
# because Longitude et al distinguishes list from tuple so [a1, a2, ..] is needed
# while (a1, a2, ..) doesn't work.
values = [list(x) for x in zip(*vals)]
if is_scalar:
values = [x[0] for x in values]
else:
raise ValueError('Cannot parse coordinates from first argument')
# Finally we have a list of values from which to create the keyword args
# for the frame initialization. Validate by running through the appropriate
# class initializer and supply units (which might be None).
try:
for frame_attr_name, repr_attr_class, value, unit in zip(
frame_attr_names, repr_attr_classes, values, units):
components[frame_attr_name] = repr_attr_class(value, unit=unit,
copy=False)
except Exception as err:
raise ValueError('Cannot parse first argument data "{0}" for attribute '
'{1}'.format(value, frame_attr_name), err)
return skycoord_kwargs, components
def _get_representation_attrs(frame, units, kwargs):
"""
Find instances of the "representation attributes" for specifying data
for this frame. Pop them off of kwargs, run through the appropriate class
constructor (to validate and apply unit), and put into the output
valid_kwargs. "Representation attributes" are the frame-specific aliases
for the underlying data values in the representation, e.g. "ra" for "lon"
for many equatorial spherical representations, or "w" for "x" in the
cartesian representation of Galactic.
This also gets any *differential* kwargs, because they go into the same
frame initializer later on.
"""
frame_attr_names = frame.representation_component_names.keys()
repr_attr_classes = frame.representation_type.attr_classes.values()
valid_kwargs = {}
for frame_attr_name, repr_attr_class, unit in zip(frame_attr_names, repr_attr_classes, units):
value = kwargs.pop(frame_attr_name, None)
if value is not None:
valid_kwargs[frame_attr_name] = repr_attr_class(value, unit=unit)
# also check the differentials. They aren't included in the units keyword,
# so we only look for the names.
differential_type = frame.differential_type
if differential_type is not None:
for frame_name, repr_name in frame.get_representation_component_names('s').items():
diff_attr_class = differential_type.attr_classes[repr_name]
value = kwargs.pop(frame_name, None)
if value is not None:
valid_kwargs[frame_name] = diff_attr_class(value)
return valid_kwargs
def _parse_ra_dec(coord_str):
"""
Parse RA and Dec values from a coordinate string. Currently the
following formats are supported:
* space separated 6-value format
* space separated <6-value format, this requires a plus or minus sign
separation between RA and Dec
* sign separated format
* JHHMMSS.ss+DDMMSS.ss format, with up to two optional decimal digits
* JDDDMMSS.ss+DDMMSS.ss format, with up to two optional decimal digits
Parameters
----------
coord_str : str
Coordinate string to parse.
Returns
-------
coord : str or list of str
Parsed coordinate values.
"""
if isinstance(coord_str, str):
coord1 = coord_str.split()
else:
# This exception should never be raised from SkyCoord
raise TypeError('coord_str must be a single str')
if len(coord1) == 6:
coord = (' '.join(coord1[:3]), ' '.join(coord1[3:]))
elif len(coord1) > 2:
coord = PLUS_MINUS_RE.split(coord_str)
coord = (coord[0], ' '.join(coord[1:]))
elif len(coord1) == 1:
match_j = J_PREFIXED_RA_DEC_RE.match(coord_str)
if match_j:
coord = match_j.groups()
if len(coord[0].split('.')[0]) == 7:
coord = ('{0} {1} {2}'.
format(coord[0][0:3], coord[0][3:5], coord[0][5:]),
'{0} {1} {2}'.
format(coord[1][0:3], coord[1][3:5], coord[1][5:]))
else:
coord = ('{0} {1} {2}'.
format(coord[0][0:2], coord[0][2:4], coord[0][4:]),
'{0} {1} {2}'.
format(coord[1][0:3], coord[1][3:5], coord[1][5:]))
else:
coord = PLUS_MINUS_RE.split(coord_str)
coord = (coord[0], ' '.join(coord[1:]))
else:
coord = coord1
return coord
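# Worked examples for the formats listed in the docstring above (illustrative,
# derived by tracing the code; not part of the original file):
#   _parse_ra_dec('1 2 3 +4 5 6')         -> ('1 2 3', '+4 5 6')
#   _parse_ra_dec('J010203.04+040506.07') -> ('01 02 03.04', '+04 05 06.07')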
| avg_line_length: 45.357576 | max_line_length: 118 | alphanum_fraction: 0.601383 |
| b636e888c49982f022b2c7f6d07ec738a01e026b | 2,744 | py | Python |
| max_stars: configurando_git/settings.py | aprendacodigo/Configurando-GitHub | 8ba6e456fe7b918ad093070ca8f4e3989e899670 | ["MIT"] | null | null | null |
| max_issues: configurando_git/settings.py | aprendacodigo/Configurando-GitHub | 8ba6e456fe7b918ad093070ca8f4e3989e899670 | ["MIT"] | 2 | 2015-08-18T21:05:03.000Z | 2015-08-19T18:46:12.000Z |
| max_forks: configurando_git/settings.py | aprendacodigo/Configurando-GitHub | 8ba6e456fe7b918ad093070ca8f4e3989e899670 | ["MIT"] | 18 | 2017-11-17T06:42:28.000Z | 2021-10-02T01:30:54.000Z |
"""
Django settings for configurando_git project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '50p0zb@h(uoav#ey0b4hpp&znok!4aemhehldmr(n^!+%avdrl'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'configurando_git.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'configurando_git.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'pt-BR'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
# CHANGE MADE IN AUGUST 2015
| avg_line_length: 26.133333 | max_line_length: 71 | alphanum_fraction: 0.707362 |
| cfe9fdcd59d86df6996de9d922a7cde28f253abd | 797 | py | Python |
| max_stars: converter/urls.py | batu23/numberConverter | 2ea6f0f8086dbb63113168cd072fa22dee5bb4f6 | ["MIT"] | null | null | null |
| max_issues: converter/urls.py | batu23/numberConverter | 2ea6f0f8086dbb63113168cd072fa22dee5bb4f6 | ["MIT"] | null | null | null |
| max_forks: converter/urls.py | batu23/numberConverter | 2ea6f0f8086dbb63113168cd072fa22dee5bb4f6 | ["MIT"] | null | null | null |
"""converter URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('cApp.urls')),
]
| avg_line_length: 33.208333 | max_line_length: 77 | alphanum_fraction: 0.70138 |
| 747e976f30bdb38234c065eb0b57d46bb72f3f5a | 7,395 | py | Python |
| max_stars: Code/calculations.py | Jormungandr1105/Project-Atlas | 453525b693561f5b95a42cbc2293df9c6c1d9011 | ["MIT"] | 1 | 2020-11-25T03:11:46.000Z | 2020-11-25T03:11:46.000Z |
| max_issues: Code/calculations.py | Jormungandr1105/Project-Atlas | 453525b693561f5b95a42cbc2293df9c6c1d9011 | ["MIT"] | null | null | null |
| max_forks: Code/calculations.py | Jormungandr1105/Project-Atlas | 453525b693561f5b95a42cbc2293df9c6c1d9011 | ["MIT"] | null | null | null |
"""
Functions for all of the calculations required to aim the telescope
"""
import math
import positions as pos
# Global Variables
Pi = 3.14159265358979323846
def getDayTime(date):
"""
:type date: datetime.datetime
Verified Correct
"""
year = date.year
month = date.month
day = date.day
hours = date.hour
minutes = date.minute
seconds = date.second
#print(year,month,day,hours,minutes,seconds)
d = 367 * year - int((7 * (year + int((month + 9) / 12))) / 4) + int((275 * month) / 9) + day - 730530 + \
hours/24.0 + minutes/1440.0 + seconds/86400.0
return d
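# Worked example (illustrative, not part of the original file): for
# 2000-01-01 00:00:00 UT the formula gives
#   d = 367*2000 - 3500 + 30 + 1 - 730530 = 1.0
# so day numbers count from the epoch 2000 Jan 0.0 UT (1999-12-31 00:00 UT).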
def readData(object_name, d):
data = pos.getPositionData(object_name)
N = data[0][0] + data[1][0] * d # deg
w = data[0][1] + data[1][1] * d # deg
i = data[0][2] + data[1][2] * d # deg
a = data[0][3] + data[1][3] * d
e = data[0][4] + data[1][4] * d
M = data[0][5] + data[1][5] * d # deg
N = rev(N)
w = rev(w)
i = rev(i)
M = rev(M)
geo = False
offset = False
if data[0][6] == "true":
geo = True
if data[1][6] == "true":
offset = True
return N,w,i,a,e,M,geo,offset
def calculatePosition(object_name, date):
"""
Calculates the position of a given body at a given time
:param object_name: Name of body
:param date: current date and time
    :return: None; the computed topocentric RA/Decl are printed
"""
d = getDayTime(date)
N,w,i,a,e,M,geo,offset = readData(object_name,d)
NRad = math.radians(N)
wRad = math.radians(w)
iRad = math.radians(i)
MRad = math.radians(M)
E0 = M + (180/Pi) * e * math.sin(MRad) * (1 + e * math.cos(MRad))
E = getEccVal(E0,e,M)
x = a * (math.cos(math.radians(E)) - e)
y = a * math.sqrt(1 - e**2) * math.sin(math.radians(E))
r = math.sqrt(x * x + y * y)
v = rev(math.degrees(math.atan2(y, x)))
vRad = math.radians(v)
#print("R-V:",r,v)
coords = pos.RVtoLatLong(r, vRad, NRad, wRad, iRad, d, offset)
if object_name == "moon":
coord_mods = addMoonPerturbations(d,N+w+M,M,w+M)
for x in range(3):
coords[x] += coord_mods[x]
RA, Decl = pos.EcliptoRA(coords[0], coords[1], coords[2], d)
#print("RA h, Decl deg:",RA/15,Decl)
topRA, topDecl = converttoTopo(geo,coords[2],pos.getTelescopeCoords()[0],RA,Decl,d)
print("RA h,Decl deg:",topRA/15,topDecl)
pos.RAtoAzimuth(topRA,topDecl,d)
showRightAscensionDeclination(topRA,topDecl)
def showRightAscensionDeclination(RA,Decl):
deg_mod = Decl/abs(Decl)
ttra = 3600*RA/15
hours = math.floor(ttra/3600)
remainder = ttra-(hours*3600)
minutes = math.floor(remainder/60)
seconds = remainder - (minutes*60)
degs = math.floor(abs(Decl))
ttdecl = 3600*(abs(Decl)-math.floor(abs(Decl)))
mins = math.floor(ttdecl/60)
secs = ttdecl - (mins*60)
print("RA: {0}h {1}m {2}s -- Decl: {3}\u00b0 {4}\' {5}\""
.format(hours,minutes,int(seconds),int(degs*deg_mod),mins,int(secs)))
def extrapolateInfo(N,w,i,a,e,M,d):
# Convert to Radians
NRad = math.radians(N)
wRad = math.radians(w)
iRad = math.radians(i)
MRad = math.radians(M)
# Calculate secondary orbital elements
    L = M + w + N  # mean longitude
    return L
def getEccVal(E0, e, M):
"""
    Recursively solves Kepler's equation for the eccentric anomaly of a given astral body
    :param E0: first guess at the eccentric anomaly, E_0
    :param e: orbital eccentricity
    :param M: mean anomaly
    :return: E, the eccentric anomaly calculated
"""
E = E0 - ((E0 - (180/Pi) * e * math.sin(math.radians(E0)) - M) / (1 - e * math.cos(math.radians(E0))))
if abs(E0-E) > .005:
return getEccVal(E,e,M)
else:
return E
def rev(deg):
"""
Returns the value of a given variable between 0 and 360 degrees
:param deg:
:return: deg
"""
while deg < 0.0:
deg += 360.0
while deg >= 360.0:
deg -= 360.0
return deg
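# Quick sanity checks (illustrative): rev(-30.0) -> 330.0, rev(370.0) -> 10.0,
# rev(360.0) -> 0.0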
def converttoTopo(geo, r, lat, RA, Decl, d):
if geo:
par = math.degrees(math.asin(1 / r))
else:
par = (8.794 / 3600)/r # deg/A.U.
gclat = lat - 0.1924 * math.sin(2 * math.radians(lat))
rho = 0.99833 + 0.00167 * math.cos(2 * math.radians(lat))
#print("Gclat:", gclat, "Rho:", rho)
#pos.RAtoAzimuth(RA, Decl, d)
LST = findLST(d)
HA = rev(LST - RA+180)-180
DeclRad = math.radians(Decl)
HARad = math.radians(HA)
gclatRad = math.radians(gclat)
gRad = math.atan(math.tan(gclatRad) / math.cos(HARad))
#print("g:",math.degrees(gRad)+90)
topRA = RA - par * rho * math.cos(gclatRad) * math.sin(HARad) / math.cos(DeclRad)
topDecl = Decl - par * rho * math.sin(gclatRad) * math.sin(gRad - DeclRad) / math.sin(gRad)
return topRA, topDecl
def findLST(d):
UT = (d - math.floor(d)) * 24
ws = 282.9404 + 4.70935*(10**-5) * d
Ms = 356.0470 + 0.9856002585 * d
L = rev(Ms + ws)
#print("L:", L)
GMST0 = L/15 + 12
#print("GMST0", GMST0)
TLong = pos.getTelescopeCoords()[1]
#TLong = -71.15390
LST = GMST0 + UT + TLong / 15
LST = rev(LST*15)
#print("LST:", LST)
return LST # degrees
def offsetSun(d):
NS,wS,iS,aS,eS,MS,geo,offset = readData("sun", d)
MSRad = math.radians(MS)
ES = MS + (180/Pi) * eS * math.sin(MSRad) * (1 + eS * math.cos(MSRad))
#ES = getEccVal(E0, eS, MS)
xS = math.cos(math.radians(ES)) - eS
yS = math.sin(math.radians(ES)) * math.sqrt(1 - eS**2)
#print("xs-ys:",xS,yS)
rS = math.sqrt(xS**2 + yS**2)
vS = math.degrees(math.atan2(yS,xS))
#print("w,a,e,M:",wS,aS,eS,MS)
#print("vS, wS", vS, wS)
longS = rev(vS + wS)
xs = rS*math.cos(math.radians(longS))
ys = rS*math.sin(math.radians(longS))
#print("XS-Y:",xs,ys)
return xs, ys
def addMoonPerturbations(d, Lm, Mm, F):
Ms = 356.0470 + 0.9856002585 * d
Ls = 282.9404 + 4.70935*(10**-5) * d + Ms
D = Lm - Ls
LsRad = math.radians(Ls)
LmRad = math.radians(Lm)
MsRad = math.radians(Ms)
MmRad = math.radians(Mm)
DRad = math.radians(D)
FRad = math.radians(F)
# Longitude Perturbations
long = 0
long += -1.274* math.sin(MmRad - 2 * DRad) # (Evection)
long += 0.658* math.sin(2 * DRad) # (Variation)
long += -0.186* math.sin(MsRad) # (Yearly equation)
long += -0.059* math.sin(2 * MmRad - 2 * DRad)
long += -0.057* math.sin(MmRad - 2 * DRad + MsRad)
long += 0.053* math.sin(MmRad + 2 * DRad)
long += 0.046* math.sin(2 * DRad - MsRad)
long += 0.041* math.sin(MmRad - MsRad)
long += -0.035* math.sin(DRad)
long += -0.031* math.sin(MmRad + MsRad)
long += -0.015* math.sin(2 * FRad - 2 * DRad)
long += 0.011* math.sin(MmRad - 4 * DRad)
# Latitude Perturbations
lat = 0
lat += -0.173 * math.sin(FRad - 2 * DRad)
lat += -0.055 * math.sin(MmRad - FRad - 2 * DRad)
lat += -0.046 * math.sin(MmRad + FRad - 2 * DRad)
lat += 0.033 * math.sin(FRad + 2 * DRad)
lat += 0.017 * math.sin(2 * MmRad + FRad)
# Radius Perturbations
r = 0
r += -0.58 * math.cos(MmRad - 2 * DRad)
r += -0.46 * math.cos(2 * DRad)
#print("LatMod:",lat,"LongMod:",long,"RMod:",r)
return lat, long, r
| avg_line_length: 31.334746 | max_line_length: 111 | alphanum_fraction: 0.55497 |
| ad287ec2bd8bd957354954be1220d3c3e2c36188 | 2,639 | py | Python |
| max_stars: python/databank_download.py | OxfordEconomics/Mandoline.Api.Examples | b599ba72581b7afdad382c558070b5e80bd22ea4 | ["MIT"] | 2 | 2021-01-22T07:52:59.000Z | 2021-07-06T19:20:45.000Z |
| max_issues: python/databank_download.py | OxfordEconomics/Mandoline.Api.Examples | b599ba72581b7afdad382c558070b5e80bd22ea4 | ["MIT"] | null | null | null |
| max_forks: python/databank_download.py | OxfordEconomics/Mandoline.Api.Examples | b599ba72581b7afdad382c558070b5e80bd22ea4 | ["MIT"] | 3 | 2019-10-18T12:59:47.000Z | 2021-10-04T18:03:50.000Z |
# <copyright file="databank_download.py" company="Oxford Economics">
# Copyright (c) 2017 Oxford Economics Ltd. All rights reserved.
# Licensed under the MIT License. See LICENSE file in the
# project root for full license information.
# </copyright>
import json
import requests
import sys
API_KEY = ''  # insert api key
BASE_URL = 'https://services.oxfordeconomics.com'
sample_selection = {
'DatabankCode': 'WDMacro',
'Frequency': 'Annual',
'GroupingMode': 'false',
'IndicatorSortOrder': 'AlphabeticalOrder',
'IsTemporarySelection': 'true',
'ListingType': 'Private',
'LocationSortOrder': 'AlphabeticalOrder',
'Order': 'IndicatorLocation',
'Precision': 1,
'Sequence': 'EarliestToLatest',
'StackedQuarters': 'false',
'StartYear': 1980,
'EndYear': 2045,
# note: the fields below have been assigned empty lists
'Regions': [
],
'Variables': [
]
}
# for debug purposes only. you wouldn't want to limit the number of pages
# if you were attempting to download the entire databank
def _reached_page_limit(page, page_limit):
return (page_limit != -1 and page >= page_limit)
def _download_url(base_url, page, page_size):
url = base_url + '/api/download?includemetadata=true'
return url + '&page={0}&pagesize={1}'.format(page, page_size)
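# Illustrative example (not part of the original file):
#   _download_url(BASE_URL, 0, 5000) ->
#   'https://services.oxfordeconomics.com/api/download?includemetadata=true&page=0&pagesize=5000'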
def databank_download(selection_dictionary, page_size=5000, page_limit=-1):
headers = {'Accept': 'application/json',
'Api-Key': API_KEY,
'Content-Type': 'application/json; charset=utf8' }
page = 0
data_list = None
while not _reached_page_limit(page, page_limit):
print('Downloading page {0}... '.format(page + 1), end='')
sys.stdout.flush()
# note: _download_url returns a link of the form
# ../api/download?includemetadata=true&page={page}&pagesize={page_size}
response = requests.post(
_download_url(BASE_URL, page, page_size),
headers=headers,
data=json.dumps(selection_dictionary))
new_data = response.json()
page += 1
if data_list is None:
data_list = new_data
else:
data_list.extend(new_data)
print('contains {0} series, {1} total'.format(len(new_data), len(data_list)))
if len(new_data) < page_size:
break
return data_list
if __name__ == '__main__':
data = databank_download(sample_selection)
print('Finished downloading')
print('Writing to data_file.out... ', end='')
sys.stdout.flush()
data_file = open('data_file.out', 'w')
json.dump(data, data_file, indent=3)
print('Done')
| avg_line_length: 30.333333 | max_line_length: 85 | alphanum_fraction: 0.654415 |
| 3d2f853d0f645749bac98adc00361867d0c52944 | 36,783 | py | Python |
| max_stars: torch/ao/quantization/_dbr/quantization_state.py | eddieluo01/pytorch | e19f2e52adebd9ce36b8ac5302a9036662446d7f | ["Intel"] | 1 | 2022-02-02T11:58:26.000Z | 2022-02-02T11:58:26.000Z |
| max_issues: torch/ao/quantization/_dbr/quantization_state.py | eddieluo01/pytorch | e19f2e52adebd9ce36b8ac5302a9036662446d7f | ["Intel"] | 1 | 2022-02-03T12:43:23.000Z | 2022-02-03T12:47:53.000Z |
| max_forks: torch/ao/quantization/_dbr/quantization_state.py | eddieluo01/pytorch | e19f2e52adebd9ce36b8ac5302a9036662446d7f | ["Intel"] | null | null | null |
from typing import Callable, List, Tuple, Any, Optional, Dict, Set
import torch
import torch.nn.functional as F
from .mappings import (
ops_are_related,
)
from .utils import (
_raise_obs_not_found_error,
_raise_obs_op_mismatch,
op_needs_quantization,
SeenOpInfo,
QTensorInfo,
FuncOutputObsType,
get_func_output_obs_type,
converted_func_needs_scale_zp,
FuncOutputDTypeType,
get_func_output_dtype_type,
get_quantized_op,
get_input_observed_arg_idxs,
get_packable_tensor_arg_idxs,
get_param_name,
get_packable_nontensor_arg_idxs,
get_packable_arg_idxs,
get_weight_arg_idx,
iterate_and_apply,
get_op_packing_only_uses_module_attributes,
get_packable_tensor_kwarg_names,
get_producer_of_seen_op_info,
clone_detach_tensor_without_dispatch,
get_input_args_quant_dequant_info,
get_cur_qconfig,
)
OpConvertInfo = Tuple[
# quantized equivalent of original op (None means keep original)
Optional[Callable],
# arg_quant_infos, each element is (scale, zp) for quantized and None otherwise
List[Optional[Tuple[float, int]]],
# arg_dequant_infos, each element is True if this arg needs a dequant
List[bool],
# packed param name, if the op has a packed param
Optional[str],
# additional kwargs, such as output scale and zero_point
Dict[str, Any],
# any_arg_quant_or_dequant_needed, if False then we can skip looking at
# arg_quant_infos and arg_dequant_infos, for performance
bool,
# any_arg_kwarg_modification_needed, if False then we can return original
# args and kwargs, for performance
bool,
]
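# Illustrative example only (hypothetical values): for a convolution being swapped
# to its quantized counterpart, an OpConvertInfo tuple could look like
#   (torch.ops.quantized.conv2d,         # quantized replacement op
#    [(0.02, 128), None],                # per-arg (scale, zp); None -> no quant needed
#    [False, False],                     # no dequants needed
#    '_packed_params_0',                 # hypothetical packed-param attribute name
#    {'scale': 0.04, 'zero_point': 128}, # extra kwargs for the quantized op
#    True, True)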
# TODO(future PR): maybe better name
# TODO(future PR): add serialization support
class AutoQuantizationState(torch.nn.Module):
"""
Contains state necessary to perform auto quantization on the parent
`nn.Module` instance.
"""
idx : int
def __init__(
self,
qconfig_dict: Dict[str, Any],
fqn: str,
input_dtypes: Any = None,
output_dtypes: Any = None,
):
super().__init__()
self.idx = 0
self.qconfig_dict = qconfig_dict
self.fqn = fqn
# this is a ModuleDict in order to properly register observers
# to be within the module hierarchy.
self.tensor_id_to_observer = torch.nn.ModuleDict()
# TODO(future PR): include kwargs
self.idx_to_seen_op_infos: Dict[int, SeenOpInfo] = {}
# qtensor_info objects of tensor outputs of the module, specified
# in order of iteration through the output type. Non-tensor outputs
# are represented with `None`.
self.output_qtensor_infos: List[Optional[QTensorInfo]] = []
self.input_dtypes = input_dtypes
self.output_dtypes = output_dtypes
# key: idx of seen op
# value: name of packed weight
# note: this is filled out right before convert
self.idx_to_packed_weight_name: Dict[int, str] = {}
self.tensor_id_to_scale_zp: Dict[int, Tuple[torch.Tensor, torch.Tensor]] = {}
# Numeric Suite add_loggers functionality
# if this flag is True, op outputs will be saved for debugging
self.log_op_outputs = False
# data structure to save op outputs for debugging
# * outer list represents the different model forward call instances
# * inner list represents the different op forward call instances in a
# model forward
# TODO(future PR): handle types which are not torch.Tensor
# TODO(future PR): use the Logger class and allow user overrides of it
self.op_outputs: List[List[Tuple[
int, # global op idx
Optional[str], # fqn
Callable, # fp32 op type (TODO future PR: add quantized op type)
torch.Tensor, # value
]]] = []
# model name to use in logging results
self.logging_model_name: Optional[str]
self.idx_to_op_convert_info: Dict[int, OpConvertInfo] = {}
# If this is True, module outputs will be checked and converted
# to the dtype specified by the user. If this is False, module outputs
# will be returned as is. This value can be precalculated and it is set
# to its final value after tracing.
self.needs_dtype_transform_on_outputs = True
# For debugging only, stores the types of ops seen by the parent which
# did not require op hooks.
self.seen_op_types_without_op_hooks: Set[Callable] = set()
def get_extra_state(self):
return {"tensor_id_to_scale_zp": self.tensor_id_to_scale_zp}
def set_extra_state(self, state):
self.tensor_id_to_scale_zp = state["tensor_id_to_scale_zp"]
for _, seen_op_info in self.idx_to_seen_op_infos.items():
self.idx_to_op_convert_info[seen_op_info.idx] = \
self.calculate_op_convert_info(seen_op_info)
def has_at_least_one_seen_op_info(self) -> bool:
return len(self.idx_to_seen_op_infos) > 0
def validate_is_at_last_seen_idx(self) -> None:
is_at_last_seen_idx = (
len(self.idx_to_seen_op_infos) == 0 or
self.idx == len(self.idx_to_seen_op_infos)
)
if not is_at_last_seen_idx:
raise AssertionError(
f"Cur idx: {self.idx}, expected idx: {len(self.idx_to_seen_op_infos)}")
def extra_repr(self) -> str:
s = ""
# idx_to_seen_op_infos
if len(self.idx_to_seen_op_infos):
s += "(seen_op_infos): {\n"
for k, v in self.idx_to_seen_op_infos.items():
s += f" {k}: {v}\n"
s += "}\n"
else:
s += "(seen_op_infos): {}\n"
# output_qtensor_infos
s += "(output_qtensor_infos): ["
for i in self.output_qtensor_infos:
s += f"{i} "
s += "]\n"
# seen_op_types_without_op_hooks
s += f"(seen_op_types_without_op_hooks): {self.seen_op_types_without_op_hooks}\n"
# idx_to_packed_weight_name
if len(self.idx_to_packed_weight_name):
s += "(idx_to_packed_weight_name): {\n"
for k, v in self.idx_to_packed_weight_name.items(): # type: ignore[assignment]
s += f" {k}: {v}\n"
s += "}\n"
else:
s += "(idx_to_packed_weight_name): {}"
if len(self.tensor_id_to_scale_zp):
s += "(tensor_id_to_scale_zp): {\n"
for k, v in self.tensor_id_to_scale_zp.items(): # type: ignore[assignment]
s += f" {k}: {v}\n"
s += "}"
return s
def _get_cur_seen_op_info(self):
return self.idx_to_seen_op_infos[self.idx]
def get_cur_output_inf_dtype(self):
return self._get_cur_seen_op_info().output_tensor_infos[0].inf_dtype
def reset_to_new_call(self):
"""
Resets the internal op counter to start a new top level module call
"""
# torch.nn.Module __setattr__ has overhead,
# this code is the explicit fast path for `self.idx = 0`
object.__setattr__(self, 'idx', 0)
if self.log_op_outputs:
self.op_outputs.append([])
def cur_op_needs_hooks(self, cur_op: Callable) -> bool:
return op_needs_quantization(cur_op)
def validate_cur_op(self, cur_op: Callable) -> None:
"""
This function is expected to be called before any new function or
module call which needs hooks. It validates that the new function or
module is of the expected type based on the order of execution.
"""
try:
seen_op_info = self._get_cur_seen_op_info()
expected_op = seen_op_info.type
except IndexError:
_raise_obs_not_found_error(cur_op)
if not ops_are_related(cur_op, expected_op, seen_op_info.type_is_module):
_raise_obs_op_mismatch(cur_op, expected_op)
def mark_cur_op_complete(self, cur_op: Callable) -> None:
"""
This function is expected to be called after a function or module
processing is complete.
"""
# torch.nn.Module __setattr__ has overhead,
# this code is the explicit fast path for `self.idx += 1`
object.__setattr__(self, 'idx', self.idx + 1)
def outputs_prepare_hook(
self,
outputs: Any,
first_call: bool,
qtensor_id: List[int],
) -> Any:
"""
This function is expected to be called on the outputs of a prepared
module right before they are returned to the parent.
"""
if first_call:
outputs = self._first_call_assign_qtensor_infos_to_mod_outputs(
outputs, qtensor_id)
return outputs
def outputs_convert_hook(
self,
outputs: Any,
) -> Any:
"""
This function is expected to be called on the outputs of a converted
module right before they are returned to the parent.
"""
outputs = self._maybe_mod_outputs_dtype_transform(outputs)
return outputs
def get_output_qtensor_infos(self) -> List[Optional[QTensorInfo]]:
"""
Used by the conversion to torch.jit.script.
"""
return self.output_qtensor_infos
def get_output_dtypes(self) -> Any:
"""
Used by the conversion to torch.jit.script.
"""
return self.output_dtypes
def op_prepare_before_hook(
self,
op: Callable,
args: Tuple[Any, ...],
kwargs: Dict[str, Any],
first_call: bool,
qtensor_id: List[int],
fqn: str,
root_module: torch.nn.Module,
) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:
"""
This function is expected to be called on args and kwargs of
`op` directly before `op` is executed.
If `first_call` is True, we record the type of `op`
and the IDs of its tensor inputs. Note: we add a placeholder for IDs
of tensor outputs, the placeholder will be filled out during the
`op_prepare_after_hook`.
If `first_call` is False, we do the following:
* pass the inputs through observers, if needed
The function returns modified `args` and `kwargs`.
"""
if first_call:
return self._first_call_op_prepare_before_hook_create_subgraphs(
op, args, kwargs, first_call, qtensor_id, fqn, root_module)
else:
seen_op_info = self._get_cur_seen_op_info()
def _maybe_observe(arg, tensor_info):
tensor_id = tensor_info.id
# TODO: do not run this twice on input and output
if str(tensor_id) in self.tensor_id_to_observer:
observer = self.tensor_id_to_observer[str(tensor_id)]
return observer(arg)
else:
return arg
args = iterate_and_apply(
args, seen_op_info.input_tensor_infos, _maybe_observe)
return args, kwargs
def op_prepare_after_hook(
self,
op: Callable,
output: Any,
args: Tuple[Any, ...],
first_call: bool,
qtensor_id: List[int],
global_op_idx: List[int],
) -> Any:
"""
This function is called after an op call on a prepared model.
If `first_call` is True, we
* create an observer for the output, if needed, and record it in
`tensor_id_to_observer`
* amend the current seen op with the tensor ID of the output
If `first_call` is False, we
* observe the output, if needed
"""
seen_op_info = self._get_cur_seen_op_info()
if first_call:
self._first_call_op_prepare_after_hook_adjust_subgraphs(
op, output, args, first_call, qtensor_id,
seen_op_info)
else:
func_output_obs_type = get_func_output_obs_type(seen_op_info)
# TODO(future PR): other output types
if func_output_obs_type != FuncOutputObsType.NONE:
seen_op_info = self._get_cur_seen_op_info()
tensor_id = seen_op_info.output_tensor_infos[0].id
obs = self.tensor_id_to_observer[str(tensor_id)]
output = obs(output)
if self.log_op_outputs:
output_clone = clone_detach_tensor_without_dispatch(output)
self.op_outputs[-1].append(
(global_op_idx[0], seen_op_info.fqn, seen_op_info.type, output_clone))
global_op_idx[0] += 1
return output
def op_convert_before_hook(
self,
op: Callable,
args: Tuple[Any, ...],
kwargs: Dict[str, Any],
root_module: torch.nn.Module,
) -> Tuple[Callable, Tuple[Any, ...], Dict[str, Any]]:
"""
This function is called before an op call in a converted model.
For each arg in `args`, quantizes it if necessary.
Returns potentially modified `op`, potentially modified `args`,
potentially modified `kwargs`.
"""
# TODO generalize this for more things
# currently:
# * can quantize args (via arg_quant_infos)
# * can add scale and zp (via additional kwargs)
# needed for F.conv2d
# F.conv2d(input, weight, bias, stride, padding, dilation, groups)
# to
# q.conv2d(input, packed_params, scale, zero_point)
orig_op = op
maybe_new_op, arg_quant_infos, arg_dequant_infos, packed_param_name, \
additional_kwargs, any_arg_quant_or_dequant_needed, \
any_arg_kwarg_modification_needed = self.get_op_convert_info(op)
if maybe_new_op is not None:
op = maybe_new_op
if not any_arg_kwarg_modification_needed:
return op, args, kwargs
# print(op, arg_quant_infos, packed_param_name, additional_kwargs)
# potentially quantize args, based on arg_quant_infos
new_args = []
if any_arg_quant_or_dequant_needed:
tensor_arg_idx = 0
# TODO: refactor this to use iterate_and_apply
if orig_op is torch.cat: # torch.cat variants
# input tensors
new_first_arg = []
for arg in args[0]:
# TODO: handle non-tensor inputs
quant_info = arg_quant_infos[tensor_arg_idx]
dequant_info = arg_dequant_infos[tensor_arg_idx]
if quant_info is not None:
scale, zp = quant_info
arg = torch.quantize_per_tensor(arg, scale, zp, torch.quint8)
elif dequant_info is True:
arg = arg.dequantize()
new_first_arg.append(arg)
tensor_arg_idx += 1
new_args = [new_first_arg, *args[1:]]
else:
for arg in args:
# TODO: handle non-tensor inputs
# TODO: this is not handling non-tensor tuple args (for example,
# dilation in conv2d) correctly, it just happens to work but
# needs a fix.
quant_info = arg_quant_infos[tensor_arg_idx]
dequant_info = arg_dequant_infos[tensor_arg_idx]
if quant_info is not None:
scale, zp = quant_info
arg = torch.quantize_per_tensor(arg, scale, zp, torch.quint8)
elif dequant_info is True:
arg = arg.dequantize()
new_args.append(arg)
tensor_arg_idx += 1
else:
new_args = [*args]
# if there is a packed param, replace the relevant args
if packed_param_name is not None:
new_args_with_packed = []
packable_arg_idxs = get_packable_arg_idxs(orig_op)
added_packed = False
for idx, arg in enumerate(new_args):
if packable_arg_idxs is not None and idx in packable_arg_idxs:
if not added_packed:
packed_param = getattr(root_module, packed_param_name)
new_args_with_packed.append(packed_param)
added_packed = True
else:
new_args_with_packed.append(arg)
new_args = new_args_with_packed
# potentially extend kwargs with scale and zero_point
# TODO move op-specific logic out of here
if len(additional_kwargs):
if orig_op not in (F.conv2d, F.linear):
kwargs.update(**additional_kwargs)
else:
seen_op_info = self._get_cur_seen_op_info()
if seen_op_info.output_tensor_infos[0].inf_dtype == torch.quint8:
new_args.append(additional_kwargs['scale'])
new_args.append(additional_kwargs['zero_point'])
# TODO move op-specific logic out of here
if op is torch.ops.quantized.linear:
kwargs.pop('bias', None)
return op, tuple(new_args), kwargs
def op_convert_after_hook(
self,
op: Callable,
output,
global_op_idx: List[int],
) -> Any:
"""
        This function is called after an op call in a converted model.
TODO: add dequant, if needed
"""
if self.log_op_outputs:
output_clone = clone_detach_tensor_without_dispatch(output)
seen_op_info = self._get_cur_seen_op_info()
self.op_outputs[-1].append(
(global_op_idx[0], seen_op_info.fqn, seen_op_info.type, output_clone))
global_op_idx[0] += 1
return output
def get_op_convert_info(
self,
op: Callable,
) -> OpConvertInfo:
"""
Returns the information needed for convert time modifications to `op`.
"""
return self.idx_to_op_convert_info[self.idx]
def calculate_op_convert_info(
self,
seen_op_info: SeenOpInfo,
) -> OpConvertInfo:
"""
This precalculates the information which will be returned by
`get_op_convert_info`.
"""
# calculate new op
maybe_new_op = get_quantized_op(seen_op_info)
# calculate quant infos
arg_quant_infos, arg_dequant_infos, any_arg_quant_or_dequant_needed = \
get_input_args_quant_dequant_info(
seen_op_info, self.tensor_id_to_scale_zp)
# get packed param name, if applicable
packed_param_name = self._get_packed_param_name(seen_op_info)
# calculate scale and zp for output
# TODO: instead of always doing this if there is an observer,
# calculate whether this is needed based on the op and dtypes
additional_kwargs = {}
needs_scale_zp = converted_func_needs_scale_zp(seen_op_info)
if needs_scale_zp:
output_tensor_infos = seen_op_info.output_tensor_infos
tensor_id = output_tensor_infos[0].id
scale, zp = self.tensor_id_to_scale_zp[tensor_id]
additional_kwargs.update({'scale': scale, 'zero_point': zp})
any_arg_kwarg_modification_needed = bool(
any_arg_quant_or_dequant_needed or
packed_param_name is not None or
len(additional_kwargs)
) # the cast to bool is to make mypy recognize this as a bool
return maybe_new_op, arg_quant_infos, arg_dequant_infos, \
packed_param_name, additional_kwargs, any_arg_quant_or_dequant_needed, \
any_arg_kwarg_modification_needed
def _get_packed_param_name(self, seen_op_info: SeenOpInfo) -> Optional[str]:
"""
If the op in seen_op_info has a quantized packed param, returns it.
Otherwise, returns None.
"""
return self.idx_to_packed_weight_name.get(seen_op_info.idx, None)
def _first_call_assign_qtensor_infos_to_mod_outputs_tensor(
self,
output: torch.Tensor,
qtensor_id: List[int],
) -> torch.Tensor:
"""
This is a helper function for _first_call_assign_qtensor_infos_to_mod_outputs
to handle iterables of tensors without code duplication.
"""
if not hasattr(output, '_qtensor_info'):
# TODO: use actual dtype instead of defaulting to float
output._qtensor_info = QTensorInfo( # type: ignore[attr-defined]
qtensor_id[0], output.dtype, torch.float)
qtensor_id[0] += 1
self.output_qtensor_infos.append(output._qtensor_info) # type: ignore[attr-defined]
# TODO(future PR): add an observer if needed
return output
def _first_call_assign_qtensor_infos_to_mod_outputs(
self,
outputs: Any,
qtensor_id: List[int],
) -> Any:
"""
Takes `outputs`, which are a set of values about to be returned from
the current module. If `_qtensor_info` attributes do not already exist
on any tensors in `outputs`, this function adds them, initializing the
dtype to `torch.float`. This allows us to reason about module output
dtypes even if the last op in the module is not quantizeable.
"""
# TODO: handle objects with deeper nested tensors
if isinstance(outputs, torch.Tensor):
self._first_call_assign_qtensor_infos_to_mod_outputs_tensor(outputs, qtensor_id)
elif isinstance(outputs, tuple):
# TODO: handle other tuple subclasses more generically
new_outputs = []
for output in outputs:
if isinstance(output, torch.Tensor):
new_outputs.append(self._first_call_assign_qtensor_infos_to_mod_outputs_tensor(
output, qtensor_id))
else:
new_outputs.append(output)
# hacky check for collections.namedtuple, TODO improve this
# https://stackoverflow.com/questions/2166818/how-to-check-if-an-object-is-an-instance-of-a-namedtuple
if hasattr(outputs, '_fields'):
outputs = outputs.__class__(*new_outputs)
else:
outputs = tuple(new_outputs)
else:
pass
return outputs
def set_needs_dtype_transform_on_outputs(self):
"""
Calculates whether a dtype transform on module outputs is needed
and stores it. This is used to skip the outputs hook if it is not
needed.
"""
self.needs_dtype_transform_on_outputs = False
if not len(self.output_qtensor_infos):
# if there are no tensor outputs, there is nothing to transform
return
qtensor_info = self.output_qtensor_infos[0]
if self.output_dtypes is not None:
assert qtensor_info is not None
# check the output dtype, and do the conversion if needed
output_dtype = self.output_dtypes[0]
if qtensor_info.inf_dtype != output_dtype:
assert output_dtype is torch.float, \
'non-float output dtypes not handled yet'
self.needs_dtype_transform_on_outputs = True
def _maybe_mod_outputs_dtype_transform(
self,
outputs: Any,
) -> Any:
"""
Takes `outputs` which are about to be returned from this module
to the caller. If this module has restrictions on the dtypes of
tensors it has to return, does the dtype conversion. Otherwise,
does nothing.
"""
if not self.needs_dtype_transform_on_outputs:
return outputs
if isinstance(outputs, torch.Tensor):
qtensor_info = self.output_qtensor_infos[0]
if self.output_dtypes is not None:
assert qtensor_info is not None
# check the output dtype, and do the conversion if needed
output_dtype = self.output_dtypes[0]
if qtensor_info.inf_dtype != output_dtype:
assert output_dtype is torch.float, \
'non-float output dtypes not handled yet'
outputs = outputs.dequantize()
else:
# if no output dtype was specified, do nothing
pass
return outputs
def _first_call_op_prepare_before_hook_create_subgraphs_tensor(
self,
op: Callable,
arg: Any,
arg_tensor_infos: List[Optional[QTensorInfo]],
qtensor_id: List[int],
) -> None:
"""
Runs the prepare hook during first_call for individual
tensors. If the input argument is a tensor, this function is
called directly. If the input argument is an iterable such
as a list or a tuple, this function is called on each element of
        the iterable.
"""
# TODO(next): fix this for torch.cat
if not isinstance(arg, torch.Tensor):
arg_tensor_infos.append(None)
return
# If a tensor does not have an ID, add it. This allows
# us to track inputs shared by multiple quantizeable modules.
if not hasattr(arg, '_qtensor_info'):
arg._qtensor_info = QTensorInfo( # type: ignore[attr-defined]
qtensor_id[0], arg.dtype, arg.dtype)
qtensor_id[0] += 1
arg_tensor_infos.append(arg._qtensor_info) # type: ignore[attr-defined]
def _first_call_op_prepare_before_hook_create_subgraphs(
self,
op: Callable,
args: Tuple[Any, ...],
kwargs: Dict[str, Any],
first_call: bool,
qtensor_id: List[int],
fqn: str,
root_module: torch.nn.Module,
) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:
"""
Given an op, args, kwargs about to be executed, records the subgraph
of this op in `self`.
"""
op_packing_only_uses_module_attributes = \
get_op_packing_only_uses_module_attributes(op, args, kwargs, root_module)
arg_tensor_infos: List[Optional[QTensorInfo]] = []
for arg in args:
if isinstance(arg, (list, tuple)):
for inner_arg in arg:
self._first_call_op_prepare_before_hook_create_subgraphs_tensor(
op, inner_arg, arg_tensor_infos, qtensor_id)
else:
self._first_call_op_prepare_before_hook_create_subgraphs_tensor(
op, arg, arg_tensor_infos, qtensor_id)
packable_tensor_idx_to_name = {}
packable_nontensor_idx_to_arg = {}
packable_tensor_kwarg_name_to_name = {}
if op_packing_only_uses_module_attributes:
packable_tensor_arg_idxs = get_packable_tensor_arg_idxs(op)
if packable_tensor_arg_idxs is not None:
for arg_idx in packable_tensor_arg_idxs:
if arg_idx >= len(args):
continue
arg = args[arg_idx]
param_name = get_param_name(root_module, arg)
packable_tensor_idx_to_name[arg_idx] = param_name
packable_nontensor_arg_idxs = get_packable_nontensor_arg_idxs(op)
if packable_nontensor_arg_idxs is not None:
for arg_idx in packable_nontensor_arg_idxs:
packable_nontensor_idx_to_arg[arg_idx] = args[arg_idx]
packable_tensor_kwarg_names = \
get_packable_tensor_kwarg_names(op)
if packable_tensor_kwarg_names is not None:
for kwarg_name in packable_tensor_kwarg_names:
if kwarg_name not in kwargs:
continue
kwarg = kwargs[kwarg_name]
kwarg_name_on_module = get_param_name(root_module, kwarg)
packable_tensor_kwarg_name_to_name[kwarg_name] = \
kwarg_name_on_module
if self.idx not in self.idx_to_seen_op_infos:
op_type_is_module = isinstance(op, torch.nn.Module)
op_type : Callable = type(op) if op_type_is_module else op # type: ignore[assignment]
qconfig = get_cur_qconfig(self.qconfig_dict, fqn, op_type)
self.idx_to_seen_op_infos[self.idx] = SeenOpInfo(
self.idx, op_type, op_type_is_module, fqn, arg_tensor_infos, [],
packable_tensor_idx_to_name, packable_nontensor_idx_to_arg,
packable_tensor_kwarg_name_to_name,
op_packing_only_uses_module_attributes, qconfig)
return args, kwargs
def _first_call_op_prepare_after_hook_adjust_subgraphs(
self,
op: Callable,
output: Any,
args: Tuple[Any, ...],
first_call: bool,
qtensor_id: List[int],
seen_op_info: SeenOpInfo,
) -> None:
"""
After `op` was just executed, modifies the subgraph recorded
for this op with the information about the output. Note, this
has to be done in the "after" hook because the output of the op
does not exist in the "before" hook.
"""
# TODO(future PR): check if _qtensor_id needs to become an actual
# attribute of Tensor
# TODO(future PR): handle non-tensor outputs
func_output_dtype_type = get_func_output_dtype_type(seen_op_info)
if func_output_dtype_type == FuncOutputDTypeType.DTYPE_DEPENDS_ON_QCONFIG:
if isinstance(op, torch.nn.Module):
# For now, assume that eager mode convert has attached qconfig
# objects to any leaf module which needs quantization
if hasattr(op, 'activation_post_process'):
dtype_to_use = op.activation_post_process.dtype
else:
dtype_to_use = torch.float
else:
qconfig = get_cur_qconfig(self.qconfig_dict, seen_op_info.fqn, op)
if qconfig is None:
dtype_to_use = torch.float
else:
dtype_to_use = qconfig.activation().dtype
elif func_output_dtype_type == FuncOutputDTypeType.DTYPE_DEFAULT_BC_UNSUPPORTED_SYNTAX:
dtype_to_use = torch.float
else:
# TODO(future PR): respect qconfig for torch.cat
if isinstance(args[0], (tuple, list)): # for torch.cat
unique_arg_dtypes = [
arg._qtensor_info.inf_dtype for arg in args[0]]
assert len(set(unique_arg_dtypes)) == 1, \
'an iterable with arguments with different inference ' + \
'dtypes is not supported yet'
dtype_to_use = args[0][0]._qtensor_info.inf_dtype
else:
dtype_to_use = args[0]._qtensor_info.inf_dtype
def _add_output_qtensor_info(output):
output._qtensor_info = QTensorInfo(
qtensor_id[0], output.dtype, dtype_to_use) # type: ignore[arg-type]
self.idx_to_seen_op_infos[self.idx].output_tensor_infos.append(
output._qtensor_info)
qtensor_id[0] += 1
if isinstance(output, torch.Tensor):
_add_output_qtensor_info(output)
elif isinstance(output, tuple):
for element in output:
if isinstance(element, torch.Tensor):
_add_output_qtensor_info(element)
def _maybe_insert_input_observers(self, seen_op_info: SeenOpInfo):
func_output_dtype_type = get_func_output_dtype_type(seen_op_info)
input_observed_arg_idxs = get_input_observed_arg_idxs(
seen_op_info.type, seen_op_info.type_is_module)
if func_output_dtype_type == FuncOutputDTypeType.DTYPE_DEPENDS_ON_QCONFIG:
for idx, tensor_info in enumerate(seen_op_info.input_tensor_infos):
if tensor_info is None:
continue
if input_observed_arg_idxs is not None and \
idx not in input_observed_arg_idxs:
continue
qconfig = get_cur_qconfig(
self.qconfig_dict, seen_op_info.fqn, seen_op_info.type)
if qconfig is None:
# If qconfig is None, we do not need any input observers
continue
elif tensor_info.inf_dtype != torch.quint8:
# TODO(future PR): this assumes current dtype is quint8,
# this is not always true
# TODO(future PR): currently this only handles float32 and
# quint8, we need to extend it to other dtypes
tensor_id = tensor_info.id # type: ignore[attr-defined]
weight_arg_idx = get_weight_arg_idx(seen_op_info.type)
obs = qconfig.weight() if idx == weight_arg_idx else \
qconfig.activation()
self.tensor_id_to_observer[str(tensor_id)] = obs
def _maybe_insert_output_observers(
self,
seen_op_info: SeenOpInfo,
root_module: torch.nn.Module,
):
func_output_obs_type = get_func_output_obs_type(seen_op_info)
output_tensor_id = seen_op_info.output_tensor_infos[0].id
if func_output_obs_type == FuncOutputObsType.NEW_OBS:
# TODO(future PR): check qconfig is None
qconfig = get_cur_qconfig(
self.qconfig_dict, seen_op_info.fqn, seen_op_info.type)
assert qconfig is not None
self.tensor_id_to_observer[str(output_tensor_id)] = \
qconfig.activation()
elif func_output_obs_type == FuncOutputObsType.REUSES_FIRST_INPUT_OBS:
assert seen_op_info.input_tensor_infos[0] is not None
first_input_tensor_id = seen_op_info.input_tensor_infos[0].id
first_input_obs = None
if str(first_input_tensor_id) in self.tensor_id_to_observer:
first_input_obs = \
self.tensor_id_to_observer[str(first_input_tensor_id)]
else:
# This observer may be in a module (handled by eager
# convert), in which case it's not in our map. For now,
# copy it from the module. In the future, we could look
# into having a soft link.
# TODO: make this handle more cases
# TODO: handle module -> add_scalar -> add_scalar
prev_op = get_producer_of_seen_op_info(
self.idx_to_seen_op_infos, seen_op_info)
assert prev_op is not None
# TODO: the following line needs to only check fqn
# for modules, not for functions
fqn_last_part = prev_op.fqn.split('.')[-1]
if hasattr(root_module, fqn_last_part):
first_input_mod = getattr(root_module, fqn_last_part)
else:
first_input_mod = None
# Currently, both tracing for module fusion and tracing for
# quantization go through this code path. When tracing
# for module fusion, quantizeable modules do not have
# observers yet. For this path to not crash, we create one.
# When tracing for quantization, this will be ignored.
# TODO(future PR): refactor to avoid this.
if first_input_mod and hasattr(first_input_mod, 'activation_post_process'):
first_input_obs = first_input_mod.activation_post_process
else:
# TODO(future PR): check qconfig is None
qconfig = get_cur_qconfig(
self.qconfig_dict, seen_op_info.fqn, seen_op_info.type)
assert qconfig is not None
first_input_obs = qconfig.activation()
self.tensor_id_to_observer[str(output_tensor_id)] = first_input_obs
def insert_observers(self, root_module: torch.nn.Module):
for idx, seen_op_info in self.idx_to_seen_op_infos.items():
self._maybe_insert_input_observers(seen_op_info)
self._maybe_insert_output_observers(seen_op_info, root_module)
# This is a hack to enable nn.Sequential to properly work with
# this class.
# TODO(future): remove the hack
def forward(self, x):
raise NotImplementedError('Calling AutoQuantizationState.forward is not supported')
# return x
def add_seen_op_type_without_op_hooks(self, op_type: Callable) -> None:
self.seen_op_types_without_op_hooks.add(op_type)
| 41.562712
| 114
| 0.61504
|
9383ad114de2ab5dbd37c2b50ec6ddd12beee799
| 6,176
|
py
|
Python
|
main.py
|
MatMark/ZIwM
|
20175c2783330d8c0bb2dd013697eded27d784f7
|
[
"MIT"
] | null | null | null |
main.py
|
MatMark/ZIwM
|
20175c2783330d8c0bb2dd013697eded27d784f7
|
[
"MIT"
] | null | null | null |
main.py
|
MatMark/ZIwM
|
20175c2783330d8c0bb2dd013697eded27d784f7
|
[
"MIT"
] | null | null | null |
import pandas as pd
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import sklearn.feature_selection as fs
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
from sklearn.base import clone
from scipy.stats import ttest_ind
from tabulate import tabulate
clfs = {
'256layers_momentum': MLPClassifier(hidden_layer_sizes=(256,),
max_iter=5000, nesterovs_momentum=True,
solver='sgd', random_state=1,
momentum=0.9),
'512layers_momentum': MLPClassifier(hidden_layer_sizes=(512,),
max_iter=5000, nesterovs_momentum=True,
solver='sgd', random_state=1,
momentum=0.9),
'1024layers_momentum': MLPClassifier(hidden_layer_sizes=(1024,),
max_iter=5000, nesterovs_momentum=True,
solver='sgd', random_state=1,
momentum=0.9),
'256layers_without': MLPClassifier(hidden_layer_sizes=(256,),
max_iter=5000, solver='sgd', momentum=0,
random_state=1),
'512layers_without': MLPClassifier(hidden_layer_sizes=(512,),
max_iter=5000, solver='sgd', momentum=0,
random_state=1),
'1024layers_without': MLPClassifier(hidden_layer_sizes=(1024,),
max_iter=5000, solver='sgd', momentum=0,
random_state=1),
}
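# The six configurations above differ only in hidden-layer width (256/512/1024)
# and in whether SGD uses Nesterov momentum (momentum=0.9) or no momentum at all;
# everything else (solver, max_iter, random_state) is held constant so the
# comparison isolates those two factors.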
def main():
x, y = load_data()
_, scores = feature_selection(x, y)
if len(sys.argv) > 1:
max_features = int(sys.argv[1])
else:
max_features = 31
if (max_features > 31 or max_features < 1):
raise ValueError("Must check for at least one feature and max 31")
train_evaluate(x, y, max_features)
ttest()
def load_data():
file = 'data.csv'
df = pd.read_csv(file, header=None)
df = df.to_numpy()
x = df[:, 0:31] # features columns;
y = df[:, 31] # class column;
return x, y.astype(int)
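# Note: data.csv is expected to contain 32 comma-separated columns per row with no
# header line: columns 0-30 hold the features and column 31 the integer class label.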
def feature_selection(x, y, k=31):
selector = fs.SelectKBest(score_func=fs.chi2, k=k)
fit = selector.fit(x, y)
fit_x = selector.transform(x)
scores = []
for j in range(len(fit.scores_)):
scores.append([j+1, fit.scores_[j]])
scores.sort(key=lambda x: x[1], reverse=True)
return fit_x, scores
def train_evaluate(x, y, max_features=31):
mean_scores = np.empty((max_features, (len(clfs))))
for i in range(1, max_features + 1):
print(str(i) + " features")
fit_x, _ = feature_selection(x, y, i)
kfold = RepeatedStratifiedKFold(
n_splits=2, n_repeats=5, random_state=1)
scores = np.zeros((len(clfs), 2*5))
for fold_id, (train, test) in enumerate(kfold.split(fit_x, y)):
for clf_id, clf_name in enumerate(clfs):
clf = clone(clfs[clf_name])
clf.fit(fit_x[train], y[train])
prediction = clf.predict(fit_x[test])
scores[clf_id, fold_id] = accuracy_score(y[test], prediction)
mean_score = np.mean(scores, axis=1)
np.save('results/results_' + str(i), scores)
        # only for plotting
for idx, score in np.ndenumerate(mean_score):
mean_scores[i-1][idx[0]] = score
print(str(int((i/max_features)*100)) + "%" + " completed")
for clf_id, clf_name in enumerate(clfs):
x_axis_values = []
for j in range(0, max_features):
x_axis_values.append(mean_scores[j][clf_id])
features = list(range(1, max_features + 1))
plt.plot(features, x_axis_values, label=clf_name,
linewidth=1, marker='o', markersize=5)
plt.xlabel('Feature Count')
plt.ylabel('Mean Score')
plt.xlim([0, max_features + 1])
plt.ylim([0, 1])
plt.gca().xaxis.set_major_locator(ticker.MultipleLocator(1))
plt.grid(True)
plt.grid(which='both')
plt.grid(which='minor', alpha=0.2)
plt.grid(which='major', alpha=0.5)
plt.legend()
plt.savefig("W_" + str(i) + ".png", dpi=600)
plt.clf()
return mean_scores
def ttest():
scores = np.load('results/results_23.npy') # have best results
t_statistic = np.zeros((len(clfs), len(clfs)))
p_value = np.zeros((len(clfs), len(clfs)))
alfa = .05
for i in range(len(clfs)):
for j in range(len(clfs)):
t_statistic[i, j], p_value[i, j] = ttest_ind(
scores[i], scores[j])
headers = []
names_column = np.empty(((len(clfs), 1)), dtype='object')
for clf_id, clf_name in enumerate(clfs):
headers.append(clf_name)
names_column[clf_id][0] = clf_name
t_statistic_table = np.concatenate((names_column, t_statistic), axis=1)
t_statistic_table = tabulate(t_statistic_table, headers, floatfmt=".2f")
p_value_table = np.concatenate((names_column, p_value), axis=1)
p_value_table = tabulate(p_value_table, headers, floatfmt=".2f")
print("t-statistic:\n", t_statistic_table, "\n\np-value:\n", p_value_table)
advantage = np.zeros((len(clfs), len(clfs)))
advantage[t_statistic > 0] = 1
advantage_table = tabulate(np.concatenate(
(names_column, advantage), axis=1), headers)
print("Advantage:\n", advantage_table)
significance = np.zeros((len(clfs), len(clfs)))
significance[p_value <= alfa] = 1
significance_table = tabulate(np.concatenate(
(names_column, significance), axis=1), headers)
print("Statistical significance (alpha = 0.05):\n", significance_table)
stat_better = significance * advantage
stat_better_table = tabulate(np.concatenate(
(names_column, stat_better), axis=1), headers)
print("Statistically significantly better:\n", stat_better_table)
if __name__ == "__main__":
main()
| 38.842767
| 80
| 0.597474
|
8aceffd08848e490cc2973bec5aaf3313d232721
| 58
|
py
|
Python
|
pwdgen/__init__.py
|
ghadd/password_learner
|
310e5f37661c52d1ba265f2b68d49932c0a299c2
|
[
"MIT"
] | null | null | null |
pwdgen/__init__.py
|
ghadd/password_learner
|
310e5f37661c52d1ba265f2b68d49932c0a299c2
|
[
"MIT"
] | null | null | null |
pwdgen/__init__.py
|
ghadd/password_learner
|
310e5f37661c52d1ba265f2b68d49932c0a299c2
|
[
"MIT"
] | null | null | null |
from .password_generator import *
from .password import *
| 19.333333
| 33
| 0.793103
|
2729de48389ba3b535a49a248bfe53dae0ef59d1
| 882
|
py
|
Python
|
mmcls/datasets/tct.py
|
zhaoyang97/mmclassification
|
22362e4a351eea028a7fba081b274f435dbc872a
|
[
"Apache-2.0"
] | null | null | null |
mmcls/datasets/tct.py
|
zhaoyang97/mmclassification
|
22362e4a351eea028a7fba081b274f435dbc872a
|
[
"Apache-2.0"
] | null | null | null |
mmcls/datasets/tct.py
|
zhaoyang97/mmclassification
|
22362e4a351eea028a7fba081b274f435dbc872a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Author : zhaoyang
import numpy as np
import mmcv
from .builder import DATASETS
from .base_dataset import BaseDataset
@DATASETS.register_module()
class TCT(BaseDataset):
# CLASSES = ["normal", "ascus", "asch", "lsil", "hsil_scc_omn", "agc_adenocarcinoma_em",
# "vaginalis", "monilia", "dysbacteriosis_herpes_act", "ec"]
def load_annotations(self):
assert isinstance(self.ann_file, str)
data_infos = []
with open(self.ann_file) as f:
samples = [x.strip().split(' ') for x in f.readlines()]
for filename, gt_label in samples:
info = {'img_prefix': self.data_prefix}
info['img_info'] = {'filename': filename}
info['gt_label'] = np.array(gt_label, dtype=np.int64)
data_infos.append(info)
return data_infos
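# Note: each line of `ann_file` is expected to hold an image path and a label
# separated by a single space, e.g. "images/0001.png 3" (the filename here is a
# hypothetical example); `data_prefix` is attached as `img_prefix` so the loading
# pipeline can resolve the full path, and the label is stored as int64.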
| 31.5
| 92
| 0.603175
|
912278bad9db9576bad95d704bf5891c5749c3af
| 1,393
|
py
|
Python
|
code/python/IRNConfiguration/v1/fds/sdk/IRNConfiguration/apis/__init__.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 6
|
2022-02-07T16:34:18.000Z
|
2022-03-30T08:04:57.000Z
|
code/python/IRNConfiguration/v1/fds/sdk/IRNConfiguration/apis/__init__.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 2
|
2022-02-07T05:25:57.000Z
|
2022-03-07T14:18:04.000Z
|
code/python/IRNConfiguration/v1/fds/sdk/IRNConfiguration/apis/__init__.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | null | null | null |
# flake8: noqa
# Import all APIs into this package.
# If you have many APIs here with many many models used in each API this may
# raise a `RecursionError`.
# In order to avoid this, import only the API that you directly need like:
#
# from .api.contacts___custom_fields_api import ContactsCustomFieldsApi
#
# or import this package, but before doing it, use:
#
# import sys
# sys.setrecursionlimit(n)
# Import APIs into API package:
from fds.sdk.IRNConfiguration.api.contacts___custom_fields_api import ContactsCustomFieldsApi
from fds.sdk.IRNConfiguration.api.contacts___phone_number_types_api import ContactsPhoneNumberTypesApi
from fds.sdk.IRNConfiguration.api.contacts___relationship_category_api import ContactsRelationshipCategoryApi
from fds.sdk.IRNConfiguration.api.contacts___relationships_api import ContactsRelationshipsApi
from fds.sdk.IRNConfiguration.api.contacts___roles_api import ContactsRolesApi
from fds.sdk.IRNConfiguration.api.contacts___types_api import ContactsTypesApi
from fds.sdk.IRNConfiguration.api.custom_symbols___custom_fields_api import CustomSymbolsCustomFieldsApi
from fds.sdk.IRNConfiguration.api.custom_symbols___relationships_api import CustomSymbolsRelationshipsApi
from fds.sdk.IRNConfiguration.api.custom_symbols___types_api import CustomSymbolsTypesApi
from fds.sdk.IRNConfiguration.api.notes___configuration_api import NotesConfigurationApi
| 51.592593
| 109
| 0.86145
|
3b63fbfe8cdf1f2eede1c86e62e40c068ff507a9
| 574
|
py
|
Python
|
proxySTAR_V3/certbot/venv.1509389747.bak/lib/python2.7/site-packages/pylint/test/input/func_noerror_static_method.py
|
mami-project/lurk
|
98c293251e9b1e9c9a4b02789486c5ddaf46ba3c
|
[
"Apache-2.0"
] | 2
|
2017-07-05T09:57:33.000Z
|
2017-11-14T23:05:53.000Z
|
Libraries/Python/pylint/v1.4.4/pylint/test/input/func_noerror_static_method.py
|
davidbrownell/Common_Environment
|
4015872aeac8d5da30a6aa7940e1035a6aa6a75d
|
[
"BSL-1.0"
] | 1
|
2019-01-17T14:26:22.000Z
|
2019-01-17T22:56:26.000Z
|
Libraries/Python/pylint/v1.4.4/pylint/test/input/func_noerror_static_method.py
|
davidbrownell/Common_Environment
|
4015872aeac8d5da30a6aa7940e1035a6aa6a75d
|
[
"BSL-1.0"
] | 1
|
2017-08-31T14:33:03.000Z
|
2017-08-31T14:33:03.000Z
|
"""Checks if static / class methods works fine in Pylint
"""
__revision__ = ''
class MyClass(object):
"""doc
"""
def __init__(self):
pass
def static_met(var1, var2):
"""This is a static method
"""
print(var1, var2)
def class_met(cls, var1):
"""This is a class method
"""
print(cls, var1)
static_met = staticmethod(static_met)
class_met = classmethod(class_met)
if __name__ == '__main__':
MyClass.static_met("var1", "var2")
MyClass.class_met("var1")
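# Note: static_met and class_met are turned into static/class methods with
# staticmethod()/classmethod() after their definitions (the pre-decorator idiom),
# which is why pylint is expected to accept the unconventional first arguments
# in this "noerror" test input without emitting warnings.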
| 20.5
| 57
| 0.559233
|
f03fb9e60721e25afec7386ca40d237fdce7842c
| 15,593
|
py
|
Python
|
com/vmware/appliance/logging_client.py
|
vishal-12/vsphere-automation-sdk-python
|
9cf363971db77ea5a12928eecd5cf5170a7fcd8a
|
[
"MIT"
] | null | null | null |
com/vmware/appliance/logging_client.py
|
vishal-12/vsphere-automation-sdk-python
|
9cf363971db77ea5a12928eecd5cf5170a7fcd8a
|
[
"MIT"
] | null | null | null |
com/vmware/appliance/logging_client.py
|
vishal-12/vsphere-automation-sdk-python
|
9cf363971db77ea5a12928eecd5cf5170a7fcd8a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2019 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.appliance.logging.
#---------------------------------------------------------------------------
"""
The ``com.vmware.appliance.logging_client`` module provides classes for
managing log forwarding in the appliance. The module is available starting in
vSphere 6.5.
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class Forwarding(VapiInterface):
"""
The ``Forwarding`` class provides methods to manage forwarding of log
messages to remote logging servers. This class was added in vSphere API
6.7.
"""
_VAPI_SERVICE_ID = 'com.vmware.appliance.logging.forwarding'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _ForwardingStub)
class Protocol(Enum):
"""
The ``Forwarding.Protocol`` class defines transport protocols for outbound
log messages. This enumeration was added in vSphere API 6.7.
.. note::
This class represents an enumerated type in the interface language
definition. The class contains class attributes which represent the
values in the current version of the enumerated type. Newer versions of
the enumerated type may contain new values. To use new values of the
enumerated type in communication with a server that supports the newer
version of the API, you instantiate this class. See :ref:`enumerated
type description page <enumeration_description>`.
"""
TLS = None
"""
Log messages will be forwarded to the remote host by using the TLS
protocol. This class attribute was added in vSphere API 6.7.
"""
UDP = None
"""
Log messages will be forwarded to the remote host using the UDP protocol.
This class attribute was added in vSphere API 6.7.
"""
TCP = None
"""
Log messages will be forwarded to the remote host using the TCP protocol.
This class attribute was added in vSphere API 6.7.
"""
def __init__(self, string):
"""
:type string: :class:`str`
:param string: String value for the :class:`Protocol` instance.
"""
Enum.__init__(string)
Protocol._set_values([
Protocol('TLS'),
Protocol('UDP'),
Protocol('TCP'),
])
Protocol._set_binding_type(type.EnumType(
'com.vmware.appliance.logging.forwarding.protocol',
Protocol))
class Config(VapiStruct):
"""
The ``Forwarding.Config`` class defines the configuration for log message
forwarding to remote logging servers. This class was added in vSphere API
6.7.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
hostname=None,
port=None,
protocol=None,
):
"""
:type hostname: :class:`str`
:param hostname: FQDN or IP address of the logging server to which messages are
forwarded. This attribute was added in vSphere API 6.7.
:type port: :class:`long`
:param port: The port on which the remote logging server is listening for
forwarded log messages. This attribute was added in vSphere API
6.7.
:type protocol: :class:`Forwarding.Protocol`
:param protocol: Transport protocol used to forward log messages. This attribute was
added in vSphere API 6.7.
"""
self.hostname = hostname
self.port = port
self.protocol = protocol
VapiStruct.__init__(self)
Config._set_binding_type(type.StructType(
'com.vmware.appliance.logging.forwarding.config', {
'hostname': type.StringType(),
'port': type.IntegerType(),
'protocol': type.ReferenceType(__name__, 'Forwarding.Protocol'),
},
Config,
False,
None))
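    # Illustrative usage sketch (assuming a properly initialised StubConfiguration
    # named stub_config; hostname and port below are placeholder values, not defaults):
    #   cfg = Forwarding.Config(hostname='log.example.com', port=514,
    #                           protocol=Forwarding.Protocol.TCP)
    #   Forwarding(stub_config).set([cfg])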
class ConnectionStatus(VapiStruct):
"""
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
_validator_list = [
UnionValidator(
'state',
{
'DOWN' : [('message', False)],
'UP' : [],
'UNKNOWN' : [],
}
),
]
def __init__(self,
hostname=None,
state=None,
message=None,
):
"""
:type hostname: :class:`str`
:param hostname: FQDN or IP address of the configured remote logging servers. This
attribute was added in vSphere API 6.7.
:type state: :class:`Forwarding.ConnectionStatus.State`
:param state: State of the configured remote logging server. This attribute was
added in vSphere API 6.7.
:type message: :class:`com.vmware.vapi.std_client.LocalizableMessage` or ``None``
:param message: Message associated with the state of the configured remote logging
server. This attribute was added in vSphere API 6.7.
If None, there is no message to be shown.
"""
self.hostname = hostname
self.state = state
self.message = message
VapiStruct.__init__(self)
class State(Enum):
"""
The ``Forwarding.ConnectionStatus.State`` class defines the state values
that a remote logging server can be in. This enumeration was added in
vSphere API 6.7.
.. note::
This class represents an enumerated type in the interface language
definition. The class contains class attributes which represent the
values in the current version of the enumerated type. Newer versions of
the enumerated type may contain new values. To use new values of the
enumerated type in communication with a server that supports the newer
version of the API, you instantiate this class. See :ref:`enumerated
type description page <enumeration_description>`.
"""
UP = None
"""
The remote logging server is reachable. This class attribute was added in
vSphere API 6.7.
"""
DOWN = None
"""
The remote logging server is not reachable. This class attribute was added
in vSphere API 6.7.
"""
UNKNOWN = None
"""
The status of remote logging server is unknown. This class attribute was
added in vSphere API 6.7.
"""
def __init__(self, string):
"""
:type string: :class:`str`
:param string: String value for the :class:`State` instance.
"""
Enum.__init__(string)
State._set_values([
State('UP'),
State('DOWN'),
State('UNKNOWN'),
])
State._set_binding_type(type.EnumType(
'com.vmware.appliance.logging.forwarding.connection_status.state',
State))
ConnectionStatus._set_binding_type(type.StructType(
'com.vmware.appliance.logging.forwarding.connection_status', {
'hostname': type.StringType(),
'state': type.ReferenceType(__name__, 'Forwarding.ConnectionStatus.State'),
'message': type.OptionalType(type.ReferenceType('com.vmware.vapi.std_client', 'LocalizableMessage')),
},
ConnectionStatus,
False,
None))
def test(self,
send_test_message=None,
):
"""
Validates the current log forwarding configuration by checking the
liveness of the remote machine and optionally sending a test diagnostic
log message from the appliance to all configured logging servers to
allow manual end-to-end validation. The message that is sent is: "This
is a diagnostic log test message from vCenter Server.". This method was
added in vSphere API 6.7.
:type send_test_message: :class:`bool` or ``None``
:param send_test_message: Flag specifying whether a default test message should be sent to
the configured logging servers.
If None, no test message will be sent to the configured remote
logging servers.
:rtype: :class:`list` of :class:`Forwarding.ConnectionStatus`
:return: Information about the status of the connection to each of the
remote logging servers.
"""
return self._invoke('test',
{
'send_test_message': send_test_message,
})
def set(self,
cfg_list,
):
"""
Sets the configuration for forwarding log messages to remote log
servers. This method was added in vSphere API 6.7.
:type cfg_list: :class:`list` of :class:`Forwarding.Config`
:param cfg_list: The cfgList is a list of Config structure that contains the log
message forwarding rules in terms of the host, port, protocol of
the log message.
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
if an invalid configuration is provided.
:raise: :class:`com.vmware.vapi.std.errors_client.UnableToAllocateResource`
if the number of configurations exceeds the maximum number of
supported configurations.
:raise: :class:`com.vmware.vapi.std.errors_client.Error`
if there is any internal error during the execution of the
operation.
"""
return self._invoke('set',
{
'cfg_list': cfg_list,
})
def get(self):
"""
Returns the configuration for forwarding log messages to remote logging
servers. This method was added in vSphere API 6.7.
:rtype: :class:`list` of :class:`Forwarding.Config`
:return: Information about the configuration for forwarding log messages to
remote logging servers.
"""
return self._invoke('get', None)
class _ForwardingStub(ApiInterfaceStub):
def __init__(self, config):
# properties for test operation
test_input_type = type.StructType('operation-input', {
'send_test_message': type.OptionalType(type.BooleanType()),
})
test_error_dict = {}
test_input_value_validator_list = [
]
test_output_validator_list = [
]
test_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/appliance/logging/forwarding?action=test',
path_variables={
},
query_parameters={
}
)
# properties for set operation
set_input_type = type.StructType('operation-input', {
'cfg_list': type.ListType(type.ReferenceType(__name__, 'Forwarding.Config')),
})
set_error_dict = {
'com.vmware.vapi.std.errors.invalid_argument':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidArgument'),
'com.vmware.vapi.std.errors.unable_to_allocate_resource':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'UnableToAllocateResource'),
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
}
set_input_value_validator_list = [
]
set_output_validator_list = [
]
set_rest_metadata = OperationRestMetadata(
http_method='PUT',
url_template='/appliance/logging/forwarding',
path_variables={
},
query_parameters={
}
)
# properties for get operation
get_input_type = type.StructType('operation-input', {})
get_error_dict = {}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/appliance/logging/forwarding',
path_variables={
},
query_parameters={
}
)
operations = {
'test': {
'input_type': test_input_type,
'output_type': type.ListType(type.ReferenceType(__name__, 'Forwarding.ConnectionStatus')),
'errors': test_error_dict,
'input_value_validator_list': test_input_value_validator_list,
'output_validator_list': test_output_validator_list,
'task_type': TaskType.NONE,
},
'set': {
'input_type': set_input_type,
'output_type': type.VoidType(),
'errors': set_error_dict,
'input_value_validator_list': set_input_value_validator_list,
'output_validator_list': set_output_validator_list,
'task_type': TaskType.NONE,
},
'get': {
'input_type': get_input_type,
'output_type': type.ListType(type.ReferenceType(__name__, 'Forwarding.Config')),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'test': test_rest_metadata,
'set': set_rest_metadata,
'get': get_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.appliance.logging.forwarding',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=True)
class StubFactory(StubFactoryBase):
_attrs = {
'Forwarding': Forwarding,
}
| 36.775943
| 113
| 0.584365
|
8ccdaa962bff088803bd14a043878abbc3f078fe
| 391
|
py
|
Python
|
fun_app/asgi.py
|
Rajarshi07/fun_app
|
2a5c9fca2eb9d16b9739605867a346c635445a53
|
[
"MIT"
] | 2
|
2021-09-06T02:45:59.000Z
|
2021-11-28T16:59:46.000Z
|
fun_app/asgi.py
|
Rajarshi07/fun_app
|
2a5c9fca2eb9d16b9739605867a346c635445a53
|
[
"MIT"
] | 1
|
2020-08-05T06:05:52.000Z
|
2020-08-05T06:05:52.000Z
|
fun_app/asgi.py
|
Rajarshi07/fun_app
|
2a5c9fca2eb9d16b9739605867a346c635445a53
|
[
"MIT"
] | 4
|
2020-08-04T00:00:59.000Z
|
2020-08-07T02:02:35.000Z
|
"""
ASGI config for fun_app project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fun_app.settings')
application = get_asgi_application()
| 23
| 78
| 0.785166
|
281a34fb60d609e94a6123b511a4e5b6fecb1282
| 667
|
py
|
Python
|
edx/lecture-12/multiple_inheritance.py
|
spradeepv/dive-into-python
|
ec27d4686b7b007d21f9ba4f85d042be31ee2639
|
[
"MIT"
] | null | null | null |
edx/lecture-12/multiple_inheritance.py
|
spradeepv/dive-into-python
|
ec27d4686b7b007d21f9ba4f85d042be31ee2639
|
[
"MIT"
] | null | null | null |
edx/lecture-12/multiple_inheritance.py
|
spradeepv/dive-into-python
|
ec27d4686b7b007d21f9ba4f85d042be31ee2639
|
[
"MIT"
] | null | null | null |
class A(object):
def __init__(self):
self.a = 1
def x(self):
print "A.x"
def y(self):
print "A.y"
def z(self):
print "A.z"
class B(A):
def __init__(self):
A.__init__(self)
self.a = 2
self.b = 3
def y(self):
print "B.y"
def z(self):
print "B.z"
class C(object):
def __init__(self):
self.a = 4
self.c = 5
def y(self):
print "C.y"
def z(self):
print "C.z"
class D(C, B):
def __init__(self):
C.__init__(self)
B.__init__(self)
self.d = 6
def z(self):
print "D.z"
obj = D()
print obj.a
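# With D(C, B), the MRO is D -> C -> B -> A -> object. D.__init__ first runs
# C.__init__ (a=4, c=5) and then B.__init__ (which sets a=1 via A, then a=2, b=3),
# so the final print statement outputs 2.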
| 17.102564
| 24
| 0.46027
|
e2ab518748861d3f552e19c1260cc2b07e72d80f
| 179,203
|
py
|
Python
|
src/pyGCluster.py
|
StSchulze/pyGCluster
|
92bb6855faa9e40582326cabfb0323eace72d4b3
|
[
"MIT"
] | null | null | null |
src/pyGCluster.py
|
StSchulze/pyGCluster
|
92bb6855faa9e40582326cabfb0323eace72d4b3
|
[
"MIT"
] | null | null | null |
src/pyGCluster.py
|
StSchulze/pyGCluster
|
92bb6855faa9e40582326cabfb0323eace72d4b3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2.7
"""
pyGCluster is a clustering algorithm focusing on noise injection for subsequent cluster validation.
By requesting identical cluster identity, the reproducibility of a large number of clusters
obtained with agglomerative hierarchical clustering (AHC) is assessed.
Furthermore, a multitude of different distance-linkage combinations (DLCs) are evaluated.
Finally, associations of highly reproducible clusters, called communities, are created.
Graphical representation of the results as node maps and expression maps is implemented.
The pyGCluster module contains the main class :py:class:`pyGCluster.Cluster` and some functions
| :py:func:`pyGCluster.create_default_alphabet`
| :py:func:`pyGCluster.resampling_multiprocess`
| :py:func:`pyGCluster.seekAndDestry`
| :py:func:`pyGCluster.yield_noisejected_dataset`
"""
#
# pyGCluster
#
# Copyright (C) D. Jaeger and C. Fufezan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
import sys, os
from collections import defaultdict as ddict
from collections import OrderedDict
import math
import time
import random
import subprocess
import string
import codecs
import bisect
import multiprocessing
import itertools
if sys.version_info[0] == 3:
import pickle
def str(x, errors=None):
return x
input = input
else: # Python 2: explicitly import cPickle
    import cPickle as pickle
input = raw_input
def yield_noisejected_dataset(data, iterations):
'''
Generator yielding a re-sampled dataset with each iteration.
A re-sampled dataset is created by re-sampling each data point
from the normal distribution given by its associated mean and standard deviation value.
    See the example in Supplementary Material in pyGCluster's publication for how to define your own noise function (e.g. uniform noise).
:param data: dictionary ( OrderedDict! ) holding the data to be re-sampled.
:type data: collections.OrderedDict()
:param iterations: the number of re-sampled datasets this generator will yield.
:type iterations: int
:rtype: none
'''
import numpy
# the check that no condition is missing in arg: data is made prior, in Cluster.__init__()
# this is required, because only equally shaped arrays can be clustered!
# otherwise, 'ValueError: setting an array element with a sequence.'
Random = numpy.random.RandomState() # get instance for new seed!
n_conditions = len( data[ sorted( data.keys() )[ 0 ] ] )
simulated_dataset = numpy.zeros( ( len( data ), n_conditions ) )
for i in range( iterations ):
for row_index, identifier in enumerate( data ):
for col_index, (condition, data_tuple) in enumerate( data[ identifier ].items() ):
mean, sd = data_tuple
new_ratio = Random.normal( mean, sd )
simulated_dataset[ row_index ][ col_index ] = new_ratio
yield simulated_dataset
return
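# Sketch of the expected `data` layout (values are illustrative): an OrderedDict
# mapping each identifier to an OrderedDict of condition -> (mean, sd) tuples, e.g.
#   data['protein_1'] = OrderedDict([('cond_A', (0.5, 0.1)), ('cond_B', (-1.2, 0.3))])
# Every identifier must cover the same conditions so the re-sampled rows line up
# into an equally shaped array.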
def create_default_alphabet():
'''
Returns the default alphabet which is used to save clusters in a lesser memory-intense form:
instead of saving e.g. a cluster containing identifiers with indices of 1,20,30 as "1,20,30", the indices are converted to a baseX system -> "1,k,u".
The default alphabet that is returned is:
>>> string.printable.replace( ',', '' )
:rtype: string
'''
return string.printable.replace( ',', '' )
def seekAndDestry(processes):
'''
Any multiprocesses given by processes are terminated.
:param processes: list containing multiprocess.Process()
:type processes: list
:rtype: none
'''
for p in processes:
if p.is_alive():
p.terminate()
return
def resampling_multiprocess(
DataQ = None,
data = None,
iterations = 5000,
alphabet = None,
dlc = None,
min_cluster_size = 4,
min_cluster_freq_2_retain = 0.001,
function_2_generate_noise_injected_datasets = None
):
'''
This is the function that is executed by each multiprocess that is spawned internally in pyGCluster during the re-sampling routine.
Agglomerative hierarchical clustering is performed for each distance-linkage combination (DLC) on each of the *iterations* re-sampled datasets.
Clusters from each hierarchical tree are extracted, and their counts are saved in a temporary cluster-count matrix.
After *iterations* iterations, clusters are filtered according to min_cluster_freq_2_retain.
These clusters, together with their respective counts among all DLCs, are returned.
The return value is a list containing tuples with two elements: cluster (string) and counts ( one dimensional np.array )
:param DataQ: data queue which is used to pipe the re-sampling results back to pyGCluster.
:type DataQ: multiprocessing.Queue()
:param data: dictionary ( OrderedDict! ) holding the data to be clustered -> passed through to the noise-function.
:type data: collections.OrderedDict()
:param iterations: the number of iterations this multiprocess is going to perform.
:type iterations: int
:param alphabet: in order to save memory, the indices describing a cluster are converted to a specific alphabet (rather than decimal system).
:type alphabet: string
:param dlc: list of the distance-linkage combinations that are going to be evaluated.
:type dlc: list
:param min_cluster_size: minimum size of a cluster to be considered in the re-sampling routine (smaller clusters are discarded)
:type min_cluster_size: int
:param min_cluster_freq_2_retain: once all iterations are performed, clusters are filtered with 50% of this threshold (pyGCluster typically forwards its own threshold, of which half is applied here).
:type min_cluster_freq_2_retain: float
:param function_2_generate_noise_injected_datasets: function to generate re-sampled datasets.
:type function_2_generate_noise_injected_datasets: function
:rtype: list
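A minimal sketch of how this function is typically launched (the queue, data and parameter values below are only illustrative):
>>> import multiprocessing
>>> DataQ = multiprocessing.Queue()
>>> p = multiprocessing.Process(
...     target = resampling_multiprocess,
...     kwargs = { 'DataQ' : DataQ, 'data' : data, 'iterations' : 1000, 'dlc' : [ 'euclidean-average' ] }
... )
>>> p.start()
>>> cluster_counts_list = DataQ.get()
>>> p.join()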
'''
import numpy
import scipy.spatial.distance as ssd
imported_from_scipy = False
try:
from fastcluster import linkage as ahc
except ImportError:
try:
from scipy.cluster.hierarchy import linkage as ahc
imported_from_scipy = True
except ImportError:
print('You require either "fastcluster" or "scipy"!')
if DataQ is None or data is None:
print( '[ ERROR ] need a Data-Queue and a data object! Returning ...' )
return
if alphabet is None:
alphabet = create_default_alphabet()
assert ',' not in alphabet, '[ ERROR ] the alphabet must not contain a comma (",")!'
if dlc is None:
dlc = [ 'euclidean-average' ] # NOTE maybe better have all as default ! :)
if function_2_generate_noise_injected_datasets is None:
function_2_generate_noise_injected_datasets = yield_noisejected_dataset
n_objects = len( data.keys() )
n_dlc = len( dlc )
metrices = set( [ combo.split( '-' )[ 0 ] for combo in dlc ] )
# build lookup-dict to convert index into baseX system, given by alphabet
baseX = len( alphabet )
index2baseX = { 0 : '0' }
for index in range( 1, n_objects ):
old_index = index
digits = [] # modified ref: http://stackoverflow.com/questions/2267362/convert-integer-to-a-string-in-a-given-numeric-base-in-python
while index:
digits.append( alphabet[ index % baseX ] )
index = index // baseX # integer division is required here; rounding would map different indices onto the same string
digits.reverse()
converted_index = ''.join( digits )
index2baseX[ old_index ] = converted_index
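# e.g. with an illustrative 10-character alphabet '0123456789' (the real default alphabet is much larger),
# index 42 would be stored as alphabet[4] + alphabet[2] = '42'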
# build initial template of 'clusters'-dict (which is needed to extract clusters from the hierarchical tree)
clusters_template = { ID : [ index2baseX[ ID ] ] for ID in range( n_objects ) }
# initialize temporary cluster-count matrix and the other necessary objects to fill it
tmpstruct_clustercount_monitor = {}
tmpstruct_clustercount_monitor[ 'Cluster counts' ] = numpy.zeros( ( 10 ** 6, n_dlc ), dtype = numpy.uint32 )
tmpstruct_clustercount_monitor[ 'Cluster 2 clusterID' ] = {}
tmpstruct_clustercount_monitor[ 'Distance-linkage combinations' ] = dlc
tmpstruct_clustercount_monitor[ 'Cluster sieve' ] = set()
tmpstruct_clustercount_monitor[ 'Discarded IDs' ] = set()
# get simulated datasets
for simulated_dataset in function_2_generate_noise_injected_datasets( data, iterations ):
# calculate distance matrices:
metric2condenseddist = {}
if not imported_from_scipy:
for metric in metrices:
metric2condenseddist[ metric ] = ssd.pdist( simulated_dataset, metric = metric )
# perform AHC:
for dlc_index, combo in enumerate( dlc ):
metric, linkage = combo.split( '-' )
'''
linkage matrix example:
original data:
[[1,2,3],
[3,2,1],
[1,3,5]]
Linkage matrix representing AHC with euclidean distance and ward linkage:
[[ 0. , 2. , 2.23606798, 2. ], CLUSTER ID 3
[ 1. , 3. , 4.2031734 , 3. ]] CLUSTER ID 4
^ child1 ^ child2 ^ distance ^ cluster size
Hence, element 0 and 2 were merged into cluster with ID = 3 (size = 2),
then element 1 and cluster 3 are merged into the root cluster with ID = 4 (size = 3).
'''
# perform AHC
if imported_from_scipy:
linkage_matrix = ahc( simulated_dataset, method = linkage, metric = metric )
else:
linkage_matrix = ahc( metric2condenseddist[ metric ], method = linkage, preserve_input = True )
# reconstruct clusters from the linkage matrix
clusters = {} # key = clusterID, value = cluster-indices
clusters.update( clusters_template )
clusterID_linkagematrix = n_objects - 1
for childID_1, childID_2, dist, size in linkage_matrix:
clusterID_linkagematrix += 1
cluster_linkagematrix = sorted( clusters[ childID_1 ] + clusters[ childID_2 ] )
clusters[ clusterID_linkagematrix ] = cluster_linkagematrix
if len( cluster_linkagematrix ) < min_cluster_size:
continue
cluster = ','.join( cluster_linkagematrix )
# insert cluster into tmpstruct_clustercount_monitor and update it:
# but add only if its count > 1 (determined via the 'Cluster sieve'):
add = False
if cluster in tmpstruct_clustercount_monitor[ 'Cluster 2 clusterID' ]:
clusterID = tmpstruct_clustercount_monitor[ 'Cluster 2 clusterID' ][ cluster ]
add = True
else:
if cluster in tmpstruct_clustercount_monitor[ 'Cluster sieve' ]:
if tmpstruct_clustercount_monitor[ 'Discarded IDs' ]:
try:
clusterID = tmpstruct_clustercount_monitor[ 'Discarded IDs' ].pop()
except KeyError: # KeyError: 'pop from an empty set' = set is empty
clusterID = len( tmpstruct_clustercount_monitor[ 'Cluster 2 clusterID' ] )
else:
clusterID = len( tmpstruct_clustercount_monitor[ 'Cluster 2 clusterID' ] )
tmpstruct_clustercount_monitor[ 'Cluster 2 clusterID' ][ cluster ] = clusterID
add = True
else:
tmpstruct_clustercount_monitor[ 'Cluster sieve' ].add( cluster )
add = False
if add:
# increase count by 1
# if new cluster, add 10 ** 5 new rows
try:
tmpstruct_clustercount_monitor[ 'Cluster counts' ][ clusterID ][ dlc_index ] += 1
except IndexError:
tmpstruct_clustercount_monitor[ 'Cluster counts' ] = numpy.concatenate(
( tmpstruct_clustercount_monitor['Cluster counts'],
numpy.zeros( ( 10 ** 5, n_dlc ), dtype = numpy.uint32 )
)
)
tmpstruct_clustercount_monitor[ 'Cluster counts' ][ clusterID ][ dlc_index ] += 1 # increase count by 1
del clusters
del metric2condenseddist
del simulated_dataset
# only transfer clusters equal or above 50% of 'min_cluster_freq_2_retain' threshold to pyGCluster:
min_count = int( min_cluster_freq_2_retain * iterations * 0.5 )
clusterIDs2retain = set( numpy.nonzero( tmpstruct_clustercount_monitor[ 'Cluster counts' ] >= min_count )[ 0 ] )
cluster_counts_list = []
for cluster, clusterID in tmpstruct_clustercount_monitor[ 'Cluster 2 clusterID' ].items():
if clusterID in clusterIDs2retain:
counts = tmpstruct_clustercount_monitor[ 'Cluster counts' ][ clusterID ]
cluster_counts_list.append( (cluster, counts) )
del tmpstruct_clustercount_monitor
DataQ.put( cluster_counts_list )
del cluster_counts_list
return
class Cluster(dict):
'''
The pyGCluster class
:param working_directory: directory in which all results are written (requires write-permission!).
:type working_directory: string
:param verbosity_level: either 0, 1 or 2.
:type verbosity_level: int
:param data: Dictionary containing the data which is to be clustered.
:type data: dict
In order to work with the default noise-injection function as well as plot
expression maps correctly, the data-dict **has** to have the following
structure.
Example:
>>> data = {
... Identifier1 : {
... condition1 : ( mean11, sd11 ),
... condition2 : ( mean12, sd12 ),
... condition3 : ( mean13, sd13 ),
... },
... Identifier2 : {
... condition1 : ( mean21, sd21 ),
... condition2 : ( mean22, sd22 ),
... condition3 : ( mean23, sd23 ),
... },
... }
>>> import pyGCluster
>>> ClusterClass = pyGCluster.Cluster(data=data, verbosity_level=1, working_directory=...)
.. note ::
If any condition for an identifier in the "nested_data_dict"-dict is missing,
this entry is discarded, i.e. not imported into the Cluster Class.
This is because pyGCluster does not implement any missing value estimation.
One possible solution is to replace missing values by a mean value and a standard
deviation that is representative for the complete data range in the given condition.
pyGCluster inherits from the regular Python Dictionary object.
Hence, the attributes of pyGCluster can be accessed as Python Dictionary keys.
A selection of the most important attributes / keys are:
>>> # general
>>> ClusterClass[ 'Working directory' ]
... # this is the directory where all pyGCluster results
... # (pickle objects, expression maps, node map, ...) are saved into.
/Users/Shared/moClusterDirectory
>>> # original data can be accessed via
>>> ClusterClass[ 'Data' ]
... # this collections.OrderedDict contains the data that has been
... # or will be clustered (see also below).
... plenty of data ;)
>>> ClusterClass[ 'Conditions' ]
... # sorted list of all conditions that are defined in the "Data"-dictionary
[ 'condition1', 'condition2', 'condition3' ]
>>> ClusterClass[ 'Identifiers' ]
... # sorted tuple of all identifiers, i.e. ClusterClass[ 'Data' ].keys()
( 'Identifier1', 'Identifier2' , ... 'IdentifierN' )
>>> # re-sampling parameters
>>> ClusterClass[ 'Iterations' ]
... # the number of datasets that were clustered.
1000000
>>> ClusterClass[ 'Cluster 2 clusterID' ]
... # dictionary with clusters as keys, and their respective row index
... # in the "Cluster count"-matrix (= clusterID) as values.
{ ... }
>>> ClusterClass[ 'Cluster counts' ]
... # numpy.uint32 matrix holding the counts for each
... # distance-linkage combination of the clusters.
>>> ClusterClass[ 'Distance-linkage combinations' ]
... # sorted list containing the distance-linkage combinations
... # that were evaluated in the re-sampling routine.
>>> # Communities
>>> ClusterClass[ 'Communities' ]
... # see function pyGCluster.Cluster.build_nodemap for further information.
>>> # Visualization
>>> ClusterClass[ 'Additional labels' ]
... # dictionary with an identifier of the "Data"-dict as key,
... # and a list of additional information (e.g. annotation, GO terms) as value.
{
'Identifier1' :
['Photosynthesis related' , 'zeroFactor: 12.31' ],
'Identifier2' : [ ... ] ,
...
}
>>> ClusterClass[ 'for IO skip clusters bigger than' ]
... # Default = 100. Since some clusters are really large
... # (with sizes close to the root (the cluster holding all objects)),
... # clusters with more objects than this value
... # are not plotted as expression maps or expression profile plots.
pyGCluster offers the possibility to save the analysis (e.g. after re-sampling)
via :py:func:`pyGCluster.Cluster.save` , and continue
via :py:func:`pyGCluster.Cluster.load`
Initializes pyGCluster.Cluster class
Classically, users start the multiprocessing clustering routine with multiple
distance linkage combinations via the :py:func:`pyGCluster.Cluster.do_it_all`
function. This function allows to update the pyGCluster class with all user
parameters before it calls :py:func:`pyGCluster.Cluster.resample`.
The main advantage in calling :py:func:`pyGCluster.Cluster.do_it_all` is
that all general plotting functions are called afterwards as well, these are:
| :py:func:`pyGCluster.Cluster.plot_clusterfreqs`
| :py:func:`pyGCluster.Cluster.build_nodemap`
| :py:func:`pyGCluster.Cluster.write_dot`
| :py:func:`pyGCluster.Cluster.draw_community_expression_maps`
If one chooses, one can manually update the parameters (setting the key, value
pairs in pyGCluster) and then invoke :py:func:`pyGCluster.Cluster.resample`
with the appropriate parameters. This is useful if certain memory-intensive
distance-linkage combinations are to be clustered on a specific computer.
.. note ::
Cluster Class can be initilized empty and filled using :py:func:`pyGCluster.Cluster.load`
'''
def __init__(self, data = None, working_directory = None, verbosity_level = 1):
self.delete_resampling_results() # initializes important variables
if working_directory is None:
working_directory = os.getcwd()
self[ 'Working directory' ] = working_directory
self[ 'for IO skip clusters bigger than' ] = 100
self[ 'Version' ] = (0, 7, 1)
self[ 'Verbosity level' ] = verbosity_level
self[ 'Additional labels' ] = {} # will be used as dict in draw functions, i.e. ids
self[ 'Data' ] = None
self[ 'Heat map'] = {
'Params': { 'title' : 'pyGCluster expression map',
'font family' : 'Helvetica',
'font size' : 14 ,
'rBox width' : 40,
'rBox height' : 20,
'left border' : 10,
'top border' : 70, # will be adjusted depending on the labels :)
'text spacing' : 2,
'text width' : 2000,
'separator width': 7,
'min' : None,
'max' : None,
'legend filename': 'legend.svg',
'heat map filename' : 'expression_map.svg',
'default color' : [255, 255, 255],
'color gradient' : 'default',
},
'Color Gradients' : {
'default' : [(-1, (255,40,255)), (-0.40,(255,40,40)), (-0.05,(40,40,40)), (0,(0,0,0)), (+0.05,(40,40,40)), (+0.40,(40,255,40)), (+1,(255,255,40)) ],
'Daniel' : [(-1, (255,0,0)), (-0.01, (0,0,255)), (0, (0,0,0)), (0.01, (255,255,0)), (0.5, (0,255,0)), (1, (0,255,255))],
'barplot' : [(-1, ( 0,0,0)), (0, (0,0,0)), (0.0000001, (255,255,0)), (0.2, (255,0,0)), (1, (120,120,120))],
'1337' : [(-1, (255,0,0)), (-0.5,(255,0,255)), (-0.02,(77,77,77)), (0,(0,0,0)) ,(+0.02,(77,77,77)), (+0.5,(255,255,0)), (+1,(0,255,0)) ],
'BrBG' : [(-1, (166, 97, 26)), (-0.5, (223, 194, 125)), (0, (245, 245, 245)), (+0.5, (128, 205, 193)), (+1, (1, 133, 113)) ],
'PiYG' : [(-1, (208, 28, 139)), (-0.5, (241, 182, 218)), (0, (247, 247, 247)), (+0.5, (184, 225, 134)), (+1, (77, 172, 38)) ],
'PRGn' : [(-1, (123, 50, 148)), (-0.5, (194, 165, 207)), (0, (247, 247, 247)), (+0.5, (166, 219, 160)), (+1, (0, 136, 55)) ],
'PuOr' : [(-1, (230, 97, 1)), (-0.5, (253, 184, 99)), (0, (247, 247, 247)), (+0.5, (178, 171, 210)), (+1, (94, 60, 153)) ],
'RdBu' : [(-1, (202, 0, 32)), (-0.5, (244, 165, 130)), (0, (247, 247, 247)), (+0.5, (146, 197, 222)), (+1, (5, 113, 176)), ],
'RdGy' : [(-1, (202, 0, 32)), (-0.5, (244, 165, 130)), (0, (255, 255, 255)), (+0.5, (186, 186, 186)), (+1, (64, 64, 64)), ],
'RdYlBu' : [(-1, (215, 25, 28)), (-0.5, (253, 174, 97)), (0, (255, 255, 191)), (+0.5, (171, 217, 233)), (+1, (44, 123, 182)), ],
'RdYlGn' : [(-1, (215, 25, 28)), (-0.5, (253, 174, 97)), (0, (255, 255, 191)), (+0.5, (166, 217, 106)), (+1, (26, 150, 65)), ],
'Spectral' : [(-1, (215, 25, 28)), (-0.5, (253, 174, 97)), (0, (255, 255, 191)), (+0.5, (171, 221, 164)), (+1, (43, 131, 186)), ],
'Spectral_up' : [(-1, (215, 25, 28)), (-0.75, (215, 25, 28)), (-0.5, (215, 25, 28)), (-0.25, (215, 25, 28)), (-0.01, (215, 25, 28)), (0, (215, 25, 28)), (+0.01, (215, 25, 28)), (+0.25, (253, 174, 97)), (+0.5, (255, 255, 191)), (+0.75, (171, 221, 164)), (+1, (43, 131, 186)) ],
},
'SVG box styles' : {
'modern' : '''
<g id="rowPos{0}_conPos{1}">
<title>{ratio}±{std} - [{x0}.{y0} w:{width} h:{height}</title>
<rect x="{x0}" y="{y0}" width="{width}" height="{height}" style="fill:rgb({r},{g},{b});fill-opacity:0.2;stroke:white;stroke-width:1;" title="{ratio}±{std}" />
<path d = "M {x0} {y0} L {x3} {y0} L {x2} {y1} L {x1} {y1} L {x1} {y2} L {x0} {y3} L {x0} {y0}" style="fill:rgb({r},{g},{b});stroke:black;stroke-width:1;stroke-opacity:0.0;"/>
<path d = "M {x2} {y2} L {x1} {y2} L {x2} {y1} L {x2} {y2}" style="fill:rgb({r},{g},{b});stroke:black;stroke-width:1;stroke-opacity:0.0;"/>
<path d = "M {x1} {y1} L {x1} {y2} L {x2} {y1} L {x1} {y1}" style="fill:rgb({r},{g},{b}); fill-opacity:0.7; stroke:red;stroke-width:1;stroke-opacity:0.0;"/>
</g>''',
'fusion' : '''
<g id="rowPos{0}_conPos{1}">
<title>{ratio}±{std} - [{x0}.{y0} w:{width} h:{height}</title>
<rect x="{x0}" y="{y0}" width="{width}" height="{height}" style="fill:rgb({r},{g},{b});fill-opacity:0.7;stroke:white;stroke-width:1;" title="{ratio}±{std}" />
<path d = "M {x0} {y0} L {x3} {y0} L {x2} {y1} L {x1} {y1} L {x1} {y2} L {x0} {y3} L {x0} {y0}" style="fill:rgb({r},{g},{b});stroke:black;stroke-width:1;stroke-opacity:0.0;"/>
<path d = "M {x2} {y2} L {x1} {y2} L {x2} {y1} L {x2} {y2}" style="fill:rgb({r},{g},{b});stroke:black;stroke-width:1;stroke-opacity:0.0;"/>
<path d = "M {x1} {y1} L {x1} {y2} L {x2} {y1} L {x1} {y1}" style="fill:rgb({r},{g},{b}); fill-opacity:0.7; stroke:red;stroke-width:1;stroke-opacity:0.0;"/>
<rect x="{x1}" y="{y1}" width="{widthNew}" height="{heightNew}" style="fill:None;stroke:black;stroke-width:1;" title="{ratio}±{std}" />
</g>''',
'classic' : '''
<g id="rowPos{0}_conPos{1}">
<title>{ratio}±{std} - [{x0}.{y0} w:{width} h:{height}</title>
<rect x="{x0}" y="{y0}" width="{width}" height="{height}" style="fill:rgb({r},{g},{b});stroke:white;stroke-width:1;" title="{ratio}±{std}" />
<rect x="{x1}" y="{y1}" width="{widthNew}" height="{heightNew}" style="fill:None;stroke:black;stroke-width:1;" title="{ratio}±{std}" />
</g>''',
}
}
# check if data is valid, i.e. contains a value for each condition
data_as_ordered_dict = OrderedDict()
if data != None:
conditions = set()
# determine number of different conditions:
for identifier in data.keys():
for condition in data[ identifier ].keys():
conditions.add(condition)
for identifier in list(data.keys()):
# discard entry if any condition is missing:
missing_conditions = conditions - set( data[ identifier ].keys() )
if len(missing_conditions) > 0:
del data[identifier]
for identifier in sorted( data ):
data_as_ordered_dict[ identifier ] = OrderedDict()
for condition in sorted( data[ identifier ] ):
data_as_ordered_dict[ identifier ][ condition ] = data[ identifier ][ condition ]
self[ 'Conditions' ] = sorted( conditions )
self[ 'Data' ] = data_as_ordered_dict
self[ 'Identifiers' ] = tuple( sorted( data ) )
self[ 'Root size' ] = len( data )
self[ 'Root' ] = tuple( range( self[ 'Root size' ] ) )
if not self.check_if_data_is_log2_transformed():
self._print( '[ WARNING ] there are NO ratios < 0! Is the data log2 transformed?', file=sys.stderr, verbosity_level = 1 )
s = 'pyGCluster initialized with {0} objects among {1} different conditions.'
self._print( s.format( len( data.keys() ), len( conditions ) ), verbosity_level = 1 )
return
def draw_expression_map( self, identifiers = None, data = None, conditions = None, additional_labels = None, min_value_4_expression_map = None, max_value_4_expression_map = None, expression_map_filename = None, legend_filename = None, color_gradient = None , box_style = 'classic' ):
'''
Draws expression map as SVG
:param min_value_4_expression_map: lower bound for color coding of values in the expression map. Remember that log2-values are expected, i.e. this value should be < 0!
:type min_value_4_expression_map: float
:param max_value_4_expression_map: upper bound for color coding of values in the expression map.
:type max_value_4_expression_map: float
:param color_gradient: name of the color gradient used for plotting the expression map. Currently supported are default, Daniel, barplot, 1337, BrBG, PiYG, PRGn, PuOr, RdBu, RdGy, RdYlBu, RdYlGn and Spectral
:type color_gradient: string
:param expression_map_filename: file name for expression map. .svg will be added if required.
:type expression_map_filename: string
:param legend_filename: file name for legend .svg will be added if required.
:type legend_filename: string
:param box_style: the way the relative standard deviation is visualized in the expression map. Currently supported are 'modern', 'fusion' or 'classic'.
:type box_style: string
:param additional_labels: dictionary, where additional labels can be defined which will be added in the expression map plots to the gene/protein names
:type additional_labels: dict
:rtype: none
Data has to be a nested dict in the following format:
>>> data = {
... fastaID1 : {
... cond1 : ( mean, sd ) , cond2 : ( mean, sd ), ...
... }
... fastaID2 : {
... cond1 : ( mean, sd ) , cond2 : ( mean, sd ), ...
... }
... }
optional and, if needed, data will be extracted from
| self[ 'Data' ]
| self[ 'Identifiers' ]
| self[ 'Conditions' ]
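A minimal call could look like this (the file names are only placeholders):
>>> ClusterClass.draw_expression_map(
...     data = data,
...     expression_map_filename = 'my_expression_map.svg',
...     legend_filename = 'my_legend.svg',
...     color_gradient = 'default',
...     box_style = 'classic'
... )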
'''
if additional_labels is None:
additional_labels = {}
if data is None:
# fall back to the data stored in this Cluster instance (as stated in the docstring)
data = self[ 'Data' ]
if conditions is None:
conditions = set()
for identifier in data.keys():
conditions |= set( data[ identifier ].keys() )
conditions = sorted( list( conditions ) )
if identifiers is None:
if type(data) == type(OrderedDict()):
identifiers = list( data.keys() )
else:
identifiers = sorted(list( data.keys() ))
#
# Updating self[ 'Additional labels' ]
#
if additional_labels != None:
for identifier in additional_labels.keys():
if identifier not in self[ 'Additional labels' ].keys():
self[ 'Additional labels' ][ identifier ] = []
# self[ 'Additional labels' ][ identifier ] += additional_labels[ identifier ]
#
# Updating min/max if required
#
if max_value_4_expression_map != None:
self[ 'Heat map'][ 'Params' ][ 'max' ] = max_value_4_expression_map
if min_value_4_expression_map != None:
self[ 'Heat map'][ 'Params' ][ 'min' ] = min_value_4_expression_map
#
# determine range if needed
#
if self[ 'Heat map'][ 'Params' ][ 'min' ] is None or self[ 'Heat map'][ 'Params' ][ 'max' ] is None:
allValues = []
for identifier in data.keys():
for condition in data[ identifier ].keys():
allValues.append( data[ identifier ][ condition][0] )
if self[ 'Heat map' ][ 'Params' ][ 'min' ] is None:
self[ 'Heat map' ][ 'Params' ][ 'min' ] = math.floor( min( allValues ) )
if self[ 'Heat map' ][ 'Params' ][ 'max' ] is None:
self[ 'Heat map' ][ 'Params' ][ 'max' ] = math.ceil( max( allValues ) )
#
# setting default color gradient if match is found
#
if color_gradient != None:
if color_gradient not in self[ 'Heat map' ][ 'Color Gradients' ].keys():
print('Do not know color gradient {0}, falling back to default'.format( color_gradient ), file = sys.stderr)
color_gradient = 'default'
self[ 'Heat map' ][ 'Params' ][ 'color gradient' ] = color_gradient
#
#
#
if expression_map_filename != None:
self[ 'Heat map'][ 'Params' ][ 'heat map filename' ] = expression_map_filename
if legend_filename != None:
self[ 'Heat map'][ 'Params' ][ 'legend filename' ] = legend_filename
self[ 'Heat map'][ 'Params' ][ 'expression profile filename' ] = self[ 'Heat map'][ 'Params' ][ 'heat map filename' ]+'_expP.svg'
for filename in ['heat map filename', 'legend filename', 'expression profile filename']:
if '.svg' not in self[ 'Heat map'][ 'Params' ][ filename ]:
self[ 'Heat map'][ 'Params' ][ filename ] += '.svg'
#
# recalculate topBorder
#
for pos, line in enumerate( conditions ):
lineHeight = len( line ) * self[ 'Heat map'][ 'Params' ]['font size']
if lineHeight > self[ 'Heat map'][ 'Params' ][ 'top border' ]:
self[ 'Heat map'][ 'Params' ][ 'top border' ] = lineHeight
#
#
#
expProf = {}
assert type(identifiers) == type( [] ) , 'require a list of identifiers!'
# self._draw_expression_map_legend()
svgOut = codecs.open(
os.path.join(
self[ 'Working directory' ],
self[ 'Heat map' ][ 'Params' ]['heat map filename']
),
'w',
'utf-8'
)
svgWidth = len( conditions ) * self[ 'Heat map'][ 'Params' ][ 'rBox width' ] + self[ 'Heat map'][ 'Params' ]['left border'] + self[ 'Heat map'][ 'Params' ]['text width']
svgHeight = len( identifiers ) * self[ 'Heat map'][ 'Params' ][ 'rBox height' ] + self[ 'Heat map'][ 'Params' ]['top border']
number_of_separators = 0
print("""<svg
xmlns="http://www.w3.org/2000/svg"
version="1.1"
preserveAspectRatio="xMinYMin meet"
width="{0}"
height="{1}"
font-size="{font size}px"
font-family="{font family}"
fill="black"
text-anchor="beginning"
baseline-alignment="middle"
>
<title>{title}</title>
""".format(
svgWidth,
svgHeight,
**self[ 'Heat map'][ 'Params' ]
),
file = svgOut
)
#
# write top legend
#
for condPos, condition in enumerate( conditions ):
x = int(self[ 'Heat map'][ 'Params' ][ 'left border' ] + (condPos) * self[ 'Heat map'][ 'Params' ]['rBox width'] + self[ 'Heat map'][ 'Params' ]['rBox width'] / 2.0 )
y = int(self[ 'Heat map'][ 'Params' ][ 'top border' ] - self[ 'Heat map'][ 'Params' ]['text spacing'] )
print( str(
' <text x="{0}" y="{1}" text-anchor="left" transform="rotate(-90, {0}, {1})">{2}</text>'.format(
x,
y,
condition
),
errors = 'replace'
),
file = svgOut
)
for rowPos, identifier in enumerate( identifiers ):
adjustedRowPos = rowPos - number_of_separators
if identifier == '_placeholder_':
shapeDict = self._HM_calcShapeAndColor(
x = 0,
y = adjustedRowPos,
ratio = 0,
std = 0,
number_of_separators = number_of_separators,
)
shapeDict['x1_separator'] = shapeDict['x0']
shapeDict['x2_separator'] = shapeDict['x0'] + ( self[ 'Heat map'][ 'Params' ]['rBox width'] * len( conditions ))
print( str('''
<line x1="{x1_separator}" y1="{y0}" x2="{x2_separator}" y2="{y0}" style="stroke:rgb{0};stroke-width:{1}"/>
'''.format(
self[ 'Heat map'][ 'Params' ]['default color'],
self[ 'Heat map'][ 'Params' ]['separator width'],
**shapeDict
),
errors = 'replace'
),
file = svgOut
)
number_of_separators += 1
else:
expProf[ identifier ] = [ [] ]
for conPos, condition in enumerate( conditions ):
try:
ratio, std = data[ identifier ][ condition ]
insertion_point = int( len( expProf[ identifier ][ -1 ] ) / 2 )
# first entry in profile
expProf[ identifier ][ -1 ].insert( insertion_point, ratio - std )
expProf[ identifier ][ -1 ].insert( insertion_point, ratio + std )
except:
ratio, std = None, None
expProf[ identifier ].append( [] )
shapeDict = self._HM_calcShapeAndColor(
x = conPos,
y = adjustedRowPos,
ratio = ratio,
std = std,
number_of_separators = number_of_separators,
)
print( str( self['Heat map']['SVG box styles'][ box_style ].format(
rowPos,
conPos,
**shapeDict
),
errors = 'replace'
),
file = svgOut
)
#
shapeDict['x_text'] = (conPos + 1 ) * self[ 'Heat map'][ 'Params' ]['rBox width'] + self[ 'Heat map'][ 'Params' ]['left border'] + self[ 'Heat map'][ 'Params' ]['text spacing']
shapeDict['y_text'] = (adjustedRowPos + 0.77) * self[ 'Heat map'][ 'Params' ]['rBox height'] + self[ 'Heat map'][ 'Params' ]['top border'] + (self[ 'Heat map'][ 'Params' ]['separator width'] * number_of_separators)
shapeDict['text'] = ''
shapeDict['text'] += '{0}'.format( identifier )
if identifier in additional_labels.keys():
shapeDict['text'] += ' '.join(additional_labels[ identifier ])
if identifier in self[ 'Additional labels' ].keys():
shapeDict['text'] += ' '.join( self[ 'Additional labels' ][ identifier ])
print( str('''
<g id="Text rowPos{0}_conPos{1}">
<title>{ratio}±{std}</title>
<text xml:space='preserve' x="{x_text}" y="{y_text}">{text}</text>
</g>'''.format(
rowPos,
conPos,
**shapeDict
),
errors = 'replace'
),
file = svgOut
)
# eof
print("</svg>", file = svgOut )
svgOut.close()
#
# Drawing legend
#
svgLegendOut = codecs.open(
os.path.join(
self[ 'Working directory' ],
self[ 'Heat map' ][ 'Params' ]['legend filename']
),
'w',
'utf-8'
)
svgWidth = len( conditions ) * self[ 'Heat map'][ 'Params' ][ 'rBox width' ] + self[ 'Heat map'][ 'Params' ]['left border'] + self[ 'Heat map'][ 'Params' ]['text width']
svgHeight = 11 * self[ 'Heat map'][ 'Params' ][ 'rBox height' ] + self[ 'Heat map'][ 'Params' ]['top border']
number_of_separators = 0
print("""<svg
xmlns="http://www.w3.org/2000/svg"
version="1.1"
preserveAspectRatio="xMinYMin meet"
width="{0}"
height="{1}"
font-size="{font size}px"
font-family="{font family}"
fill="black"
text-anchor="beginning"
baseline-alignment="middle"
>
<title>Legend</title>
<text x="{2}" y="{3}" text-anchor="left" transform="rotate(-90, {2}, {3})">ratio</text>
<text x="{4}" y="{3}" text-anchor="left" transform="rotate(-90, {4}, {3})">rel. std</text>
""".format(
svgWidth,
svgHeight,
int(self[ 'Heat map'][ 'Params' ][ 'left border' ] + 2 * self[ 'Heat map'][ 'Params' ]['rBox width'] + self[ 'Heat map'][ 'Params' ]['rBox width'] / 2.0 ),
int(self[ 'Heat map'][ 'Params' ][ 'top border' ] - self[ 'Heat map'][ 'Params' ]['text spacing'] ) - 10,
int(self[ 'Heat map'][ 'Params' ][ 'left border' ] + 3 * self[ 'Heat map'][ 'Params' ]['rBox width'] + self[ 'Heat map'][ 'Params' ]['rBox width'] / 2.0 ),
**self[ 'Heat map'][ 'Params' ]
),
file = svgLegendOut
)
positive_step_size = self[ 'Heat map' ]['Params'][ 'max' ] / 5.0
negative_step_size = self[ 'Heat map' ]['Params'][ 'min' ] / 5.0
number_of_separators = 0
for y in range(0,11):
_ = 5 - y
if _ >= 0:
ratio = positive_step_size * _
else:
ratio = negative_step_size * -1 * _
shapeDict = self._HM_calcShapeAndColor(
x = 2,
y = y,
ratio = ratio,
std = 0.0
)
print( str( self['Heat map']['SVG box styles'][ box_style ].format(
y,
2,
**shapeDict
),
errors = 'replace'
),
file = svgLegendOut
)
std = y * 0.1
shapeDict = self._HM_calcShapeAndColor(
x = 3,
y = y,
ratio = 1.0,
std = std
)
shapeDict['r'] = 147
shapeDict['g'] = 147
shapeDict['b'] = 147
print( str(self['Heat map']['SVG box styles'][ box_style ].format(
y,
3,
**shapeDict
),
errors = 'replace'
),
file = svgLegendOut
)
shapeDict['x_text_left'] = self[ 'Heat map'][ 'Params' ]['rBox width'] + self[ 'Heat map'][ 'Params' ]['left border'] + self[ 'Heat map'][ 'Params' ]['text spacing']
shapeDict['x_text_right'] = 4 * self[ 'Heat map'][ 'Params' ]['rBox width'] + self[ 'Heat map'][ 'Params' ]['left border'] + self[ 'Heat map'][ 'Params' ]['text spacing']
shapeDict['y_text_left'] = (y + 0.77) * self[ 'Heat map'][ 'Params' ]['rBox height'] + self[ 'Heat map'][ 'Params' ]['top border'] + (self[ 'Heat map'][ 'Params' ]['separator width'] * number_of_separators)
shapeDict['text_left'] = '{0:3.2f}'.format( ratio )
shapeDict['text_right'] = '{0:2.1f}'.format( std )
print( str('''
<g id="Legend {0}">
<title>{ratio}±{std}</title>
<text xml:space='preserve' x="{x_text_left}" y="{y_text_left}">{text_left}</text>
<text xml:space='preserve' x="{x_text_right}" y="{y_text_left}">{text_right}</text>
</g>'''.format(
y,
**shapeDict
),
errors = 'replace'
),
file = svgLegendOut
)
print("</svg>", file = svgLegendOut )
svgLegendOut.close()
return
def _HM_calcShapeAndColor(self, x = None, y = None, ratio = None, std = None, number_of_separators = 0):
'''
Internal function to determine shape and color of expression map entries
'''
shapeDict = {}
shapeDict['ratio'] = ratio
shapeDict['std'] = std
shapeDict['r'], shapeDict['g'], shapeDict['b'] = self._HM_visualizeColor( ratio )
shapeDict['x0'] = int(self[ 'Heat map'][ 'Params' ]['left border'] + self[ 'Heat map'][ 'Params' ]['rBox width'] * x)
shapeDict['y0'] = int(self[ 'Heat map'][ 'Params' ]['top border'] + self[ 'Heat map'][ 'Params' ]['rBox height'] * y)
shapeDict['width'] = self[ 'Heat map'][ 'Params' ]['rBox width']
shapeDict['height'] = self[ 'Heat map'][ 'Params' ]['rBox height']
if std != None or (std is None and ratio is None): # or std != 0.0:
if std is None:
# ratio and sd for this entry are None, this will lead to white box
stdAsPercentOfRatio = 0
else:
if ratio == 0.0:
ratio += 0.01
stdAsPercentOfRatio = abs( std / float( ratio ) )
if stdAsPercentOfRatio > 1:
stdAsPercentOfRatio = 1
shapeDict['widthNew'] = int(round( (1 - stdAsPercentOfRatio) * self[ 'Heat map'][ 'Params' ]['rBox width'] ))
shapeDict['heightNew'] = int(round( (1 - stdAsPercentOfRatio) * self[ 'Heat map'][ 'Params' ]['rBox height'] ))
shapeDict['x1'] = int(shapeDict['x0'] + 0.5 * (self[ 'Heat map'][ 'Params' ]['rBox width'] - shapeDict['widthNew']))
shapeDict['y1'] = int(shapeDict['y0'] + 0.5 * (self[ 'Heat map'][ 'Params' ]['rBox height'] - shapeDict['heightNew']))
shapeDict['y0'] += self[ 'Heat map'][ 'Params' ]['separator width'] * number_of_separators
shapeDict['y1'] += self[ 'Heat map'][ 'Params' ]['separator width'] * number_of_separators
shapeDict['height_half'] = shapeDict['height'] / 2.0
shapeDict['y3'] = shapeDict['y0'] + shapeDict['height']
shapeDict['x3'] = shapeDict['x0'] + shapeDict['width']
shapeDict['y2'] = shapeDict['y1'] + shapeDict['heightNew']
shapeDict['x2'] = shapeDict['x1'] + shapeDict['widthNew']
return shapeDict
def _HM_visualizeColor( self, ratio ):
'''
determine color for expression map values
'''
##
color = self[ 'Heat map'][ 'Params' ][ 'default color' ][:]
colorGradient = self[ 'Heat map' ][ 'Color Gradients' ][ self[ 'Heat map' ]['Params']['color gradient'] ]
if ratio != None:
if ratio >= 0:
scaling = self[ 'Heat map' ]['Params'][ 'max' ] / float( colorGradient[-1][0] )
else:
scaling = self[ 'Heat map' ]['Params'][ 'min' ] / float( colorGradient[0][0] )
scaled_ratio = ratio / scaling
idx = bisect.bisect( colorGradient, ( scaled_ratio, ) )
if idx == 0:
color = colorGradient[0][1]
elif idx == len( colorGradient):
color = colorGradient[-1][1]
else:
# linear interpolation ... between idx-1 & idx
dX = ( scaled_ratio - colorGradient[ idx - 1 ][ 0 ] ) / ( colorGradient[ idx ][ 0 ] - colorGradient[ idx - 1 ][ 0 ] )
for color_chanel in range(3):
d_ = dX * ( colorGradient[ idx ][ 1 ][ color_chanel ] - colorGradient[ idx - 1 ][ 1 ][ color_chanel ])
if abs( d_ ) <= sys.float_info.epsilon :
color[ color_chanel ] = int(round( colorGradient[idx - 1][ 1 ][ color_chanel ]))
else:
color[ color_chanel ] = int(round( colorGradient[idx - 1][ 1 ][ color_chanel ] + d_))
return color
def draw_expression_map_for_cluster(self, clusterID = None, cluster = None, filename = None, min_value_4_expression_map = None, max_value_4_expression_map = None, color_gradient = 'default', box_style = 'classic' ):
'''
Plots an expression map for a given cluster.
Either the parameter "clusterID" or "cluster" can be defined.
This function is useful to plot a user-defined cluster, e.g. knowledge-based cluster (TCA-cluster, Glycolysis-cluster ...). In this case, the parameter "cluster" should be defined.
:param clusterID: ID of a cluster (those are obtained e.g. from the plot of cluster frequencies or the node map)
:type clusterID: int
:param cluster: tuple containing the indices of the objects describing a cluster.
:type cluster: tuple
:param filename: name of the SVG file for the expression map.
:type filename: string
The following parameters are passed to :py:func:`pyGCluster.Cluster.draw_expression_map`:
:param min_value_4_expression_map: lower bound for color coding of values in the expression map. Remember that log2-values are expected, i.e. this value should be < 0!
:type min_value_4_expression_map: float
:param max_value_4_expression_map: upper bound for color coding of values in the expression map.
:type max_value_4_expression_map: float
:param color_gradient: name of the color gradient used for plotting the expression map. Currently supported are default, Daniel, barplot, 1337, BrBG, PiYG, PRGn, PuOr, RdBu, RdGy, RdYlBu, RdYlGn and Spectral
:type color_gradient: string
:param box_style: name of box style used in SVG. Currently supported are classic, modern, fusion.
:type box_style: string
:rtype: none
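For example, to plot a user-defined (e.g. knowledge-based) cluster, one might call (the indices and the file name below are purely illustrative):
>>> ClusterClass.draw_expression_map_for_cluster(
...     cluster = ( 0, 5, 33, 42 ),
...     filename = 'my_TCA_cluster.svg'
... )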
'''
# check if function call was valid:
if clusterID is None and cluster is None:
self._print( '[ ERROR ] call function "draw_expression_map_for_cluster" with either a clusterID or a cluster (one of them is required).', verbosity_level = 0 )
return
elif clusterID != None and cluster != None:
self._print( '[ ERROR ] call function "draw_expression_map_for_cluster" with either a clusterID or a cluster, not both.', verbosity_level = 0 )
return
# if clusterID is given, get the corresponding cluster:
elif clusterID != None:
for c, cID in self[ 'Cluster 2 clusterID' ].items():
if cID == clusterID:
break
cluster = c
# determine hm_filename:
if filename is None:
filename = '{0}.svg'.format( self[ 'Cluster 2 clusterID' ][ cluster ] )
hm_filename = os.path.join( self[ 'Working directory' ], filename )
# prepare for drawing of expression map ...
identifiers = []
data = {}
additional_labels = {}
try:
cFreq, cFreqDict = self.frequencies( cluster = cluster )
except KeyError:
cFreq = 0.0
for index in cluster:
identifier = self[ 'Identifiers' ][ index ]
identifiers.append( identifier )
data[ identifier ] = {}
for condition in self[ 'Conditions' ]:
data[ identifier ][ condition ] = self[ 'Data' ][ identifier ][ condition ]
additional_labels[ identifier ] = [ '{0:3.4f}'.format( cFreq ) ]
self.draw_expression_map(
identifiers = identifiers,
data = data,
conditions = self[ 'Conditions' ],
additional_labels = additional_labels,
min_value_4_expression_map = min_value_4_expression_map,
max_value_4_expression_map = max_value_4_expression_map,
expression_map_filename = hm_filename,
legend_filename = None,
color_gradient = color_gradient,
box_style = box_style
)
self._print( '... expression map saved as "{0}".'.format( hm_filename ), verbosity_level = 1 )
return
def draw_expression_map_for_community_cluster(self, name, min_value_4_expression_map = None, max_value_4_expression_map = None, color_gradient = '1337', sub_folder = None, min_obcofreq_2_plot = None, box_style = 'classic'):
'''
Plots the expression map for a given "community cluster":
Any cluster in the community node map is internally represented as a tuple with two elements:
"cluster" and "level". Those objects are stored as keys in self[ 'Communities' ],
from where they may be extracted and fed into this function.
:param name: "community cluster" -> best obtain from self[ 'Communities' ].keys()
:type name: tuple
:param min_obcofreq_2_plot: minimum obCoFreq of a cluster's object to be shown in the expression map.
:type min_obcofreq_2_plot: float
The following parameters are passed to :py:func:`pyGCluster.Cluster.draw_expression_map`:
:param min_value_4_expression_map: lower bound for color coding of values in the expression map. Remember that log2-values are expected, i.e. this value should be < 0!
:type min_value_4_expression_map: float
:param max_value_4_expression_map: upper bound for color coding of values in the expression map.
:type max_value_4_expression_map: float
:param color_gradient: name of the color gradient used for plotting the expression map. Currently supported are default, Daniel, barplot, 1337, BrBG, PiYG, PRGn, PuOr, RdBu, RdGy, RdYlBu, RdYlGn and Spectral
:type color_gradient: string
:param box_style: name of box style used in SVG. Currently supported are classic, modern, fusion.
:type box_style: string
:param sub_folder: if specified, the expression map is saved in this folder, rather than in pyGCluster's working directory.
:type sub_folder: string
:rtype: none
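For example, to plot every "community cluster" into a sub-folder (the folder name is only a placeholder):
>>> for name in ClusterClass[ 'Communities' ].keys():
...     ClusterClass.draw_expression_map_for_community_cluster( name, sub_folder = 'communities' )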
'''
identifiers = []
data = {}
additional_labels = {}
for index in self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ]:
identifier = None
if index > 0:
normalized_obCoFreq = self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ][ index ]
if min_obcofreq_2_plot is not None and normalized_obCoFreq < min_obcofreq_2_plot: # guard against the default of None
continue
identifier = self[ 'Identifiers' ][ index ]
identifiers.append( identifier )
data[ identifier ] = {}
for condition in self[ 'Conditions' ]:
data[ identifier ][ condition ] = self[ 'Data' ][ identifier ][ condition ]
additional_labels[ identifier ] = [ '{0:3.4f}'.format( normalized_obCoFreq ) ]
else:
identifiers.append( '_placeholder_' )
hm_filename = '{0}-{1}.svg'.format( self[ 'Communities' ][ name ][ 'cluster ID' ], name[ 1 ] )
if sub_folder != None:
if not os.path.exists( os.path.join( self[ 'Working directory' ], sub_folder ) ):
os.mkdir( os.path.join( self[ 'Working directory' ], sub_folder ) )
hm_filename = os.path.join( sub_folder , hm_filename )
self.draw_expression_map(
identifiers = identifiers,
data = data,
conditions = self[ 'Conditions' ],
additional_labels = additional_labels,
min_value_4_expression_map = min_value_4_expression_map,
max_value_4_expression_map = max_value_4_expression_map,
expression_map_filename = hm_filename,
legend_filename = None,
color_gradient = color_gradient,
box_style = box_style
)
return
def draw_community_expression_maps(self, min_value_4_expression_map = None, max_value_4_expression_map = None, color_gradient = 'default', box_style = 'classic', conditions= None, additional_labels=None):
'''
Plots the expression map for each community showing its object composition.
The following parameters are passed to :py:func:`pyGCluster.Cluster.draw_expression_map`:
:param min_value_4_expression_map: lower bound for color coding of values in the expression map. Remember that log2-values are expected, i.e. this value should be < 0!
:type min_value_4_expression_map: float
:param max_value_4_expression_map: upper bound for color coding of values in the expression map.
:type max_value_4_expression_map: float
:param color_gradient: name of the color gradient used for plotting the expression map. Currently supported are default, Daniel, barplot, 1337, BrBG, PiYG, PRGn, PuOr, RdBu, RdGy, RdYlBu, RdYlGn and Spectral
:type color_gradient: string
:param box_style: name of box style used in SVG. Currently supported are classic, modern, fusion.
:type box_style: string
:param additional_labels: dict with additional labels, k = identifier and v = list of additional labels.
:type additional_labels: dict
:rtype: none
'''
if conditions is None:
conditions = self[ 'Conditions' ]
max_level = max( [ name[ 1 ] for name in self[ 'Communities' ] ] )
for cluster in self._get_levelX_clusters( level = max_level ):
name = ( cluster, max_level )
if len( cluster ) > self[ 'for IO skip clusters bigger than' ]:
continue
identifiers = []
data = {}
internal_additional_labels = {}
for index in self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ].keys():
identifier = None
if index > 0:
# try:
identifier = self[ 'Identifiers' ][ index ]
# except:
# print( index , self[ 'Communities' ][ name ])
# print( list( self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ].keys() ) )
# print('Tried to access Identifier # {0} and failed'.format( index ) )
# print('Total length of Identifiers is {0}'.format( len( self[ 'Identifiers' ] )))
# # for index in self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ].keys():
# # print( index )
# # print( len( self[ 'Data' ] ) )
# # exit(1)
# # identifier = 'WTF?'
# continue
identifiers.append( identifier )
data[ identifier ] = {}
for condition in self[ 'Conditions' ]:
data[ identifier ][ condition ] = self[ 'Data' ][ identifier ][ condition ]
internal_additional_labels[ identifier ] = [ '{0:4.2f}'.format( self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ][ index ] ) ]
else:
identifiers.append( '_placeholder_' )
if additional_labels != None:
for k in internal_additional_labels.keys():
if k in additional_labels.keys():
internal_additional_labels[ k ] += additional_labels[ k ]
hm_filename = '{0}-{1}.svg'.format( self[ 'Communities' ][ name ][ 'cluster ID' ], name[ 1 ] )
self.draw_expression_map(
identifiers = identifiers,
data = data,
conditions = conditions,
additional_labels = internal_additional_labels,
min_value_4_expression_map = min_value_4_expression_map,
max_value_4_expression_map = max_value_4_expression_map,
expression_map_filename = hm_filename,
legend_filename = None,
color_gradient = color_gradient,
box_style = box_style
)
self._print( '... community expression maps saved in "{0}"'.format( self[ 'Working directory' ] ), verbosity_level = 1 )
return
def delete_resampling_results(self):
'''
Resets all variables holding any result of the re-sampling process.
This includes the convergence determination as well as the community structure.
Does not delete the data that is intended to be clustered.
:rtype: None
'''
self[ 'Cluster 2 clusterID' ] = {}
self[ 'Cluster counts' ] = None
self[ 'Distances' ] = []
self[ 'Linkages' ] = []
self[ 'Distance-linkage combinations' ] = []
self[ 'Iterations' ] = 0
self[ 'Convergence determination - params' ] = {}
self[ 'Convergence determination - iteration 2 n_mostfreq' ] = {}
self[ 'Convergence determination - first detected at iteration' ] = 0
self[ 'Communities' ] = {}
self[ 'Function parameters' ] = {}
return
def check_if_data_is_log2_transformed(self):
'''
Simple check if any value of the data_tuples (i.e. any mean) is below zero.
Below zero indicates that the input data was log2 transformed.
:rtype: boolean
'''
for identifier in self[ 'Data' ].keys():
for condition, data_tuple in self[ 'Data' ][ identifier ].items():
for value in data_tuple:
if value < 0:
return True
return False
def __add__(self, other):
'''
Adds re-sampling results of a pyGCluster instance into another one.
If the clustered data of the two instances differs, an AssertionError is raised and nothing is added.
The same holds if their distance-linkage combinations differ.
:param other: the pyGCluster instance that is to be added to self.
:type other: pyGCluster instance
:rtype: None
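For example, to merge the re-sampling results of two instances that clustered the same data (the instance names are only illustrative):
>>> cluster_run_A + cluster_run_B
>>> # the counts of cluster_run_B are now contained in cluster_run_A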
'''
import numpy
assert self[ 'Data' ] == other[ 'Data' ], '[ ERROR ] pyGCluster-instances with different clustered data cannot be merged!'
assert sorted( self[ 'Distance-linkage combinations' ] ) == sorted( other[ 'Distance-linkage combinations' ] ), '[ ERROR ] pyGCluster-instances with a different distance-linkage combinations cannot be merged!'
self[ 'Iterations' ] += other[ 'Iterations' ]
if self[ 'Cluster counts' ] is None:
self[ 'Cluster counts' ] = numpy.zeros(
( 10 ** 4, len( self[ 'Distance-linkage combinations' ] ) ),
dtype = numpy.uint32
)
otherDLC2selfDLC = {}
for other_dlc_index, dlc in enumerate( other[ 'Distance-linkage combinations' ] ):
self_dlc_index = self[ 'Distance-linkage combinations' ].index( dlc )
otherDLC2selfDLC[ other_dlc_index ] = self_dlc_index
# merge clusters from other into self
for cluster, other_clusterID in other[ 'Cluster 2 clusterID' ].items():
if cluster not in self[ 'Cluster 2 clusterID' ]:
self[ 'Cluster 2 clusterID' ][ cluster ] = len( self[ 'Cluster 2 clusterID' ] ) # new cluster found, assign index
self_clusterID = self[ 'Cluster 2 clusterID' ][ cluster ]
for other_dlc_index, self_dlc_index in otherDLC2selfDLC.items():
try:
self[ 'Cluster counts' ][ self_clusterID ]
except IndexError:
self[ 'Cluster counts' ] = numpy.concatenate(
(
self[ 'Cluster counts' ],
numpy.zeros( ( 10 ** 4, len( self[ 'Distance-linkage combinations' ] ) ), dtype = numpy.uint32 )
)
) # add rows at bottom
self[ 'Cluster counts' ][ self_clusterID ][ self_dlc_index ] += other[ 'Cluster counts' ][ other_clusterID ][ other_dlc_index ]
return
def resample(self, distances, linkages, function_2_generate_noise_injected_datasets = None, min_cluster_size = 4, alphabet = None, force_plotting = False, min_cluster_freq_2_retain = 0.001, pickle_filename = 'pyGCluster_resampled.pkl', cpus_2_use = None, iter_tol = 0.01 / 100000, iter_step = 5000, iter_max = 250000, iter_top_P = 0.001, iter_window = 50000, iter_till_the_end = False):
'''
Routine for the assessment of cluster reproducibility (re-sampling routine).
To this, a high number of noise-injected datasets are created, which are subsequently clustered by AHC.
Those are created via :py:func:`pyGCluster.function_2_generate_noise_injected_datasets` (default = usage of Gaussian distributions).
Each 'simulated' dataset is then subjected to AHC x times, where x equals the number of distance-linkage combinations that come from all possible combinations of "distances" and "linkages".
In order to speed up the re-sampling routine, it is distributed to multiple threads, if cpus_2_use > 1.
The re-sampling routine stops once either convergence (see below) is detected or iter_max iterations have been performed.
Eventually, only clusters with a maximum frequency of at least min_cluster_freq_2_retain are stored; all others are discarded.
In order to visually inspect convergence, a convergence plot is created.
For more information about the convergence estimation, see Supplementary Material of pyGCluster's publication.
For a complete list of possible distance metrics
see: http://docs.scipy.org/doc/scipy/reference/spatial.distance.html
and for the available linkage methods
see: http://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html
.. note ::
If memory is of concern (e.g. for a large dataset, > 5000 objects), cpus_2_use should be kept low.
:param distances: list of distance metrics, given as strings, e.g. [ 'correlation', 'euclidean' ]
:type distances: list
:param linkages: list of linkage methods, given as strings, e.g. [ 'average', 'complete', 'ward' ]
:type linkages: list
:param function_2_generate_noise_injected_datasets: function to generate noise-injected datasets. If None (default), Gaussian distributions are used.
:type function_2_generate_noise_injected_datasets: function
:param min_cluster_size: minimum size of a cluster, so that it is included in the assessment of cluster reproducibilities.
:type min_cluster_size: int
:param alphabet: alphabet used to convert decimal indices to characters to save memory. Defaults to string.printable, without ','.
:type alphabet: string
.. note ::
If alphabet contains ',', this character is removed from alphabet, because the indices comprising a cluster are saved comma-separated.
:param force_plotting: the convergence plot is created after each iter_step iteration (otherwise only when convergence is detected).
:type force_plotting: boolean
:param min_cluster_freq_2_retain: ]0, 1[ minimum frequency a cluster has to exhibit (only the maximum of its dlc-frequencies matters here) to be stored in pyGCluster once all iterations are finished.
:type min_cluster_freq_2_retain: float
:param cpus_2_use: number of threads that are evoked in the re-sampling routine.
:type cpus_2_use: int
:param iter_max: maximum number of re-sampling iterations.
:type iter_max: int
Convergence determination:
:param iter_tol: ]0, 1e-3[ value for the threshold of the median of normalized slopes, in order to declare convergence.
:type iter_tol: float
:param iter_step: number of iterations each multiprocess performs and simultaneously the interval in which to check for convergence.
:type iter_step: int
:param iter_top_P: ]0, 1[ for the convergence estimation, only the most frequent clusters are examined. This is the threshold for the minimum frequency of a cluster to be included.
:type iter_top_P: float
:param iter_window: size of the sliding window in iterations. The median is obtained from normalized slopes inside this window - *should be a multiple of iter_step*
:type iter_window: int
:param iter_till_the_end: if set to True, the convergence determination is switched off; hence, re-sampling is performed until iter_max is reached.
:type iter_till_the_end: boolean
:rtype: None
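A minimal sketch of a direct call (the parameter values are only examples):
>>> ClusterClass.resample(
...     distances = [ 'euclidean', 'correlation' ],
...     linkages = [ 'complete', 'average', 'ward' ],
...     iter_max = 250000,
...     pickle_filename = 'pyGCluster_resampled.pkl'
... )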
'''
self[ 'Function parameters' ][ self.resample.__name__ ] = { k : v for k, v in list(locals().items()) if k != 'self' }
import numpy
import scipy.cluster.hierarchy as sch
import scipy.spatial.distance as ssd
if function_2_generate_noise_injected_datasets is None:
function_2_generate_noise_injected_datasets = yield_noisejected_dataset
if alphabet is None:
alphabet = string.printable
alphabet = alphabet.replace( ',', '' )
## create distance-linkage combinations (dlc)
self[ 'Distances' ] = distances
self[ 'Linkages' ] = linkages
# check if all distance metrices are valid:
invalid_dists = set( self[ 'Distances' ] ) - set( dir( ssd ) )
if invalid_dists:
s = '[ WARNING ] invalid distance metrices! "{0}" are not in "scipy.spatial.distance".'
self._print( s.format( ', '.join( invalid_dists ) ), verbosity_level = 0 )
# check if all linkage methods are valid:
invalid_linkages = set( self[ 'Linkages' ] ) - set(sch._LINKAGE_METHODS.keys())
if invalid_linkages:
s = '[ WARNING ] invalid linkage methods! "{0}" are not in "scipy.cluster.hierarchy".'
self._print( s.format( ', '.join( invalid_linkages ) ), verbosity_level = 0 )
# get all possible distance-linkage combinations:
self[ 'Distance-linkage combinations' ] = []
for distance in self[ 'Distances' ]:
for linkage in self[ 'Linkages' ]:
if distance != 'euclidean' and linkage in sch._EUCLIDEAN_METHODS:
continue
self[ 'Distance-linkage combinations' ].append( '{0}-{1}'.format( distance, linkage ) )
n_dlc = len( self[ 'Distance-linkage combinations' ] )
self._print( '{0} distance-linkage combinations are evaluated.'.format( n_dlc ), verbosity_level = 2 )
self._print( '... those are: {0}.'.format( ', '.join( self[ 'Distance-linkage combinations' ] ) ), verbosity_level = 2 )
## check if permission to write:
if pickle_filename:
try:
with open( os.path.join( self[ 'Working directory' ], 'tmp.txt' ), 'w' ) as fout:
pass
os.remove( os.path.join( self[ 'Working directory' ], 'tmp.txt' ) )
except IOError:
s = '[ WARNING ] You do not have permission, or folder does not exist!\n\tresults in "{0}" are NOT pickled!'
self._print( s.format( self[ 'Working directory' ] ), verbosity_level = 0 )
pickle_filename = False
## check if a pickle file with the same name is already in the "Working directory"
## this indicates that clustering is likely to be continued:
if pickle_filename in os.listdir( self[ 'Working directory' ] ):
self._print( 'Pickle file with the same name detected! Pickle "{0}" will be loaded and clustering continued ...'.format( pickle_filename ), verbosity_level = 0 )
loaded = Cluster()
loaded.load( os.path.join( self[ 'Working directory' ], pickle_filename ) )
self + loaded
## create tmp_struct to store the cluster counts:
tmpstruct_clustercounts = {}
tmpstruct_clustercounts[ 'Cluster counts' ] = numpy.zeros( ( 10 ** 5, n_dlc ), dtype = numpy.uint32 )
tmpstruct_clustercounts[ 'Cluster 2 clusterID' ] = {}
tmpstruct_clustercounts[ 'Discarded IDs' ] = set()
## initialize variables for the convergence determination
self[ 'Convergence determination - params' ] = {}
self[ 'Convergence determination - params' ][ 'iter_step' ] = iter_step
self[ 'Convergence determination - params' ][ 'iter_top_P' ] = iter_top_P
self[ 'Convergence determination - params' ][ 'iter_tol' ] = iter_tol
self[ 'Convergence determination - params' ][ 'iter_window' ] = iter_window
self[ 'Convergence determination - params' ][ 'iter_max' ] = iter_max
if iter_window % iter_step:
s = '[ WARNING ] iter_window = {0} is NOT a multiple of iter_step = {1}. Better re-call with a multiple of iter_step!'
self._print( s.format( iter_window, iter_step ), verbosity_level = 1 )
## prepare for multiprocesses:
if cpus_2_use != None:
if cpus_2_use > multiprocessing.cpu_count():
s = '[ WARNING ] You requested to perform re-sampling on {0} threads, but only {1} available -> better re-call with "cpus_2_use = {1}"!'
self._print( s.format( cpus_2_use, multiprocessing.cpu_count() ), verbosity_level = 0 )
n_multiprocesses = cpus_2_use
else:
n_multiprocesses = multiprocessing.cpu_count()
DataQ = multiprocessing.Queue()
kwargs4multiprocess = {}
kwargs4multiprocess[ 'DataQ' ] = DataQ
kwargs4multiprocess[ 'data' ] = self[ 'Data' ]
kwargs4multiprocess[ 'iterations' ] = iter_step
kwargs4multiprocess[ 'alphabet' ] = alphabet
kwargs4multiprocess[ 'dlc' ] = self[ 'Distance-linkage combinations' ]
kwargs4multiprocess[ 'min_cluster_size' ] = min_cluster_size
kwargs4multiprocess[ 'min_cluster_freq_2_retain' ] = min_cluster_freq_2_retain
kwargs4multiprocess[ 'function_2_generate_noise_injected_datasets' ] = function_2_generate_noise_injected_datasets
# os.nice() is not available on Windows, so the process priority is only lowered on other platforms
try:
sys.getwindowsversion()
except:
os.nice( 10 )
min_count = int( min_cluster_freq_2_retain * kwargs4multiprocess[ 'iterations' ] * 0.5 )
if min_count < 2:
s = '[ WARNING ] params "min_cluster_freq_2_retain" = {0} and "iter_step" = {1}, hence min_count = {2}\n\t-> huge accumulation of unstable clusters in pyGCluster!'
self._print( s.format( min_cluster_freq_2_retain, kwargs4multiprocess[ 'iterations' ], min_count ), verbosity_level = 1 )
# check if multiprocess are valid:
self._print( 'checking if multiprocesses are functioning ...', end = ' ', verbosity_level = 2 )
try:
tmp_kwargs4multiprocess = { k : v for k, v in kwargs4multiprocess.items() }
tmp_kwargs4multiprocess[ 'iterations' ] = 1
p = multiprocessing.Process( target = resampling_multiprocess, kwargs = tmp_kwargs4multiprocess )
p.start()
del tmp_kwargs4multiprocess
except:
self._print( '[ ERROR ] Failed to launch multi-processes!', file = sys.stderr, verbosity_level = 0 )
seekAndDestry( [ p ] )
raise
try:
DataQ.get()
p.join()
except:
self._print( '[ ERROR ] Failed to collect multi-processes!', file = sys.stderr, verbosity_level = 0 )
seekAndDestry( [ p ] )
raise
self._print( 'success!', verbosity_level = 2 )
## other stuff:
self[ 'Convergence determination - iteration 2 n_mostfreq' ] = {}
iteration = 0
converged = False
ask2continue = False
iter_to_continue = False
### now comes the actual re-sampling routine :)
while not converged:
# prevent exceeding iter_max:
if iter_max < iteration + n_multiprocesses * iter_step and not iter_to_continue:
n_multiprocesses_tmp = int( math.ceil( float( iter_max - iteration ) / iter_step ) )
s = 'Continuing another {0} (# processes) * {1} (iter_step) iterations would exceed iter_max (= {2}). Hence, # processes is lowered to {3}, so that a total of {4} iterations will have been performed.'
self._print( s.format( n_multiprocesses, iter_step, iter_max, n_multiprocesses_tmp, iteration + n_multiprocesses_tmp * iter_step ), verbosity_level = 2 )
n_multiprocesses = n_multiprocesses_tmp
# Launching multi-processes
processes = []
for i in range( n_multiprocesses ):
p = multiprocessing.Process( target = resampling_multiprocess, kwargs = kwargs4multiprocess )
p.start()
processes.append( p )
time.sleep( random.random() ) # to increase randomness!
# Collecting Process outputs and transfer cluster-counts into 'tmpstruct_clustercounts'
for i in range( n_multiprocesses ):
cluster_counts_list = DataQ.get()
iteration += kwargs4multiprocess[ 'iterations' ]
self._print( "Clustering. Resampling data : iteration {0: >7}/{1}".format( iteration, iter_max ), end = '\r', file = sys.stderr, verbosity_level = 1 )
for cluster, counts in cluster_counts_list:
# get cluster ID:
if cluster in tmpstruct_clustercounts[ 'Cluster 2 clusterID' ]:
clusterID = tmpstruct_clustercounts[ 'Cluster 2 clusterID' ][ cluster ]
else:
# if available, get a discarded ID and assign this ID to the cluster:
if tmpstruct_clustercounts[ 'Discarded IDs' ]:
try:
clusterID = tmpstruct_clustercounts[ 'Discarded IDs' ].pop()
except KeyError: # KeyError: 'pop from an empty set' = set is empty
clusterID = len( tmpstruct_clustercounts[ 'Cluster 2 clusterID' ] )
else:
clusterID = len( tmpstruct_clustercounts[ 'Cluster 2 clusterID' ] )
tmpstruct_clustercounts[ 'Cluster 2 clusterID' ][ cluster ] = clusterID
# update counts:
try:
tmpstruct_clustercounts[ 'Cluster counts' ][ clusterID ] += counts
except IndexError:
tmpstruct_clustercounts[ 'Cluster counts' ] = numpy.concatenate(
( tmpstruct_clustercounts[ 'Cluster counts' ],
numpy.zeros( ( 10 ** 5, n_dlc ), dtype = numpy.uint32 )
)
)
tmpstruct_clustercounts[ 'Cluster counts' ][ clusterID ] += counts
# determine most frequent clusters:
min_count = iteration * iter_top_P
mostfreqIDs = numpy.unique( numpy.nonzero( tmpstruct_clustercounts[ 'Cluster counts' ] >= min_count )[ 0 ] )
self[ 'Convergence determination - iteration 2 n_mostfreq' ][ iteration ] = len( mostfreqIDs )
del mostfreqIDs
# check if converged:
if iter_till_the_end == False:
converged = self.check4convergence()
if converged or force_plotting:
self.convergence_plot()
del cluster_counts_list
# terminate processes:
for p in processes:
p.join()
# once all processes finished iter_step clusterings, perform a purging step:
            # discard all clusters whose maximum count (across all distance-linkage combinations) is still 1:
min_required_count = int( min_cluster_freq_2_retain * 0.5 * ( kwargs4multiprocess[ 'iterations' ] * n_multiprocesses ) )
self._print('\nDiscarding {0}-count-clusters ...'.format( min_required_count ), end = ' ', file = sys.stderr, verbosity_level = 2)
max_counts = numpy.amax( tmpstruct_clustercounts[ 'Cluster counts' ], axis = 1 ) # get max count for each cluster
IDs2discard = set( numpy.nonzero( max_counts == 1 )[ 0 ] )
del max_counts
# reset counts:
for ID in IDs2discard:
tmpstruct_clustercounts[ 'Cluster counts' ][ ID ][ : ] = 0
# delete those clusters which were attributed the discarded clusterIDs
clusters2discard = [ c for c, cID in tmpstruct_clustercounts[ 'Cluster 2 clusterID' ].items() if cID in IDs2discard ]
for cluster in clusters2discard:
del tmpstruct_clustercounts[ 'Cluster 2 clusterID' ][ cluster ]
del cluster
del clusters2discard
self._print( '{0} discarded.'.format( len( IDs2discard ) ), file = sys.stderr, verbosity_level = 2 )
tmpstruct_clustercounts[ 'Discarded IDs' ] = IDs2discard
del IDs2discard
if converged and iteration < iter_max and not iter_till_the_end:
ask2continue = True
elif iteration >= iter_max:
self._print( '\niter_max reached. See convergence plot. Stopping re-sampling if not defined otherwise ...', verbosity_level = 1 )
converged = True
self.convergence_plot()
ask2continue = True
# ask if user wants to continue with the re-sampling process:
if ask2continue and self[ 'Verbosity level' ] > 0:
self._print( '\nEnter how many iterations you would like to continue. (Has to be a multiple of iterstep = {0})'.format( iter_step ), verbosity_level = 1 )
self._print( '(enter "0" to stop resampling.)', verbosity_level = 1 )
self._print( '(enter "-1" to resample until iter_max (= {0}) is reached.)'.format( iter_max ), verbosity_level = 1 )
while True:
                    answer = input( 'Enter a number ...' )
try:
iter_to_continue = int( answer )
break
except:
self._print( 'INT conversion failed. Please try again!', verbosity_level = 1 )
converged = False
if iter_to_continue == 0:
converged = True
elif iter_to_continue == -1:
iter_till_the_end = True
ask2continue = False
if iteration == iter_max:
converged = True
else:
iter_to_continue = int( iter_step * round(iter_to_continue / float(iter_step)) )
if iter_to_continue < iter_step:
iter_to_continue = iter_step
iter_to_continue = int( math.ceil( float( iter_to_continue ) / n_multiprocesses ) )
self._print( 'Resampling will continue another {0} iterations.'.format( iter_to_continue * n_multiprocesses ), verbosity_level = 1 )
kwargs4multiprocess[ 'iterations' ] = iter_to_continue
# final filtering: store only clusters in pyGCluster whose max_frequencies are above min_cluster_freq_2_retain (default 0.001):
min_count = iteration * min_cluster_freq_2_retain
clusterIDs2retain = set( numpy.nonzero( tmpstruct_clustercounts[ 'Cluster counts' ] >= min_count )[0] )
self._print( '{0} clusters above threshold of {1}. '.format( len( clusterIDs2retain ), min_cluster_freq_2_retain ), verbosity_level = 2 )
self[ 'Cluster counts' ] = numpy.zeros( ( len( clusterIDs2retain ), n_dlc ), dtype = numpy.uint32 )
baseX = len( alphabet )
tmp = {}
tmp[ 'Iterations' ] = iteration
tmp[ 'Cluster 2 clusterID' ] = {}
tmp[ 'Cluster counts' ] = tmpstruct_clustercounts[ 'Cluster counts' ]
tmp[ 'Distance-linkage combinations' ] = self[ 'Distance-linkage combinations' ]
tmp[ 'Data' ] = self[ 'Data' ]
for cluster, clusterID in tmpstruct_clustercounts[ 'Cluster 2 clusterID' ].items():
if clusterID in clusterIDs2retain:
final_cluster = []
# map cluster back to decimal indices:
for baseXstring in cluster.split( ',' ):
index = 0
for i, digit in enumerate( baseXstring[ ::-1 ] ):
index += alphabet.find( digit ) * baseX ** i
final_cluster.append( index )
final_cluster.sort()
final_cluster = tuple( final_cluster )
tmp[ 'Cluster 2 clusterID' ][ final_cluster ] = clusterID
self.__add__( tmp )
# pickle results:
if pickle_filename:
self.save( pickle_filename )
s = 're-sampling routine for {0} iterations finished. {1} clusters were obtained.'
self._print( s.format( iteration, len(clusterIDs2retain) ), verbosity_level = 1 )
return
def _get_normalized_slope(self, y2, y1, iter_step):
'''
Calculates the normalized slope between two 2D-coordinates:
        i.e. ( y2 / y1 - 1.0 ) / iter_step,
where y = amount of most frequent clusters at a certain iteration,
and iter_step = x2 - x1.
:param y2: the y-coordinate of the second point.
:type y2: float
:param y1: the y-coordinate of the first point.
:type y1: float
:param iter_step: the difference between the x-coordinates of the two points, i.e. x2 - x1.
:type iter_step: float
        :rtype: float
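        Illustrative doctest-style sketch (example values, assuming a pyGCluster.Cluster instance named ``cluster``):
        >>> cluster._get_normalized_slope( y2 = 75, y1 = 100, iter_step = 5000 )
        -5e-05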
'''
numerator = float( y2 ) / float( y1 ) - 1.0
norm_slope = numerator / float( iter_step )
return norm_slope
def check4convergence(self):
'''
Checks if the re-sampling routine may be terminated, because the number of most frequent clusters remains almost constant.
This is done by examining a plot of the amount of most frequent clusters vs. the number of iterations.
        Convergence is declared once the median normalized slope within a given window of iterations lies within +/- "iter_tol".
For further information see Supplementary Material of the corresponding publication.
:rtype: boolean
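        Illustrative usage sketch (assumes a pyGCluster.Cluster instance named ``cluster`` for which the
        re-sampling routine has already filled self[ 'Convergence determination - iteration 2 n_mostfreq' ]):
        >>> if cluster.check4convergence():
        ...     cluster.convergence_plot()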
'''
converged = False
sorted_iter2nfreqs = sorted( self[ 'Convergence determination - iteration 2 n_mostfreq' ].items() )
iter_step = self[ 'Convergence determination - params' ][ 'iter_step' ]
iter_window = self[ 'Convergence determination - params' ][ 'iter_window' ]
iter_tol = self[ 'Convergence determination - params' ][ 'iter_tol' ]
# determine normalized slope:
norm_slopes = []
for i, ( iteration, n_mostfreq ) in enumerate( sorted_iter2nfreqs ):
if i == 0:
continue
n_mostfreq_before = sorted_iter2nfreqs[ i - 1 ][ 1 ]
norm_slope = self._get_normalized_slope( y2 = n_mostfreq, y1 = n_mostfreq_before, iter_step = iter_step )
norm_slopes.append( norm_slope )
# determine convergence - is the median of normalized slopes in iter_window iterations <= iter_tol?
n_slopes = int( round( float( iter_window ) / iter_step ) ) # prepare for sliding window
for i in range( len( norm_slopes ) - n_slopes + 1 ):
iteration = iter_step + iter_step * n_slopes + i * iter_step
slopes_in_sliding_window = norm_slopes[ i : i + n_slopes ]
median_slope = self.median( slopes_in_sliding_window )
if -iter_tol <= median_slope <= iter_tol:
converged = True
self._print( '\npotentially converged. Check convergence plot!', file = sys.stderr, verbosity_level = 2 )
self[ 'Convergence determination - first detected at iteration' ] = iteration
break
return converged
def convergence_plot(self, filename = 'convergence_plot.pdf'):
'''
        Creates a two-page PDF file containing the full picture of the convergence plot, as well as a zoomed-in view of it.
The convergence plot illustrates the development of the amount of most frequent clusters vs. the number of iterations.
        The dotted line in these plots represents the normalized slope, which is used for internal convergence determination.
If rpy2 cannot be imported, a CSV file is created instead.
:param filename: the filename of the PDF (or CSV) file.
:type filename: string
:rtype: none
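        Illustrative usage sketch (assumes a Cluster instance named ``cluster``; the file is written into self[ 'Working directory' ]):
        >>> cluster.convergence_plot( filename = 'my_convergence_plot.pdf' )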
'''
try:
from rpy2.robjects import IntVector, FloatVector, StrVector
from rpy2.robjects.packages import importr
graphics = importr( 'graphics' )
grdevices = importr( 'grDevices' )
except ImportError:
filename = filename.replace( '.pdf', '.csv' )
with open( os.path.join( self[ 'Working directory' ], filename ), 'w' ) as fout:
print( 'iteration,amount of most frequent clusters', file = fout )
for iteration, n_mostfreq in self[ 'Convergence determination - iteration 2 n_mostfreq' ].items():
print( '{0},{1}'.format( iteration, n_mostfreq ), file = fout )
self._print( '[ INFO ] Since rpy2 could not be imported, a CSV file instead of a PDF plot of convergence was created. See in "{0}".'.format( os.path.join( self[ 'Working directory' ], filename ) ), file = sys.stderr, verbosity_level = 1 )
return
def _add_lines( points2connect, lty = 1, color = 'black' ):
for i, ( x, y ) in enumerate( points2connect ):
if i == 0:
continue
x_before, y_before = points2connect[ i - 1 ]
graphics.lines( IntVector( [ x_before, x ] ),
FloatVector( [ y_before, y ] ),
lty = lty,
col = color
)
iter_step = self[ 'Convergence determination - params' ][ 'iter_step' ]
iter_window = self[ 'Convergence determination - params' ][ 'iter_window' ]
iter_tol = self[ 'Convergence determination - params' ][ 'iter_tol' ]
iteration2mostfreq = self[ 'Convergence determination - iteration 2 n_mostfreq' ]
sorted_iter2mostfreq = sorted( iteration2mostfreq.items() )
# plot convergence curve:
grdevices.pdf( file = os.path.join( self[ 'Working directory' ], filename ), width = 12, height = 12 )
for tag in [ 'full', 'zoom' ]:
points = sorted_iter2mostfreq
Ys = [ y for x, y in points ]
if tag == 'full':
ylim = ( min( Ys ), max( Ys ) )
title = '#most_freq (left y-axis) and normalized_slope (= (current / before - 1.0) / iter_step) (right y-axis)'
elif tag == 'zoom':
ylim = ( min( Ys ), min( Ys ) * 1.075 )
title = 'ZOOM'
subtitle = 'iter_top_P = {0}, iter_step = {1}, iter_tol = {2}, iter_window = {4}, iter_max = {3}'
subtitle = subtitle.format(
self[ 'Convergence determination - params' ][ 'iter_top_P' ],
iter_step,
iter_tol,
self[ 'Convergence determination - params' ][ 'iter_max' ],
iter_window
)
graphics.plot(
IntVector( [ x for x, y in points ] ),
IntVector( Ys ),
main = title,
sub = subtitle,
xlab = 'iteration', xaxt = 'n',
ylab = 'len(most_freq)', ylim = IntVector( ylim ),
col = 'black',
pch = 16
)
_add_lines( points, lty = 1, color = 'black' )
x_axis_ticks = tuple( range( iter_step, max( iteration2mostfreq.keys() ) + 1, iter_step ) )
graphics.axis(1, at = IntVector( x_axis_ticks ), labels = [ '{0}k'.format( tick / 1000 ) for tick in x_axis_ticks ], las = 2, **{ 'cex.axis' : 0.75 } )
graphics.axis(3, at = IntVector( x_axis_ticks ), labels = StrVector( [ '' for tick in x_axis_ticks ] ) )
graphics.legend(
'bottomleft',
legend = StrVector( [ '#most_freq', 'normalized_slope' ] ),
lty = IntVector( [1, 2] ),
pch = IntVector( [16, 1] ),
bty = 'n'
)
# add second plot = normalized_slope-plot:
graphics.par( new = True )
critical_interval = ( -iter_tol, iter_tol )
try:
firstConvergedAtIter = self[ 'Convergence determination - first detected at iteration' ]
except KeyError:
self.check4convergence()
firstConvergedAtIter = self[ 'Convergence determination - first detected at iteration' ]
iter2normslope = [ ( iter_step, -1.0 ) ]
for i, (iteration, n_mostfreq) in enumerate( sorted_iter2mostfreq[ 1: ] ):
iteration_before, n_mostfreq_before = sorted_iter2mostfreq[ i ] # iteration2mostfreq[ iteration - iter_step ]
norm_slope = self._get_normalized_slope( y2 = n_mostfreq, y1 = n_mostfreq_before, iter_step = iteration - iteration_before )
iter2normslope.append( ( iteration, norm_slope ) )
points = iter2normslope
Ys = [ y for x, y in points ]
ylim = ( critical_interval[ 0 ] * 20, critical_interval[ 1 ] * 20)
graphics.plot(
IntVector( [ x for x, y in points ] ),
FloatVector( Ys ),
main = '',
xlab = '',
xaxt = 'n',
ylab = '',
yaxt = 'n',
ylim = FloatVector( ylim ),
pch = 1
)
_add_lines( points, lty = 2, color = 'black' )
graphics.abline( v = firstConvergedAtIter, lty = 1, col = 'blue' )
graphics.lines( IntVector( [ firstConvergedAtIter - iter_window, firstConvergedAtIter ] ), FloatVector( [ 0, 0 ] ), col = 'darkgreen' )
graphics.text( firstConvergedAtIter, 0, str( firstConvergedAtIter / 1000 ), col = 'blue' )
graphics.abline( h = critical_interval[ 0 ], lty = 3, col = 'darkgreen' )
graphics.abline( h = critical_interval[ 1 ], lty = 3, col = 'darkgreen' )
graphics.axis(4, FloatVector( [ ylim[ 0 ], ylim[ 0 ] / 20. * 10, 0, ylim[ 1 ] / 20. * 10, ylim[ 1 ] ] ) )
grdevices.dev_off()
self._print( '... plot of convergence finished. See plot in "{0}".'.format( os.path.join( self[ 'Working directory' ], filename ) ), file = sys.stderr, verbosity_level = 2 )
return
def plot_clusterfreqs(self, min_cluster_size = 4, top_X_clusters = None, threshold_4_the_lowest_max_freq = 0.01):
'''
        Plots the frequencies of each cluster as an expression map:
which cluster was found by which distance-linkage combination, and with what frequency?
The plot's filename is prefixed by 'clusterFreqsMap', followed by the values of the parameters.
E.g. 'clusterFreqsMap_minSize4_top0clusters_top10promille.svg'.
Clusters are sorted by size.
:param min_cluster_size: only clusters with a size equal or greater than min_cluster_size appear in the plot of the cluster freqs.
:type min_cluster_size: int
:param threshold_4_the_lowest_max_freq: ]0, 1[ Clusters must have a maximum frequency of at least threshold_4_the_lowest_max_freq to appear in the plot.
:type threshold_4_the_lowest_max_freq: float
:param top_X_clusters: Plot of the top X clusters in the sorted list (by freq) of clusters having a maximum cluster frequency of at least threshold_4_the_lowest_max_freq (clusterfreq-plot is still sorted by size).
:type top_X_clusters: int
:rtype: None
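        Illustrative usage sketch (parameter values are arbitrary examples, assuming a re-sampled Cluster instance named ``cluster``):
        >>> cluster.plot_clusterfreqs( min_cluster_size = 4, top_X_clusters = 25, threshold_4_the_lowest_max_freq = 0.01 )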
'''
self[ 'Function parameters' ][ self.plot_clusterfreqs.__name__ ] = { k : v for k, v in list(locals().items()) if k != 'self' }
allClusters_sortedByLength_l = sorted( self._get_most_frequent_clusters(min_cluster_size = min_cluster_size, top_X_clusters = top_X_clusters, threshold_4_the_lowest_max_freq = threshold_4_the_lowest_max_freq), key = len, reverse = True )
identifiers = []
data = {}
freqs = set()
for cluster in allClusters_sortedByLength_l:
identifier = 'Cluster ID: {0}, size: {1}'.format( self[ 'Cluster 2 clusterID' ][ cluster ], len( cluster ) )
identifiers.append( identifier )
data[ identifier ] = {}
cFreq, cFreqDict = self.frequencies( cluster = cluster )
for dlc, frequency in sorted( cFreqDict.items() ):
data[ identifier ][ dlc ] = ( frequency, sys.float_info.epsilon )
freqs.add( round( frequency, 2) )
hm_filename = 'clusterFreqsMap_minSize{0}_top{1}clusters_top{2:.0f}promille'.format( min_cluster_size, top_X_clusters, threshold_4_the_lowest_max_freq * 1000 )
# max_value_4_expression_map = sorted( freqs )[ -3 ] # since root cluster has a freq of 1.0, position -1 is always 1.0 (and -2 close to 1.0 (root, too)!)
self.draw_expression_map(
identifiers = identifiers,
data = data,
conditions = sorted( cFreqDict.keys() ),
additional_labels = {},
# min_value_4_expression_map = 0.0,
max_value_4_expression_map = max( freqs ),
expression_map_filename = hm_filename+'.svg',
legend_filename = hm_filename+'_legend.svg',
color_gradient = 'barplot'
)
self._print( '... clusterfreqs_expressionmap saved as: "{0}"'.format( hm_filename+'.svg' ), verbosity_level = 1 )
return
def _get_most_frequent_clusters(self, min_cluster_size = 4, top_X_clusters = None, threshold_4_the_lowest_max_freq = 0.01):
'''
Gets the most frequent clusters. Filters either according to a frequency-threshold or gets the top X clusters.
.. note ::
Each cluster has attributed X counts, or frequencies, where X = len( Distance-linkage combinations ).
For determination of most frequent clusters, only the max( X frequencies ) matters.
Hence, a single frequency above threshold_4_the_lowest_max_freq is sufficient to include that cluster.
:param min_cluster_size: only clusters bigger or equal than threshold are considered; e.g. 4
:type min_cluster_size: int
:param threshold_4_the_lowest_max_freq: ]0, 1[ get all clusters with a max frequency above threshold, e.g. 0.01 => 1%-clusters
:type threshold_4_the_lowest_max_freq: float
:param top_X_clusters: get the top X clusters in the sorted list (by freq) of clusters having a maximum cluster frequency of at least threshold_4_the_lowest_max_freq.
:type top_X_clusters: int
:rtype: List of the most frequent clusters in ARBITRARY order.
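        Illustrative usage sketch (parameter values are arbitrary examples, assuming a re-sampled Cluster instance named ``cluster``):
        >>> most_freq_clusters = cluster._get_most_frequent_clusters( min_cluster_size = 4, threshold_4_the_lowest_max_freq = 0.01 )
        >>> # each element is a tuple of object indices, e.g. sortable by len() for further processing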
'''
import numpy
threshold_4_the_lowest_max_freq = float( threshold_4_the_lowest_max_freq )
topP_count = self[ 'Iterations' ] * threshold_4_the_lowest_max_freq
most_freq = []
max_counts = numpy.amax( self[ 'Cluster counts' ], axis = 1 ) # get max count for each cluster
if top_X_clusters is None:
mostfreqIDs = set( numpy.nonzero( max_counts >= topP_count )[ 0 ] )
for cluster, clusterID in self[ 'Cluster 2 clusterID' ].items():
if len( cluster ) >= min_cluster_size:
if clusterID in mostfreqIDs:
most_freq.append( cluster )
else: # top_X_clusters filter is requested:
cID_mask = [ cID for c, cID in self[ 'Cluster 2 clusterID' ].items() if len( c ) < min_cluster_size ]
clusterIDs2retain = []
            for cID, above_threshold in enumerate( max_counts >= topP_count ):
                if above_threshold:
if cID in cID_mask:
continue
clusterIDs2retain.append( ( max_counts[ cID ], cID ) )
clusterIDs2retain.sort( reverse = True )
topX_clusterIDs = set( [ cID for count, cID in clusterIDs2retain[ : int(top_X_clusters) ] ] )
for cluster, clusterID in self[ 'Cluster 2 clusterID' ].items():
if clusterID in topX_clusterIDs:
most_freq.append(cluster)
        s = '{0} clusters were found for a frequency threshold of {1} and a minimum cluster size of {2}.'
self._print( s.format( len( most_freq ), threshold_4_the_lowest_max_freq, min_cluster_size ), verbosity_level = 1 )
return most_freq
def plot_nodetree(self, tree_filename = 'tree.dot'):
'''
        Plots the dendrogram of the clustering of the most_frequent_clusters.
- node label = nodeID internally used for self['Nodemap'] (not the same as clusterID!).
- node border color is white if the node is a close2root-cluster (i.e. larger than self[ 'for IO skip clusters bigger than' ] ).
- edge label = distance between parent and children.
- edge - color codes:
- black = default; highlights child which is not a most_frequent_cluster but was created during formation of the dendrogram.
- green = children are connected with the root.
- red = highlights child which is a most_frequent_cluster.
- yellow = most_frequent_cluster is directly connected with the root.
:param tree_filename: name of the Graphviz DOT file containing the dendrogram of the AHC of most frequent clusters. Best given with ".dot"-extension!
:type tree_filename: string
:rtype: none
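        Illustrative usage sketch (assumes that :py:func:`pyGCluster.Cluster.build_nodemap` has been called before, so that self[ 'Nodemap - binary tree' ] exists):
        >>> cluster.plot_nodetree( tree_filename = 'most_freq_clusters_tree.dot' )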
'''
with open( os.path.join( self[ 'Working directory' ], tree_filename ), 'w' ) as fout:
print( 'digraph "pyGCluster nodemap_of_the_clustering_of_most_freq_clusters" {', file = fout )
node2tag = {}
# draw nodes:
for tag, node in enumerate( self[ 'Nodemap - binary tree' ] ):
color = 'black'
try:
label = str( self[ 'Cluster 2 clusterID' ][ tuple( sorted( set( node ) ) ) ] )
except KeyError:
label = '-1'
label = 'size={0}, id={1}'.format( len( set( node ) ), label )
if self[ 'Root size' ] > len( set( node ) ) > self[ 'for IO skip clusters bigger than' ]:
color = 'white'
print( '"{0}" [label="{1}", color = "{2}"];'.format( tag, label, color ), file = fout )
node2tag[ node ] = tag
# insert connecting arrows:
for tag, parent in enumerate( self[ 'Nodemap - binary tree' ] ):
is_root_node = False
if len( set( parent ) ) == self[ 'Root size' ]:
is_root_node = True
for child in self[ 'Nodemap - binary tree' ][ parent ][ 'children' ]:
color = 'black'
if len( self[ 'Nodemap - binary tree' ][ child ][ 'children' ] ) == 0:
color = 'red'
if is_root_node:
if color == 'red':
color = 'yellow'
else:
color = 'green'
print( '"{0}" -> "{1}" [color="{2}"];'.format( tag, node2tag[ child ], color ), file = fout )
print( '}', file = fout )
# plot tree:
try:
input_file = '{0}'.format( os.path.join( self[ 'Working directory' ], tree_filename ) )
output_file = '{0}'.format( os.path.join( self[ 'Working directory' ], '{0}.pdf'.format( tree_filename[ :-4 ] ) ) )
subprocess.Popen( [ 'dot', '-Tpdf', input_file, '-o', output_file ] ).communicate()
except:
self._print( '[ INFO ] plotting via "dot -Tpdf ..." of the binary cluster-tree failed; only DOT file created.', verbosity_level = 1 )
return
def calculate_distance_matrix(self, clusters, min_overlap = 0.25):
'''
Calculates the specifically developed distance matrix for the AHC of clusters:
        (1) Clusters that do not share the minimum overlap are attributed a distance of "self[ 'Root size' ]" (i.e. len( self[ 'Data' ] ) ).
(2) Clusters are attributed a distance of "self[ 'Root size' ] - 1" to the root cluster.
(3) Clusters sharing the minimum overlap are attributed a distance of "size of the larger of the two clusters minus size of the overlap".
        The overlap between a pair of clusters is relative, i.e. defined as the size of the overlap divided by the size of the larger of the two clusters.
        The resulting condensed distance matrix is not returned, but rather stored in self[ 'Nodemap - condensed distance matrix' ].
:param clusters: the most frequent clusters whose "distance" is to be determined.
:type clusters: list of clusters. Clusters are represented as tuples consisting of their object's indices.
:param min_overlap: ]0, 1[ threshold value to determine if the distance between two clusters is calculated according to (1) or (3).
:type min_overlap: float
:rtype: none
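        Worked example (illustrative numbers, not taken from the original documentation): with self[ 'Root size' ] = 100
        and min_overlap = 0.25, two clusters of sizes 10 and 8 sharing 6 objects have a relative overlap of 6 / 10 = 0.6,
        hence their distance is 10 - 6 = 4 (case 3); had they shared only 2 objects (2 / 10 = 0.2 < 0.25),
        their distance would be 100 (case 1).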
'''
self._print( 'calculating distance matrix for {0} clusters ...'.format( len( clusters ) ) , end = ' ', verbosity_level = 2 )
condensed_dist_matrix = []
a, b = 1, 1
clusters = [ set( c ) for c in clusters ]
for clusterI, clusterJ in itertools.combinations( clusters, 2 ):
if len( clusterI ) == self[ 'Root size' ] or len( clusterJ ) == self[ 'Root size' ]:
dist = a * self[ 'Root size' ] - b
else:
overlap = clusterI & clusterJ
n_overlap = float( len( overlap ) )
n_sizeI = float( len( clusterI ) )
n_sizeJ = float( len( clusterJ ) )
if n_sizeI > n_sizeJ:
max_size = n_sizeI
min_size = n_sizeJ
else:
max_size = n_sizeJ
min_size = n_sizeI
if float( n_overlap ) / float( max_size ) < min_overlap:
dist = a * self[ 'Root size' ]
else:
dist = a * max_size - b * n_overlap
condensed_dist_matrix.append( dist )
self[ 'Nodemap - condensed distance matrix' ] = condensed_dist_matrix
self._print( 'done.', verbosity_level = 2 )
return
def _get_levelX_clusters(self, level):
'''
Returns a list of all clusters that are present on a specific level in the node map.
Each level corresponds to an iteration in the community construction.
:param level: [0, max community-iterations] sets the level (or iteration) from which the clusters are to be returned.
:type level: int
:rtype: list
'''
cluster_list = []
for name in self[ 'Communities' ]:
cluster, current_level = name
if current_level == level:
cluster_list.append( cluster )
return sorted( cluster_list )
def build_nodemap(self, min_cluster_size = 4, top_X_clusters = None, threshold_4_the_lowest_max_freq = 0.01, starting_min_overlap = 0.1, increasing_min_overlap = 0.05):
'''
        Construction of communities from a set of most_frequent_clusters.
This set is obtained via :py:func:`pyGCluster.Cluster._get_most_frequent_clusters`, to which the first three parameters are passed.
These clusters are then subjected to AHC with complete linkage.
The distance matrix is calculated via :py:func:`pyGCluster.Cluster.calculate_distance_matrix`.
The combination of complete linkage and the distance matrix assures that all clusters in a community exhibit at least the "starting_min_overlap" to each other.
From the resulting cluster tree, a "first draft" of communities is obtained.
These "first" communities are then themselves considered as clusters, and subjected to AHC again, until the community assignment of clusters remains constant.
        In this way, a cluster may be inserted into a target community even if it initially did not overlap with every cluster inside that community,
        as long as it overlaps with the clusters of the target community once they are combined into a single cluster.
        This gradually relaxes the degree of stringency; the clusters fit into a community in a broader sense.
For further information on the community construction, see the publication of pyGCluster.
Internal structure of communities:
>>> name = ( cluster, level )
... # internal name of the community.
... # The first element in the tuple ("cluster") contains the indices
... # of the objects that comprise a community.
... # The second element gives the level,
... # or iteration when the community was formed.
>>> self[ 'Communities' ][ name ][ 'children' ]
... # list containing the clusters that build the community.
>>> self[ 'Communities' ][ name ][ '# of nodes merged into community' ]
... # the number of clusters that build the community.
>>> self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ]
... # an OrderedDict in which each index is assigned its obCoFreq.
... # Negative indices correspond to "placeholders",
... # which are required for the insertion of black lines into expression maps.
... # Black lines in expression maps separate the individual clusters
... # that form a community, sorted by when
... # they were inserted into the community.
>>> self[ 'Communities' ][ name ][ 'highest obCoFreq' ]
... # the highest obCoFreq encountered in a community.
>>> self[ 'Communities' ][ name ][ 'cluster ID' ]
... # the ID of the cluster containing the object with the highest obCoFreq.
Of the following parameters, the first three are passed to :py:func:`pyGCluster.Cluster._get_most_frequent_clusters`:
:param min_cluster_size: clusters smaller than this threshold are not considered for the community construction.
:type min_cluster_size: int
:param top_X_clusters: form communities from the top X clusters sorted by their maximum frequency.
:type top_X_clusters: int
:param threshold_4_the_lowest_max_freq: [0, 1[ form communities from clusters whose maximum frequency is at least this value.
:type threshold_4_the_lowest_max_freq: float
:param starting_min_overlap: ]0, 1[ minimum required relative overlap between clusters so that they are assigned the same community. The relative overlap is defined as the size of the overlap between two clusters, divided by the size of the larger cluster.
:type starting_min_overlap: float
:param increasing_min_overlap: defines the increase of the required overlap between communities
:type increasing_min_overlap: float
:rtype: none
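        Illustrative usage sketch (parameter values are arbitrary examples, assuming a re-sampled Cluster instance named ``cluster``):
        >>> cluster.build_nodemap( min_cluster_size = 4, threshold_4_the_lowest_max_freq = 0.01, starting_min_overlap = 0.1 )
        >>> max_level = max( [ level for c, level in cluster[ 'Communities' ] ] )
        >>> # the clusters at max_level are the final communities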
'''
self[ 'Function parameters' ][ self.build_nodemap.__name__ ] = { k : v for k, v in list(locals().items()) if k != 'self' }
import scipy.spatial.distance as ssd
imported_from_scipy = False
try:
from fastcluster import linkage as ahc
except ImportError:
try:
from scipy.cluster.hierarchy import linkage as ahc
imported_from_scipy = True
except ImportError:
                self._print( '[ ERROR ] You require either "fastcluster" or "scipy" for the construction of communities.', verbosity_level = 0 )
                raise
# The algorithm is as follows:
# Starting from the top, all descendants of any cluster that is smaller than the root are determined.
# Those descendants form a community.
def communities_by_ahc(cluster_list, min_overlap):
# calculate distance matrix
self.calculate_distance_matrix( clusters = cluster_list, min_overlap = min_overlap )
# perform AHC
self._print( 'performing AHC for {0} clusters ...'.format( len( cluster_list ) ), end = ' ', verbosity_level = 2 )
# avoid scipy crash when only 2 objects are subjected to AHC:
if len( self[ 'Nodemap - condensed distance matrix' ] ) == 1 and len( cluster_list ) == 2:
self[ 'Nodemap - linkage matrix' ] = [ [ 0, 1, -99, len( set( cluster_list[ 0 ] + cluster_list[ 1 ] ) ) ] ]
else:
if imported_from_scipy:
self[ 'Nodemap - linkage matrix' ] = ahc( self[ 'Nodemap - condensed distance matrix' ], method = 'complete' )
else:
self[ 'Nodemap - linkage matrix' ] = ahc( self[ 'Nodemap - condensed distance matrix' ], method = 'complete', preserve_input = True )
self._print( 'done.', verbosity_level = 2 )
# parse clusters
self._print( 'parsing clusters ...', end = ' ', verbosity_level = 2 )
clusters = {} # required to reconstruct the clusters from the linkage matrix
nodemap = {} # each node = value is a dict with two keys: 'parent' -> parent cluster (as tuple), 'children' -> set of child clusters (tuples)
for i, cluster in enumerate( cluster_list ):
clusters[ i ] = cluster
nodemap[ cluster ] = { 'children' : [], 'parent' : None }
parentID = len( cluster_list ) - 1
for childID_1, childID_2, distance, size in self[ 'Nodemap - linkage matrix' ]:
parentID += 1
child1 = clusters[ childID_1 ]
child2 = clusters[ childID_2 ]
parent = child1 + child2
clusters[ parentID ] = parent
nodemap[ child1 ][ 'parent' ] = parent
nodemap[ child2 ][ 'parent' ] = parent
nodemap[ parent ] = { 'children' : [ child1, child2 ], 'parent' : None }
self[ 'Nodemap - binary tree' ] = nodemap
self._print( 'done.', verbosity_level = 2 )
# recursive function 2 find communities:
def get_communities( node , community_list = None ):
if community_list is None:
community_list = []
for child in nodemap[ node ][ 'children' ]:
if len( set( child ) ) == self[ 'Root size' ]:
community_list = get_communities( child, community_list = community_list )
else:
community_list.append( self._get_descendants_in_binary_tree( node = child ) + [ child ] )
return community_list
# get root_node = top node of the tree:
for node in nodemap:
if nodemap[ node ][ 'parent' ] is None:
root_node = node
break
community_list = get_communities( node = root_node, community_list = None )
clusters_combined_into_communities = []
for community in community_list:
endnodes = []
for cluster in community:
if cluster in cluster_list:
endnodes.append( cluster )
clusters_combined_into_communities.append( endnodes )
return clusters_combined_into_communities
def update_communities(level, clusters_combined_into_communities):
for community in clusters_combined_into_communities:
# find cluster with highest freq
community_obCoFreq2cluster_list = []
community_indices = set()
for cluster in community:
highest_obCoFreq = self[ 'Communities' ][ ( cluster, level ) ][ 'highest obCoFreq' ]
community_obCoFreq2cluster_list.append( ( highest_obCoFreq, cluster ) )
community_indices |= set( cluster )
community_obCoFreq2cluster_list.sort( reverse = True )
# print( level , 'level', community_indices)
first_cluster = community_obCoFreq2cluster_list[ 0 ][ 1 ]
name = ( tuple( sorted( community_indices ) ), level + 1 )
if name in self[ 'Communities' ]:
current_highest_obCoFreq = community_obCoFreq2cluster_list[ 0 ][ 0 ]
if current_highest_obCoFreq > self[ 'Communities' ][ name ][ 'highest obCoFreq' ]:
self[ 'Communities' ][ name ][ 'cluster ID' ] = self[ 'Communities' ][ ( first_cluster, level ) ][ 'cluster ID' ]
community_obCoFreq2cluster_list.insert( 0, None ) # assure that the first cluster is also properly inserted
else:
# import copy
# print( self[ 'Communities' ][ ( first_cluster, level ) ][ 'index 2 obCoFreq dict' ] )
self[ 'Communities' ][ name ] = {}
self[ 'Communities' ][ name ][ 'children' ] = [ first_cluster ]
self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ] = self[ 'Communities' ][ ( first_cluster, level ) ][ 'index 2 obCoFreq dict' ].copy()
self[ 'Communities' ][ name ][ 'cluster ID' ] = self[ 'Communities' ][ ( first_cluster, level ) ][ 'cluster ID' ]
self[ 'Communities' ][ name ][ 'highest obCoFreq' ] = None
self[ 'Communities' ][ name ][ '# of nodes merged into community' ] = self[ 'Communities' ][ ( first_cluster, level ) ][ '# of nodes merged into community' ]
self[ 'Communities' ][ name ][ '# of nodes merged into community' ] += len( community_obCoFreq2cluster_list ) - 1.
# insert children and update obCoFreq-Dict:
for _, cluster in community_obCoFreq2cluster_list[ 1 : ]:
self[ 'Communities' ][ name ][ 'children' ].append( cluster )
placeholder_added = False
for index in cluster:
obCoFreq = self[ 'Communities' ][ ( cluster, level ) ][ 'index 2 obCoFreq dict' ].get( index, 0. )
if index in self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ]:
self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ][ index ] += obCoFreq
else:
if not placeholder_added:
placeholder = len( self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ] ) * -1
self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ][ placeholder ] = -99
placeholder_added = True
self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ][ index ] = obCoFreq
max_freq = max(
list(
self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ].values()
) + [0.0]
)
self[ 'Communities' ][ name ][ 'highest obCoFreq' ] = max_freq
return
def init_cluster2community0_level(min_cluster_size = None, top_X_clusters = None, threshold_4_the_lowest_max_freq = None):
most_frequent_clusters = self._get_most_frequent_clusters( min_cluster_size = min_cluster_size, top_X_clusters = top_X_clusters, threshold_4_the_lowest_max_freq = threshold_4_the_lowest_max_freq )
level = 0
maxIndex = 0
self[ 'Communities' ] = {}
for cluster in sorted( most_frequent_clusters ):
index2obCoFreq = OrderedDict()
cFreq, cFreqDict = self.frequencies( cluster = cluster )
for index in cluster:
index2obCoFreq[ index ] = cFreq
# if index > 146:
# print("<")
# exit(1)
max_freq = cFreq # max_freq = max( index2obCoFreq.values() ) = cFreq, because the indices are only from a single cluster at level 0
name = ( cluster, level )
self[ 'Communities' ][ name ] = {}
self[ 'Communities' ][ name ][ 'children' ] = []
self[ 'Communities' ][ name ][ 'index 2 obCoFreq dict' ] = index2obCoFreq
self[ 'Communities' ][ name ][ 'highest obCoFreq' ] = max_freq
self[ 'Communities' ][ name ][ 'cluster ID' ] = self[ 'Cluster 2 clusterID' ][ cluster ]
self[ 'Communities' ][ name ][ '# of nodes merged into community' ] = 1.
return
min_overlap = starting_min_overlap
init_cluster2community0_level(min_cluster_size = min_cluster_size, top_X_clusters = top_X_clusters, threshold_4_the_lowest_max_freq = threshold_4_the_lowest_max_freq)
level = 0
community_snapshot = None
while True:
cluster_list = self._get_levelX_clusters( level = level )
clusters_combined_into_communities = communities_by_ahc( cluster_list, min_overlap )
if community_snapshot == sorted( clusters_combined_into_communities ) or min_overlap >= 1.0:
break
self.plot_nodetree( 'AHCofClusters_binaryTree_iteration{0}.dot'.format(level) )
update_communities( level = level, clusters_combined_into_communities = clusters_combined_into_communities )
community_snapshot = sorted( clusters_combined_into_communities )
min_overlap += increasing_min_overlap
level += 1
return
def _get_descendants_in_binary_tree(self, node, children = None):
'''
Recursively determines the descendants of a given node in an AHC tree.
:param node: tuple describing uniquely a node in the AHC tree. It resembles a pyGCluster-cluster, but may contain the same index several times, e.g. if (1,2) and (1,3) are merged into (1,1,2,3).
:type node: tuple
:param children: all descendants determined so far. Should equal None for the first call.
:type children: list
:rtype: list
'''
if children is None:
children = []
if len( self[ 'Nodemap - binary tree' ][ node ][ 'children' ] ) > 0:
for child in self[ 'Nodemap - binary tree' ][ node ][ 'children' ]:
children.append( child )
self._get_descendants_in_binary_tree( node = child, children = children )
return children
def _get_descendants_in_community_tree(self, parent_name, children = None):
'''
Recursively determines the descendants of a given node in the community tree.
In contrast to :py:func:`pyGCluster.Cluster._get_descendants_in_binary_tree` , the community tree is not a binary tree;
and "parent_name" differs from the "node"-parameter of the former (see below).
:param parent_name: tuple with two elements: ( cluster, level ). Here, cluster is a pyGCluster-cluster, i.e. a tuple containing each index describing a cluster only once.
:type parent_name: tuple
:param children: all descendants determined so far. Should equal None for the first call.
:type children: list
:rtype: list
'''
if children is None:
children = []
if len( self[ 'Communities' ][ parent_name ][ 'children' ] ) > 0:
parent, level = parent_name
for child in self[ 'Communities' ][ parent_name ][ 'children' ]:
child_name = ( child, level - 1 )
children.append( child_name )
self._get_descendants_in_community_tree( parent_name = child_name, children = children )
return children
def create_rainbow_colors( self, n_colors = 10):
'''
Returns a list of rainbow colors. Colors are expressed as hexcodes of RGB values.
:param n_colors: number of rainbow colors.
:type n_colors: int
:rtype: list
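        Illustrative doctest-style sketch (assuming a Cluster instance named ``cluster``):
        >>> cluster.create_rainbow_colors( n_colors = 2 )
        ['#FF0000', '#00FFFF']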
'''
import colorsys
colors = []
for i in range( n_colors ):
# i has to be [0.0, 1.0[
i /= float( n_colors )
rgb = [ int(value) for value in colorsys.hsv_to_rgb(i, 1, 255) ]
hexcode = '#'
for _ in rgb:
_hex = hex(_)[2:]
if len(_hex) == 1:
_hex = '0{}'.format(_hex.upper())
else:
_hex = '{}'.format(_hex.upper())
hexcode += _hex
colors.append(hexcode)
return colors
def write_legend(self, filename = 'legend.txt'):
'''
Creates a legend for the community node map as a TXT file.
Herein, the object composition of each cluster of the node map as well as its frequencies are recorded.
Since this function is internally called by :py:func:`pyGCluster.Cluster.write_dot`, it is typically not necessary to call this function.
:param filename: name of the legend TXT file, best given with extension ".txt".
:type filename: string
:rtype: none
'''
with open( os.path.join( self[ 'Working directory' ] , filename ), 'w') as legend:
print( "Frequency order:\n{0}\n".format( ', '.join( sorted( self[ 'Distance-linkage combinations' ] ) ) ), file = legend )
for name in self[ 'Communities' ]:
cluster, level = name
if len( cluster ) > self[ 'for IO skip clusters bigger than' ]:
continue
if cluster in self[ 'Cluster 2 clusterID' ]:
cFreq, cFreqDict = self.frequencies( cluster = cluster )
else:
cFreqDict = { None : -99 }
nodeID = '{0}, {1}'.format( self[ 'Communities' ][ name ][ 'cluster ID' ], level )
print( 'label = "{nodeID:0>3}", size = {size:0>3}, frequencies = {frequencies}'.format(
nodeID = nodeID,
size = len( cluster ),
frequencies = ', '.join( [ '{0:5.4f}'.format( f ) for method, f in sorted( cFreqDict.items() ) ] )
), file = legend
)
for index in cluster:
addOn = ''
try:
addOn = self[ 'Additional Labels' ][ self[ 'Identifiers' ][ index ] ]
                        if isinstance( addOn, ( list, set ) ):
addOn = ".oOo.".join(list(set(addOn)))
except:
pass
print( '{0}\t{1}'.format( self[ 'Identifiers' ][ index ], addOn ), file = legend )
print( '+' * 50 , file = legend )
self._print( '... nodemap saved in "{0}"'.format( self[ 'Working directory' ] ), verbosity_level = 2 )
return
def write_dot(self, filename , scaleByFreq = True, min_obcofreq_2_plot = None, n_legend_nodes = 5, min_value_4_expression_map = None, max_value_4_expression_map = None, color_gradient = '1337', box_style = 'classic'):
'''
Writes a Graphviz DOT file representing the cluster composition of communities.
        Herein, each node represents a cluster. Its name is a combination of the cluster's ID, followed by the level / iteration at which it was inserted into the community:
        - The node's size reflects the cluster's cFreq.
        - The node's shape encodes by which distance metric the cluster was found (a point-shaped node indicates that the cluster was not among the most_frequent_clusters, but was only formed during the AHC of clusters).
- The node's color shows the community membership; except for clusters which are larger than self[ 'for IO skip clusters bigger than' ], those are highlighted in grey.
- The node connecting all clusters is the root (the cluster holding all objects), which is highlighted in white.
The DOT file may be rendered with "Graphviz" or further processed with other appropriate programs such as e.g. "Gephi".
If "Graphviz" is available, the DOT file is eventually rendered with "Graphviz"'s dot-algorithm.
        In addition, an expression map for each cluster of the node map is created (via :py:func:`pyGCluster.Cluster.draw_expression_map_for_community_cluster`).
Those are saved in the sub-folder "communityClusters".
This function also calls :py:func:`pyGCluster.Cluster.write_legend`,
which creates a TXT file containing the object composition of all clusters, as well as their frequencies.
:param filename: file name of the Graphviz DOT file representing the node map, best given with extension ".dot".
:type filename: string
:param scaleByFreq: switch to either scale nodes (= clusters) by cFreq or apply a constant size to each node (the latter may be useful to put emphasis on the nodes' shapes).
:type scaleByFreq: boolean
:param min_obcofreq_2_plot: if defined, clusters with lower cFreq than this value are skipped, i.e. not plotted.
:type min_obcofreq_2_plot: float
:param n_legend_nodes: number of nodes representing the legend for the node sizes. The node sizes themselves encode for the cFreq. "Legend nodes" are drawn as grey boxes.
:type n_legend_nodes: int
:param min_value_4_expression_map: lower bound for color coding of values in the expression map. Remember that log2-values are expected, i.e. this value should be < 0.
:type min_value_4_expression_map: float
:param max_value_4_expression_map: upper bound for color coding of values in the expression map.
:type max_value_4_expression_map: float
:param color_gradient: name of the color gradient used for plotting the expression map.
:type color_gradient: string
:param box_style: the way the relative standard deviation is visualized in the expression map. Currently supported are 'modern', 'fusion' or 'classic'.
:type box_style: string
:rtype: none
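        Illustrative usage sketch (parameter values are arbitrary examples; assumes that build_nodemap() was called before):
        >>> cluster.write_dot( filename = 'nodemap.dot', scaleByFreq = True, n_legend_nodes = 5 )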
'''
self[ 'Function parameters' ][ self.write_dot.__name__ ] = { k : v for k, v in list(locals().items()) if k != 'self' }
import numpy
node_templateString = '"{nodeID}" [label="{label}", color="{color}", shape="{shape}", width="{freq}", height="{freq}", fixedsize=true, community={community}, c_members={c_members}, metrix="{metrix}", normalized_max_obCoFreq="{normalized_max_obCoFreq}"];'
node_templateString_dict = {}
edge_templateString = '"{parent}" -> "{child}" [color="{color}", arrowsize=2.0];'
edge_templateString_dict = {}
if 'Communities' not in self.keys():
self._print( 'function "build_nodemap()" was not called prior. Building node map with default settings ...', verbosity_level = 0 )
self.build_nodemap()
most_frequent_clusters_used_4_nodemap = set( self._get_levelX_clusters( level = 0 ) )
# assign each distance metric combo a specific shape (if possible):
metrix2shape = {}
n_metrices = len( self[ 'Distances' ] )
if len( self[ 'Distances' ] ) > 3:
self._print( '[ INFO ] more distance metrics than shapes! All shapes equal "ellipse".', verbosity_level = 1 )
shapes = [ 'ellipse' for i in range( n_metrices ** n_metrices ) ]
else:
shapes = [ 'box', 'ellipse', 'triangle', 'diamond', 'octagon', 'invtriangle', 'invtrapezium' ]
for i in range( 1, n_metrices + 1 ):
for metric_combo in itertools.combinations( self[ 'Distances' ] , i ):
metrix2shape[ ' + '.join( sorted( metric_combo ) ) ] = shapes.pop( 0 )
self._print( 'metric 2 shape:', metrix2shape , verbosity_level = 1 )
self[ 'nodemap metric2shape' ] = metrix2shape
# determine max obCoFreq for proper node scaling:
sorted_obCoFreqs = sorted( [ self[ 'Communities' ][ name ][ 'highest obCoFreq' ] for name in self[ 'Communities' ] ] )
max_obCoFreq = float( sorted_obCoFreqs[ -2 ] ) # sorted_obCoFreqs[ -1 ] == root, hence max_obCoFreq would always be cFreq(root) == 2.0!
# get top, i.e. largest cluster of each community:
max_level = max( [ name[1] for name in self[ 'Communities' ] ] )
communities_top_cluster = self._get_levelX_clusters( level = max_level )
# set colors:
communities_minus_close2root = [ c for c in communities_top_cluster if len( c ) < self[ 'for IO skip clusters bigger than' ] ]
community_colors = self.create_rainbow_colors( n_colors = len( communities_minus_close2root ) )
name2community_and_color = {}
for communityID, cluster in enumerate( communities_top_cluster ):
if cluster in communities_minus_close2root:
color = community_colors.pop( 0 )
else:
color = '#BEBEBE'
name = ( cluster, max_level )
communityID_color = ( communityID, color )
name2community_and_color[ name ] = communityID_color
for child_name in self._get_descendants_in_community_tree( parent_name = name ):
name2community_and_color[ child_name ] = communityID_color
# filter nodes by min_obcofreq_2_plot, and build 'name2nodeID'-dict:
name2nodeID = {}
skipped_nodes = set()
skipped_nodes.add( ( self[ 'Root' ], 0 ) )
for name in self[ 'Communities' ]:
name2nodeID[ name ] = len( name2nodeID )
            if min_obcofreq_2_plot is not None and min_obcofreq_2_plot > self[ 'Communities' ][ name ][ 'highest obCoFreq' ]:
community, level = name
if max_level > level: # prevent that communities are lost if community freq < min_obcofreq_2_plot
skipped_nodes.add( name )
### write dot file:
dot_filename = os.path.join( self[ 'Working directory' ], filename )
with open( dot_filename, 'w' ) as dot:
## initialize DOT file:
print( 'digraph "pyGCluster nodemap" {', file = dot )
print( 'graph [overlap=Prism, splines=true, ranksep=5.0, nodesep=0.75];', file = dot )
print( 'node [style=filled]', file = dot )
scale_factor = 5.
## draw nodes:
for level in range( max_level + 1 ):
for cluster in self._get_levelX_clusters( level ):
name = ( cluster, level )
if name in skipped_nodes:
continue
# draw expression map:
if len( cluster ) <= self[ 'for IO skip clusters bigger than' ]:
self.draw_expression_map_for_community_cluster( name, min_value_4_expression_map = min_value_4_expression_map, max_value_4_expression_map = max_value_4_expression_map, color_gradient = color_gradient , sub_folder = 'communityClusters', box_style = box_style )
node_templateString_dict[ 'nodeID' ] = name2nodeID[ name ]
# scale node size:
if scaleByFreq:
normalized_obCoFreq = self[ 'Communities' ][ name ][ 'highest obCoFreq' ]
width = normalized_obCoFreq / max_obCoFreq * scale_factor
else:
width = 2.5
node_templateString_dict[ 'freq' ] = width
node_templateString_dict[ 'label' ] = '{0}-{1}'.format( self[ 'Communities' ][ name ][ 'cluster ID' ], level )
# determine shape:
if cluster in most_frequent_clusters_used_4_nodemap:
clusterID = self[ 'Cluster 2 clusterID' ][ cluster ]
distances = set()
for i in numpy.nonzero( self[ 'Cluster counts' ][ clusterID ] > 0 )[ 0 ]:
distance, linkage = self[ 'Distance-linkage combinations' ][ i ].split( '-' )
distances.add( distance )
distances = ' + '.join( sorted( distances ) )
node_templateString_dict[ 'metrix' ] = distances
node_templateString_dict[ 'shape' ] = metrix2shape[ distances ]
else:
node_templateString_dict[ 'metrix' ] = 'None'
node_templateString_dict[ 'shape' ] = 'point'
# store the cluster's size (in terms of objects describing it), set color and community ID:
node_templateString_dict[ 'c_members' ] = len( cluster )
communityID, community_color = name2community_and_color[ name ]
node_templateString_dict[ 'color' ] = community_color
node_templateString_dict[ 'community' ] = communityID
node_templateString_dict[ 'normalized_max_obCoFreq' ] = self[ 'Communities' ][ name ][ 'highest obCoFreq' ]
# finally insert node into dot-file:
print( node_templateString.format( **node_templateString_dict ), file = dot )
## insert edges:
for level in range( 1, max_level + 1 ):
for parent in self._get_levelX_clusters( level ):
parent_name = ( parent, level )
edge_templateString_dict[ 'parent' ] = name2nodeID[ parent_name ]
edge_templateString_dict[ 'color' ] = name2community_and_color[ parent_name ][ 1 ]
for child in self[ 'Communities' ][ parent_name ][ 'children' ]:
child_name = ( child, level - 1 )
if child_name in skipped_nodes:
continue
edge_templateString_dict[ 'child' ] = name2nodeID[ child_name ]
                    # tricky case: a child without a direct parent ...
if parent_name in skipped_nodes:
# find largest parent:
communityID, _ = name2community_and_color[ child_name ]
# get all community clusters which are attributed the current communityID:
community_names = set()
for name in name2community_and_color:
ID, _ = name2community_and_color[ name ]
if ID == communityID and name != child_name and name[ 1 ] > child_name[ 1 ]:
community_names.add( name )
# Of those, extract clusters which are NOT to be skipped:
potential_parents = community_names - skipped_nodes
# get parent with lowest level:
min_level = max_level
for potential_parent_name in potential_parents:
parent, _level = potential_parent_name
if min_level > _level:
min_level = _level
for potential_parent_name in potential_parents:
parent, _level = potential_parent_name
if _level == min_level:
edge_templateString_dict[ 'parent' ] = name2nodeID[ potential_parent_name ]
break
print( edge_templateString.format( **edge_templateString_dict ), file = dot )
## connect largest cluster of each community with root:
root_name = ( self[ 'Root' ], -1 )
name2nodeID[ root_name ] = len( name2nodeID )
node_templateString_dict = {
'nodeID' : name2nodeID[ root_name ],
'freq' : scale_factor,
'label' : 'ROOT',
'c_members' : self[ 'Root size' ],
'community' : -1,
'color' : '#FFFFFF',
'metrix' : 'ALL',
'shape' : 'ellipse',
'normalized_max_obCoFreq' : '-99'
}
print( node_templateString.format( **node_templateString_dict ), file = dot )
edge_templateString_dict = { 'parent' : name2nodeID[ root_name ], 'color' : '#000000' }
for cluster in communities_top_cluster:
cluster_name = ( cluster, max_level )
edge_templateString_dict[ 'child' ] = name2nodeID[ cluster_name ]
print( edge_templateString.format( **edge_templateString_dict ), file = dot )
## add legend for the node size as additional, grey, boxed-sized nodes:
for i in range( 1, n_legend_nodes + 1 ):
f = max_obCoFreq * ( i / float( n_legend_nodes ) ) / max_obCoFreq
node_templateString_dict = {
'nodeID' : 'legend_node_{0}'.format( i ),
'freq' : f * scale_factor,
'label' : round( f, 4 ),
'c_members' : -1,
'community' : -1,
'color' : '#BEBEBE',
'metrix' : 'None',
'shape' : 'box',
'normalized_max_obCoFreq' : '-99'
}
print( node_templateString.format( **node_templateString_dict ), file = dot)
for i in range( 1, n_legend_nodes ):
edge_templateString_dict = { 'parent' : 'legend_node_{0}'.format( i ), 'child' : 'legend_node_{0}'.format( i + 1 ), 'color' : '#BEBEBE' }
print( edge_templateString.format( **edge_templateString_dict ), file = dot )
## finalize DOT file:
print( '}', file = dot )
self.write_legend(filename = '{0}__legend.txt'.format(filename[:-4]))
try:
rendered_filename = os.path.join( self[ 'Working directory' ], '{0}.pdf'.format( filename[ : -4 ] ) )
out, err = subprocess.Popen( [ 'dot', '-Tpdf', dot_filename, '-o', rendered_filename ], stdout = subprocess.PIPE, stderr = subprocess.PIPE ).communicate()
except:
            self._print( '[ INFO ] only DOT file created, rendering with Graphviz failed.', verbosity_level = 1 )
return
def frequencies(self, identifier = None, clusterID = None, cluster = None):
'''
        Returns a tuple with (i) the cFreq and (ii) a collections.defaultdict containing the DLC:frequency pairs for either
an identifier, e.g. "JGI4|Chlre4|123456"
or clusterID
or cluster.
Returns 'None' if the identifier is not part of the data set, or clusterID or cluster was not found during iterations.
Example:
>>> cFreq, dlc_freq_dict = cluster.frequencies( identifier = 'JGI4|Chlre4|123456' )
>>> dlc_freq_dict
... defaultdict(<type 'float'>,
... {'average-correlation': 0.0, 'complete-correlation': 0.0,
... 'centroid-euclidean': 0.0015, 'median-euclidean': 0.0064666666666666666,
... 'ward-euclidean': 0.0041333333333333335, 'weighted-correlation': 0.0,
... 'complete-euclidean': 0.0014, 'weighted-euclidean': 0.0066333333333333331,
... 'average-euclidean': 0.0020333333333333332})
:param identifier: search frequencies by identifier input
:type identifier: string
:param clusterID: search frequencies by cluster ID input
:type clusterID: int
:param cluster: search frequencies by cluster (tuple of ints) input
:type cluster: tuple
:rtype: tuple
'''
if identifier is None and clusterID is None and cluster is None:
self._print( 'invalid call of function "frequencies": neither "identifier", "clusterID" nor "cluster" were given.\n\treturning None ...',
file = sys.stderr,
verbosity_level = 0
)
return None
cFreqDict = ddict(float)
if identifier != None:
# search by identifier
ident_index = self[ 'Identifiers' ].index( identifier )
for cluster, clusterID in self[ 'Cluster 2 clusterID' ].items():
if ident_index in cluster:
for i, method in enumerate(self[ 'Distance-linkage combinations' ]):
freq = self[ 'Cluster counts' ][ clusterID ][ i ] / float( self[ 'Iterations' ] )
cFreqDict[ method ] += freq
        else:
            # search by cluster (tuple of indices) or directly by clusterID
            if cluster is not None:
                clusterID = self[ 'Cluster 2 clusterID' ][ cluster ]
            if clusterID is not None:
for i, dlc in enumerate( self[ 'Distance-linkage combinations' ] ):
freq = self[ 'Cluster counts' ][ clusterID ][ i ] / float( self[ 'Iterations' ] )
cFreqDict[ dlc ] = freq
distance_freqs = { distance : [] for distance in self[ 'Distances' ] }
for dlc, f in cFreqDict.items():
distance, linkage = dlc.split( '-' )
distance_freqs[ distance ].append( f )
cFreq = sum( [ self.median( f ) for dist, f in distance_freqs.items() ] )
return cFreq, cFreqDict
def plot_mean_distributions(self):
'''
Creates a density plot of mean values for each condition via rpy2.
:rtype: none
'''
try:
import rpy2.robjects as robjects
from rpy2.robjects import r
from rpy2.robjects.packages import importr
graphics = importr('graphics')
grdevices = importr('grDevices')
except ImportError:
self._print( '[ WARNING ] since "rpy2" is not available (ImportError), the plot of the distribution of mean values could not be created.', verbosity_level = 0 )
return
        grdevices.pdf( os.path.join( self[ 'Working directory' ] , 'distribution_of_means.pdf' ) )
for condition in self[ 'Conditions' ]:
means = []
for identifier in self[ 'Data' ]:
mean, sd = self[ 'Data' ][ identifier ][ condition ]
means.append( mean )
graphics.plot(
r.density( robjects.FloatVector( means ) ),
main = condition,
col = 'blue',
xlab = 'Mean values',
ylab = 'Density',
)
grdevices.dev_off()
return
def draw_expression_profiles(self, min_value_4_expression_map = None, max_value_4_expression_map = None, conditions=None):
'''
Draws an expression profile plot (SVG) for each community, illustrating the main "expression pattern" of a community.
Each line in this plot represents an object. The "grey cloud" illustrates the range of the standard deviation of the mean values.
The plots are named prefixed by "exProf", followed by the community name as it is shown in the node map.
:param min_value_4_expression_map: minimum of the y-axis (since data should be log2 values, this value should typically be < 0).
:type min_value_4_expression_map: int
:param max_value_4_expression_map: maximum for the y-axis.
:type max_value_4_expression_map: int
:rtype: none
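        Illustrative usage sketch (assumes that communities have been built via build_nodemap(); y-axis bounds are arbitrary examples):
        >>> cluster.draw_expression_profiles( min_value_4_expression_map = -3, max_value_4_expression_map = 3 )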
'''
if conditions is None:
conditions = self[ 'Conditions' ]
# print(conditions)
self[ 'Function parameters' ][ self.draw_expression_profiles.__name__ ] = { k : v for k, v in list(locals().items()) if k != 'self' }
import numpy
FONT_SIZE = 10
y_offset = 20
MIN_V, MAX_V = min_value_4_expression_map, max_value_4_expression_map
if min_value_4_expression_map is None or max_value_4_expression_map is None:
# determine min and max for y-axis:
_yAxisMinMax = set()
for identifier in self[ 'Data' ]:
for condition in self[ 'Data' ][ identifier ]:
mean, sd = self[ 'Data' ][ identifier ][ condition ]
_yAxisMinMax.add( round( mean + sd, 2 ) )
_yAxisMinMax.add( round( mean - sd, 2 ) )
if min_value_4_expression_map is None:
MIN_V = int( math.ceil( min( _yAxisMinMax ) ) ) - 1
if max_value_4_expression_map is None:
MAX_V = int( math.ceil( max( _yAxisMinMax ) ) )
# give y-axis the same amount in positive and negative direction (e.g. from - 10 to 10):
if min_value_4_expression_map is None and max_value_4_expression_map is None: # but only if no value is given, otherwise it's probably user-chosen!
if MAX_V > abs( MIN_V ):
MIN_V = MAX_V * -1
else:
MAX_V = MIN_V * -1
startingX = 100
        startingY = 300 + y_offset # determine length of y-axis and y-range, represents zero point
maxY = ( startingY - y_offset ) * 2
scalingX = max( [ len( con ) * FONT_SIZE for con in conditions ] ) + 20 # distance between each condition
scalingY = ( maxY - ( startingY - y_offset ) ) / float( MAX_V ) * -1 # has to be negative!
def svg_text(x, y, text):
return '<text x="{0}" y="{1}"> {2} </text>'.format( x, y, text )
def svg_line(x1, y1, x2, y2):
return '<line x1="{0}" y1="{1}" x2="{2}" y2="{3}" style="stroke:#000000"/>'.format( x1, y1, x2, y2 )
def svg_comment(text):
return '<!-- {0} -->'.format( text )
def min_max_ratioWithSD(ratios, SDs):
ratios_plus_SD = [ ratio + SDs[ i ] for i, ratio in enumerate( ratios ) ]
ratios_minus_SD = [ ratio - SDs[ i ] for i, ratio in enumerate( ratios ) ]
return min( ratios_minus_SD ), max( ratios_plus_SD )
n_conditions = len( conditions )
max_level = max( [ name[1] for name in self[ 'Communities' ] ] )
for cluster in self._get_levelX_clusters( max_level ):
if len( cluster ) > self[ 'for IO skip clusters bigger than' ]:
continue
shape = ( len( cluster ), len( conditions ) )
ratios = numpy.zeros( shape )
SDs = numpy.zeros( shape )
identifiers = []
for row_index, identifier_index in enumerate( cluster ):
try:
identifier = self[ 'Identifiers' ][ identifier_index ]
except:
print(identifier_index, len( self['Identifiers']))
exit(1)
for col_index, condition in enumerate( conditions ):
mean, sd = self[ 'Data' ][ identifier ][ condition ]
ratios[ row_index ][ col_index ] = mean
SDs[ row_index ][ col_index ] = sd
addOn = ''
try:
                    addOn = self[ 'Additional Labels' ][ identifier ]
if type( addOn ) == type( list() ) or type( addOn ) == type( set() ):
addOn = ".oOo.".join( list( set( addOn ) ) )
except:
pass
if addOn:
identifiers.append( '{0}___{1}'.format( identifier, addOn ) )
else:
identifiers.append( identifier )
### draw expression profile:
communityID = self[ 'Communities' ][ ( cluster, max_level ) ][ 'cluster ID' ]
n_values = len( ratios )
with open( os.path.join( self[ 'Working directory' ] , 'exProf_{0}-{1}.svg'.format( communityID, max_level ) ), 'w') as fout:
width = startingX + scalingX * ( n_conditions -1 ) + len( conditions[ -1 ] ) * FONT_SIZE + max( [ len( i ) * FONT_SIZE for i in identifiers ] ) + 10
s = '<svg xmlns="http://www.w3.org/2000/svg" version="1.1" font-size="{2}px" font-family="Verdana" width="{0}" height="{1}">'
print( s.format( width, maxY + y_offset + FONT_SIZE, FONT_SIZE ), file = fout )
## draw SD-cloud:
# determine min and max ratio + SD:
print( svg_comment( 'SD CLOUD:' ), file = fout )
for i in range( n_conditions - 1 ):
y1_min, y1_max = min_max_ratioWithSD( [ ratios[ j ][ i ] for j in range( n_values ) ], [ SDs[ j ][ i ] for j in range( n_values ) ] )
y2_min, y2_max = min_max_ratioWithSD( [ ratios[ j ][ i + 1 ] for j in range( n_values ) ], [ SDs[ j ][ i + 1 ] for j in range( n_values ) ] )
s = '<path d="M{x1} {y1_min} L{x2} {y2_min} L{x2} {y2_max} L{x1} {y1_max} Z" fill="{fill}"/>'
d = { 'fill' : '#D3D3D3'}
d[ 'x1' ] = startingX + i*scalingX
d[ 'x2' ] = startingX+(i+1)*scalingX
d[ 'y1_min' ] = startingY + y1_min*scalingY
d[ 'y1_max' ] = startingY + y1_max*scalingY
d[ 'y2_min' ] = startingY + y2_min*scalingY
d[ 'y2_max' ] = startingY + y2_max*scalingY
print( s.format( **d ), file = fout )
## draw expression profile lines:
print( svg_comment( 'EXPRESSION PROFILE LINES:' ), file = fout )
for i in range( n_conditions - 1 ):
for j in range( n_values ):
d = {}
d[ 'x1' ] = startingX + i * scalingX
d[ 'x2' ] = startingX + ( i + 1 ) * scalingX
d[ 'y1' ] = startingY + ratios[ j ][ i ] * scalingY
d[ 'y2' ] = startingY + ratios[ j ][ i + 1 ] * scalingY
print( svg_line( x1 = d[ 'x1' ], y1 = d[ 'y1' ], x2 = d[ 'x2' ], y2 = d[ 'y2' ] ), file = fout )
## add legend:
print( svg_comment( 'LEGEND:' ), file = fout )
# first, collect all values to plot -> to allow removing overlapping identifiers:
legend = []
for i, identifier in enumerate( identifiers ):
_last_ratio = ratios[ i ][ -1 ]
_x = startingX + scalingX * ( n_conditions - 1 ) + 2
_y = startingY + _last_ratio * scalingY
legend.append( ( _y, _x, identifier ) )
legend.sort()
# get all y-differences:
y_differences = []
for i, ( y, x, identifier ) in enumerate( legend[ : -1 ] ):
y_differences.append( legend[ i + 1 ][ 0 ] - y )
# max font size for legend is the minimum y distance -> no overlap!
legend_maxFontSize = int( round( min( y_differences ) ) )
if legend_maxFontSize == 0:
legend_maxFontSize = 1
# plot legend
for y, x, identifier in legend:
print( '<text x="{0}" y="{1}" font-size="{3}px">{2}</text>'.format( x, y, identifier, legend_maxFontSize ), file = fout )
## plot axis:
print( svg_comment( 'AXES:' ), file = fout )
# y-axis:
print(svg_line( x1 = 50, y1 = startingY + MAX_V * scalingY, x2 = 50, y2 = maxY + y_offset), file = fout )
# y-axis - ticks:
y_ticks_per_unit = 2
for i in range( 1, MAX_V * y_ticks_per_unit + 1 ):
_ratio = float( i ) / y_ticks_per_unit
_y = startingY + _ratio * scalingY
print( svg_text( x = 0, y = _y + FONT_SIZE // 2, text = '+{0}'.format( _ratio ) ), file = fout )
print( svg_line( x1 = 40, y1 = _y, x2 = 60, y2 = _y ), file = fout )
for i in range( 1, abs( MIN_V ) * y_ticks_per_unit + 1 ):
_ratio = float( i ) / y_ticks_per_unit * -1
_y = startingY + _ratio * scalingY
print( svg_text( x = 0, y = _y + FONT_SIZE // 2, text = _ratio), file = fout )
print( svg_line( x1 = 40, y1 = _y, x2 = 60, y2 = _y), file = fout )
print( svg_text( x = 0, y = startingY + FONT_SIZE // 2, text = 0.0 ), file = fout )
print( svg_line( x1 = 40, y1 = startingY, x2 = 60, y2 = startingY ), file = fout )
# zero-line:
print( svg_line( x1 = 50, y1 = startingY, x2 = startingX + scalingX * ( n_conditions - 1 ), y2 = startingY ), file = fout )
# x-axis = conditions:
for i, condition in enumerate( conditions ):
_x = startingX + scalingX * i
print( svg_text( x= _x + 2, y = maxY + y_offset + FONT_SIZE, text = condition), file = fout )
s = '<line x1="{0}" y1="{1}" x2="{2}" y2="{3}" style="stroke-dasharray: 5, 5; stroke:#000000"/>'
print( s.format( _x, startingY + MAX_V * scalingY, _x, maxY + y_offset), file = fout )
print( '</svg>', file = fout )
self._print( '... community expression profile plots saved in "{0}"'.format( self[ 'Working directory' ] ), verbosity_level = 1 )
return
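    # Illustrative sketch (not part of the original pyGCluster code): a typical call of
    # draw_expression_profiles with a symmetric, user-chosen log2 range. The helper below
    # is never invoked by pyGCluster itself; the bounds of +/- 4 are assumed example values.
    def _example_draw_expression_profiles(self):
        '''
        Hypothetical usage sketch; assumes data and communities have already been set up
        (e.g. via do_it_all).
        '''
        self.draw_expression_profiles(
            min_value_4_expression_map = -4,
            max_value_4_expression_map = 4,
        )
        return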
def do_it_all(
self,
working_directory = None,
distances = None,
linkages = None, function_2_generate_noise_injected_datasets = None,
min_cluster_size = 4, alphabet = None, force_plotting = False, min_cluster_freq_2_retain = 0.001,
pickle_filename = 'pyGCluster_resampled.pkl', cpus_2_use = None, iter_max = 250000,
iter_tol = 0.01 / 100000, iter_step = 5000, iter_top_P = 0.001, iter_window = 50000, iter_till_the_end = False,
top_X_clusters = None, threshold_4_the_lowest_max_freq = 0.01,
starting_min_overlap = 0.1, increasing_min_overlap = 0.05,
color_gradient = 'default', box_style = 'classic',
min_value_4_expression_map = None, max_value_4_expression_map = None, additional_labels = None
):
'''
        Invokes all functions that constitute the main functionality of pyGCluster:
        AHC clustering with noise injection and a variety of DLCs,
        in order to identify highly reproducible clusters,
        followed by a meta-clustering of those highly reproducible clusters into so-called 'communities'.
The functions that are called are:
- :py:func:`pyGCluster.Cluster.resample`
- :py:func:`pyGCluster.Cluster.build_nodemap`
- :py:func:`pyGCluster.Cluster.write_dot`
- :py:func:`pyGCluster.Cluster.draw_community_expression_maps`
- :py:func:`pyGCluster.Cluster.draw_expression_profiles`
        For a complete list of possible distance matrix calculations see:
        http://docs.scipy.org/doc/scipy/reference/spatial.distance.html
        and for linkage methods see:
        http://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html
.. note ::
If memory is of concern (e.g. for a large dataset, > 5000 objects), cpus_2_use should be kept low.
:param distances: list of distance metrices, given as strings, e.g. [ 'correlation', 'euclidean' ]
:type distances: list
:param linkages: list of distance metrices, given as strings, e.g. [ 'average', 'complete', 'ward' ]
:type linkages: list
:param function_2_generate_noise_injected_datasets: function to generate noise-injected datasets. If None (default), Gaussian distributions are used.
:type function_2_generate_noise_injected_datasets: function
:param min_cluster_size: minimum size of a cluster, so that it is included in the assessment of cluster reproducibilities.
:type min_cluster_size: int
:param alphabet: alphabet used to convert decimal indices to characters to save memory. Defaults to string.printable, without ','.
:type alphabet: string
.. note ::
            If alphabet contains ',', this character is removed from alphabet, because the indices comprising a cluster are saved comma-separated.
:param force_plotting: the convergence plot is created after each iter_step iteration (otherwise only when convergence is detected).
:type force_plotting: boolean
        :param min_cluster_freq_2_retain: ]0, 1[ minimum frequency a cluster has to exhibit (only the maximum of the DLC frequencies matters here) in order to be stored in pyGCluster once all iterations are finished.
:type min_cluster_freq_2_retain: float
        :param cpus_2_use: number of worker processes that are spawned in the re-sampling routine.
:type cpus_2_use: int
:param iter_max: maximum number of re-sampling iterations.
:type iter_max: int
Convergence determination:
:param iter_tol: ]0, 1e-3[ value for the threshold of the median of normalized slopes, in order to declare convergence.
:type iter_tol: float
        :param iter_step: number of iterations each worker process performs, and simultaneously the interval at which convergence is checked.
:type iter_step: int
        :param iter_top_P: ]0, 1[ for the convergence estimation, only the most frequent clusters are examined; this is the minimum frequency a cluster needs in order to be included.
:type iter_top_P: float
:param iter_window: size of the sliding window in iterations. The median is obtained from normalized slopes inside this window - *should be a multiple of iter_step*
:type iter_window: int
:param iter_till_the_end: if set to True, the convergence determination is switched off; hence, re-sampling is performed until iter_max is reached.
:type iter_till_the_end: boolean
Output/Plotting:
:param pickle_filename: Filename of the output pickle object
:type pickle_filename: string
:param top_X_clusters: Plot of the top X clusters in the sorted list (by freq) of clusters having a maximum cluster frequency of at least threshold_4_the_lowest_max_freq (clusterfreq-plot is still sorted by size).
:type top_X_clusters: int
:param threshold_4_the_lowest_max_freq: ]0, 1[ Clusters must have a maximum frequency of at least threshold_4_the_lowest_max_freq to appear in the plot.
:type threshold_4_the_lowest_max_freq: float
:param min_value_4_expression_map: lower bound for color coding of values in the expression map. Remember that log2-values are expected, i.e. this value should be < 0!
:type min_value_4_expression_map: float
:param max_value_4_expression_map: upper bound for color coding of values in the expression map.
:type max_value_4_expression_map: float
:param color_gradient: name of the color gradient used for plotting the expression map. Currently supported are default, Daniel, barplot, 1337, BrBG, PiYG, PRGn, PuOr, RdBu, RdGy, RdYlBu, RdYlGn and Spectral
:type color_gradient: string
:param expression_map_filename: file name for expression map. .svg will be added if required.
:type expression_map_filename: string
:param legend_filename: file name for legend .svg will be added if required.
:type legend_filename: string
:param box_style: the way the relative standard deviation is visualized in the expression map. Currently supported are 'modern', 'fusion' or 'classic'.
:type box_style: string
:param starting_min_overlap: ]0, 1[ minimum required relative overlap between clusters so that they are assigned the same community. The relative overlap is defined as the size of the overlap between two clusters, divided by the size of the larger cluster.
:type starting_min_overlap: float
:param increasing_min_overlap: defines the increase of the required overlap between communities
:type increasing_min_overlap: float
:param additional_labels: dictionary, where additional labels can be defined which will be added in the expression map plots to the gene/protein names
:type additional_labels: dict
:rtype: None
For more information to each parameter, please refer to :py:func:`pyGCluster.Cluster.resample`,
and the subsequent functions:
:py:func:`pyGCluster.Cluster.build_nodemap`,
:py:func:`pyGCluster.Cluster.write_dot`,
:py:func:`pyGCluster.Cluster.draw_community_expression_maps`,
:py:func:`pyGCluster.Cluster.draw_expression_profiles`.
'''
if working_directory != None:
self[ 'Working directory' ] = working_directory
if distances is None:
distances = [ 'euclidean', 'correlation' ]
if linkages is None:
linkages = [ 'complete', 'average', 'weighted', 'centroid', 'median', 'ward' ]
if additional_labels != None:
self[ 'Additional Labels' ] = additional_labels
self._print( 'RESAMPLING ...', verbosity_level = 2 )
self.resample(
distances = distances,
linkages = linkages,
function_2_generate_noise_injected_datasets = function_2_generate_noise_injected_datasets,
alphabet = alphabet,
iter_max = iter_max,
iter_top_P = iter_top_P,
iter_step = iter_step,
iter_tol = iter_tol,
iter_window = iter_window,
min_cluster_size = min_cluster_size,
min_cluster_freq_2_retain = min_cluster_freq_2_retain,
pickle_filename = pickle_filename,
cpus_2_use = cpus_2_use,
iter_till_the_end = iter_till_the_end
)
self._print( 'Resampling done.', verbosity_level = 2 )
self._print( '\nplotting cluster frequencies, building node map, drawing expression maps ...', verbosity_level = 2 )
self.plot_clusterfreqs(
min_cluster_size = min_cluster_size,
top_X_clusters = top_X_clusters,
threshold_4_the_lowest_max_freq = threshold_4_the_lowest_max_freq,
)
self.build_nodemap(
min_cluster_size = min_cluster_size,
top_X_clusters = top_X_clusters,
threshold_4_the_lowest_max_freq = threshold_4_the_lowest_max_freq,
starting_min_overlap = starting_min_overlap,
increasing_min_overlap = increasing_min_overlap
)
dot_filename = 'nodemap_minSize{0}_top{1}_top{2:.0f}promille.dot'.format( min_cluster_size, top_X_clusters, threshold_4_the_lowest_max_freq * 1000 )
self.write_dot(
filename = dot_filename,
min_value_4_expression_map = min_value_4_expression_map,
max_value_4_expression_map = max_value_4_expression_map,
color_gradient = color_gradient,
box_style = box_style
)
self.draw_community_expression_maps(
min_value_4_expression_map = min_value_4_expression_map,
max_value_4_expression_map = max_value_4_expression_map,
color_gradient = color_gradient,
box_style = box_style
)
self.draw_expression_profiles(
min_value_4_expression_map = min_value_4_expression_map,
max_value_4_expression_map = max_value_4_expression_map
)
return
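    # Minimal usage sketch (assumption, not part of the original module): how the complete
    # workflow described in the docstring above could be triggered. All argument values
    # below are hypothetical examples; the helper is never called by pyGCluster itself.
    def _example_do_it_all(self):
        '''
        Illustrative only - assumes the Cluster object was initialised with data,
        conditions and a working directory.
        '''
        self.do_it_all(
            distances = [ 'euclidean', 'correlation' ],
            linkages = [ 'ward', 'average' ],
            min_cluster_size = 4,
            iter_max = 100000,
            min_value_4_expression_map = -4,
            max_value_4_expression_map = 4,
        )
        return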
def info(self):
'''
Prints some information about the clustering via pyGCluster:
- number of genes/proteins clustered
- number of conditions defined
- number of distance-linkage combinations
- number of iterations performed
as well as some information about the communities, the legend for the shapes of nodes in the node map and the way the functions were called.
:rtype: none
'''
self._print( '[ INFO ] {0:*^100}'.format( ' info function START ' ), verbosity_level = 0 )
self._print('''
{0:>9} identifiers were used to cluster
{1:>9} conditions were defined
{2:>9} linkage - distance def combos were used
{3:>9} iterations were performed
'''.format(
len( self[ 'Identifiers' ] ),
len( self[ 'Conditions' ] ),
len( self[ 'Distance-linkage combinations' ] ),
self[ 'Iterations' ]
), verbosity_level = 0
)
self._print( 'Results are saved in the folder: "{0}"'.format( self[ 'Working directory' ] ), verbosity_level = 0 )
if 'Communities' in self.keys() and self[ 'Communities' ] != {}:
max_level = max( [ name[1] for name in self[ 'Communities' ] ] )
communities_top_cluster = self._get_levelX_clusters( level = max_level )
communities_minus_close2root = [ c for c in communities_top_cluster if len( c ) < self[ 'for IO skip clusters bigger than' ] ]
s = '{3} most_frequent_clusters were combined into {0} communities. {1} of those communities contain more than {2} objects \n(i.e. are "close to root" communities).'
n_communities = len( communities_top_cluster)
self._print( s.format( n_communities, n_communities - len( communities_minus_close2root ), self[ 'for IO skip clusters bigger than' ], len( self._get_levelX_clusters( level = 0 ) ) ), verbosity_level = 0 )
self._print( 'See below for the parameters that were used to form communities (function "build_nodemap").', verbosity_level = 0 )
else:
self._print( 'Communities were not yet formed.', verbosity_level = 0 )
if 'nodemap metric2shape' in self.keys():
self._print( 'The legend for the node shapes in the DOT file is:', verbosity_level = 0 )
for metric, shape in self[ 'nodemap metric2shape' ]:
self._print( ' - clusters that are found by distance metric(s): "{0}" are visualized as "{1}"'.format( metric, shape ), verbosity_level = 0 )
self._print( 'Values of the parameters of the functions that were already called:', verbosity_level = 0 )
for function_name in self[ 'Function parameters' ]:
self._print( '\t- function {0} was called with ...'.format( function_name ), verbosity_level = 0 )
for kw, value in sorted( self[ 'Function parameters' ][ function_name ].items() ):
self._print( '{0: >45} : {1}'.format( kw, value ), verbosity_level = 0 )
self._print( '[ INFO ] {0:*^100}'.format( ' info function END ' ), verbosity_level = 0 )
return
def save(self, filename = 'pyGCluster.pkl'):
'''
Saves the current pyGCluster.Cluster object in a Pickle object.
:param filename: may be either a simple file name ("example.pkl") or a complete path (e.g. "/home/user/Desktop/example.pkl"). In the former case, the pickle is stored in pyGCluster's working directory.
:type filename: string
:rtype: none
'''
tmp = {}
for key in self.keys():
tmp[ key ] = self[ key ]
if not os.path.split( filename )[ 0 ]:
with open( os.path.join( self[ 'Working directory' ], filename ), 'wb' ) as fout:
pickle.dump( tmp, fout )
self._print( 'pyGCluster pickled in: "{0}"'.format( os.path.join( self[ 'Working directory' ], filename ) ), verbosity_level = 1 )
else:
with open( filename, 'wb' ) as fout:
pickle.dump( tmp, fout )
self._print( 'pyGCluster pickled in: "{0}"'.format( filename ), verbosity_level = 1 )
return
def load(self, filename):
'''
Fills a pyGCluster.Cluster object with the session saved as "filename".
If "filename" is not a complete path, e.g. "example.pkl" (instead of "/home/user/Desktop/example.pkl"), the directory given by self[ 'Working directory' ] is used.
.. note ::
Loading of pyGCluster has to be performed as a 2-step-procedure:
>>> LoadedClustering = pyGCluster.Cluster()
>>> LoadedClustering.load( "/home/user/Desktop/example.pkl" )
:param filename: may be either a simple file name ("example.pkl") or a complete path (e.g. "/home/user/Desktop/example.pkl").
:type filename: string
:rtype: none
'''
_dir, _file = os.path.split( filename )
if _dir:
with open( filename, 'rb' ) as fin:
tmp = pickle.load( fin )
else:
with open( os.path.join( self[ 'Working directory' ], filename ), 'rb' ) as fin:
tmp = pickle.load( fin )
for key in tmp.keys():
self[ key ] = tmp[ key ]
self._print( 'pyGCluster loaded.', verbosity_level = 1 )
return
def median(self, _list):
'''
Returns the median from a list of numeric values.
:param _list:
:type _list: list
:rtype: int / float
'''
_list = sorted( _list )
length = len( _list )
value = None
if length % 2 == 0:
# even !
value = ( _list[ length // 2 ] + _list[ length // 2 - 1 ] ) / 2.0
else:
# odd !
value = _list[ length // 2 ]
return value
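    # Worked example (illustrative): median([1, 3, 2, 10]) sorts to [1, 2, 3, 10]; the list
    # has even length, so the two middle values are averaged: (2 + 3) / 2.0 = 2.5.
    # For an odd-length list such as [5, 1, 9] the middle element 5 is returned directly.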
def _print(self, *args, **kwargs):
'''
Internal print function which implements the "verbosity_level" parameter.
:rtype: none
'''
if kwargs[ 'verbosity_level' ] <= self[ 'Verbosity level' ]:
del kwargs[ 'verbosity_level' ]
print( *args, **kwargs )
return
if __name__ == '__main__':
    # invoke the freeze_support function for Windows-based systems
try:
sys.getwindowsversion()
multiprocessing.freeze_support()
except:
pass
x = Cluster()
exit()
| 57.751531
| 390
| 0.565604
|
43f2ddc8af53740ae6e8c8b291d62835ffaeb9ee
| 6,621
|
py
|
Python
|
app/admin/views.py
|
jabedparadox/flask-user-management
|
8da1deae90afac22f7176b3c894963a174833811
|
[
"MIT"
] | 1
|
2019-08-20T09:07:38.000Z
|
2019-08-20T09:07:38.000Z
|
app/admin/views.py
|
jabedparadox/flask-user-management
|
8da1deae90afac22f7176b3c894963a174833811
|
[
"MIT"
] | null | null | null |
app/admin/views.py
|
jabedparadox/flask-user-management
|
8da1deae90afac22f7176b3c894963a174833811
|
[
"MIT"
] | 2
|
2020-11-18T05:20:29.000Z
|
2021-09-25T16:51:31.000Z
|
from flask import (
Blueprint,
abort,
flash,
redirect,
render_template,
request,
url_for,
)
from flask_login import current_user, login_required
from flask_rq import get_queue
from app import db
from app.admin.forms import (
ChangeAccountInfoForm,
ChangeAccountTypeForm,
ChangeUserEmailForm,
InviteUserForm,
NewUserForm,
)
from app.decorators import admin_required
from app.email import send_email
from app.models import EditableHTML, Role, User
admin = Blueprint('admin', __name__)
@admin.route('/')
@login_required
@admin_required
def index():
"""Admin dashboard page."""
return render_template('admin/index.html')
@admin.route('/new-user', methods=['GET', 'POST'])
@login_required
@admin_required
def new_user():
"""Create a new user."""
form = NewUserForm()
if form.validate_on_submit():
user = User(
role=form.role.data,
first_name=form.first_name.data,
last_name=form.last_name.data,
email=form.email.data,
password=form.password.data)
db.session.add(user)
db.session.commit()
flash('User {} successfully created'.format(user.full_name()),
'form-success')
return render_template('admin/new_user.html', form=form)
@admin.route('/invite-user', methods=['GET', 'POST'])
@login_required
@admin_required
def invite_user():
"""Invites a new user to create an account and set their own password."""
form = InviteUserForm()
if form.validate_on_submit():
user = User(
role=form.role.data,
first_name=form.first_name.data,
last_name=form.last_name.data,
email=form.email.data)
db.session.add(user)
db.session.commit()
token = user.generate_confirmation_token()
invite_link = url_for(
'account.join_from_invite',
user_id=user.id,
token=token,
_external=True)
get_queue().enqueue(
send_email,
recipient=user.email,
subject='You Are Invited To Join',
template='account/email/invite',
user=user,
invite_link=invite_link,
)
flash('User {} successfully invited'.format(user.full_name()),
'form-success')
return render_template('admin/new_user.html', form=form)
@admin.route('/users')
@login_required
@admin_required
def registered_users():
"""View all registered users."""
users = User.query.all()
roles = Role.query.all()
return render_template(
'admin/registered_users.html', users=users, roles=roles)
@admin.route('/user/<int:user_id>')
@admin.route('/user/<int:user_id>/info')
@login_required
@admin_required
def user_info(user_id):
"""View a user's profile."""
user = User.query.filter_by(id=user_id).first()
if user is None:
abort(404)
return render_template('admin/manage_user.html', user=user)
@admin.route('/user/<int:user_id>/change-account-info', methods=['GET', 'POST'])
@login_required
@admin_required
def change_account_info(user_id):
"""Change a account information."""
user = User.query.filter_by(id=user_id).first()
form = ChangeAccountInfoForm()
if form.validate_on_submit():
user.first_name = form.first_name.data
user.last_name = form.last_name.data
db.session.add(user)
db.session.commit()
flash('Account information successfully updated to {}.'.format(
user.full_name(), user.role.name), 'form-success')
return render_template('admin/manage_user.html', user=user, form=form)
@admin.route('/user/<int:user_id>/change-email', methods=['GET', 'POST'])
@login_required
@admin_required
def change_user_email(user_id):
"""Change a user's email."""
user = User.query.filter_by(id=user_id).first()
if user is None:
abort(404)
form = ChangeUserEmailForm()
if form.validate_on_submit():
user.email = form.email.data
db.session.add(user)
db.session.commit()
flash('Email for user {} successfully changed to {}.'.format(
user.full_name(), user.email), 'form-success')
return render_template('admin/manage_user.html', user=user, form=form)
@admin.route(
'/user/<int:user_id>/change-account-type', methods=['GET', 'POST'])
@login_required
@admin_required
def change_account_type(user_id):
"""Change a user's account type."""
if current_user.id == user_id:
flash('You cannot change the type of your own account. Please ask '
'another administrator to do this.', 'error')
return redirect(url_for('admin.user_info', user_id=user_id))
user = User.query.get(user_id)
if user is None:
abort(404)
form = ChangeAccountTypeForm()
if form.validate_on_submit():
user.role = form.role.data
db.session.add(user)
db.session.commit()
flash('Role for user {} successfully changed to {}.'.format(
user.full_name(), user.role.name), 'form-success')
return render_template('admin/manage_user.html', user=user, form=form)
@admin.route('/user/<int:user_id>/delete')
@login_required
@admin_required
def delete_user_request(user_id):
"""Request deletion of a user's account."""
user = User.query.filter_by(id=user_id).first()
if user is None:
abort(404)
return render_template('admin/manage_user.html', user=user)
@admin.route('/user/<int:user_id>/_delete')
@login_required
@admin_required
def delete_user(user_id):
"""Delete a user's account."""
if current_user.id == user_id:
flash('You cannot delete your own account. Please ask another '
'administrator to do this.', 'error')
else:
user = User.query.filter_by(id=user_id).first()
db.session.delete(user)
db.session.commit()
flash('Successfully deleted user %s.' % user.full_name(), 'success')
return redirect(url_for('admin.registered_users'))
@admin.route('/_update_editor_contents', methods=['POST'])
@login_required
@admin_required
def update_editor_contents():
"""Update the contents of an editor."""
edit_data = request.form.get('edit_data')
editor_name = request.form.get('editor_name')
editor_contents = EditableHTML.query.filter_by(
editor_name=editor_name).first()
if editor_contents is None:
editor_contents = EditableHTML(editor_name=editor_name)
editor_contents.value = edit_data
db.session.add(editor_contents)
db.session.commit()
return 'OK', 200
| 30.232877
| 80
| 0.658964
|
52d39a33e0640f23a66416e8949791d15d6505af
| 494
|
py
|
Python
|
tests/test_pydocgen.py
|
LukasPolon/docgenerator
|
912e8af9a1567bdd2ff376cdd6d5d891c4b2d123
|
[
"MIT"
] | null | null | null |
tests/test_pydocgen.py
|
LukasPolon/docgenerator
|
912e8af9a1567bdd2ff376cdd6d5d891c4b2d123
|
[
"MIT"
] | 9
|
2018-10-08T21:04:16.000Z
|
2018-10-17T21:38:44.000Z
|
tests/test_pydocgen.py
|
LukasPolon/docgenerator
|
912e8af9a1567bdd2ff376cdd6d5d891c4b2d123
|
[
"MIT"
] | null | null | null |
import unittest
from click.testing import CliRunner
from src.cli.pydocgen import pydocgen
class TestPydocgen(unittest.TestCase):
def setUp(self):
""" SetUp method inherted from TestCase."""
self.clirunner = CliRunner()
def test_pydocgen(self):
""" Test pydocgen command."""
result = self.clirunner.invoke(pydocgen)
self.assertEqual(result.exit_code, 0)
self.assertEqual(result.output.strip(), 'PYDOCGEN command.')
| 26
| 69
| 0.657895
|
d55204cb80830aa781cd731ea0f472ae9795de67
| 2,607
|
py
|
Python
|
app/authentication.py
|
BartekSzpak/adversary
|
231caf58722a5641dd08afe354f2760e89699f3a
|
[
"Apache-2.0",
"CC0-1.0"
] | 22
|
2019-06-08T11:00:02.000Z
|
2021-09-10T10:22:20.000Z
|
app/authentication.py
|
BartekSzpak/adversary
|
231caf58722a5641dd08afe354f2760e89699f3a
|
[
"Apache-2.0",
"CC0-1.0"
] | 39
|
2019-04-28T13:28:58.000Z
|
2020-07-28T00:49:45.000Z
|
app/authentication.py
|
BartekSzpak/adversary
|
231caf58722a5641dd08afe354f2760e89699f3a
|
[
"Apache-2.0",
"CC0-1.0"
] | 11
|
2019-04-29T00:58:35.000Z
|
2021-06-28T02:18:48.000Z
|
import binascii
import os
from plugins.adversary.app.engine.database import subjectify
from plugins.adversary.app.engine.objects import SiteUser
from plugins.adversary.app.util import tz_utcnow
from cryptography.exceptions import InvalidKey
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from itsdangerous import URLSafeTimedSerializer, BadSignature, SignatureExpired
_backend = default_backend()
max_age = 60*60*24*1 # good for 1 day
class NotAuthorized(Exception):
pass
class Token(object):
def __init__(self, session_blob, auth_key):
self._blob = session_blob
self.auth_key = auth_key
if self._blob is None:
raise NotAuthorized
try:
s = URLSafeTimedSerializer(self.auth_key)
self.session_info = s.loads(self._blob, max_age=max_age)
except (BadSignature, SignatureExpired, UnicodeDecodeError, binascii.Error):
raise NotAuthorized
def require_group(self, g):
if g not in self.session_info['groups']:
raise NotAuthorized()
def in_group(self, g):
return g in self.session_info['groups']
def login_generic(auth_key, groups, attrs) -> str:
serializer = URLSafeTimedSerializer(auth_key)
temp = attrs.copy()
temp.update({'groups': groups})
return serializer.dumps(subjectify(temp))
def register_user(username, groups, email=None, password=None):
salt, key = _create_hash(password.encode())
return SiteUser(username=username, password=key, salt=salt, groups=groups, email=email).save()
def login_user(username, password) -> bool:
try:
site_user = SiteUser.objects.get(username=username)
except SiteUser.DoesNotExist:
return False
if not _verify(password.encode(), site_user.password, site_user.salt):
return False
site_user.update(last_login=tz_utcnow())
return True
def username_exists(username: str):
try:
SiteUser.objects.get(username=username)
return True
except SiteUser.DoesNotExist:
return False
def _verify(glob, key, salt):
kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32, salt=salt, iterations=100000, backend=_backend)
try:
kdf.verify(glob, key)
return True
except InvalidKey:
return False
def _create_hash(glob):
salt = os.urandom(16)
kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32, salt=salt, iterations=100000, backend=_backend)
return salt, kdf.derive(glob)
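# Minimal round-trip sketch (illustrative, not part of the plugin): it exercises the
# PBKDF2 hash/verify pair above and shows how a Token validates a serialized session blob.
# The secret key, password and group name are made-up example values, and this function is
# not called anywhere in the module.
def _example_round_trip():
    # password hashing and verification
    salt, key = _create_hash(b'example-password')
    assert _verify(b'example-password', key, salt)
    assert not _verify(b'wrong-password', key, salt)
    # session blob serialization and Token-based group check
    auth_key = 'example-secret-key'
    blob = URLSafeTimedSerializer(auth_key).dumps({'groups': ['admin'], 'username': 'alice'})
    token = Token(blob, auth_key)
    assert token.in_group('admin')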
| 29.965517
| 106
| 0.714614
|
0f76e0f22ce1e8daf012936ede74d108b118ec83
| 1,074
|
py
|
Python
|
ibsng/handler/user/change_credit_extended.py
|
ParspooyeshFanavar/pyibsng
|
d48bcf4f25e3f23461528bf0ff8870cc3d537444
|
[
"MIT"
] | 6
|
2018-03-06T10:16:36.000Z
|
2021-12-05T12:43:10.000Z
|
ibsng/handler/user/change_credit_extended.py
|
ParspooyeshFanavar/pyibsng
|
d48bcf4f25e3f23461528bf0ff8870cc3d537444
|
[
"MIT"
] | 3
|
2018-03-06T10:27:08.000Z
|
2022-01-02T15:21:27.000Z
|
ibsng/handler/user/change_credit_extended.py
|
ParspooyeshFanavar/pyibsng
|
d48bcf4f25e3f23461528bf0ff8870cc3d537444
|
[
"MIT"
] | 3
|
2018-01-06T16:28:31.000Z
|
2018-09-17T19:47:19.000Z
|
"""Change credit extended API method."""
from ibsng.handler.handler import Handler
class changeCreditExtended(Handler):
"""Change credit extended method class."""
def control(self):
"""Validate inputs after setup method.
:return: None
:rtype: None
"""
self.is_valid(self.user_id, int)
self.is_valid(self.credit, float)
self.is_valid(self.change_type, str)
self.is_valid_content(self.change_type, ("ADD|SET|MULTIPLY"))
self.is_valid(self.credit_comment, str, False)
def setup(self, user_id, credit, change_type, credit_comment=""):
"""Setup required parameters.
:param str user_id: ibsng user id
:param float credit: new credit value
:param str change_type: type of change (ADD, SET, MULTIPLY)
:param str credit_comment: comment for this credit
:return: None
:rtype: None
"""
self.user_id = user_id
self.credit = credit
self.change_type = change_type
self.credit_comment = credit_comment
| 30.685714
| 69
| 0.640596
|
baa210b1a254e4d0d12969361744ff89f700d2df
| 1,012
|
py
|
Python
|
kubernetes/test/test_v1_rolling_update_daemon_set.py
|
itholic/python
|
dffe577a062e17057270ae80fa677ffd83e9d183
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1_rolling_update_daemon_set.py
|
itholic/python
|
dffe577a062e17057270ae80fa677ffd83e9d183
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1_rolling_update_daemon_set.py
|
itholic/python
|
dffe577a062e17057270ae80fa677ffd83e9d183
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.15.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.v1_rolling_update_daemon_set import V1RollingUpdateDaemonSet # noqa: E501
from kubernetes.client.rest import ApiException
class TestV1RollingUpdateDaemonSet(unittest.TestCase):
"""V1RollingUpdateDaemonSet unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1RollingUpdateDaemonSet(self):
"""Test V1RollingUpdateDaemonSet"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes.client.models.v1_rolling_update_daemon_set.V1RollingUpdateDaemonSet() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 25.3
| 124
| 0.738142
|
ef9a8a5355b97b051eb31cea48ba50771b7541e2
| 6,615
|
py
|
Python
|
rally_openstack/scenarios/octavia/pools.py
|
RSE-Cambridge/rally-openstack
|
32bbc091bbce1db625a2fc22da28b32718befa13
|
[
"Apache-2.0"
] | null | null | null |
rally_openstack/scenarios/octavia/pools.py
|
RSE-Cambridge/rally-openstack
|
32bbc091bbce1db625a2fc22da28b32718befa13
|
[
"Apache-2.0"
] | null | null | null |
rally_openstack/scenarios/octavia/pools.py
|
RSE-Cambridge/rally-openstack
|
32bbc091bbce1db625a2fc22da28b32718befa13
|
[
"Apache-2.0"
] | 1
|
2018-12-10T12:31:27.000Z
|
2018-12-10T12:31:27.000Z
|
# Copyright 2018: Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.task import validation
from rally_openstack import consts
from rally_openstack import scenario
from rally_openstack.scenarios.octavia import utils
"""Scenarios for Octavia Loadbalancer pools."""
@validation.add("required_services", services=[consts.Service.OCTAVIA])
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_contexts", contexts=["network"])
@scenario.configure(context={"cleanup@openstack": ["octavia"]},
name="Octavia.create_and_list_pools",
platform="openstack")
class CreateAndListPools(utils.OctaviaBase):
def run(self, protocol, lb_algorithm):
"""Create a loadbalancer pool per each subnet and then pools.
:param protocol: protocol for which the pool listens
:param lb_algorithm: loadbalancer algorithm
"""
subnets = []
loadbalancers = []
networks = self.context.get("tenant", {}).get("networks", [])
project_id = self.context["tenant"]["id"]
for network in networks:
subnets.extend(network.get("subnets", []))
for subnet_id in subnets:
lb = self.octavia.load_balancer_create(
project_id=project_id,
subnet_id=subnet_id)
loadbalancers.append(lb)
for loadbalancer in loadbalancers:
self.octavia.wait_for_loadbalancer_prov_status(loadbalancer)
self.octavia.pool_create(
lb_id=loadbalancer["id"],
protocol=protocol, lb_algorithm=lb_algorithm)
self.octavia.pool_list()
@validation.add("required_services", services=[consts.Service.OCTAVIA])
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_contexts", contexts=["network"])
@scenario.configure(context={"cleanup@openstack": ["octavia"]},
name="Octavia.create_and_delete_pools",
platform="openstack")
class CreateAndDeletePools(utils.OctaviaBase):
def run(self, protocol, lb_algorithm):
"""Create a pool per each subnet and then delete pool
:param protocol: protocol for which the pool listens
:param lb_algorithm: loadbalancer algorithm
"""
subnets = []
loadbalancers = []
networks = self.context.get("tenant", {}).get("networks", [])
project_id = self.context["tenant"]["id"]
for network in networks:
subnets.extend(network.get("subnets", []))
for subnet_id in subnets:
lb = self.octavia.load_balancer_create(
project_id=project_id,
subnet_id=subnet_id)
loadbalancers.append(lb)
for loadbalancer in loadbalancers:
self.octavia.wait_for_loadbalancer_prov_status(loadbalancer)
pools = self.octavia.pool_create(
lb_id=loadbalancer["id"],
protocol=protocol, lb_algorithm=lb_algorithm)
self.octavia.pool_delete(pools["id"])
@validation.add("required_services", services=[consts.Service.OCTAVIA])
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_contexts", contexts=["network"])
@scenario.configure(context={"cleanup@openstack": ["octavia"]},
name="Octavia.create_and_update_pools",
platform="openstack")
class CreateAndUpdatePools(utils.OctaviaBase):
def run(self, protocol, lb_algorithm):
"""Create a pool per each subnet and then update
:param protocol: protocol for which the pool listens
:param lb_algorithm: loadbalancer algorithm
"""
subnets = []
loadbalancers = []
networks = self.context.get("tenant", {}).get("networks", [])
project_id = self.context["tenant"]["id"]
for network in networks:
subnets.extend(network.get("subnets", []))
for subnet_id in subnets:
lb = self.octavia.load_balancer_create(
project_id=project_id,
subnet_id=subnet_id)
loadbalancers.append(lb)
update_pool = {
"name": self.generate_random_name()
}
for loadbalancer in loadbalancers:
self.octavia.wait_for_loadbalancer_prov_status(loadbalancer)
pools = self.octavia.pool_create(
lb_id=loadbalancer["id"],
protocol=protocol, lb_algorithm=lb_algorithm)
self.octavia.pool_set(
pool_id=pools["id"], pool_update_args=update_pool)
@validation.add("required_services", services=[consts.Service.OCTAVIA])
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_contexts", contexts=["network"])
@scenario.configure(context={"cleanup@openstack": ["octavia"]},
name="Octavia.create_and_show_pools",
platform="openstack")
class CreateAndShowPools(utils.OctaviaBase):
def run(self, protocol, lb_algorithm):
"""Create a pool per each subnet and show it
:param protocol: protocol for which the pool listens
:param lb_algorithm: loadbalancer algorithm
"""
subnets = []
loadbalancers = []
networks = self.context.get("tenant", {}).get("networks", [])
project_id = self.context["tenant"]["id"]
for network in networks:
subnets.extend(network.get("subnets", []))
for subnet_id in subnets:
lb = self.octavia.load_balancer_create(
project_id=project_id,
subnet_id=subnet_id)
loadbalancers.append(lb)
for loadbalancer in loadbalancers:
self.octavia.wait_for_loadbalancer_prov_status(loadbalancer)
pools = self.octavia.pool_create(
lb_id=loadbalancer["id"],
protocol=protocol, lb_algorithm=lb_algorithm)
self.octavia.pool_show(pools["id"])
| 40.335366
| 78
| 0.646863
|
5f3ef7695c40285ba8a33f1fef60af6543e5d5d1
| 2,601
|
py
|
Python
|
intro-mdp/mission01/netconf_clear_ips.py
|
cobeam/DevNetRepo
|
d824bb6ad7d21bcae03485b571e97fc2b6b61df9
|
[
"MIT"
] | 90
|
2018-04-07T00:39:23.000Z
|
2020-06-09T02:44:02.000Z
|
intro-mdp/mission01/netconf_clear_ips.py
|
cobeam/DevNetRepo
|
d824bb6ad7d21bcae03485b571e97fc2b6b61df9
|
[
"MIT"
] | 38
|
2018-04-06T18:11:36.000Z
|
2020-05-11T23:36:24.000Z
|
intro-mdp/mission01/netconf_clear_ips.py
|
ljm625/dnav3-code
|
833c2c05401fbe84529d51dc1eb597c10b6615c9
|
[
"MIT"
] | 143
|
2018-04-20T00:17:24.000Z
|
2020-06-12T15:07:42.000Z
|
#!/usr/bin/env python
"""This script clears the IP configuration on GigabitEthernet2 on 2 devices.
Copyright (c) 2018 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import sys
from netconf_functions import check_ip, clear_ip
# Get the absolute path for the directory where this file is located "here"
here = os.path.abspath(os.path.dirname(__file__))
# Get the absolute path for the project / repository root
project_root = os.path.abspath(os.path.join(here, "../.."))
# Extend the system path to include the project root and import the env files
sys.path.insert(0, project_root)
import env_lab # noqa
# Create a list of devices to query
devices = [
{"conn": env_lab.IOS_XE_1, "ip": "172.16.255.1", "prefix": "24"},
{"conn": env_lab.IOS_XE_2, "ip": "172.16.255.2", "prefix": "24"},
]
# Step 1: Query the devices for the current interface configuration.
print("Checking the current IP configuration on GigabitEthernet2 on devices")
# Query both devices for current interface configuration
for device in devices:
check_ip(device)
# Step 2: Clear the IP addresses on GigabitEthernet2 on the devices
# (the clearing logic itself lives in clear_ip from netconf_functions)
print("Attempting to clear GigabitEthernet2 IP addressing")
# Clear the GigabitEthernet2 IP addressing on each device
for device in devices:
clear_ip(device)
# Step 3: Print updated IP addresses on devices
print(
"Re-Checking the current IP configuration on GigabitEthernet2 on devices"
)
# Query both devices for current interface configuration
for device in devices:
check_ip(device)
| 36.633803
| 78
| 0.774702
|
a92e30a19ed3e26f2e50bc8c922e9d6b23b3f20c
| 1,889
|
py
|
Python
|
lingvo/model_imports.py
|
muntasir2000/lingvo
|
1555299b817288b5a6637ded416dbbdc9b00036d
|
[
"Apache-2.0"
] | null | null | null |
lingvo/model_imports.py
|
muntasir2000/lingvo
|
1555299b817288b5a6637ded416dbbdc9b00036d
|
[
"Apache-2.0"
] | null | null | null |
lingvo/model_imports.py
|
muntasir2000/lingvo
|
1555299b817288b5a6637ded416dbbdc9b00036d
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Global import for model hyper-parameters.
Using this module any ModelParams can be accessed via GetParams.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import re
from lingvo import model_registry
import six
import tensorflow as tf
_TASK_ROOT = 'lingvo.tasks'
# LINT.IfChange(task_dirs)
_TASK_DIRS = [
'asr',
'image',
'lm',
'mt',
'punctuator',
]
# LINT.ThenChange(tasks/BUILD:task_dirs)
# Import all ModelParams to ensure that they are added to the global registry.
for task_name in _TASK_DIRS:
name = '%s.%s.params' % (_TASK_ROOT, task_name)
tf.logging.info('Importing %s', name)
try:
importlib.import_module(name)
except ImportError as e:
errmsg = str(e)
if six.PY2:
match_str = 'No module named.*params'
else:
match_str = 'No module named.*%s' % _TASK_ROOT
if re.match(match_str, errmsg):
# Expected that some imports may be missing.
tf.logging.info('Expected error importing %s: %s', task_name, errmsg)
else:
tf.logging.info('Unexpected error importing %s: %s', task_name, errmsg)
raise
| 30.967213
| 80
| 0.694547
|
43310c0dcc626856359d7679b7173909ddd6bb68
| 13,209
|
py
|
Python
|
tests/test_configuration_set.py
|
pletessier/python-configuration
|
7ab87eaa244b704ee3c64e511fd4797ec888fe34
|
[
"MIT"
] | null | null | null |
tests/test_configuration_set.py
|
pletessier/python-configuration
|
7ab87eaa244b704ee3c64e511fd4797ec888fe34
|
[
"MIT"
] | null | null | null |
tests/test_configuration_set.py
|
pletessier/python-configuration
|
7ab87eaa244b704ee3c64e511fd4797ec888fe34
|
[
"MIT"
] | null | null | null |
from config import (
config_from_dict,
config_from_env,
config_from_python,
create_path_from_config,
ConfigurationSet,
config
)
from pytest import raises
import os
import json
try:
import yaml
except ImportError:
yaml = None # type: ignore
try:
import toml
except ImportError:
toml = None # type: ignore
DICT1 = {
"a1.B1.c1": 1,
"a1.b1.C2": 2,
"A1.b1.c3": 3,
"a1.b2.c1": "a",
"a1.b2.c2": True,
"a1.b2.c3": 1.1,
}
DICT2_1 = {
"a2.b1.c1": 'f',
"a2.b1.c2": False,
"a2.B1.c3": None,
}
DICT2_2 = {
"a2.b2.c1": 10,
"a2.b2.c2": 'YWJjZGVmZ2g=',
"a2.b2.C3": "abcdefgh",
}
DICT3_1 = {
"a2.b2.c1": 10,
"a2.b2.c2": 'YWJjZGVmZ2g=',
"a2.b2.C3": "abcdefgh",
"z1": 100,
}
DICT3_2 = {
"a2": 10,
"z1.w2": 123,
"z1.w3": "abc",
}
DICT3_3 = {
"a2.g2": 10,
"a2.w2": 123,
"a2.w3": "abc",
}
DICT3 = {
"a3.b1.c1": 'af',
"a3.b1.c2": True,
"a3.b1.c3": None,
"a3.b2.c1": 104,
"a3.b2.c2": 'YWJjZGVmZ2g=',
"a3.b2.c3": "asdfdsbcdefgh",
}
JSON = json.dumps(DICT3)
if yaml:
YAML = """
z1:
w1: 1
w2: null
w3: abc
z2:
w1: 1.1
w2:
- a
- b
- c
w3:
p1: 1
p2: 5.4
"""
DICT_YAML = {
"z1.w1": 1,
"z1.w2": None,
"z1.w3": "abc",
"z2.w1": 1.1,
"z2.w2": ["a", "b", "c"],
"z2.w3": {"p1": 1, "p2": 5.4},
}
if toml:
TOML = """
[owner]
name = "ABC"
[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002,]
connection_max = 5000
enabled = true
[clients]
data = [ [ "gamma", "delta",], [ 1, 2,],]
hosts = [ "alpha", "omega",]
[servers.alpha]
ip = "10.0.0.1"
dc = "eqdc10"
[servers.beta]
ip = "10.0.0.2"
dc = "eqdc10"
"""
DICT_TOML = {'owner': {'name': 'ABC'},
'database': {'server': '192.168.1.1',
'ports': [8001, 8001, 8002],
'connection_max': 5000,
'enabled': True},
'clients': {'data': [['gamma', 'delta'], [1, 2]],
'hosts': ['alpha', 'omega']},
'servers': {'alpha': {'ip': '10.0.0.1', 'dc': 'eqdc10'},
'beta': {'ip': '10.0.0.2', 'dc': 'eqdc10'}}}
INI = """
[section1]
key1 = True
[section2]
key1 = abc
key2 = def
key3 = 1.1
[section3]
key1 = 1
key2 = 0
"""
DICT_INI = {
"section1.key1": 'True',
"section2.key1": "abc",
"section2.key2": "def",
"section2.key3": '1.1',
"section3.key1": '1',
"section3.key2": '0',
}
PATH_DICT = {
"sdf.dsfsfd": 1,
"sdjf.wquwe": "sdfsd",
"sdjf.wquwe43": None,
"sdjf.wquwse43": True
}
PREFIX = 'CONFIG'
os.environ.update(
(PREFIX + '__' + k.replace('.', '__').upper(), str(v))
for k, v in DICT1.items()
)
def test_load_env(): # type: ignore
cfg = ConfigurationSet(
config_from_dict(DICT2_1),
config_from_dict(DICT2_2),
config_from_env(prefix=PREFIX)
)
# from env
assert cfg["a1.b1.c1"] == '1'
assert cfg["a1.b1"].get_int('c1') == 1
assert cfg["a1.b1"].as_dict() == {"c1": '1', "c2": '2', "c3": '3'}
assert cfg["a1.b2"].as_dict() == {"c1": "a", "c2": 'True', "c3": '1.1'}
# from dict
assert cfg["a2.b1.c1"] == 'f'
assert cfg["a2.b2"].as_dict() == {'c1': 10, 'c2': 'YWJjZGVmZ2g=', 'c3': 'abcdefgh'}
def test_fails(): # type: ignore
cfg = ConfigurationSet(
config_from_dict(DICT2_1),
config_from_dict(DICT2_2),
config_from_env(prefix=PREFIX)
)
with raises(KeyError, message="a1.b2.c3.d4"):
assert cfg["a1.b2.c3.d4"] is Exception
with raises(KeyError, message="'c4'"):
assert cfg.a1.b2.c4 is Exception
with raises(ValueError, message="Expected a valid True or False expression."):
assert cfg["a1.b2"].get_bool("c3") is Exception
def test_get(): # type: ignore
cfg = ConfigurationSet(
config_from_dict(DICT2_1),
config_from_dict(DICT2_2),
config_from_env(prefix=PREFIX)
)
assert cfg.get("a2.b2") == config_from_dict({'c1': 10, 'c2': 'YWJjZGVmZ2g=', 'c3': 'abcdefgh'})
assert cfg.get("a2.b5", "1") == "1"
def test_get_dict(): # type: ignore
cfg = ConfigurationSet(
config_from_dict(DICT2_1),
config_from_dict(DICT2_2),
config_from_env(prefix=PREFIX)
)
assert cfg.get_dict("a2") == {'b1.c1': 'f', 'b1.c2': False, 'b1.c3': None, 'b2.c1': 10,
'b2.c2': 'YWJjZGVmZ2g=', 'b2.c3': 'abcdefgh'}
assert cfg.a2.as_dict() == {'b1.c1': 'f', 'b1.c2': False, 'b1.c3': None, 'b2.c1': 10,
'b2.c2': 'YWJjZGVmZ2g=', 'b2.c3': 'abcdefgh'}
assert dict(cfg.a2) == {'b1.c1': 'f', 'b1.c2': False, 'b1.c3': None, 'b2.c1': 10,
'b2.c2': 'YWJjZGVmZ2g=', 'b2.c3': 'abcdefgh'}
with raises(KeyError):
assert cfg.get_dict("a3") is Exception
assert set(cfg.a2.values()) == {'f', False, None, 10, 'YWJjZGVmZ2g=', 'abcdefgh'}
assert dict(cfg.a2) == dict(cfg.a2.items())
def test_get_dict_different_types(): # type: ignore
cfg = ConfigurationSet(
config_from_dict(DICT3_1),
config_from_dict(DICT3_2), # a2 is ignored here
config_from_dict(DICT3_3),
)
assert cfg.get_dict("a2") == {'b2.c1': 10, 'b2.c2': 'YWJjZGVmZ2g=', 'b2.c3': 'abcdefgh',
'g2': 10, 'w2': 123, 'w3': 'abc'}
assert cfg.a2.as_dict() == {'b2.c1': 10, 'b2.c2': 'YWJjZGVmZ2g=', 'b2.c3': 'abcdefgh',
'g2': 10, 'w2': 123, 'w3': 'abc'}
assert dict(cfg.a2) == {'b2.c1': 10, 'b2.c2': 'YWJjZGVmZ2g=', 'b2.c3': 'abcdefgh',
'g2': 10, 'w2': 123, 'w3': 'abc'}
with raises(TypeError): # the first configuration overrides the type
assert cfg.get_dict("z1") is Exception
assert cfg.z1 == 100
def test_repr(): # type: ignore
import sys
path = os.path.join(os.path.dirname(__file__), 'python_config.py')
cfg = ConfigurationSet(
config_from_dict(DICT2_1),
config_from_dict(DICT2_2),
config_from_env(prefix=PREFIX),
config_from_python(path, prefix='CONFIG')
)
joined_dicts = dict((k, str(v)) for k, v in DICT1.items())
joined_dicts.update(DICT2_1)
joined_dicts.update(DICT2_2)
joined_dicts['sys.version'] = sys.hexversion
assert str(dict((k.lower(), v) for k, v in joined_dicts.items())) in repr(cfg)
def test_alternate_set_loader(): # type: ignore
import sys
path = os.path.join(os.path.dirname(__file__), 'python_config.py')
import tempfile
with tempfile.TemporaryDirectory() as folder:
create_path_from_config(folder, config_from_dict(PATH_DICT), remove_level=0)
entries = [
DICT2_1, # assumes dict
('dict', DICT2_2),
('env', PREFIX),
('python', path, 'CONFIG'),
('json', JSON),
('ini', INI),
('path', folder, 0),
]
if yaml:
entries.append(('yaml', YAML))
if toml:
entries.append(('toml', TOML))
cfg = config(*entries)
joined_dicts = dict((k, str(v)) for k, v in DICT1.items())
joined_dicts.update(DICT2_1)
joined_dicts.update(DICT2_2)
joined_dicts.update(DICT3)
joined_dicts.update(DICT_INI)
if yaml:
joined_dicts.update(DICT_YAML)
if toml:
joined_dicts.update(DICT_TOML)
joined_dicts.update((k, str(v)) for k, v in PATH_DICT.items())
joined_dicts['sys.version'] = sys.hexversion
assert config_from_dict(joined_dicts).as_dict() == cfg.as_dict()
assert config_from_dict(joined_dicts) == cfg
def test_alternate_set_loader_prefix(): # type: ignore
import sys
path = os.path.join(os.path.dirname(__file__), 'python_config.py')
import tempfile
with tempfile.TemporaryDirectory() as folder:
create_path_from_config(folder, config_from_dict(PATH_DICT), remove_level=0)
cfg = config(
DICT2_1, # assumes dict
('dict', DICT2_2),
('env', ),
('python', path),
('json', JSON),
('ini', INI),
('path', folder, 0),
prefix='CONFIG'
)
joined_dicts = dict((k, str(v)) for k, v in DICT1.items())
joined_dicts.update(DICT2_1)
joined_dicts.update(DICT2_2)
joined_dicts.update(DICT3)
joined_dicts.update(DICT_INI)
joined_dicts.update((k, str(v)) for k, v in PATH_DICT.items())
joined_dicts['sys.version'] = sys.hexversion
assert config_from_dict(joined_dicts).as_dict() == cfg.as_dict()
assert config_from_dict(joined_dicts) == cfg
def test_alternate_set_loader_strings(): # type: ignore
import sys
path = str(os.path.join(os.path.dirname(__file__), 'python_config.py'))
import tempfile
with tempfile.TemporaryDirectory() as folder, \
tempfile.NamedTemporaryFile(suffix='.json') as f1, \
tempfile.NamedTemporaryFile(suffix='.ini') as f2, \
tempfile.NamedTemporaryFile(suffix='.yaml') as f3, \
tempfile.NamedTemporaryFile(suffix='.toml') as f4:
# path
subfolder = folder + '/sub'
os.makedirs(subfolder)
create_path_from_config(subfolder, config_from_dict(PATH_DICT), remove_level=1)
# json
f1.file.write(JSON.encode())
f1.file.flush()
# ini
f2.file.write(INI.encode())
f2.file.flush()
entries = [
DICT2_1, # dict
DICT2_2,
'env',
path, # python
f1.name, # json
f2.name, # ini
folder, # path
]
if yaml:
f3.file.write(YAML.encode())
f3.file.flush()
entries.append(
f3.name # yaml
)
if toml:
f4.file.write(TOML.encode())
f4.file.flush()
entries.append(
f4.name # toml
)
cfg = config(
*entries,
prefix='CONFIG'
)
joined_dicts = dict((k, str(v)) for k, v in DICT1.items())
joined_dicts.update(DICT2_1)
joined_dicts.update(DICT2_2)
joined_dicts.update(DICT3)
joined_dicts.update(DICT_INI)
if yaml:
joined_dicts.update(DICT_YAML)
if toml:
joined_dicts.update(DICT_TOML)
joined_dicts.update((k, str(v)) for k, v in PATH_DICT.items())
joined_dicts['sys.version'] = sys.hexversion
assert config_from_dict(joined_dicts).as_dict() == cfg.as_dict()
assert config_from_dict(joined_dicts) == cfg
def test_alternate_set_loader_strings_python_module(): # type: ignore
import sys
module = 'tests.python_config'
import tempfile
with tempfile.TemporaryDirectory() as folder, \
tempfile.NamedTemporaryFile(suffix='.json') as f1, \
tempfile.NamedTemporaryFile(suffix='.ini') as f2, \
tempfile.NamedTemporaryFile(suffix='.yaml') as f3, \
tempfile.NamedTemporaryFile(suffix='.toml') as f4:
# path
subfolder = folder + '/sub'
os.makedirs(subfolder)
create_path_from_config(subfolder, config_from_dict(PATH_DICT), remove_level=1)
# json
f1.file.write(JSON.encode())
f1.file.flush()
# ini
f2.file.write(INI.encode())
f2.file.flush()
entries = [
DICT2_1, # dict
DICT2_2,
'env',
module, # python
f1.name, # json
f2.name, # ini
folder, # path
]
if yaml:
f3.file.write(YAML.encode())
f3.file.flush()
entries.append(f3.name)
if toml:
f4.file.write(TOML.encode())
f4.file.flush()
entries.append(
f4.name # toml
)
cfg = config(
*entries,
prefix='CONFIG'
)
joined_dicts = dict((k, str(v)) for k, v in DICT1.items())
joined_dicts.update(DICT2_1)
joined_dicts.update(DICT2_2)
joined_dicts.update(DICT3)
joined_dicts.update(DICT_INI)
if yaml:
joined_dicts.update(DICT_YAML)
if toml:
joined_dicts.update(DICT_TOML)
joined_dicts.update((k, str(v)) for k, v in PATH_DICT.items())
joined_dicts['sys.version'] = sys.hexversion
assert config_from_dict(joined_dicts).as_dict() == cfg.as_dict()
assert config_from_dict(joined_dicts) == cfg
def test_alternate_set_loader_fails(): # type: ignore
with raises(ValueError, message="configs should be a non-empty iterable of Configuration objects"):
assert config() is Exception
with raises(ValueError):
assert config(('no type', '')) is Exception
with raises(ValueError):
assert config('no type') is Exception
with raises(ValueError):
assert config([]) is Exception
with raises(ValueError):
assert config(('python', )) is Exception
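# Precedence sketch (illustrative, not part of the original test suite): in a
# ConfigurationSet the first configuration that defines a key takes precedence, which is
# the behaviour the tests above rely on. The helper lacks the "test_" prefix on purpose,
# so pytest does not collect it.
def _example_precedence():  # type: ignore
    cfg = ConfigurationSet(
        config_from_dict({"a.b": 1}),
        config_from_dict({"a.b": 2, "a.c": 3}),
    )
    assert cfg["a.b"] == 1  # earlier configuration wins
    assert cfg["a.c"] == 3  # later configurations fill in missing keys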
| 28.345494
| 103
| 0.550534
|
0350a0a8e11ef6ee0e3e5fe9e75486eb9553a15d
| 8,590
|
py
|
Python
|
pac.py
|
MajoDiaz/Pacman
|
19200eaaf6d0499d7ff6c3104ea32a533ea8f165
|
[
"MIT"
] | null | null | null |
pac.py
|
MajoDiaz/Pacman
|
19200eaaf6d0499d7ff6c3104ea32a533ea8f165
|
[
"MIT"
] | null | null | null |
pac.py
|
MajoDiaz/Pacman
|
19200eaaf6d0499d7ff6c3104ea32a533ea8f165
|
[
"MIT"
] | null | null | null |
"""Pacman, classic arcade game.
Exercises
1. Change the board.
2. Change the number of ghosts.
3. Change where pacman starts.
4. Make the ghosts faster/slower.
5. Make the ghosts smarter.
"""
#A01701879 María José Díaz Sánchez
#A00829556 Santiago Gonzalez Irigoyen
#This code is a Pacman game
from random import choice, randint
from turtle import *
from freegames import floor, vector
state = {'score': 0}  # tracks how many dots have been eaten
path = Turtle(visible=False)
writer = Turtle(visible=False)
aim = vector(5, 0)
pacman = vector(-40, -80)
#the ghosts variable holds the starting vectors for the ghosts
#each ghost's color was also changed so they can be told apart
ghosts = [
[vector(-180, 160), vector(5, 0), 'right', 'red'],
[vector(-180, -160), vector(0, 5), 'up', 'orange'],
[vector(100, 160), vector(0, -5), 'down', 'cyan'],
[vector(100, -160), vector(-5, 0), 'left', 'pink']
]
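# Illustrative note (not part of the original assignment): each ghosts entry is
# [start position, current course, last direction, dot color], so exercise 2
# ("Change the number of ghosts.") only needs another entry appended here, e.g.
#   ghosts.append([vector(100, 160), vector(0, -5), 'down', 'purple'])
# move() iterates over the whole list, so an extra ghost is handled automatically.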
'''tiles is where the maze is essentially built:
the 0s are the black (blocked) space and the 1s are the designed path.
Changing the 1s and 0s creates a new board.'''
'''The tile values were modified to create a customized board;
this was done by switching individual values to 0 or 1 as needed
for the design of the new layout.'''
tiles = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0,
0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0,
0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0,
0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0,
0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0,
0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0,
0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0,
0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0,
0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
]
def square(x, y):
"Draw square using path at (x, y)."
    '''This function uses the coordinates produced by world()
    to draw one square of the walkable path.'''
path.up()
path.goto(x, y)
path.down()
path.begin_fill()
for count in range(4):
path.forward(20)
path.left(90)
path.end_fill()
def offset(point):
"Return offset of point in tiles."
    '''This function also supports move() by computing the tile
    index that is used in that function's if-statements.'''
x = (floor(point.x, 20) + 200) / 20
y = (180 - floor(point.y, 20)) / 20
index = int(x + y * 20)
return index
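# Worked example (illustrative, not part of the original code): the board is a
# 20x20 grid of 20-pixel tiles. For pacman's start vector(-40, -80):
#   column = (floor(-40, 20) + 200) / 20 = 8,  row = (180 - floor(-80, 20)) / 20 = 13,
# so offset(vector(-40, -80)) returns 8 + 13 * 20 = 268.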
def valid(point):
"Return True if point is valid in tiles."
    '''This helper checks whether the point is a valid position
    for pacman and returns the result to move().'''
index = offset(point)
if tiles[index] == 0:
return False
index = offset(point + 19)
if tiles[index] == 0:
return False
return point.x % 20 == 0 or point.y % 20 == 0
def world():
"Draw world using path."
    #This function draws the board:
    #in this case the background is black
    #and the path squares are white
bgcolor('black')
path.color('white')
    #colors defines the different colors used for the dots
colors = ['cyan', 'red', 'light green', 'orange']
for index in range(len(tiles)):
        '''This loop checks which entries of the list are 1 and which
        are 0 in order to build the maze. Entries greater than 0 are
        converted to an (x, y) position and passed to square(), which
        draws the square for that tile. If the tile value at that
        index is 0, the position is left black.'''
tile = tiles[index]
if tile > 0:
x = (index % 20) * 20 - 200
y = 180 - (index // 20) * 20
square(x, y)
if tile == 1:
path.up()
path.goto(x + 10, y + 10)
                #draws the individual dots with randomly chosen colors
path.dot(2, colors[randint(0,3)])
def move():
"Move pacman and all ghosts."
    '''The move function is the one that moves all of the
    characters: it checks whether they bump into a black square,
    whether there are dots where they are walking, and it relies
    on other helper functions such as valid and offset.'''
writer.undo()
writer.write(state['score'])
clear()
if valid(pacman + aim):
pacman.move(aim)
index = offset(pacman)
if tiles[index] == 1:
tiles[index] = 2
state['score'] += 1
x = (index % 20) * 20 - 200
y = 180 - (index // 20) * 20
square(x, y)
up()
goto(pacman.x + 10, pacman.y + 10)
dot(20, 'yellow2')
    for gi, (point, course, history, id) in enumerate(ghosts):
if valid(point + course):
point.move(course)
        #If a ghost runs into a wall, it checks whether pacman is
        #to its left, below, to its right or above it,
        #and then moves in pacman's direction.
        #The movement pairs are interleaved by axis to keep the
        #ghosts from getting stuck going side to side. If none of
        #these 'smart' directions is possible, the ghosts choose at
        #random from the available directions. This way possible
        #logic errors are handled. history remembers the ghost's last
        #direction and tries to avoid repeating it.
elif pacman.x > point.x and valid(point + vector(5,0)) and history != 'right':
course.x = 5
course.y = 0
history = 'right'
            #this print would report direction changes to the terminal
#print(str(id), 'change')
elif pacman.y > point.y and valid(point + vector(0,5)) and history != 'up':
course.x = 0
course.y = 5
history = 'up'
#print(str(id), 'change')
elif pacman.x < point.x and valid(point + vector(-5,0)) and history != 'left':
course.x = -5
course.y = 0
history = 'left'
#print(str(id), 'change')
elif pacman.y < point.y and valid(point + vector(0,-5)) and history != 'down':
course.x = 0
course.y = -5
history = 'down'
#print(str(id), 'change')
else:
options = [
vector(5, 0),
vector(-5, 0),
vector(0, 5),
vector(0, -5),
]
            '''these are the different ways
            the ghosts move when they
            cannot see pacman'''
plan = choice(options)
if plan == vector(5,0):
history = 'right'
if plan == vector(0,5):
history = 'up'
if plan == vector(-5,0):
history = 'left'
if plan == vector(0,-5):
history = 'down'
course.x = plan.x
course.y = plan.y
        ghosts[gi][2] = history  # persist the chosen direction so the next frame remembers it
        up()
goto(point.x + 10, point.y + 10)
dot(20, id)
update()
for point, course, history, id in ghosts:
if abs(pacman - point) < 20:
return
    '''ontimer sets the speed of the game:
    the larger the value passed in, the longer the
    characters take to move; the smaller the value,
    the faster they move.'''
    '''In this case the value was changed from 100
    to 35 so that they move faster.'''
ontimer(move, 35)
def change(x, y):
"Change pacman aim if valid."
if valid(pacman + vector(x, y)):
aim.x = x
aim.y = y
setup(420, 420, 370, 0)  # defines the game window
hideturtle()
tracer(False)
writer.goto(160, 160)
writer.color('white')
writer.write(state['score'])  # writes the score on the board
listen()
#the following key bindings move pacman
onkey(lambda: change(5, 0), 'Right')
onkey(lambda: change(-5, 0), 'Left')
onkey(lambda: change(0, 5), 'Up')
onkey(lambda: change(0, -5), 'Down')
world()
move()
done()
| 33.554688
| 86
| 0.547497
|
d063ba09c2eb0b5096d9f3fd5b798cae560b7608
| 1,936
|
py
|
Python
|
restorm/managers.py
|
gt3389b/restorm
|
7901ffb8d05589508604e84352257486f350ac79
|
[
"MIT"
] | 3
|
2017-01-03T16:41:35.000Z
|
2020-09-17T09:39:02.000Z
|
restorm/managers.py
|
gt3389b/restorm
|
7901ffb8d05589508604e84352257486f350ac79
|
[
"MIT"
] | null | null | null |
restorm/managers.py
|
gt3389b/restorm
|
7901ffb8d05589508604e84352257486f350ac79
|
[
"MIT"
] | 3
|
2017-01-02T18:59:02.000Z
|
2020-09-17T09:53:31.000Z
|
# -*- coding: utf-8 -*-
from restorm.exceptions import RestServerException, RestValidationException
from restorm.patterns import ResourcePattern
from restorm.query import RestQuerySet
class ResourceManagerDescriptor(object):
"""
This class ensures managers aren't accessible via model instances. For
example, Book.objects works, but book_obj.objects raises AttributeError.
"""
def __init__(self, manager):
self.manager = manager
def __get__(self, instance, type=None):
if instance is not None:
raise AttributeError(
'Manager is not accessible via %s instances' % type.__name__)
return self.manager
class ResourceManager(object):
def __init__(self, queryset_class=None):
if queryset_class is None:
queryset_class = RestQuerySet
self.queryset_class = queryset_class
self.object_class = None
@property
def options(self):
try:
return getattr(self, '_options')
except AttributeError:
self._options = self.object_class._meta
return self._options
def get_queryset(self):
queryset = self.queryset_class(
model=self.object_class, client=self.options.client)
return queryset
def filter(self, **kwargs):
queryset = self.get_queryset().filter(**kwargs)
return queryset
def all(self):
queryset = self.get_queryset()
return queryset
def get(self, **kwargs):
obj = self.get_queryset().get(**kwargs)
return obj
def using(self, client):
return self.get_queryset().using(client)
def order_by(self, *args):
return self.get_queryset().order_by(*args)
def create(self, **kwargs):
"""Send POST request to resource and return Resource instance."""
instance = self.object_class(kwargs)
instance.save()
return instance
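# Illustrative usage sketch (class names and Meta wiring are assumptions, not part
# of this module): a resource class would typically expose a manager as e.g.
#   class Book(Resource):
#       objects = ResourceManager()
#   Book.objects.get(id=1)      # builds a RestQuerySet bound to Book._meta.client
#   book_instance.objects       # AttributeError, blocked by ResourceManagerDescriptor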
| 29.333333
| 77
| 0.652376
|
0725a7d94dc62a6935f2976549b4fff79860e1fd
| 169
|
py
|
Python
|
teste.py
|
dom8891/Projeto_LMS_DevOps
|
d7a881811c76bbac197ecca5a6da17f041c12646
|
[
"Apache-2.0"
] | null | null | null |
teste.py
|
dom8891/Projeto_LMS_DevOps
|
d7a881811c76bbac197ecca5a6da17f041c12646
|
[
"Apache-2.0"
] | null | null | null |
teste.py
|
dom8891/Projeto_LMS_DevOps
|
d7a881811c76bbac197ecca5a6da17f041c12646
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from principal import somar
from principal import subtrair
def teste_somar():
assert somar(2,4)==6
def teste_subtrair():
assert subtrair(9,5)==4
| 18.777778
| 30
| 0.739645
|
70acff208b579e920f945d980b63178c0bc1f4f7
| 451
|
py
|
Python
|
setup.py
|
andreyz/mapzen-gtfs
|
d445f1588ed10713eea9a1ca2878eef792121eca
|
[
"MIT"
] | 29
|
2015-06-08T00:49:52.000Z
|
2021-09-25T21:46:53.000Z
|
setup.py
|
andreyz/mapzen-gtfs
|
d445f1588ed10713eea9a1ca2878eef792121eca
|
[
"MIT"
] | 12
|
2015-07-28T07:12:55.000Z
|
2017-05-11T14:24:12.000Z
|
setup.py
|
andreyz/mapzen-gtfs
|
d445f1588ed10713eea9a1ca2878eef792121eca
|
[
"MIT"
] | 10
|
2015-07-28T06:57:51.000Z
|
2021-01-05T05:56:27.000Z
|
from setuptools import setup
import mzgtfs
setup(
name='mzgtfs',
version=mzgtfs.__version__,
description='Mapzen GTFS',
author='Ian Rees',
author_email='ian@mapzen.com',
url='https://github.com/transitland/mapzen-gtfs',
license='License :: OSI Approved :: MIT License',
packages=['mzgtfs'],
install_requires=['unicodecsv', 'pytz'],
zip_safe=False,
# Include examples.
package_data = {
'': ['*.txt', '*.md', '*.zip']
}
)
| 22.55
| 51
| 0.662971
|
f795caa633969d307251a97fef43a29b2ec0e66c
| 2,291
|
py
|
Python
|
python_utilities/scripting.py
|
sdaxen/python_utilities
|
7b9d6cc21bfc31be83629d2ac02b27e886ebc2bb
|
[
"MIT"
] | 2
|
2020-04-13T20:17:36.000Z
|
2020-05-12T01:13:12.000Z
|
python_utilities/scripting.py
|
sethaxen/python_utilities
|
7b9d6cc21bfc31be83629d2ac02b27e886ebc2bb
|
[
"MIT"
] | 5
|
2015-10-20T22:57:51.000Z
|
2017-09-07T01:10:23.000Z
|
python_utilities/scripting.py
|
sethaxen/python_utilities
|
7b9d6cc21bfc31be83629d2ac02b27e886ebc2bb
|
[
"MIT"
] | 3
|
2015-08-17T17:55:41.000Z
|
2018-09-19T13:56:42.000Z
|
"""Basic utilities for common scripting tasks.
Author: Seth Axen
E-mail: seth.axen@gmail.com
"""
import logging
import time
LOG_LEVELS = (logging.NOTSET, logging.INFO, logging.DEBUG, logging.WARNING,
logging.ERROR, logging.CRITICAL)
def setup_logging(filename=None, verbose=False, level=-1,
timezone=time.localtime, with_level=True, with_time=True,
reset=True, writemode="a"):
"""Setup format string, file, and verbosity for logging.
Parameters
----------
filename : str, optional (default None)
Log file. If None, logs are written to stdout.
verbose : bool, optional (default False)
Write debugging log messages in addition to info.
level : int, optional (default -1)
Force specific level for logging. If provided, ignore `verbose`.
timezone : function, optional (default time.localtime)
Function that returns a ``time.struct_time`` object. Recommended
options are time.localtime for local machine time or time.gmtime for
GMT.
    with_level : bool, optional (default True)
Include log level in log messages.
with_time : bool, optional (default True)
Include system time in log messages.
    reset : bool, optional (default True)
        Remove any handlers already attached to the root logger before
        configuring, so repeated calls reconfigure logging cleanly.
writemode : str, optional (default "a")
Mode for writing to log file if `filename` is specified.
"""
if reset:
root = logging.getLogger()
if root.handlers:
for handler in root.handlers:
root.removeHandler(handler)
if level != -1 and level in LOG_LEVELS:
log_level = level
elif verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
msg_format = "%(message)s"
if with_level:
msg_format = "%(levelname)s|" + msg_format
if with_time:
logging.Formatter.converter = timezone
msg_format = "%(asctime)s|" + msg_format
if filename is not None:
logging.basicConfig(filename=filename, filemode=writemode,
level=log_level, format=msg_format)
logging.debug("Logging to %s" % filename)
else:
logging.basicConfig(level=log_level, format=msg_format)
logging.debug("Logging to stdout")
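# Example usage (illustrative only, not part of the module): log INFO and above to
# a file with GMT timestamps, then emit a first message:
#   setup_logging(filename="run.log", timezone=time.gmtime)
#   logging.info("started")   # written as "<asctime>|INFO|started" to run.log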
| 34.712121
| 76
| 0.64557
|
a52c9cafd4e97a2a095dcdbd585659044c6ccff9
| 1,323
|
py
|
Python
|
pajbot/managers/kvi.py
|
sadlyfell/bullbot
|
b6ef96f61678fab4a245d8ccddf9d1ae7aae9fee
|
[
"MIT"
] | null | null | null |
pajbot/managers/kvi.py
|
sadlyfell/bullbot
|
b6ef96f61678fab4a245d8ccddf9d1ae7aae9fee
|
[
"MIT"
] | null | null | null |
pajbot/managers/kvi.py
|
sadlyfell/bullbot
|
b6ef96f61678fab4a245d8ccddf9d1ae7aae9fee
|
[
"MIT"
] | 1
|
2020-03-11T19:37:10.000Z
|
2020-03-11T19:37:10.000Z
|
import logging
from collections import UserDict
from pajbot.managers.redis import RedisManager
from pajbot.streamhelper import StreamHelper
log = logging.getLogger(__name__)
class KVIData:
def __init__(self, streamer, kvi_id):
self.key = "{streamer}:kvi".format(streamer=streamer)
self.id = kvi_id
def set(self, new_value, redis=None):
if redis is None:
redis = RedisManager.get()
redis.hset(self.key, self.id, new_value)
def get(self, redis=None):
if redis is None:
redis = RedisManager.get()
try:
raw_value = redis.hget(self.key, self.id)
value = int(raw_value)
except (TypeError, ValueError):
value = 0
return value
def inc(self):
redis = RedisManager.get()
old_value = self.get(redis=redis)
self.set(old_value + 1, redis=redis)
def dec(self):
redis = RedisManager.get()
old_value = self.get(redis=redis)
self.set(old_value - 1, redis=redis)
def __str__(self):
return str(self.get())
class KVIManager(UserDict):
def __init__(self):
self.streamer = StreamHelper.get_streamer()
UserDict.__init__(self)
def __getitem__(self, kvi_id):
return KVIData(self.streamer, kvi_id)
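# Illustrative usage (assumed key names, not part of the module): counters live in
# the Redis hash "<streamer>:kvi", keyed by the kvi id, e.g.
#   kvi = KVIManager()
#   kvi["active_subs"].inc()     # read current value, add 1, write back via HSET
#   print(kvi["active_subs"])    # __str__ -> str(get()) -> current integer (0 if unset)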
| 24.5
| 61
| 0.623583
|
9fd76bf228b89c2c1df71410fd5f5a49b1a8a59f
| 3,062
|
py
|
Python
|
clld/tests/test_web_views_olac.py
|
Woseseltops/clld
|
5ba065f35b7e6f68b8638d86550e6f0f597ff02d
|
[
"MIT"
] | 1
|
2019-08-12T15:43:56.000Z
|
2019-08-12T15:43:56.000Z
|
clld/tests/test_web_views_olac.py
|
Woseseltops/clld
|
5ba065f35b7e6f68b8638d86550e6f0f597ff02d
|
[
"MIT"
] | null | null | null |
clld/tests/test_web_views_olac.py
|
Woseseltops/clld
|
5ba065f35b7e6f68b8638d86550e6f0f597ff02d
|
[
"MIT"
] | null | null | null |
from datetime import date
from clld.tests.util import TestWithEnv, XmlResponse
class OaiPmhResponse(XmlResponse):
ns = 'http://www.openarchives.org/OAI/2.0/'
@property
def error(self):
e = self.findall('error')
if e:
return e[0].get('code')
def test_ResumptionToken():
from clld.web.views.olac import ResumptionToken
assert ResumptionToken(from_=date.today(), until=date.today()).__unicode__()
class Tests(TestWithEnv):
def with_params(self, **kw):
from clld.web.views.olac import olac
self.set_request_properties(params=kw)
return OaiPmhResponse(olac(self.env['request']))
def test_olac_no_verb(self):
self.assertEqual(self.with_params().error, 'badVerb')
def test_olac_listsets(self):
self.assertNotEqual(self.with_params(verb='ListSets').error, None)
def test_olac_identify_and_additional_arg(self):
self.assertEqual(
self.with_params(verb='Identify', other='arg').error, 'badArgument')
def test_olac_identify(self):
assert self.with_params(verb='Identify').findall('Identify')
def test_olac_listMetadataFormats(self):
        assert self.with_params(
            verb='ListMetadataFormats').findone('metadataPrefix').text == 'olac'
assert self.with_params(verb='ListMetadataFormats', other='x').error
def test_olac_list(self):
from clld.web.views.olac import OlacConfig
assert self.with_params(
verb='ListIdentifiers', metadataPrefix='olac').findall('header')
OlacConfig()
id_ = self.with_params(verb='Identify').findone(
'{http://www.openarchives.org/OAI/2.0/oai-identifier}sampleIdentifier').text
assert self.with_params(
verb='GetRecord', metadataPrefix='olac', identifier=id_).findone('record')
assert self.with_params(verb='GetRecord', metadataPrefix='olac').error
assert self.with_params(
verb='GetRecord', metadataPrefix='ol', identifier=id_).error
assert self.with_params(
verb='GetRecord', metadataPrefix='olac', identifier=id_ + '123').error
assert self.with_params(
verb='ListIdentifiers', resumptionToken='tr', metadataPrefix='olac').error
assert self.with_params(
verb='ListIdentifiers', resumptionToken='tr', o='x').error
assert self.with_params(verb='ListIdentifiers').error
assert self.with_params(
verb='ListIdentifiers', metadataPrefix='olac', set='x').error
assert self.with_params(verb='ListIdentifiers', resumptionToken='tr').error
assert not self.with_params(
verb='ListIdentifiers',
resumptionToken='0f2000-01-01u2222-01-01').error
assert not self.with_params(verb='ListIdentifiers', resumptionToken='100').error
assert self.with_params(verb='ListIdentifiers', resumptionToken='200').error
assert self.with_params(
verb='ListIdentifiers',
resumptionToken='100f2000-01-01u2000-01-01').error
| 37.802469
| 88
| 0.67113
|
31f7773549205a89a098fd66f8b2172f41b02e1f
| 231
|
py
|
Python
|
app/controller/ops/__init__.py
|
YiChengCai1999/DepressionAnnotator
|
828f505d0f22f7c2337f1b37c7dee3ea23468951
|
[
"Apache-2.0"
] | null | null | null |
app/controller/ops/__init__.py
|
YiChengCai1999/DepressionAnnotator
|
828f505d0f22f7c2337f1b37c7dee3ea23468951
|
[
"Apache-2.0"
] | null | null | null |
app/controller/ops/__init__.py
|
YiChengCai1999/DepressionAnnotator
|
828f505d0f22f7c2337f1b37c7dee3ea23468951
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/12/3 14:06
# @Author : cendeavor
# @File    : __init__.py
# @Software: PyCharm
from flask import Blueprint
ops = Blueprint('ops', __name__)
from . import views
| 17.769231
| 32
| 0.640693
|
f2ea5a465a171ac988f2d56bfefe5c9928ca14a5
| 2,254
|
py
|
Python
|
src/cltl/combot/infra/event/api.py
|
leolani/cltl-combot
|
7008742ba9db782166f79322658a8cb49890d61b
|
[
"MIT"
] | 1
|
2020-11-21T18:53:22.000Z
|
2020-11-21T18:53:22.000Z
|
src/cltl/combot/infra/event/api.py
|
leolani/cltl-combot
|
7008742ba9db782166f79322658a8cb49890d61b
|
[
"MIT"
] | null | null | null |
src/cltl/combot/infra/event/api.py
|
leolani/cltl-combot
|
7008742ba9db782166f79322658a8cb49890d61b
|
[
"MIT"
] | null | null | null |
import uuid
from dataclasses import dataclass, field
from typing import TypeVar, Generic, Optional, Iterable, Callable
import time
from cltl.combot.infra.di_container import DIContainer
class TopicError(ValueError):
pass
@dataclass
class EventMetadata:
    timestamp: float = field(default_factory=time.time)  # time of event creation, not of module import
offset: int = -1
topic: str = ""
@classmethod
def with_(cls, metadata, timestamp: float = None, offset: int = None, topic: str = None) -> Optional["EventMetadata"]:
new_timestamp = timestamp if timestamp is not None else metadata.timestamp
new_offset = offset if offset is not None else metadata.offset
new_topic = topic if topic is not None else metadata.topic
return cls(new_timestamp, new_offset, new_topic)
T = TypeVar("T")
@dataclass
class Event(Generic[T]):
id: str
payload: T
    metadata: EventMetadata = field(default_factory=EventMetadata)  # fresh metadata per event instead of one shared default instance
@classmethod
def for_payload(cls, payload: T) -> Optional["Event"]:
return cls(str(uuid.uuid4()), payload)
@classmethod
def with_topic(cls, event, topic: str) -> Optional["Event"]:
return cls(event.id, event.payload, EventMetadata.with_(event.metadata, topic=topic))
def __eq__(self, other):
return self.id == other.id
class EventBus:
"""
Supports publishing of and subscribing to events based on topics.
Events published to a topic are delivered to all subscribers in the order
of their arrival. Publishing and invocation of the subscribed handler
can be asynchronous. Subscribers receive only events that arrive after they
subscribed to a topic.
"""
def publish(self, topic: str, event: Event) -> None:
raise NotImplementedError()
def subscribe(self, topic, handler: Callable[[Event], None]) -> None:
raise NotImplementedError()
def unsubscribe(self, topic: str, handler: Callable[[Event], None] = None) -> None:
raise NotImplementedError()
@property
def topics(self) -> Iterable[str]:
raise NotImplementedError()
def has_topic(self, topic: str) -> bool:
return topic in self.topics
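# Illustrative subscriber sketch (hypothetical topic name and handler, not part of
# the API itself): a concrete EventBus implementation would be used roughly as
#   bus.subscribe("my_topic", lambda event: print(event.metadata.topic, event.payload))
#   bus.publish("my_topic", Event.for_payload({"text": "hello"}))
# so every handler registered on "my_topic" receives the event in arrival order.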
class EventBusContainer(DIContainer):
@property
def event_bus(self) -> EventBus:
raise ValueError("No EventBus configured")
| 28.531646
| 122
| 0.689885
|
23f4aa44ac35da4eb0bb75fd335b88135ad3762f
| 898
|
py
|
Python
|
tests/test_block_enumerate.py
|
elsandal/pyclesperanto_prototype
|
7bda828813b86b44b63d73d5e8f466d9769cded1
|
[
"BSD-3-Clause"
] | 64
|
2020-03-18T12:11:22.000Z
|
2022-03-31T08:19:18.000Z
|
tests/test_block_enumerate.py
|
elsandal/pyclesperanto_prototype
|
7bda828813b86b44b63d73d5e8f466d9769cded1
|
[
"BSD-3-Clause"
] | 148
|
2020-05-14T06:14:11.000Z
|
2022-03-26T15:02:31.000Z
|
tests/test_block_enumerate.py
|
elsandal/pyclesperanto_prototype
|
7bda828813b86b44b63d73d5e8f466d9769cded1
|
[
"BSD-3-Clause"
] | 16
|
2020-05-31T00:53:44.000Z
|
2022-03-23T13:20:57.000Z
|
import pyclesperanto_prototype as cle
import numpy as np
source = np.asarray([[0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0]])
reference = np.asarray([[0, 1, 0, 2, 0, 0, 3, 4, 0, 0, 5, 0]])
def block_enum(source, blocksize):
flagged_indices = cle.push(source)
max_label = source.shape[1] - 1
block_sums = cle.create([1, int((int(max_label) + 1) / blocksize) + 1])
cle.sum_reduction_x(flagged_indices, block_sums, blocksize)
# distribute new numbers
new_indices = cle.create([1, int(max_label) + 1])
cle.block_enumerate(flagged_indices, block_sums, new_indices, blocksize)
return cle.pull(new_indices)
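# Hedged reference sketch (not part of the original test): for the 0/1 flag rows used
# here, block enumeration assigns consecutive labels 1..k to the flagged entries and
# leaves zeros elsewhere, which a plain NumPy cumulative sum reproduces.
def block_enum_reference(flags):
    flags = np.asarray(flags)
    # cumulative count of flags in scan order; keep a label only where a flag is set
    labels = np.cumsum(flags.ravel()).reshape(flags.shape)
    return np.where(flags != 0, labels, 0)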
def test_block_enumerate():
result = block_enum(source, 4)
print(result)
print(reference)
assert np.array_equal(result, reference)
result = block_enum(source, 2)
print(result)
print(reference)
assert np.array_equal(result, reference)
| 30.965517
| 76
| 0.679287
|
1e8bd26b3e6da005f8c995f4899a91f8d6459e86
| 7,831
|
py
|
Python
|
test_project/test_app/tests/test_site_notify.py
|
ninemoreminutes/django-site-utils
|
d0c9f360451593f20ce0e80866f7e76185c0764b
|
[
"BSD-3-Clause"
] | null | null | null |
test_project/test_app/tests/test_site_notify.py
|
ninemoreminutes/django-site-utils
|
d0c9f360451593f20ce0e80866f7e76185c0764b
|
[
"BSD-3-Clause"
] | 13
|
2020-05-07T03:57:03.000Z
|
2022-03-12T00:54:56.000Z
|
test_project/test_app/tests/test_site_notify.py
|
ninemoreminutes/django-site-utils
|
d0c9f360451593f20ce0e80866f7e76185c0764b
|
[
"BSD-3-Clause"
] | null | null | null |
# Python
from __future__ import unicode_literals
import email.utils
# Django
import django
def test_site_notify_default(command_runner, mailoutbox, settings):
# Send to default recipients (admins).
assert settings.ADMINS
result = command_runner('site_notify')
assert result[0] is None
assert len(mailoutbox) == 1
msg = mailoutbox[0]
expected_emails = set([x[1] for x in settings.ADMINS])
for recipient in msg.to:
realname, email_address = email.utils.parseaddr(recipient)
assert email_address in expected_emails
expected_emails.remove(email_address)
assert not expected_emails
def test_site_notify_managers(command_runner, mailoutbox, settings):
# Send to addresses listed in settings.MANAGERS.
assert settings.MANAGERS
result = command_runner('site_notify', managers=True)
assert result[0] is None
assert len(mailoutbox) == 1
msg = mailoutbox[0]
expected_emails = set([x[1] for x in settings.MANAGERS])
for recipient in msg.to:
realname, email_address = email.utils.parseaddr(recipient)
assert email_address in expected_emails
expected_emails.remove(email_address)
assert not expected_emails
def test_site_notify_superusers(user_model, super_user, inactive_super_user,
command_runner, mailoutbox):
# Send to active superusers.
result = command_runner('site_notify', superusers=True)
assert result[0] is None
assert len(mailoutbox) == 1
msg = mailoutbox[0]
users = user_model.objects.filter(is_active=True, is_superuser=True)
expected_emails = set(users.values_list('email', flat=True))
for recipient in msg.to:
realname, email_address = email.utils.parseaddr(recipient)
assert email_address in expected_emails
expected_emails.remove(email_address)
assert not expected_emails
def test_site_notify_staff(user_model, super_user, inactive_super_user,
staff_user, manager_staff_user, command_runner,
mailoutbox):
# Send to active staff users.
result = command_runner('site_notify', staff=True)
assert result[0] is None
assert len(mailoutbox) == 1
msg = mailoutbox[0]
users = user_model.objects.filter(is_active=True, is_staff=True)
expected_emails = set(users.values_list('email', flat=True))
for recipient in msg.to:
realname, email_address = email.utils.parseaddr(recipient)
assert email_address in expected_emails
expected_emails.remove(email_address)
assert not expected_emails
def test_site_notify_managers_staff(user_model, super_user, inactive_super_user,
staff_user, manager_staff_user,
command_runner, mailoutbox, settings):
# Send to managers and active staff. Email address in both lists should
# only be listed once.
result = command_runner('site_notify', managers=True, staff=True)
assert result[0] is None
assert len(mailoutbox) == 1
msg = mailoutbox[0]
users = user_model.objects.filter(is_active=True, is_staff=True)
expected_emails = set(users.values_list('email', flat=True))
expected_emails.update([x[1] for x in settings.MANAGERS])
for recipient in msg.to:
realname, email_address = email.utils.parseaddr(recipient)
assert email_address in expected_emails
expected_emails.remove(email_address)
assert not expected_emails
def test_site_notify_all(user_model, super_user, inactive_super_user,
staff_user, manager_staff_user, command_runner,
mailoutbox, settings):
# Send to all admins, managers and staff.
result = command_runner('site_notify', all_users=True)
assert result[0] is None
assert len(mailoutbox) == 1
msg = mailoutbox[0]
users = user_model.objects.filter(is_active=True, is_staff=True)
expected_emails = set(users.values_list('email', flat=True))
users = user_model.objects.filter(is_active=True, is_superuser=True)
expected_emails.update(users.values_list('email', flat=True))
expected_emails.update([x[1] for x in settings.MANAGERS])
expected_emails.update([x[1] for x in settings.ADMINS])
for recipient in msg.to:
realname, email_address = email.utils.parseaddr(recipient)
assert email_address in expected_emails
expected_emails.remove(email_address)
assert not expected_emails
def test_site_notify_bcc(command_runner, mailoutbox, settings):
# Send to default recipients (admins) bcc'ed.
result = command_runner('site_notify', bcc=True)
assert result[0] is None
assert len(mailoutbox) == 1
msg = mailoutbox[0]
expected_emails = set([x[1] for x in settings.ADMINS])
assert not msg.to
for recipient in msg.bcc:
realname, email_address = email.utils.parseaddr(recipient)
assert email_address in expected_emails
expected_emails.remove(email_address)
assert not expected_emails
def test_site_notify_change_default(command_runner, mailoutbox, settings):
# Change default recipients via setting.
assert settings.ADMINS
assert settings.MANAGERS
settings.SITE_NOTIFY_DEFAULT_RECIPIENTS = ('admins', 'managers')
result = command_runner('site_notify')
assert result[0] is None
assert len(mailoutbox) == 1
msg = mailoutbox[0]
expected_emails = set([x[1] for x in settings.ADMINS])
expected_emails.update([x[1] for x in settings.MANAGERS])
for recipient in msg.to:
realname, email_address = email.utils.parseaddr(recipient)
assert email_address in expected_emails
expected_emails.remove(email_address)
assert not expected_emails
def test_site_notify_subject_body(command_runner, mailoutbox):
# Positional arguments should become message subject, then body.
result = command_runner('site_notify', 'test_subject', 'test_body', 'test_body2')
assert result[0] is None
assert len(mailoutbox) == 1
msg = mailoutbox[0]
assert 'test_subject' in msg.subject
assert 'test_body' in msg.body
assert 'test_body2' in msg.body
def test_site_notify_templates(command_runner, mailoutbox, settings):
# Override subject and body templates via command line arguments.
result = command_runner('site_notify',
subject_template='new_site_notify_subject.txt',
body_template='new_site_notify_body.txt')
assert result[0] is None
assert len(mailoutbox) == 1
msg = mailoutbox[0]
# Verify that template context processor variables are made available.
assert 'SITE_ID={}'.format(settings.SITE_ID) in msg.subject
assert 'test_project.urls' in msg.body
assert 'NEW_BODY_SUFFIX' in msg.body
def test_site_notify_template_settings(command_runner, mailoutbox, settings):
# Override subject and body templates via settings.
settings.SITE_NOTIFY_SUBJECT_TEMPLATE = 'new_site_notify_subject.txt'
settings.SITE_NOTIFY_BODY_TEMPLATE = 'new_site_notify_body.txt'
result = command_runner('site_notify')
assert result[0] is None
assert len(mailoutbox) == 1
msg = mailoutbox[0]
assert 'SITE_ID={}'.format(settings.SITE_ID) in msg.subject
assert 'test_project.urls' in msg.body
assert 'NEW_BODY_SUFFIX' in msg.body
def test_site_notify_auth_not_installed(command_runner, mailoutbox, settings):
# Send to default recipients (admins).
settings.INSTALLED_APPS = (x for x in settings.INSTALLED_APPS if x != 'django.contrib.auth')
result = command_runner('site_notify')
assert result[0] is None
result = command_runner('site_notify', superusers=True)
if django.VERSION >= (2, 0):
assert isinstance(result[0], Exception)
| 41
| 96
| 0.713702
|
4dca9385067e4de3ae4bf60432fbff27e253fa40
| 9,623
|
py
|
Python
|
test/functional/listsinceblock.py
|
harzcoin/harzcoin
|
fb2c9d0e079fc4b55b3eb8cc0bfcb873ed1155d9
|
[
"MIT"
] | null | null | null |
test/functional/listsinceblock.py
|
harzcoin/harzcoin
|
fb2c9d0e079fc4b55b3eb8cc0bfcb873ed1155d9
|
[
"MIT"
] | null | null | null |
test/functional/listsinceblock.py
|
harzcoin/harzcoin
|
fb2c9d0e079fc4b55b3eb8cc0bfcb873ed1155d9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017 The Harzcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the listsincelast RPC."""
from test_framework.test_framework import HarzcoinTestFramework
from test_framework.util import assert_equal, assert_array_result, assert_raises_rpc_error
class ListSinceBlockTest (HarzcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def run_test(self):
self.nodes[2].generate(101)
self.sync_all()
self.test_no_blockhash()
self.test_invalid_blockhash()
self.test_reorg()
self.test_double_spend()
self.test_double_send()
def test_no_blockhash(self):
txid = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1)
blockhash, = self.nodes[2].generate(1)
self.sync_all()
txs = self.nodes[0].listtransactions()
assert_array_result(txs, {"txid": txid}, {
"category": "receive",
"amount": 1,
"blockhash": blockhash,
"confirmations": 1,
})
assert_equal(
self.nodes[0].listsinceblock(),
{"lastblock": blockhash,
"removed": [],
"transactions": txs})
assert_equal(
self.nodes[0].listsinceblock(""),
{"lastblock": blockhash,
"removed": [],
"transactions": txs})
def test_invalid_blockhash(self):
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock,
"42759cde25462784395a337460bde75f58e73d3f08bd31fdc3507cbac856a2c4")
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock,
"0000000000000000000000000000000000000000000000000000000000000000")
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock,
"invalid-hex")
def test_reorg(self):
'''
`listsinceblock` did not behave correctly when handed a block that was
no longer in the main chain:
             ab0
            /   \
        aa1 [tx0]  bb1
         |          |
        aa2        bb2
         |          |
        aa3        bb3
                    |
                   bb4
Consider a client that has only seen block `aa3` above. It asks the node
to `listsinceblock aa3`. But at some point prior the main chain switched
to the bb chain.
Previously: listsinceblock would find height=4 for block aa3 and compare
this to height=5 for the tip of the chain (bb4). It would then return
results restricted to bb3-bb4.
Now: listsinceblock finds the fork at ab0 and returns results in the
range bb1-bb4.
This test only checks that [tx0] is present.
'''
# Split network into two
self.split_network()
# send to nodes[0] from nodes[2]
senttx = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1)
# generate on both sides
lastblockhash = self.nodes[1].generate(6)[5]
self.nodes[2].generate(7)
self.log.info('lastblockhash=%s' % (lastblockhash))
self.sync_all([self.nodes[:2], self.nodes[2:]])
self.join_network()
# listsinceblock(lastblockhash) should now include tx, as seen from nodes[0]
lsbres = self.nodes[0].listsinceblock(lastblockhash)
found = False
for tx in lsbres['transactions']:
if tx['txid'] == senttx:
found = True
break
assert found
def test_double_spend(self):
'''
This tests the case where the same UTXO is spent twice on two separate
blocks as part of a reorg.
             ab0
            /   \
        aa1 [tx1]  bb1 [tx2]
         |          |
        aa2        bb2
         |          |
        aa3        bb3
                    |
                   bb4
Problematic case:
1. User 1 receives THALER in tx1 from utxo1 in block aa1.
2. User 2 receives THALER in tx2 from utxo1 (same) in block bb1
3. User 1 sees 2 confirmations at block aa3.
4. Reorg into bb chain.
5. User 1 asks `listsinceblock aa3` and does not see that tx1 is now
invalidated.
Currently the solution to this is to detect that a reorg'd block is
asked for in listsinceblock, and to iterate back over existing blocks up
until the fork point, and to include all transactions that relate to the
node wallet.
'''
self.sync_all()
# Split network into two
self.split_network()
# share utxo between nodes[1] and nodes[2]
utxos = self.nodes[2].listunspent()
utxo = utxos[0]
privkey = self.nodes[2].dumpprivkey(utxo['address'])
self.nodes[1].importprivkey(privkey)
# send from nodes[1] using utxo to nodes[0]
change = '%.8f' % (float(utxo['amount']) - 1.0003)
recipientDict = {
self.nodes[0].getnewaddress(): 1,
self.nodes[1].getnewaddress(): change,
}
utxoDicts = [{
'txid': utxo['txid'],
'vout': utxo['vout'],
}]
txid1 = self.nodes[1].sendrawtransaction(
self.nodes[1].signrawtransaction(
self.nodes[1].createrawtransaction(utxoDicts, recipientDict))['hex'])
# send from nodes[2] using utxo to nodes[3]
recipientDict2 = {
self.nodes[3].getnewaddress(): 1,
self.nodes[2].getnewaddress(): change,
}
self.nodes[2].sendrawtransaction(
self.nodes[2].signrawtransaction(
self.nodes[2].createrawtransaction(utxoDicts, recipientDict2))['hex'])
# generate on both sides
lastblockhash = self.nodes[1].generate(3)[2]
self.nodes[2].generate(4)
self.join_network()
self.sync_all()
# gettransaction should work for txid1
assert self.nodes[0].gettransaction(txid1)['txid'] == txid1, "gettransaction failed to find txid1"
# listsinceblock(lastblockhash) should now include txid1, as seen from nodes[0]
lsbres = self.nodes[0].listsinceblock(lastblockhash)
assert any(tx['txid'] == txid1 for tx in lsbres['removed'])
# but it should not include 'removed' if include_removed=false
lsbres2 = self.nodes[0].listsinceblock(blockhash=lastblockhash, include_removed=False)
assert 'removed' not in lsbres2
def test_double_send(self):
'''
This tests the case where the same transaction is submitted twice on two
separate blocks as part of a reorg. The former will vanish and the
latter will appear as the true transaction (with confirmations dropping
as a result).
             ab0
            /   \
        aa1 [tx1]  bb1
         |          |
        aa2        bb2
         |          |
        aa3        bb3 [tx1]
                    |
                   bb4
Asserted:
1. tx1 is listed in listsinceblock.
2. It is included in 'removed' as it was removed, even though it is now
present in a different block.
3. It is listed with a confirmations count of 2 (bb3, bb4), not
3 (aa1, aa2, aa3).
'''
self.sync_all()
# Split network into two
self.split_network()
# create and sign a transaction
utxos = self.nodes[2].listunspent()
utxo = utxos[0]
change = '%.8f' % (float(utxo['amount']) - 1.0003)
recipientDict = {
self.nodes[0].getnewaddress(): 1,
self.nodes[2].getnewaddress(): change,
}
utxoDicts = [{
'txid': utxo['txid'],
'vout': utxo['vout'],
}]
signedtxres = self.nodes[2].signrawtransaction(
self.nodes[2].createrawtransaction(utxoDicts, recipientDict))
assert signedtxres['complete']
signedtx = signedtxres['hex']
# send from nodes[1]; this will end up in aa1
txid1 = self.nodes[1].sendrawtransaction(signedtx)
# generate bb1-bb2 on right side
self.nodes[2].generate(2)
# send from nodes[2]; this will end up in bb3
txid2 = self.nodes[2].sendrawtransaction(signedtx)
assert_equal(txid1, txid2)
# generate on both sides
lastblockhash = self.nodes[1].generate(3)[2]
self.nodes[2].generate(2)
self.join_network()
self.sync_all()
# gettransaction should work for txid1
self.nodes[0].gettransaction(txid1)
# listsinceblock(lastblockhash) should now include txid1 in transactions
# as well as in removed
lsbres = self.nodes[0].listsinceblock(lastblockhash)
assert any(tx['txid'] == txid1 for tx in lsbres['transactions'])
assert any(tx['txid'] == txid1 for tx in lsbres['removed'])
# find transaction and ensure confirmations is valid
for tx in lsbres['transactions']:
if tx['txid'] == txid1:
assert_equal(tx['confirmations'], 2)
# the same check for the removed array; confirmations should STILL be 2
for tx in lsbres['removed']:
if tx['txid'] == txid1:
assert_equal(tx['confirmations'], 2)
if __name__ == '__main__':
ListSinceBlockTest().main()
| 34.245552
| 106
| 0.579653
|
d399a73bd5fa0b09028a970224ce8a036e1eaebd
| 1,114
|
py
|
Python
|
share/rpcuser/rpcuser.py
|
jwflame/nyancoin-client-1
|
6bc7686edf8bc4b058d504ce0ab40b7cd2a0597b
|
[
"MIT"
] | null | null | null |
share/rpcuser/rpcuser.py
|
jwflame/nyancoin-client-1
|
6bc7686edf8bc4b058d504ce0ab40b7cd2a0597b
|
[
"MIT"
] | 3
|
2021-12-19T15:19:12.000Z
|
2022-01-27T16:59:49.000Z
|
share/rpcuser/rpcuser.py
|
jwflame/nyancoin-client-1
|
6bc7686edf8bc4b058d504ce0ab40b7cd2a0597b
|
[
"MIT"
] | 3
|
2021-11-30T16:30:39.000Z
|
2022-01-21T18:13:30.000Z
|
#!/usr/bin/env python2
# Copyright (c) 2015-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import hashlib
import sys
import os
from random import SystemRandom
import base64
import hmac
if len(sys.argv) < 2:
sys.stderr.write('Please include username as an argument.\n')
sys.exit(0)
username = sys.argv[1]
#This uses os.urandom() underneath
cryptogen = SystemRandom()
#Create 16 byte hex salt
salt_sequence = [cryptogen.randrange(256) for i in range(16)]
hexseq = list(map(hex, salt_sequence))
salt = "".join([x[2:] for x in hexseq])
#Create 32 byte b64 password
password = base64.urlsafe_b64encode(os.urandom(32))
digestmod = hashlib.sha256
if sys.version_info.major >= 3:
password = password.decode('utf-8')
digestmod = 'SHA256'
m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), digestmod)
result = m.hexdigest()
print("String to be appended to bitcoin.conf:")
print("rpcauth="+username+":"+salt+"$"+result)
print("Your password:\n"+password)
| 26.52381
| 79
| 0.728007
|
1847bb27f5e627e7221d77b2e6778a5e029e8c2e
| 8,859
|
py
|
Python
|
tests/test_slddb/test_webapi.py
|
bmaranville/orsopy
|
74083afdce8f8f1ab3866c7f1f5209942c8734db
|
[
"MIT"
] | null | null | null |
tests/test_slddb/test_webapi.py
|
bmaranville/orsopy
|
74083afdce8f8f1ab3866c7f1f5209942c8734db
|
[
"MIT"
] | null | null | null |
tests/test_slddb/test_webapi.py
|
bmaranville/orsopy
|
74083afdce8f8f1ab3866c7f1f5209942c8734db
|
[
"MIT"
] | null | null | null |
import os
import shutil
import sys
import tempfile
import unittest
import zipfile
from importlib import reload
from urllib import request
class TestWebAPI(unittest.TestCase):
server_available = True
@classmethod
def setUpClass(cls):
        # create a temporary folder to download the python api and store the database file
cls.path = os.path.join(tempfile.gettempdir(), "slddb_test")
if os.path.exists(cls.path):
shutil.rmtree(cls.path) # cleanup possible earlier runs
os.makedirs(cls.path)
# try to download the module from website
try:
res = request.urlopen("http://127.0.0.1:5000/download_api", timeout=500)
except Exception:
try:
res = request.urlopen("https://slddb.esss.dk/slddb/download_api", timeout=500)
except Exception:
cls.server_available = False
print("Server unreachable to download python api")
return
else:
server_url = "https://slddb.esss.dk/slddb/"
else:
server_url = "http://127.0.0.1:5000/"
with open(os.path.join(cls.path, "slddb.zip"), "wb") as f:
try:
f.write(res.read())
except Exception:
cls.server_available = False
print("Server unreachable to download python api")
return
# clear all modules of slddb
for key in list(sys.modules.keys()):
if key.startswith("slddb"):
del sys.modules[key]
# try extracting the zip file
with zipfile.ZipFile(os.path.join(cls.path, "slddb.zip")) as zf:
zf.extractall(cls.path)
# use the local api version to make sure test coverage works
global api, slddb
from orsopy import slddb
from orsopy.slddb import api, dbconfig, webapi
# overwrite local server URL
cls._api_url = dbconfig.WEBAPI_URL
dbconfig.WEBAPI_URL = server_url
webapi.WEBAPI_URL = dbconfig.WEBAPI_URL
# set a temporary database file
dbconfig.DB_FILE = os.path.join(cls.path, "slddb.db")
slddb.DB_FILE = dbconfig.DB_FILE
webapi.DB_FILE = slddb.DB_FILE
@classmethod
def tearDownClass(cls):
try:
global api
try:
api.db.db.close()
except Exception:
pass
del api
except NameError:
pass
# delete temporary folder with all files
shutil.rmtree(cls.path)
# clear all modules of slddb
for key in list(sys.modules.keys()):
if key.startswith("slddb"):
del sys.modules[key]
def test_a_downloaddb(self):
if not self.server_available:
return
# make sure the path of the module is correct and that the database has not been downloaded
self.assertTrue(api.first_access)
# self.assertEqual(slddb.__file__, os.path.join(self.path, 'slddb', '__init__.py'))
self.assertFalse(os.path.exists(slddb.DB_FILE))
# test of database download
api.download_db()
self.assertTrue(os.path.exists(slddb.DB_FILE))
mtime = os.path.getmtime(slddb.DB_FILE)
# test of second download to overwrite
api.download_db()
m2time = os.path.getmtime(slddb.DB_FILE)
self.assertNotEqual(mtime, m2time)
# test error when server does not exist
from urllib.error import URLError
from orsopy.slddb import dbconfig, webapi
webapi.WEBAPI_URL = "http://doesnot.exist/"
with self.assertRaises(URLError):
api.download_db()
webapi.WEBAPI_URL = dbconfig.WEBAPI_URL
# test error when file is wrong
api.db_suburl = "download_api"
with self.assertRaises(ValueError):
api.download_db()
api.db_suburl = "download_db"
def test_b_check(self):
if not self.server_available:
return
api.first_access = True
if os.path.isfile(slddb.DB_FILE):
os.remove(slddb.DB_FILE)
api.check()
self.assertFalse(api.first_access)
api.first_access = True
api.check()
self.assertFalse(api.first_access)
api.check()
# check the update case
api.db.db.close()
del api.db
api.first_access = True
api.max_age = -1
api.check()
api.max_age = 1
# check warning if download url doesn't work during update
api.db.db.close()
del api.db
api.first_access = True
api.max_age = -1
from orsopy.slddb import dbconfig, webapi
webapi.WEBAPI_URL = "http://doesnot.exist/"
with self.assertWarns(UserWarning):
api.check()
api.max_age = 1
webapi.WEBAPI_URL = dbconfig.WEBAPI_URL
api.check()
def test_c_query(self):
if not self.server_available:
return
        res = api.search(formula="Fe2O3")
self.assertGreater(len(res), 0)
self.assertIn("density", res[0])
def test_c_material(self):
if not self.server_available:
return
mat = api.material(1)
self.assertEqual(mat.__class__.__name__, "Material")
def test_c_custom(self):
if not self.server_available:
return
mat = api.custom(formula="Au", dens=19.3)
self.assertEqual(mat.__class__.__name__, "Material")
def test_d_local(self):
if not self.server_available:
return
# test database access if server is unavailable
from orsopy.slddb import dbconfig, webapi
webapi.WEBAPI_URL = "http://doesnot.exist/"
with self.subTest(msg="local search", i=0):
# first search drop connection
api.use_webquery = True
api.search(formula="Fe2O3")
self.assertFalse(api.use_webquery)
res = api.search(formula="Fe2O3")
self.assertGreater(len(res), 0)
self.assertIn("density", res[0])
with self.subTest(msg="local material", i=0):
api.use_webquery = True
mat = api.material(1)
self.assertFalse(api.use_webquery)
mat = api.material(1)
self.assertEqual(mat.__class__.__name__, "Material")
webapi.WEBAPI_URL = dbconfig.WEBAPI_URL
def test_bio_blender(self):
if not self.server_available:
return
mat = api.bio_blender("aa", molecule="protein")
self.assertEqual(mat.__class__.__name__, "Material")
mat = api.bio_blender("aa", molecule="dna")
self.assertEqual(mat.__class__.__name__, "Material")
mat = api.bio_blender("aa", molecule="rna")
self.assertEqual(mat.__class__.__name__, "Material")
class TestConfigPaths(unittest.TestCase):
@classmethod
def setUpClass(cls):
        # create a temporary folder to download the python api and store the database file
cls.path = os.path.join(tempfile.gettempdir(), "slddb_testconfig")
if os.path.exists(cls.path):
shutil.rmtree(cls.path) # cleanup possible earlier runs
os.makedirs(cls.path)
cls.old_environ = dict(os.environ)
if "APPDATA" in os.environ:
del os.environ["APPDATA"]
if "XDG_CONFIG_HOME" in os.environ:
del os.environ["XDG_CONFIG_HOME"]
if "HOME" in os.environ:
del os.environ["HOME"]
@classmethod
def tearDownClass(cls):
# delete temporary folder with all files and reset environment variables
shutil.rmtree(cls.path)
os.environ = cls.old_environ
def test_macpath(self):
# mac version of config path
os.environ["APPDATA"] = self.path
from orsopy.slddb import dbconfig
reload(dbconfig)
del os.environ["APPDATA"]
res_path = os.path.join(self.path, "slddb")
self.assertEqual(dbconfig.configpath, res_path)
self.assertTrue(os.path.exists(res_path))
def test_linux(self):
        # linux version of config path (XDG_CONFIG_HOME)
os.environ["XDG_CONFIG_HOME"] = self.path
from orsopy.slddb import dbconfig
reload(dbconfig)
del os.environ["XDG_CONFIG_HOME"]
res_path = os.path.join(self.path, "slddb")
self.assertEqual(dbconfig.configpath, res_path)
self.assertTrue(os.path.exists(res_path))
def test_rest(self):
        # fallback version of config path (HOME/.config)
os.environ["HOME"] = self.path
from orsopy.slddb import dbconfig
reload(dbconfig)
del os.environ["HOME"]
res_path = os.path.join(self.path, ".config", "slddb")
self.assertEqual(dbconfig.configpath, res_path)
self.assertTrue(os.path.exists(res_path))
| 33.430189
| 99
| 0.606276
|
c60bc14d7f9c91e9a8ee3e270eae5e38e759b1cb
| 6,992
|
py
|
Python
|
src/site/context_processors.py
|
dvsidorov/django-leon-base
|
bb68f81ad45ce5e416e483c87a98e9fcbedf8f19
|
[
"MIT"
] | null | null | null |
src/site/context_processors.py
|
dvsidorov/django-leon-base
|
bb68f81ad45ce5e416e483c87a98e9fcbedf8f19
|
[
"MIT"
] | null | null | null |
src/site/context_processors.py
|
dvsidorov/django-leon-base
|
bb68f81ad45ce5e416e483c87a98e9fcbedf8f19
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from ..base.context_processors import BaseContextProcessor
class FrontMainMenuContextProcessor(BaseContextProcessor):
"""
Class for block context processor menu
"""
MAIN_MENU_ITEM_MODEL = None
def _create_data(self):
self.main_menu = self.MAIN_MENU_ITEM_MODEL.objects.filter(show=True).order_by('position')
def _format(self):
pass
def __call__(self, request):
self.main_menu = {}
self.output_context = {
'main_menu': None
}
self._init(request)
self._create_data()
self._format()
self._aggregate()
return self.output_context
class FrontSidebarMenuContextProcessor(BaseContextProcessor):
"""
Class for block context processor menu
"""
SIDEBAR_MENU_ITEM_MODEL = None
def _create_data(self):
self.sidebar_menu = self.SIDEBAR_MENU_ITEM_MODEL.objects.all().order_by('position')
def _format(self):
pass
def __call__(self, request):
self.sidebar_menu = {}
self.output_context = {
'sidebar_menu': None
}
self._init(request)
self._create_data()
self._format()
self._aggregate()
return self.output_context
class FrontAdditionalLinkContextProcessor(BaseContextProcessor):
"""
    Context processor for the additional links block
"""
ADDITIONAL_LINK_ITEM_MODEL = None
def _create_data(self):
self.additional_link_s = self.ADDITIONAL_LINK_ITEM_MODEL.objects.all().order_by('position')
def _format(self):
pass
def __call__(self, request):
self.additional_link_s = {}
self.output_context = {
'additional_link_s': None
}
self._init(request)
self._create_data()
self._format()
self._aggregate()
return self.output_context
class FrontLogoHeaderContextProcessor(BaseContextProcessor):
"""
    Context processor for the header logo block
"""
LOGO_HEADER_MODEL = None
def _create_data(self):
self.logo_header = self.LOGO_HEADER_MODEL.objects.first()
def _format(self):
pass
def __call__(self, request):
self.logo = {}
self.output_context = {
'logo_header': None
}
self._init(request)
self._create_data()
self._format()
self._aggregate()
return self.output_context
class FrontLogoFooterContextProcessor(BaseContextProcessor):
"""
    Context processor for the footer logo block
"""
LOGO_FOOTER_MODEL = None
def _create_data(self):
self.logo_footer = self.LOGO_FOOTER_MODEL.objects.first()
def _format(self):
pass
def __call__(self, request):
self.logo = {}
self.output_context = {
'logo_footer': None
}
self._init(request)
self._create_data()
self._format()
self._aggregate()
return self.output_context
class FrontPhotoStreamContextProcessor(BaseContextProcessor):
"""
    Context processor for the photo stream block
"""
PHOTO_STREAM_MODEL = None
def _create_data(self):
self.photo_stream = self.PHOTO_STREAM_MODEL.objects.first()
def _format(self):
pass
def __call__(self, request):
self.photo_stream = {}
self.output_context = {
'photo_stream': None
}
self._init(request)
self._create_data()
self._format()
self._aggregate()
return self.output_context
class FrontPrimaryMenuContextProcessor(BaseContextProcessor):
"""
Class for block context processor menu
"""
PRIMARY_MENU_ITEM_MODEL = None
def _create_data(self):
self.primary_menu = self.PRIMARY_MENU_ITEM_MODEL.objects.all().order_by('position')
def _format(self):
pass
def __call__(self, request):
self.primary_menu = {}
self.output_context = {
'primary_menu': None
}
self._init(request)
self._create_data()
self._format()
self._aggregate()
return self.output_context
class FrontRightsDescContextProcessor(BaseContextProcessor):
"""
    Context processor for the rights/copyright description block
"""
RIGHTS_MODEL = None
def _create_data(self):
self.rights_desc = self.RIGHTS_MODEL.objects.first()
def _format(self):
pass
def __call__(self, request):
self.rights_desc = {}
self.output_context = {
'rights_desc': None
}
self._init(request)
self._create_data()
self._format()
self._aggregate()
return self.output_context
class FrontSmallDescContextProcessor(BaseContextProcessor):
"""
Class for block context processor
"""
SMALL_DESC_MODEL = None
def _create_data(self):
self.small_desc = self.SMALL_DESC_MODEL.objects.first()
def _format(self):
pass
def __call__(self, request):
self.small_desc = {}
self.output_context = {
'small_desc': None
}
self._init(request)
self._create_data()
self._format()
self._aggregate()
return self.output_context
class FrontSocialLinksContextProcessor(BaseContextProcessor):
"""
Class for social links context processor menu
"""
SOCIAL_LINK_MODEL = None
def _create_data(self):
self.social_link_s = self.SOCIAL_LINK_MODEL.objects.all().order_by('position')
def __call__(self, request):
self.header = {}
self.output_context = {
'social_link_s': None
}
self._init(request)
self._create_data()
self._aggregate()
return self.output_context
class FrontUserCityContextProcessor(BaseContextProcessor):
"""
Class for user city context processor menu
"""
USER_CITY_MODEL = None
def _create_data(self):
self.user_city_s = self.USER_CITY_MODEL.objects.filter(is_main=True).all().order_by('name')
def _current_city(self):
session = self.request.session
self.user_city_selected = session.get('city_id')
def __call__(self, request):
self.header = {}
self.output_context = {
'user_city_s': None,
'user_city_selected': None
}
self._init(request)
self._create_data()
self._current_city()
self._aggregate()
return self.output_context
class FrontWorkingDescContextProcessor(BaseContextProcessor):
"""
Class for working desc
"""
WORKING_DESC_MODEL = None
def _create_data(self):
self.working_desc = self.WORKING_DESC_MODEL.objects.first()
def __call__(self, request):
self.header = {}
self.output_context = {
'working_desc': None
}
self._init(request)
self._create_data()
self._aggregate()
return self.output_context
| 24.362369
| 99
| 0.627002
|
e8693f678a929ce82a307ce8a0cc6138998d50c1
| 25,479
|
py
|
Python
|
run_baselines_exp.py
|
ashok-arjun/few-shot-ssl-public
|
f7577d80b7491e0f27234a2e9c0113782365c2e1
|
[
"MIT"
] | 497
|
2018-03-02T00:50:53.000Z
|
2022-03-22T06:30:59.000Z
|
run_baselines_exp.py
|
eleniTriantafillou/few-shot-ssl-public
|
3cf522031aa40b4ffb61e4693d0b48fdd5669276
|
[
"MIT"
] | 20
|
2018-03-19T06:15:30.000Z
|
2021-11-20T07:21:38.000Z
|
run_baselines_exp.py
|
eleniTriantafillou/few-shot-ssl-public
|
3cf522031aa40b4ffb61e4693d0b48fdd5669276
|
[
"MIT"
] | 108
|
2018-03-02T06:56:13.000Z
|
2021-12-23T03:40:43.000Z
|
# Copyright (c) 2018 Mengye Ren, Eleni Triantafillou, Sachin Ravi, Jake Snell,
# Kevin Swersky, Joshua B. Tenenbaum, Hugo Larochelle, Richard S. Zemel.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
"""Nearest neighbors and logistic regression baselines.
Usage:
./run_baselines_exp.py \
--aug [AUGMENT 90 DEGREE] \
--shuffle_episode [SHUFFLE EPISODE] \
--nclasses_eval [NUM CLASSES EVAL] \
--nclasses_train [NUM CLASSES TRAIN] \
--nshot [NUM SHOT] \
--num_eval_episode [NUM EVAL EPISODE] \
--num_test [NUM TEST] \
--num_unlabel [NUM UNLABEL] \
--seed [RANDOM SEED] \
--dataset [DATASET NAME]
Flags:
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import numpy as np
import six
import tensorflow as tf
from fewshot.configs.config_factory import get_config
from fewshot.configs.tiered_imagenet_config import *
from fewshot.configs.mini_imagenet_config import *
from fewshot.configs.omniglot_config import *
from fewshot.data.data_factory import get_dataset
from fewshot.data.episode import Episode
from fewshot.data.tiered_imagenet import TieredImageNetDataset
from fewshot.data.mini_imagenet import MiniImageNetDataset
from fewshot.data.omniglot import OmniglotDataset
from fewshot.models.nnlib import cnn, weight_variable
from fewshot.utils import logger
from fewshot.utils.batch_iter import BatchIterator
from tqdm import tqdm
log = logger.get()
class LRModel(object):
"""A fully supervised logistic regression model for episodic learning."""
def __init__(self, x, y, num_classes, dtype=tf.float32, learn_rate=1e-3):
x_shape = x.get_shape()
x_size = 1
for ss in x_shape[1:]:
x_size *= int(ss)
x = tf.reshape(x, [-1, x_size])
w_class = weight_variable(
[x_size, num_classes],
init_method='truncated_normal',
dtype=tf.float32,
init_param={'stddev': 0.01},
name='w_class')
b_class = weight_variable(
[num_classes],
init_method='constant',
init_param={'val': 0.0},
name='b_class')
logits = tf.matmul(x, w_class) + b_class
xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=y)
xent = tf.reduce_mean(xent, name='xent')
cost = xent
cost += self._decay()
self._cost = cost
self._inputs = x
self._labels = y
self._train_op = tf.train.AdamOptimizer(learn_rate).minimize(
cost, var_list=[w_class, b_class])
correct = tf.equal(tf.argmax(logits, axis=1), y)
self._acc = tf.reduce_mean(tf.cast(correct, dtype))
self._prediction = tf.nn.softmax(logits)
def _decay(self):
wd_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
log.info('Weight decay variables')
[log.info(x) for x in wd_losses]
log.info('Total length: {}'.format(len(wd_losses)))
if len(wd_losses) > 0:
return tf.add_n(wd_losses)
else:
log.warning('No weight decay variables!')
return 0.0
@property
def inputs(self):
return self._inputs
@property
def labels(self):
return self._labels
@property
def cost(self):
return self._cost
@property
def train_op(self):
return self._train_op
@property
def acc(self):
return self._acc
@property
def prediction(self):
return self._prediction
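# A minimal sketch of fitting LRModel on a single episode of pre-flattened
# features. The feature size (1600), the number of classes and the step count
# are illustrative assumptions; `x_np`/`y_np` stand for NumPy arrays of shape
# [N, 1600] and [N].
#
#   x_ph = tf.placeholder(tf.float32, [None, 1600], name='x')
#   y_ph = tf.placeholder(tf.int64, [None], name='y')
#   with tf.variable_scope('LRModel'):
#     lr_model = LRModel(x_ph, y_ph, num_classes=5, learn_rate=1e-2)
#   with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     for _ in range(100):
#       sess.run(lr_model.train_op,
#                feed_dict={lr_model.inputs: x_np, lr_model.labels: y_np})
#     probs = sess.run(lr_model.prediction, feed_dict={lr_model.inputs: x_np})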
class SupervisedModel(object):
"""A fully supervised classification model for baseline representation learning"""
def __init__(self,
config,
x,
y,
num_classes,
is_training=True,
dtype=tf.float32):
"""Constructor.
Args:
      config: Model configuration object (filter sizes, strides, learning-rate schedule).
      x: Input image tensor.
      y: Integer class label tensor.
      num_classes: Int. Number of output classes.
"""
h, _ = cnn(
x,
config.filter_size,
strides=config.strides,
pool_fn=[tf.nn.max_pool] * len(config.pool_fn),
pool_size=config.pool_size,
pool_strides=config.pool_strides,
act_fn=[tf.nn.relu for aa in config.conv_act_fn],
add_bias=True,
init_std=config.conv_init_std,
init_method=config.conv_init_method,
wd=config.wd,
dtype=dtype,
batch_norm=True,
is_training=is_training,
ext_wts=None)
h_shape = h.get_shape()
h_size = 1
for ss in h_shape[1:]:
h_size *= int(ss)
h = tf.reshape(h, [-1, h_size])
w_class = weight_variable(
[h_size, num_classes],
init_method='truncated_normal',
dtype=tf.float32,
init_param={'stddev': 0.01},
name='w_class')
b_class = weight_variable(
[num_classes],
init_method='constant',
init_param={'val': 0.0},
name='b_class')
self._feature = h
logits = tf.matmul(h, w_class) + b_class
xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=y)
xent = tf.reduce_mean(xent, name='xent')
cost = xent
cost += self._decay()
self._cost = cost
self._inputs = x
self._labels = y
global_step = tf.get_variable(
'global_step', shape=[], dtype=tf.int64, trainable=False)
# Learning rate decay.
learn_rate = tf.train.piecewise_constant(
global_step, list(np.array(config.lr_decay_steps).astype(np.int64)),
[config.learn_rate] + list(config.lr_list))
self._learn_rate = learn_rate
self._train_op = tf.train.AdamOptimizer(learn_rate).minimize(
cost, global_step=global_step)
correct = tf.equal(tf.argmax(logits, axis=1), y)
self._acc = tf.reduce_mean(tf.cast(correct, dtype))
def _decay(self):
wd_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
log.info('Weight decay variables')
[log.info(x) for x in wd_losses]
log.info('Total length: {}'.format(len(wd_losses)))
if len(wd_losses) > 0:
return tf.add_n(wd_losses)
else:
log.warning('No weight decay variables!')
return 0.0
@property
def inputs(self):
return self._inputs
@property
def labels(self):
return self._labels
@property
def cost(self):
return self._cost
@property
def train_op(self):
return self._train_op
@property
def acc(self):
return self._acc
@property
def feature(self):
return self._feature
@property
def learn_rate(self):
return self._learn_rate
def get_exp_logger(sess, log_folder):
"""Gets a TensorBoard logger."""
with tf.name_scope('Summary'):
writer = tf.summary.FileWriter(os.path.join(log_folder, 'logs'), sess.graph)
class ExperimentLogger():
def log(self, name, niter, value):
summary = tf.Summary()
summary.value.add(tag=name, simple_value=value)
writer.add_summary(summary, niter)
def flush(self):
"""Flushes results to disk."""
writer.flush()
def close(self):
"""Closes writer."""
writer.close()
return ExperimentLogger()
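# A minimal sketch of using the logger returned above; the log folder name is
# an illustrative assumption (any writable path works).
#
#   with tf.Graph().as_default(), tf.Session() as sess:
#     exp_logger = get_exp_logger(sess, 'results/example_run')
#     exp_logger.log('train_acc', 1, 0.5)
#     exp_logger.flush()
#     exp_logger.close()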
def supervised_pretrain(sess,
model,
train_data,
num_steps,
num_eval_steps=10,
batch_size=100,
logging_fn=None):
"""Pretrain a supervised model on the labeled split of the training data to get a reasonable
embedding model for baselines.
Args:
sess: TensorFlow session object.
model: SupervisedModel object.
train_data: Training dataset object.
    num_steps: Int. Number of training steps.
    num_eval_steps: Int. Number of mini-batches used for each periodic evaluation.
    batch_size: Int. Mini-batch size.
    logging_fn: Optional callable taking (step, metrics_dict), used for logging.
"""
train_iter = BatchIterator(
train_data.get_size(),
batch_size=batch_size,
cycle=True,
shuffle=True,
get_fn=train_data.get_batch_idx,
log_epoch=-1)
train_eval_iter = BatchIterator(
train_data.get_size(),
batch_size=batch_size,
cycle=True,
shuffle=True,
get_fn=train_data.get_batch_idx,
log_epoch=-1)
test_iter = BatchIterator(
train_data.get_size(),
batch_size=batch_size,
cycle=True,
shuffle=True,
get_fn=train_data.get_batch_idx_test,
log_epoch=-1)
sess.run(tf.global_variables_initializer())
it = tqdm(six.moves.xrange(num_steps), ncols=0)
for ii in it:
x_train, y_train = train_iter.next()
sess.run(
[model.train_op],
feed_dict={
model.inputs: x_train,
model.labels: y_train
})
if (ii + 1) % 100 == 0 or ii == 0:
train_cost = 0.0
train_acc = 0.0
for jj in six.moves.xrange(num_eval_steps):
x_train, y_train = train_eval_iter.next()
cost_, acc_ = sess.run(
[model.cost, model.acc],
feed_dict={
model.inputs: x_train,
model.labels: y_train
})
train_cost += cost_ / num_eval_steps
train_acc += acc_ / num_eval_steps
test_cost = 0.0
test_acc = 0.0
for jj in six.moves.xrange(num_eval_steps):
x_train, y_train = test_iter.next()
cost_, acc_ = sess.run(
[model.cost, model.acc],
feed_dict={
model.inputs: x_train,
model.labels: y_train
})
test_cost += cost_ / num_eval_steps
test_acc += acc_ / num_eval_steps
learn_rate = sess.run(model.learn_rate)
if logging_fn is not None:
logging_fn(
ii + 1, {
'train_cost': train_cost,
'train_acc': train_acc,
'test_cost': test_cost,
'test_acc': test_acc,
'learn_rate': learn_rate
})
it.set_postfix(
ce='{:.3e}'.format(train_cost),
train_acc='{:.3f}%'.format(train_acc * 100),
test_acc='{:.3f}%'.format(test_acc * 100),
lr='{:.3e}'.format(learn_rate))
def preprocess_batch(batch):
if len(batch.x_train.shape) == 4:
x_train = np.expand_dims(batch.x_train, 0)
y_train = np.expand_dims(batch.y_train, 0)
x_test = np.expand_dims(batch.x_test, 0)
y_test = np.expand_dims(batch.y_test, 0)
if batch.x_unlabel is not None:
x_unlabel = np.expand_dims(batch.x_unlabel, 0)
else:
x_unlabel = None
if hasattr(batch, 'y_unlabel') and batch.y_unlabel is not None:
y_unlabel = np.expand_dims(batch.y_unlabel, 0)
else:
y_unlabel = None
return Episode(
x_train,
y_train,
x_test,
y_test,
x_unlabel=x_unlabel,
y_unlabel=y_unlabel,
y_train_str=batch.y_train_str,
y_test_str=batch.y_test_str)
else:
return batch
def get_nn_fit(x_train, y_train, x_test, k=1):
"""Fit a nearest neighbor classifier.
Args:
    x_train: Training inputs. [B, N, H, W, C], where B is the number of episodes.
    y_train: Training integer class labels. [B, N].
    x_test: Test inputs. [B, M, H, W, C].
    k: Int. Number of nearest neighbors to consider. Default 1.
  Returns:
    y_pred: Test prediction integer class labels. [B, M].
"""
nbatches = x_train.shape[0]
y_pred = np.zeros([x_test.shape[0], x_test.shape[1]])
for ii in six.moves.xrange(nbatches):
    # Flatten the ii-th episode; index by ii so this stays correct when B > 1.
    x_train_ = x_train[ii].reshape([x_train[ii].shape[0], -1])
    y_train_ = y_train[ii].reshape([x_train[ii].shape[0]])
    x_test_ = x_test[ii].reshape([x_test[ii].shape[0], -1])
x_train_ = np.expand_dims(x_train_, 1)
x_test_ = np.expand_dims(x_test_, 0)
pairdist = ((x_train_ - x_test_)**2).sum(axis=-1)
assert k == 1, 'Only support k=1 for now'
min_idx = np.argmin(pairdist, axis=0)
sort_idx = np.argsort(pairdist, axis=0)
y_pred[ii] = y_train[ii, min_idx]
return y_pred
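# A toy worked example of the 1-NN rule above, with B=1 episode, two support
# points and one query (the numbers are illustrative):
#
#   x_train = np.zeros([1, 2, 4]); x_train[0, 1] += 1.0
#   y_train = np.array([[0, 1]])
#   x_test = np.ones([1, 1, 4]) * 0.9   # closer to the second support point
#   get_nn_fit(x_train, y_train, x_test)  # -> array([[1.]])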
def run_nn(sess, meta_dataset, num_episodes=600, emb_model=None):
"""Nearest neighbor baselines."""
ncorr = 0
ntotal = 0
acc_list = []
for neval in tqdm(six.moves.xrange(num_episodes), ncols=0):
dataset = meta_dataset.next()
batch = dataset.next_batch()
batch = preprocess_batch(batch)
if emb_model is not None:
x_train = sess.run(
emb_model.feature,
feed_dict={
emb_model.inputs: np.squeeze(batch.x_train, axis=0)
})
x_test = sess.run(
emb_model.feature,
feed_dict={
emb_model.inputs: np.squeeze(batch.x_test, axis=0)
})
x_train = np.expand_dims(x_train, axis=0)
x_test = np.expand_dims(x_test, axis=0)
else:
x_train = batch.x_train
x_test = batch.x_test
y_pred = get_nn_fit(x_train, batch.y_train, x_test)
ncorr_ = np.equal(y_pred, batch.y_test).astype(np.float32)
ncorr += ncorr_.sum()
ntotal += y_pred.size
acc_list.append(ncorr_.sum() / float(y_pred.size))
meta_dataset.reset()
acc_list = np.array(acc_list)
print('Acc', ncorr / float(ntotal))
print('Std', acc_list.std())
print('95 CI', acc_list.std() * 1.96 / np.sqrt(float(num_episodes)))
def get_lr_fit(sess, model, x_train, y_train, x_test, num_steps=100):
"""Fit a multi-class logistic regression classifier.
Args:
    x_train: [B, N, ...]. Training data, batched by episode (flattened internally).
    y_train: [B, N]. Training labels, integer classes.
    x_test: [B, M, ...]. Test data.
  Returns:
    y_pred: [B, M]. Integer class predictions for the test data.
"""
nbatches = x_train.shape[0]
y_pred = np.zeros([x_test.shape[0], x_test.shape[1]])
for ii in six.moves.xrange(nbatches):
x_train_ = x_train[ii].reshape([x_train[ii].shape[0], -1])
x_test_ = x_test[ii].reshape([x_test[ii].shape[0], -1])
y_train_ = y_train[ii]
# Reinitialize variables for a new episode.
var_to_init = list(
filter(lambda x: 'LRModel' in x.name, tf.global_variables()))
sess.run(tf.variables_initializer(var_to_init))
# Run LR training.
for step in six.moves.xrange(num_steps):
cost, acc, _ = sess.run(
[model.cost, model.acc, model.train_op],
feed_dict={
model.inputs: x_train_,
model.labels: y_train_
})
y_pred[ii] = np.argmax(
sess.run(model.prediction, feed_dict={
model.inputs: x_test_
}), axis=-1)
return y_pred
def run_lr(sess,
meta_dataset,
input_shape,
feature_shape,
num_episodes=600,
num_classes=5,
emb_model=None):
"""Logistic regression baselines."""
def get_lr_model(x_shape=[None, 28, 28, 1], learn_rate=1e-3):
with log.verbose_level(2):
x = tf.placeholder(tf.float32, x_shape, name='x')
y = tf.placeholder(tf.int64, [None], name='y')
with tf.variable_scope('LRModel'):
lr_model = LRModel(x, y, num_classes, learn_rate=learn_rate)
return lr_model
ncorr = 0
ntotal = 0
if emb_model is not None:
model = get_lr_model(x_shape=[None] + feature_shape, learn_rate=1e-2)
num_steps = 200
    # Tried 2000 steps here as well; it does not help.
else:
model = get_lr_model(x_shape=[None] + input_shape, learn_rate=1e-3)
num_steps = 200
acc_list = []
for neval in tqdm(six.moves.xrange(num_episodes), ncols=0):
dataset = meta_dataset.next()
batch = dataset.next_batch()
batch = preprocess_batch(batch)
if emb_model is not None:
x_train = sess.run(
emb_model.feature,
feed_dict={
emb_model.inputs: np.squeeze(batch.x_train, axis=0)
})
x_test = sess.run(
emb_model.feature,
feed_dict={
emb_model.inputs: np.squeeze(batch.x_test, axis=0)
})
x_train = np.expand_dims(x_train, axis=0)
x_test = np.expand_dims(x_test, axis=0)
else:
x_train = batch.x_train
x_test = batch.x_test
y_pred = get_lr_fit(
sess, model, x_train, batch.y_train, x_test, num_steps=num_steps)
ncorr_ = np.equal(y_pred, batch.y_test).astype(np.float32)
ncorr += ncorr_.sum()
ntotal += y_pred.size
acc_list.append(ncorr_.sum() / float(y_pred.size))
meta_dataset.reset()
acc_list = np.array(acc_list)
print('Acc', ncorr / float(ntotal))
print('Std', acc_list.std())
print('95 CI', acc_list.std() * 1.96 / np.sqrt(float(num_episodes)))
def main():
# ------------------------------------------------------------------------
# Flags.
if FLAGS.num_test == -1 and (FLAGS.dataset == "tiered-imagenet" or
FLAGS.dataset == 'mini-imagenet'):
num_test = 5
else:
num_test = FLAGS.num_test
nclasses_train = FLAGS.nclasses_train
nclasses_eval = FLAGS.nclasses_eval
# Whether doing 90 degree augmentation.
if 'mini-imagenet' in FLAGS.dataset or 'tiered-imagenet' in FLAGS.dataset:
_aug_90 = False
input_shape = [84, 84, 3]
feature_shape = [1600]
else:
_aug_90 = True
input_shape = [28, 28, 1]
feature_shape = [64]
nshot = FLAGS.nshot
dataset = FLAGS.dataset
meta_train_dataset = get_dataset(
FLAGS.dataset,
'train',
nclasses_train,
nshot,
num_test=num_test,
aug_90=_aug_90,
num_unlabel=FLAGS.num_unlabel,
shuffle_episode=FLAGS.shuffle_episode,
seed=FLAGS.seed)
meta_val_dataset = get_dataset(
FLAGS.dataset,
'val',
nclasses_eval,
nshot,
num_test=num_test,
aug_90=_aug_90,
num_unlabel=FLAGS.num_unlabel,
shuffle_episode=FLAGS.shuffle_episode,
seed=FLAGS.seed)
meta_test_dataset = get_dataset(
FLAGS.dataset,
"test",
nclasses_eval,
nshot,
num_test=num_test,
aug_90=_aug_90,
num_unlabel=FLAGS.num_unlabel,
shuffle_episode=FLAGS.shuffle_episode,
seed=FLAGS.seed)
# ------------------------------------------------------------------------
# Get embedding model.
def get_emb_model(config, dataset, is_training=True):
log.info('Building embedding model')
with log.verbose_level(2):
x = tf.placeholder(
tf.float32, [None, config.height, config.width, config.num_channel],
name='x')
y = tf.placeholder(tf.int64, [None], name='y')
with tf.variable_scope('EmbeddingModel'):
emb_model = SupervisedModel(
config, x, y, dataset.num_classes, is_training=is_training)
log.info('Training embedding model in fully supervised mode')
return emb_model
# Get supervised training logging function.
def get_logging_fn(sess, log_folder):
exp_logger = get_exp_logger(sess, log_folder)
def _logging_fn(niter, data):
# log.info(
# 'Step {} Train Cost {:.3e} Train Acc {:.3f} Test Cost {:.3e} Test Acc {:.3f}'.
# format(niter, data['train_cost'], data['train_acc'] * 100.0, data[
# 'test_cost'], data['test_acc'] * 100.0))
for key in data:
exp_logger.log(key, niter, data[key])
exp_logger.flush()
return _logging_fn
# ------------------------------------------------------------------------
# Pretrain an embedding model with train dataset (for new version of the paper).
ckpt_train = os.path.join('results', dataset, 'supv_emb_model_train',
'model.ckpt')
log_folder_train = os.path.join('results', dataset, 'supv_emb_model_train')
ckpt_dir = os.path.dirname(ckpt_train)
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
if not os.path.exists(ckpt_train + '.meta'):
with tf.Graph().as_default(), tf.Session() as sess:
config = get_config(dataset, 'basic-pretrain')
emb_model_train = get_emb_model(config, meta_train_dataset)
logging_fn = get_logging_fn(sess, log_folder_train)
supervised_pretrain(
sess,
emb_model_train,
meta_train_dataset,
num_steps=config.max_train_steps,
logging_fn=logging_fn)
# Save model to a checkpoint.
saver = tf.train.Saver()
saver.save(sess, ckpt_train)
else:
log.info('Checkpoint found. Skip pretraining.')
# ------------------------------------------------------------------------
# Run nearest neighbor in the pixel space.
with tf.Graph().as_default(), tf.Session() as sess:
log.info('Nearest neighbor baseline in the pixel space')
run_nn(sess, meta_test_dataset, num_episodes=FLAGS.num_eval_episode)
# ------------------------------------------------------------------------
# Run logistic regression in the pixel space.
with tf.Graph().as_default(), tf.Session() as sess:
log.info('Logistic regression in the pixel space')
run_lr(
sess,
meta_test_dataset,
input_shape,
feature_shape,
num_episodes=FLAGS.num_eval_episode)
# ------------------------------------------------------------------------
# Run nearest neighbor in the embedding space, using train model.
with tf.Graph().as_default(), tf.Session() as sess:
log.info(
'Nearest neighbor baseline in feature space, pretrained features, train'
)
config = get_config(dataset, 'basic-pretrain')
emb_model_train = get_emb_model(
config, meta_train_dataset, is_training=False)
saver = tf.train.Saver()
saver.restore(sess, ckpt_train)
run_nn(
sess,
meta_test_dataset,
emb_model=emb_model_train,
num_episodes=FLAGS.num_eval_episode)
# ------------------------------------------------------------------------
# Run nearest neighbor in the embedding space, using train model, with random features.
with tf.Graph().as_default(), tf.Session() as sess:
log.info('Nearest neighbor baseline in feature space, random features')
config = get_config(dataset, 'basic-pretrain')
emb_model_train = get_emb_model(
config, meta_train_dataset, is_training=False)
sess.run(tf.global_variables_initializer())
run_nn(
sess,
meta_test_dataset,
emb_model=emb_model_train,
num_episodes=FLAGS.num_eval_episode)
# ------------------------------------------------------------------------
# Run logistic regression in the embedding space, using train model.
with tf.Graph().as_default(), tf.Session() as sess:
log.info(
'Logistic regression in the feature space, pretrained features, train')
config = get_config(dataset, 'basic-pretrain')
emb_model_train = get_emb_model(
config, meta_train_dataset, is_training=False)
saver = tf.train.Saver()
saver.restore(sess, ckpt_train)
run_lr(
sess,
meta_test_dataset,
input_shape,
feature_shape,
num_episodes=FLAGS.num_eval_episode,
emb_model=emb_model_train)
# ------------------------------------------------------------------------
# Run logistic regression in the embedding space, using train model, with random features.
with tf.Graph().as_default(), tf.Session() as sess:
log.info('Logistic regression in the feature space, random features')
config = get_config(dataset, 'basic-pretrain')
emb_model_train = get_emb_model(
config, meta_train_dataset, is_training=False)
sess.run(tf.global_variables_initializer())
run_lr(
sess,
meta_test_dataset,
input_shape,
feature_shape,
num_episodes=FLAGS.num_eval_episode,
emb_model=emb_model_train)
if __name__ == '__main__':
flags = tf.flags
FLAGS = tf.flags.FLAGS
flags.DEFINE_bool("aug", True, "Whether perform 90 degree data augmentation")
flags.DEFINE_bool("shuffle_episode", False,
"Whether to shuffle the sequence order")
flags.DEFINE_bool("final_eval", False, "Final eval for tieredImageNet")
flags.DEFINE_integer("nclasses_eval", 5, "Number of classes for testing")
flags.DEFINE_integer("nclasses_train", 5, "Number of classes for training")
flags.DEFINE_integer("nshot", 1, "nshot")
flags.DEFINE_integer("num_eval_episode", 600, "Number of evaluation episodes")
flags.DEFINE_integer("num_test", -1, "Number of test images per episode")
flags.DEFINE_integer("num_unlabel", 5, "Number of unlabeled for training")
flags.DEFINE_integer("seed", 0, "Random seed")
flags.DEFINE_string("dataset", "omniglot", "Dataset name")
main()
| 32.96119
| 94
| 0.61953
|
35db33b7042d4213dcd44f6dca595b054394fe60
| 955
|
py
|
Python
|
tensor2tensor/problems_test.py
|
repoloper/tensor2tensor
|
2fd91d34b8e6d79599c0612e446175174e838b9d
|
[
"Apache-2.0"
] | 61
|
2018-06-23T01:40:58.000Z
|
2021-06-07T09:33:38.000Z
|
tensor2tensor/problems_test.py
|
zhaopufeng/tensor2tensor
|
7bb67a18e1e4a0cddd1d61c65c937f14c1c124e3
|
[
"Apache-2.0"
] | 5
|
2020-02-06T01:01:43.000Z
|
2022-02-09T23:28:40.000Z
|
tensor2tensor/problems_test.py
|
zhaopufeng/tensor2tensor
|
7bb67a18e1e4a0cddd1d61c65c937f14c1c124e3
|
[
"Apache-2.0"
] | 8
|
2018-10-23T13:10:12.000Z
|
2019-07-31T05:53:08.000Z
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""tensor2tensor.problems test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor import problems
import tensorflow as tf
class ProblemsTest(tf.test.TestCase):
def testImport(self):
self.assertIsNotNone(problems)
if __name__ == "__main__":
tf.test.main()
| 29.84375
| 74
| 0.772775
|
ca4875ef9069beca4d8412d51511232016255c85
| 11,762
|
py
|
Python
|
test_app/migrations/0001_initial.py
|
anfema/wagtail_to_ion
|
2c042d220f4421f8a277c4bfcbdd65f89d76f4c7
|
[
"MIT"
] | 1
|
2022-02-22T08:13:16.000Z
|
2022-02-22T08:13:16.000Z
|
test_app/migrations/0001_initial.py
|
anfema/wagtail_to_ion
|
2c042d220f4421f8a277c4bfcbdd65f89d76f4c7
|
[
"MIT"
] | 33
|
2020-11-05T10:30:27.000Z
|
2022-03-11T12:23:25.000Z
|
test_app/migrations/0001_initial.py
|
anfema/wagtail_to_ion
|
2c042d220f4421f8a277c4bfcbdd65f89d76f4c7
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.11 on 2021-01-04 13:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import taggit.managers
import wagtail.core.models
import wagtail.images.models
import wagtail.search.index
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('taggit', '0003_taggeditem_add_unique_index'),
('contenttypes', '0002_remove_content_type_name'),
('wagtailcore', '0045_assign_unlock_grouppagepermission'),
]
operations = [
migrations.CreateModel(
name='IonCollection',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='IonDocument',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='title')),
('file', models.FileField(upload_to='documents', verbose_name='file')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('file_size', models.PositiveIntegerField(editable=False, null=True)),
('file_hash', models.CharField(blank=True, editable=False, max_length=40)),
('checksum', models.CharField(max_length=255)),
('mime_type', models.CharField(max_length=128)),
('include_in_archive', models.BooleanField(default=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('collection', models.ForeignKey(default=wagtail.core.models.get_root_collection_id, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Collection', verbose_name='collection')),
('tags', taggit.managers.TaggableManager(blank=True, help_text=None, through='taggit.TaggedItem', to='taggit.Tag', verbose_name='tags')),
('uploaded_by_user', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='uploaded by user')),
],
options={
'abstract': False,
},
bases=(wagtail.search.index.Indexed, models.Model),
),
migrations.CreateModel(
name='IonImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='title')),
('file', models.ImageField(height_field='height', upload_to=wagtail.images.models.get_upload_to, verbose_name='file', width_field='width')),
('width', models.IntegerField(editable=False, verbose_name='width')),
('height', models.IntegerField(editable=False, verbose_name='height')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='created at')),
('focal_point_x', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_y', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_width', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_height', models.PositiveIntegerField(blank=True, null=True)),
('file_size', models.PositiveIntegerField(editable=False, null=True)),
('file_hash', models.CharField(blank=True, editable=False, max_length=40)),
('checksum', models.CharField(max_length=255)),
('mime_type', models.CharField(max_length=128)),
('include_in_archive', models.BooleanField(default=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('collection', models.ForeignKey(default=wagtail.core.models.get_root_collection_id, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Collection', verbose_name='collection')),
('tags', taggit.managers.TaggableManager(blank=True, help_text=None, through='taggit.TaggedItem', to='taggit.Tag', verbose_name='tags')),
('uploaded_by_user', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='uploaded by user')),
],
options={
'abstract': False,
},
bases=(wagtail.search.index.Indexed, models.Model),
),
migrations.CreateModel(
name='IonLanguage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('is_default', models.BooleanField(default=False)),
('is_rtl', models.BooleanField(default=False)),
('code', models.CharField(max_length=32)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='IonMedia',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='title')),
('file', models.FileField(upload_to='media', verbose_name='file')),
('type', models.CharField(choices=[('audio', 'Audio file'), ('video', 'Video file')], max_length=255)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('checksum', models.CharField(max_length=255)),
('mime_type', models.CharField(max_length=128)),
('thumbnail', models.ImageField(blank=True, null=True, upload_to='media_thumbnails', verbose_name='thumbnail')),
('duration', models.PositiveIntegerField(blank=True, help_text='Duration in seconds', null=True, verbose_name='duration')),
('width', models.PositiveIntegerField(blank=True, null=True, verbose_name='width')),
('height', models.PositiveIntegerField(blank=True, null=True, verbose_name='height')),
('thumbnail_checksum', models.CharField(max_length=255)),
('thumbnail_mime_type', models.CharField(max_length=128)),
('include_in_archive', models.BooleanField(default=False, help_text="If enabled, the file will be included in the ION archive tar file and can increase the archive's size significantly.")),
('updated_at', models.DateTimeField(auto_now=True)),
('collection', models.ForeignKey(default=wagtail.core.models.get_root_collection_id, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Collection', verbose_name='collection')),
('tags', taggit.managers.TaggableManager(blank=True, help_text=None, through='taggit.TaggedItem', to='taggit.Tag', verbose_name='tags')),
('uploaded_by_user', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='uploaded by user')),
],
options={
'abstract': False,
},
bases=(wagtail.search.index.Indexed, models.Model),
),
migrations.CreateModel(
name='TestPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('document_field', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='test_app.IonDocument')),
('image_field', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='test_app.IonImage')),
('media_field', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='test_app.IonMedia')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='ContentTypeDescription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField()),
('example_image', models.ImageField(blank=True, upload_to='')),
('content_type', models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, to='contenttypes.ContentType')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='IonRendition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('filter_spec', models.CharField(db_index=True, max_length=255)),
('file', models.ImageField(height_field='height', upload_to=wagtail.images.models.get_rendition_upload_to, width_field='width')),
('width', models.IntegerField(editable=False)),
('height', models.IntegerField(editable=False)),
('focal_point_key', models.CharField(blank=True, default='', editable=False, max_length=16)),
('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='renditions', to='test_app.IonImage')),
],
options={
'abstract': False,
'unique_together': {('image', 'filter_spec', 'focal_point_key')},
},
),
migrations.CreateModel(
name='IonMediaRendition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(choices=[('720p', '720p'), ('1080p', '1080p')], max_length=128)),
('file', models.FileField(blank=True, null=True, upload_to='media_renditions', verbose_name='file')),
('thumbnail', models.FileField(blank=True, null=True, upload_to='media_thumbnails', verbose_name='thumbnail')),
('width', models.PositiveIntegerField(blank=True, null=True, verbose_name='width')),
('height', models.PositiveIntegerField(blank=True, null=True, verbose_name='height')),
('transcode_finished', models.BooleanField(default=False)),
('transcode_errors', models.TextField(blank=True, null=True)),
('checksum', models.CharField(default='null:', max_length=255)),
('thumbnail_checksum', models.CharField(default='null:', max_length=255)),
('media_item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='renditions', to='test_app.IonMedia')),
],
options={
'abstract': False,
'unique_together': {('name', 'media_item')},
},
),
]
| 63.236559
| 221
| 0.619793
|
c64a75e7e921238e60fc6192289592dae1110569
| 1,748
|
py
|
Python
|
src/pages/models.py
|
Nnonexistent/chemphys
|
d2f34364d006a494bb965bb83d1967d7dd56f9ba
|
[
"MIT"
] | null | null | null |
src/pages/models.py
|
Nnonexistent/chemphys
|
d2f34364d006a494bb965bb83d1967d7dd56f9ba
|
[
"MIT"
] | 19
|
2015-03-08T08:46:09.000Z
|
2019-10-01T05:16:43.000Z
|
src/pages/models.py
|
Nnonexistent/chemphys
|
d2f34364d006a494bb965bb83d1967d7dd56f9ba
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from utils.localized import BaseLocalizedObject, BaseLocalizedContent
class Page(BaseLocalizedObject):
order = models.PositiveIntegerField(verbose_name=_(u'Order'), blank=True, default=0)
url = models.SlugField(db_index=True, verbose_name=_(u'URL part'), unique=True)
in_menu = models.BooleanField(default=False, verbose_name=_(u'Visible in menu'))
class Meta:
ordering = ['order']
verbose_name = _(u'Page')
verbose_name_plural = _(u'Pages')
def __unicode__(self):
return self.title or self.url
def save(self, *args, **kwargs):
if not self.order:
qs = self.__class__.objects.all()
if self.pk:
qs = qs.exclude(pk=self.pk)
self.order = min(2 ** 31 - 1, 1 + qs.count())
super(Page, self).save(*args, **kwargs)
@models.permalink
def get_absolute_url(self):
return 'pages_page', [self.url]
@property
def title(self):
return self.get_localized('title')
@property
def content(self):
return self.get_localized('content')
class LocalizedPageContent(BaseLocalizedContent):
page = models.ForeignKey(Page, verbose_name=Page._meta.verbose_name)
title = models.CharField(max_length=100, verbose_name=_(u'Title'))
content = models.TextField(default='', blank=True, verbose_name=_(u'Content'))
class Meta:
ordering = ['lang']
verbose_name = _(u'Localized page content')
verbose_name_plural = _(u'Localized page contents')
unique_together = [('page', 'lang')]
def __unicode__(self):
return self.title
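# A minimal usage sketch (illustrative values; the language code must be one
# supported by BaseLocalizedContent, `title` typically resolves through
# get_localized() for the active language, and the rendered URL depends on the
# project URLconf entry named 'pages_page'):
#
#   page = Page.objects.create(url='about', in_menu=True)  # order auto-assigned in save()
#   LocalizedPageContent.objects.create(page=page, lang='en', title='About us')
#   page.title                # -> 'About us'
#   page.get_absolute_url()   # -> reverse('pages_page', args=['about'])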
| 31.214286
| 88
| 0.661327
|
30f4def960558c74b8676f8da6d158bdcd0cfb5d
| 2,749
|
py
|
Python
|
observations/r/channing.py
|
hajime9652/observations
|
2c8b1ac31025938cb17762e540f2f592e302d5de
|
[
"Apache-2.0"
] | 199
|
2017-07-24T01:34:27.000Z
|
2022-01-29T00:50:55.000Z
|
observations/r/channing.py
|
hajime9652/observations
|
2c8b1ac31025938cb17762e540f2f592e302d5de
|
[
"Apache-2.0"
] | 46
|
2017-09-05T19:27:20.000Z
|
2019-01-07T09:47:26.000Z
|
observations/r/channing.py
|
hajime9652/observations
|
2c8b1ac31025938cb17762e540f2f592e302d5de
|
[
"Apache-2.0"
] | 45
|
2017-07-26T00:10:44.000Z
|
2022-03-16T20:44:59.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def channing(path):
"""Channing House Data
The `channing` data frame has 462 rows and 5 columns.
Channing House is a retirement centre in Palo Alto, California. These
data were collected between the opening of the house in 1964 until July
1, 1975. In that time 97 men and 365 women passed through the centre.
For each of these, their age on entry and also on leaving or death was
recorded. A large number of the observations were censored mainly due to
the resident being alive on July 1, 1975 when the data was collected.
Over the time of the study 130 women and 46 men died at Channing House.
Differences between the survival of the sexes, taking age into account,
was one of the primary concerns of this study.
This data frame contains the following columns:
`sex`
A factor for the sex of each resident (`"Male"` or `"Female"`).
`entry`
The residents age (in months) on entry to the centre
`exit`
The age (in months) of the resident on death, leaving the centre or
July 1, 1975 whichever event occurred first.
`time`
The length of time (in months) that the resident spent at Channing
House. (`time=exit-entry`)
`cens`
The indicator of right censoring. 1 indicates that the resident died
at Channing House, 0 indicates that they left the house prior to
July 1, 1975 or that they were still alive and living in the centre
at that date.
The data were obtained from
Hyde, J. (1980) Testing survival with incomplete observations.
*Biostatistics Casebook*. R.G. Miller, B. Efron, B.W. Brown and L.E.
Moses (editors), 31–46. John Wiley.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `channing.csv`.
Returns:
    Tuple of np.ndarray `x_train` with 462 rows and 5 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'channing.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/boot/channing.csv'
maybe_download_and_extract(path, url,
save_file_name='channing.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
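# A minimal usage sketch (the path is an illustrative assumption; the CSV is
# downloaded into that directory on first use):
#
#   x_train, metadata = channing('~/data')
#   print(x_train.shape)        # (462, <number of columns>)
#   print(metadata['columns'])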
| 33.120482
| 74
| 0.702437
|
9a3696ef79fbf67747fde98a9be47a792125ae01
| 2,303
|
py
|
Python
|
CarMazeHw1/solver.py
|
linhthi/int3401
|
7a785067da21d5e70421b26db0786375bf1254bc
|
[
"MIT"
] | null | null | null |
CarMazeHw1/solver.py
|
linhthi/int3401
|
7a785067da21d5e70421b26db0786375bf1254bc
|
[
"MIT"
] | null | null | null |
CarMazeHw1/solver.py
|
linhthi/int3401
|
7a785067da21d5e70421b26db0786375bf1254bc
|
[
"MIT"
] | 3
|
2022-03-07T04:22:16.000Z
|
2022-03-24T11:37:46.000Z
|
from environment import CarMazeEnv
from queue import Queue
from functools import partial
class Solver:
def __init__(self, env: CarMazeEnv) -> None:
self.env = env
self.found = None
def act_and_add_state(self, act, prev_state, cur_cost, is_ucf=False):
x, y, d, v, step_cost = act(*prev_state)
if cur_cost == -1:
prev_state = None # Init state
if x < 0 or y < 0: # Invalid action
return None
cur_cost += 1
s = (x, y, d, v)
# Fast stop
if self.env.goal == (x, y) and v == 0:
self.path[s] = prev_state
self.found = cur_cost, s
return None
if s in self.seen:
return None
# Sure min path to s
p = self.path.get(s, None)
if p is not None:
return None
self.path[s] = prev_state
return x, y, d, v, cur_cost
def explore(self, s, cost, is_ucf):
if s in self.seen:
return -1, None
self.seen.add(s)
if self.found is not None: # Fast stop
return self.found
step_fn = self.step_bfs
# Action
v = s[-1]
if v == 0:
step_fn(self.env.turn_left, s, cost)
step_fn(self.env.turn_right, s, cost)
step_fn(self.env.speed_up, s, cost)
else:
step_fn(self.env.no_action, s, cost)
step_fn(self.env.slow_down, s, cost)
if v < self.env.vmax:
step_fn(self.env.speed_up, s, cost)
return -1, None
def step_bfs(self, act, s, total_cost):
ret = self.act_and_add_state(act, s, total_cost, is_ucf=False)
if ret is None:
return # Already explored or invalid action
x, y, d, v, cur_cost = ret
new_s = (x, y, d, v)
self.Q.put(partial(self.explore, new_s, cur_cost, False))
def solve_bfs(self):
self.Q = Queue()
self.seen = set()
self.path = dict()
# self.mins = dict()
x0, y0 = self.env.start
self.step_bfs(self.env.no_action, (x0, y0, 0, 0), -1)
ans = -1
last_state = None
while ans == -1 and not self.Q.empty():
ans, last_state = self.Q.get()()
return ans, last_state
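# A minimal usage sketch (constructing CarMazeEnv is environment-specific and
# not shown here; `env` stands for a configured instance):
#
#   solver = Solver(env)
#   cost, goal_state = solver.solve_bfs()   # cost == -1 means no path was found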
| 29.151899
| 73
| 0.52627
|
47970ea8f304b3ed51d13b598a6c44e91b0e068a
| 6,301
|
py
|
Python
|
utils/dataloader/dataloader_multiview_blender.py
|
DrZedd42/DIB-R
|
611f69dce00e75f65b37722120010e2da46fc1bf
|
[
"MIT"
] | 607
|
2019-08-08T02:41:48.000Z
|
2022-03-29T07:53:11.000Z
|
utils/dataloader/dataloader_multiview_blender.py
|
DrZedd42/DIB-R
|
611f69dce00e75f65b37722120010e2da46fc1bf
|
[
"MIT"
] | 23
|
2019-08-17T15:52:01.000Z
|
2021-11-10T03:32:41.000Z
|
utils/dataloader/dataloader_multiview_blender.py
|
DrZedd42/DIB-R
|
611f69dce00e75f65b37722120010e2da46fc1bf
|
[
"MIT"
] | 108
|
2019-11-09T22:30:27.000Z
|
2022-03-23T02:08:27.000Z
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import print_function
from __future__ import division
import os
import numpy as np
import cv2
from torch.utils.data import Dataset, DataLoader
#######################################################
class DataProvider(Dataset):
def __init__(self, file_list,
imsz=-1,
viewnum=1,
mode='train',
datadebug=False,
classes=None,
data_folder=None):
self.mode = mode
self.datadebug = datadebug
self.imsz = imsz
self.viewum = viewnum
assert self.viewum >= 1
self.folder = data_folder
self.camfolder = data_folder
print(self.folder)
self.pkl_list = []
with open(file_list, 'r') as f:
while True:
line = f.readline().strip()
if not line:
break
self.pkl_list.append(line)
        if classes is not None:
self.filter_class(classes)
self.imnum = len(self.pkl_list)
print('imnum {}'.format(self.imnum))
self.imnum = len(self.pkl_list)
print(self.pkl_list[0])
print(self.pkl_list[-1])
print('imnum {}'.format(self.imnum))
def __len__(self):
return self.imnum
def __getitem__(self, idx):
return self.prepare_instance(idx)
def filter_class(self, classes):
new_pkl_list = []
for pkl in self.pkl_list:
for cls in classes:
if cls in pkl:
new_pkl_list.append(pkl)
break
self.pkl_list = new_pkl_list
def load_im_cam(self, pkl_path, catagory, md5name, num):
imname = '%s/%s/%s/%d.png' % (self.folder, catagory, md5name, num)
img = cv2.imread(imname, cv2.IMREAD_UNCHANGED)
img = cv2.resize(img, (self.imsz, self.imsz))
im_hxwx4 = img.astype('float32') / 255.0
rotntxname = '%s/%s/%s/%d.npy' % (self.camfolder, catagory, md5name, num)
rotmtx_4x4 = np.load(rotntxname).astype(np.float32)
rotmx = rotmtx_4x4[:3, :3]
transmtx = rotmtx_4x4[:3, 3:4]
transmtx = -np.matmul(rotmx.T, transmtx)
renderparam = (rotmx, transmtx)
return im_hxwx4, renderparam
def prepare_instance(self, idx):
re = {}
re['valid'] = True
pkl_path = self.pkl_list[idx]
_, fname = os.path.split(pkl_path)
fname, _ = os.path.splitext(fname)
catagory, md5name, numname = fname.split('_')
re['cate'] = catagory
re['md5'] = md5name
try:
if self.viewum == 1:
num = int(numname)
im_hxwx4, renderparam = self.load_im_cam(pkl_path, catagory, md5name, num)
i = 0
re['view%d' % i] = {}
re['view%d' % i]['camrot'] = renderparam[0]
re['view%d' % i]['campos'] = renderparam[1]
re['view%d' % i]['im'] = im_hxwx4
re['view%d' % i]['ori_im'] = np.copy(im_hxwx4)
re['view%d' % i]['num'] = num
else:
for i in range(self.viewum):
# 24 views in total
num = np.random.randint(24)
im_hxwx4, renderparam = self.load_im_cam(pkl_path, catagory, md5name, num)
re['view%d' % i] = {}
re['view%d' % i]['camrot'] = renderparam[0]
re['view%d' % i]['campos'] = renderparam[1][:, 0]
re['view%d' % i]['im'] = im_hxwx4
re['view%d' % i]['ori_im'] = np.copy(im_hxwx4)
re['view%d' % i]['num'] = num
        except Exception:
re['valid'] = False
return re
return re
def collate_fn(batch_list):
collated = {}
batch_list = [data for data in batch_list if data['valid']]
if len(batch_list) == 0:
return None
keys = ['cate', 'md5']
for key in keys:
val = [item[key] for item in batch_list]
collated[key] = val
viewnum = len(batch_list[0].keys()) - 3
keys = ['im', 'camrot', 'campos', 'num']
for i in range(viewnum):
collated['view%d' % i] = {}
for key in keys:
val = [item['view%d' % i][key] for item in batch_list]
val = np.stack(val, axis=0)
collated['view%d' % i][key] = val
return collated
def get_data_loaders(filelist, imsz, viewnum, mode, bs, numworkers, classes=None, data_folder=None):
print('Building dataloaders')
dataset_train = DataProvider(filelist, imsz, viewnum,
mode=mode, datadebug=False, classes=classes, data_folder=data_folder)
if mode == 'test':
shuffle = False
else:
shuffle = True
train_loader = DataLoader(dataset_train, batch_size=bs,
shuffle=shuffle, num_workers=numworkers, collate_fn=collate_fn)
print('train num {}'.format(len(dataset_train)))
    print('train iter {}'.format(len(train_loader)))
return train_loader
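# A minimal usage sketch; the file list, image size, class filter and data
# folder are illustrative assumptions.
#
#   loader = get_data_loaders('train_list.txt', 64, 2, 'train', bs=8,
#                             numworkers=4, classes=['car'],
#                             data_folder='/path/to/renders')
#   for batch in loader:
#       if batch is None:
#           continue  # every item in this batch failed to load
#       ims = batch['view0']['im']  # (bs, imsz, imsz, 4) RGBA images in [0, 1]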
| 35.005556
| 102
| 0.565466
|
5a59255e6e651fbf080f0a9f574308f0bf255027
| 10,810
|
py
|
Python
|
cfgov/ask_cfpb/migrations/0045_remove_category_sidebar.py
|
adebisi-aden/consumerfinance.gov
|
8c0f5afac341823c59f73b0c6bd60592e0f5eaca
|
[
"CC0-1.0"
] | 37
|
2020-08-18T19:52:39.000Z
|
2022-03-23T08:08:41.000Z
|
cfgov/ask_cfpb/migrations/0045_remove_category_sidebar.py
|
adebisi-aden/consumerfinance.gov
|
8c0f5afac341823c59f73b0c6bd60592e0f5eaca
|
[
"CC0-1.0"
] | 338
|
2020-08-14T20:46:36.000Z
|
2022-03-31T20:49:32.000Z
|
cfgov/ask_cfpb/migrations/0045_remove_category_sidebar.py
|
adebisi-aden/consumerfinance.gov
|
8c0f5afac341823c59f73b0c6bd60592e0f5eaca
|
[
"CC0-1.0"
] | 14
|
2020-10-21T15:27:03.000Z
|
2022-03-17T03:16:36.000Z
|
# Generated by Django 2.2.24 on 2021-08-03 13:53
from django.db import migrations
import v1.atomic_elements.molecules
import v1.blocks
import v1.models.snippets
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.snippets.blocks
class Migration(migrations.Migration):
dependencies = [
('ask_cfpb', '0044_add_aria_label_to_hyperlinks'),
]
operations = [
migrations.AlterField(
model_name='answerpage',
name='sidebar',
field=wagtail.core.fields.StreamField([('call_to_action', wagtail.core.blocks.StructBlock([('slug_text', wagtail.core.blocks.CharBlock(required=False)), ('paragraph_text', wagtail.core.blocks.RichTextBlock(required=False)), ('button', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(required=False)), ('aria_label', wagtail.core.blocks.CharBlock(help_text='Add an ARIA label if the link text does not describe the destination of the link (e.g. has ambiguous text like "Learn more" that is not descriptive on its own).', required=False)), ('url', wagtail.core.blocks.CharBlock(default='/', required=False)), ('size', wagtail.core.blocks.ChoiceBlock(choices=[('regular', 'Regular'), ('large', 'Large Primary')]))]))])), ('related_links', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(required=False)), ('paragraph', wagtail.core.blocks.RichTextBlock(required=False)), ('links', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(required=False)), ('aria_label', wagtail.core.blocks.CharBlock(help_text='Add an ARIA label if the link text does not describe the destination of the link (e.g. has ambiguous text like "Learn more" that is not descriptive on its own).', required=False)), ('url', wagtail.core.blocks.CharBlock(default='/', required=False))])))])), ('related_metadata', wagtail.core.blocks.StructBlock([('slug', wagtail.core.blocks.CharBlock(max_length=100)), ('content', wagtail.core.blocks.StreamBlock([('text', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(max_length=100)), ('blob', wagtail.core.blocks.RichTextBlock())], icon='pilcrow')), ('list', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(max_length=100)), ('links', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(required=False)), ('aria_label', wagtail.core.blocks.CharBlock(help_text='Add an ARIA label if the link text does not describe the destination of the link (e.g. has ambiguous text like "Learn more" that is not descriptive on its own).', required=False)), ('url', wagtail.core.blocks.CharBlock(default='/', required=False))])))], icon='list-ul')), ('date', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(max_length=100)), ('date', wagtail.core.blocks.DateBlock())], icon='date')), ('topics', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(default='Topics', max_length=100)), ('show_topics', wagtail.core.blocks.BooleanBlock(default=True, required=False))], icon='tag'))])), ('is_half_width', wagtail.core.blocks.BooleanBlock(default=False, required=False))])), ('email_signup', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(default='Stay informed', required=False)), ('default_heading', wagtail.core.blocks.BooleanBlock(default=True, help_text='If selected, heading will be styled as an H5 with green top rule. Deselect to style header as H3.', label='Default heading style', required=False)), ('text', wagtail.core.blocks.CharBlock(help_text='Write a sentence or two about what kinds of emails the user is signing up for, how frequently they will be sent, etc.', required=False)), ('gd_code', wagtail.core.blocks.CharBlock(help_text='Code for the topic (i.e., mailing list) you want people who submit this form to subscribe to. 
Format: USCFPB_###', label='GovDelivery code', required=False)), ('disclaimer_page', wagtail.core.blocks.PageChooserBlock(help_text='Choose the page that the "See Privacy Act statement" link should go to. If in doubt, use "Generic Email Sign-Up Privacy Act Statement".', label='Privacy Act statement', required=False))])), ('sidebar_contact', wagtail.core.blocks.StructBlock([('contact', wagtail.snippets.blocks.SnippetChooserBlock('v1.Contact')), ('has_top_rule_line', wagtail.core.blocks.BooleanBlock(default=False, help_text='Add a horizontal rule line to top of contact block.', required=False))])), ('rss_feed', v1.atomic_elements.molecules.RSSFeed()), ('social_media', wagtail.core.blocks.StructBlock([('is_share_view', wagtail.core.blocks.BooleanBlock(default=True, help_text='If unchecked, social media icons will link users to official CFPB accounts. Do not fill in any further fields.', label='Desired action: share this page', required=False)), ('blurb', wagtail.core.blocks.CharBlock(default="Look what I found on the CFPB's site!", help_text='Sets the tweet text, email subject line, and LinkedIn post text.', required=False)), ('twitter_text', wagtail.core.blocks.CharBlock(help_text='(Optional) Custom text for Twitter shares. If blank, will default to value of blurb field above.', max_length=100, required=False)), ('twitter_related', wagtail.core.blocks.CharBlock(help_text='(Optional) A comma-separated list of accounts related to the content of the shared URL. Do not enter the @ symbol. If blank, it will default to just "cfpb".', required=False)), ('twitter_hashtags', wagtail.core.blocks.CharBlock(help_text='(Optional) A comma-separated list of hashtags to be appended to default tweet text.', required=False)), ('twitter_lang', wagtail.core.blocks.CharBlock(help_text='(Optional) Loads text components in the specified language, if other than English. E.g., use "es" for Spanish. See https://dev.twitter.com/web/overview/languages for a list of supported language codes.', required=False)), ('email_title', wagtail.core.blocks.CharBlock(help_text='(Optional) Custom subject for email shares. If blank, will default to value of blurb field above.', required=False)), ('email_text', wagtail.core.blocks.CharBlock(help_text='(Optional) Custom text for email shares. If blank, will default to "Check out this page from the CFPB".', required=False)), ('email_signature', wagtail.core.blocks.CharBlock(help_text='(Optional) Adds a custom signature line to email shares. ', required=False)), ('linkedin_title', wagtail.core.blocks.CharBlock(help_text='(Optional) Custom title for LinkedIn shares. If blank, will default to value of blurb field above.', required=False)), ('linkedin_text', wagtail.core.blocks.CharBlock(help_text='(Optional) Custom text for LinkedIn shares.', required=False))])), ('reusable_text', v1.blocks.ReusableTextChooserBlock(v1.models.snippets.ReusableText))], blank=True),
),
migrations.AlterField(
model_name='articlepage',
name='sidebar',
field=wagtail.core.fields.StreamField([('call_to_action', wagtail.core.blocks.StructBlock([('slug_text', wagtail.core.blocks.CharBlock(required=False)), ('paragraph_text', wagtail.core.blocks.RichTextBlock(required=False)), ('button', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(required=False)), ('aria_label', wagtail.core.blocks.CharBlock(help_text='Add an ARIA label if the link text does not describe the destination of the link (e.g. has ambiguous text like "Learn more" that is not descriptive on its own).', required=False)), ('url', wagtail.core.blocks.CharBlock(default='/', required=False)), ('size', wagtail.core.blocks.ChoiceBlock(choices=[('regular', 'Regular'), ('large', 'Large Primary')]))]))])), ('related_links', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(required=False)), ('paragraph', wagtail.core.blocks.RichTextBlock(required=False)), ('links', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(required=False)), ('aria_label', wagtail.core.blocks.CharBlock(help_text='Add an ARIA label if the link text does not describe the destination of the link (e.g. has ambiguous text like "Learn more" that is not descriptive on its own).', required=False)), ('url', wagtail.core.blocks.CharBlock(default='/', required=False))])))])), ('related_metadata', wagtail.core.blocks.StructBlock([('slug', wagtail.core.blocks.CharBlock(max_length=100)), ('content', wagtail.core.blocks.StreamBlock([('text', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(max_length=100)), ('blob', wagtail.core.blocks.RichTextBlock())], icon='pilcrow')), ('list', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(max_length=100)), ('links', wagtail.core.blocks.ListBlock(wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.CharBlock(required=False)), ('aria_label', wagtail.core.blocks.CharBlock(help_text='Add an ARIA label if the link text does not describe the destination of the link (e.g. has ambiguous text like "Learn more" that is not descriptive on its own).', required=False)), ('url', wagtail.core.blocks.CharBlock(default='/', required=False))])))], icon='list-ul')), ('date', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(max_length=100)), ('date', wagtail.core.blocks.DateBlock())], icon='date')), ('topics', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(default='Topics', max_length=100)), ('show_topics', wagtail.core.blocks.BooleanBlock(default=True, required=False))], icon='tag'))])), ('is_half_width', wagtail.core.blocks.BooleanBlock(default=False, required=False))])), ('email_signup', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock(default='Stay informed', required=False)), ('default_heading', wagtail.core.blocks.BooleanBlock(default=True, help_text='If selected, heading will be styled as an H5 with green top rule. Deselect to style header as H3.', label='Default heading style', required=False)), ('text', wagtail.core.blocks.CharBlock(help_text='Write a sentence or two about what kinds of emails the user is signing up for, how frequently they will be sent, etc.', required=False)), ('gd_code', wagtail.core.blocks.CharBlock(help_text='Code for the topic (i.e., mailing list) you want people who submit this form to subscribe to. 
Format: USCFPB_###', label='GovDelivery code', required=False)), ('disclaimer_page', wagtail.core.blocks.PageChooserBlock(help_text='Choose the page that the "See Privacy Act statement" link should go to. If in doubt, use "Generic Email Sign-Up Privacy Act Statement".', label='Privacy Act statement', required=False))])), ('reusable_text', v1.blocks.ReusableTextChooserBlock(v1.models.snippets.ReusableText))], blank=True),
),
]
| 360.333333
| 6,343
| 0.754117
|
51b693c303c20c715f185b2ec214b5500eb98d44
| 2,288
|
py
|
Python
|
Priv-esc/proc_privileges_monitor.py
|
crake7/Network-Pen-Test-Tools
|
80ddce5a9429611fbf5c313498979c624188d6c5
|
[
"MIT"
] | 6
|
2021-04-17T12:52:46.000Z
|
2021-07-09T23:41:26.000Z
|
Priv-esc/proc_privileges_monitor.py
|
crake7/Network-Pen-Test-Tools
|
80ddce5a9429611fbf5c313498979c624188d6c5
|
[
"MIT"
] | null | null | null |
Priv-esc/proc_privileges_monitor.py
|
crake7/Network-Pen-Test-Tools
|
80ddce5a9429611fbf5c313498979c624188d6c5
|
[
"MIT"
] | null | null | null |
import os
import sys
import win32api
import win32con
import win32security
import wmi
def get_process_privileges(pid):
''' Automatically retrieves the enabled privileges on the processes we monitor'''
try:
hproc = win32api.OpenProcess(
win32con.PROCESS_QUERY_INFORMATION, False, pid # PID used to obtain a process handle
)
htok = win32security.OpenProcessToken(hproc, win32con.TOKEN_QUERY)
privs = win32security.GetTokenInformation(
htok, win32security.TokenPrivileges
)
privileges = ''
# Privilege / Enabled(Y/N)
for priv_id, flags in privs:
            # Keep privileges whose flags are exactly ENABLED | ENABLED_BY_DEFAULT
if flags == (win32security.SE_PRIVILEGE_ENABLED | win32security.SE_PRIVILEGE_ENABLED_BY_DEFAULT):
privileges += f'{win32security.LookupPrivilegeName(None, priv_id)} |'
except Exception:
privileges = 'N/A'
return privileges
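# Example of the string built above (illustrative; the privileges present vary
# per process):
#   'SeChangeNotifyPrivilege |SeImpersonatePrivilege |'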
def log_to_file(message):
with open('process_monitor_log.csv', 'a') as fd:
fd.write(f'{message}\r\n')
def monitor():
head = 'CommandLine, Time, Executable, Parent PID, PID, User, SID, Privileges'
log_to_file(head)
c = wmi.WMI()
process_watcher = c.Win32_Process.watch_for('creation') # Returns a 'New Process' event
while True:
try:
new_process = process_watcher() # The event is a Win32_Process WMI class
cmdline = new_process.CommandLine
create_date = new_process.CreationDate
executable = new_process.ExecutablePath
parent_pid = new_process.ParentProcessId
pid = new_process.ProcessId
proc_owner = new_process.GetOwner()
owner_sid = new_process.GetOwnerSid()
privileges = get_process_privileges(pid)
process_log_message = (
f'{cmdline} , {create_date} , {executable} ,'
f'{parent_pid} , {pid} , {proc_owner} , {owner_sid} , {privileges}'
)
print(process_log_message)
print()
log_to_file(process_log_message)
except Exception:
pass
if __name__ == '__main__':
monitor()
| 36.903226
| 110
| 0.604895
|
762c90e6e49d80bfef93bd55b5d8e48ea43f5bde
| 2,319
|
py
|
Python
|
quantum/tests/unit/test_common_utils.py
|
aristanetworks/arista-ovs-quantum
|
8e7fe17cafa70c3346e2a5d70da2d6e2637c338e
|
[
"Apache-2.0"
] | 1
|
2019-04-11T10:27:47.000Z
|
2019-04-11T10:27:47.000Z
|
quantum/tests/unit/test_common_utils.py
|
cuiwow/quantum
|
ce11b62046a0501e9fcd8442524d3c151d315dfb
|
[
"Apache-2.0"
] | null | null | null |
quantum/tests/unit/test_common_utils.py
|
cuiwow/quantum
|
ce11b62046a0501e9fcd8442524d3c151d315dfb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest2 as unittest
from quantum.common import utils
class TestParseMappings(unittest.TestCase):
def parse(self, mapping_list, unique_values=True):
return utils.parse_mappings(mapping_list, unique_values)
def test_parse_mappings_fails_for_missing_separator(self):
with self.assertRaises(ValueError):
self.parse(['key'])
def test_parse_mappings_fails_for_missing_key(self):
with self.assertRaises(ValueError):
self.parse([':val'])
def test_parse_mappings_fails_for_missing_value(self):
with self.assertRaises(ValueError):
self.parse(['key:'])
def test_parse_mappings_fails_for_extra_separator(self):
with self.assertRaises(ValueError):
self.parse(['key:val:junk'])
def test_parse_mappings_fails_for_duplicate_key(self):
with self.assertRaises(ValueError):
self.parse(['key:val1', 'key:val2'])
def test_parse_mappings_fails_for_duplicate_value(self):
with self.assertRaises(ValueError):
self.parse(['key1:val', 'key2:val'])
def test_parse_mappings_succeeds_for_one_mapping(self):
self.assertEqual(self.parse(['key:val']), {'key': 'val'})
def test_parse_mappings_succeeds_for_n_mappings(self):
self.assertEqual(self.parse(['key1:val1', 'key2:val2']),
{'key1': 'val1', 'key2': 'val2'})
def test_parse_mappings_succeeds_for_duplicate_value(self):
self.assertEqual(self.parse(['key1:val', 'key2:val'], False),
{'key1': 'val', 'key2': 'val'})
def test_parse_mappings_succeeds_for_no_mappings(self):
self.assertEqual(self.parse(['']), {})
| 38.016393
| 78
| 0.686072
|
72ca725e1a5e53ab2f8297d7d87bd40397db5121
| 1,690
|
py
|
Python
|
aries_cloudagency/protocols/trustping/v1_0/messages/tests/test_trust_ping_reponse.py
|
osancus/aries-cloudagency-python
|
d0ce77a3a11927715d1cb3533313d17a27b1cf7a
|
[
"Apache-2.0"
] | null | null | null |
aries_cloudagency/protocols/trustping/v1_0/messages/tests/test_trust_ping_reponse.py
|
osancus/aries-cloudagency-python
|
d0ce77a3a11927715d1cb3533313d17a27b1cf7a
|
[
"Apache-2.0"
] | null | null | null |
aries_cloudagency/protocols/trustping/v1_0/messages/tests/test_trust_ping_reponse.py
|
osancus/aries-cloudagency-python
|
d0ce77a3a11927715d1cb3533313d17a27b1cf7a
|
[
"Apache-2.0"
] | 3
|
2020-07-03T21:35:34.000Z
|
2020-09-09T13:26:20.000Z
|
from unittest import mock, TestCase
from asynctest import TestCase as AsyncTestCase
from ..ping_response import PingResponse
from ...message_types import PING_RESPONSE
class TestPingResponse(TestCase):
def setUp(self):
self.test_comment = "hello"
self.test_ping = PingResponse(comment=self.test_comment)
def test_init(self):
"""Test initialization."""
assert self.test_ping.comment == self.test_comment
def test_type(self):
"""Test type."""
assert self.test_ping._type == PING_RESPONSE
@mock.patch(
"aries_cloudagency.protocols.trustping.v1_0.messages.ping_response.PingResponseSchema.load"
)
def test_deserialize(self, mock_ping_schema_load):
"""
Test deserialization.
"""
obj = {"obj": "obj"}
msg = PingResponse.deserialize(obj)
mock_ping_schema_load.assert_called_once_with(obj)
assert msg is mock_ping_schema_load.return_value
@mock.patch(
"aries_cloudagency.protocols.trustping.v1_0.messages.ping_response.PingResponseSchema.dump"
)
def test_serialize(self, mock_ping_schema_load):
"""
Test serialization.
"""
msg_dict = self.test_ping.serialize()
mock_ping_schema_load.assert_called_once_with(self.test_ping)
assert msg_dict is mock_ping_schema_load.return_value
class TestPingResponseSchema(AsyncTestCase):
"""Test ping response schema."""
async def test_make_model(self):
ping = PingResponse(comment="hello")
data = ping.serialize()
model_instance = PingResponse.deserialize(data)
assert type(model_instance) is type(ping)
| 29.137931
| 99
| 0.691124
|
e00d55b9dca6d0c43a9fc85fd5977d84d64403a1
| 4,396
|
py
|
Python
|
huaweicloud-sdk-swr/huaweicloudsdkswr/v2/model/show_trigger_request.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 64
|
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-swr/huaweicloudsdkswr/v2/model/show_trigger_request.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 11
|
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-swr/huaweicloudsdkswr/v2/model/show_trigger_request.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 24
|
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowTriggerRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'namespace': 'str',
'repository': 'str',
'trigger': 'str'
}
attribute_map = {
'namespace': 'namespace',
'repository': 'repository',
'trigger': 'trigger'
}
def __init__(self, namespace=None, repository=None, trigger=None):
"""ShowTriggerRequest - a model defined in huaweicloud sdk"""
self._namespace = None
self._repository = None
self._trigger = None
self.discriminator = None
self.namespace = namespace
self.repository = repository
self.trigger = trigger
@property
def namespace(self):
"""Gets the namespace of this ShowTriggerRequest.
        Organization name. Starts with a lowercase letter, followed by lowercase letters, digits, periods, underscores or hyphens (underscores may appear at most twice in a row; periods, underscores and hyphens cannot be directly adjacent to one another), and ends with a lowercase letter or digit. 1-64 characters.
:return: The namespace of this ShowTriggerRequest.
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""Sets the namespace of this ShowTriggerRequest.
        Organization name. Starts with a lowercase letter, followed by lowercase letters, digits, periods, underscores or hyphens (underscores may appear at most twice in a row; periods, underscores and hyphens cannot be directly adjacent to one another), and ends with a lowercase letter or digit. 1-64 characters.
:param namespace: The namespace of this ShowTriggerRequest.
:type: str
"""
self._namespace = namespace
@property
def repository(self):
"""Gets the repository of this ShowTriggerRequest.
        Image repository name.
:return: The repository of this ShowTriggerRequest.
:rtype: str
"""
return self._repository
@repository.setter
def repository(self, repository):
"""Sets the repository of this ShowTriggerRequest.
        Image repository name.
:param repository: The repository of this ShowTriggerRequest.
:type: str
"""
self._repository = repository
@property
def trigger(self):
"""Gets the trigger of this ShowTriggerRequest.
        Trigger name.
:return: The trigger of this ShowTriggerRequest.
:rtype: str
"""
return self._trigger
@trigger.setter
def trigger(self, trigger):
"""Sets the trigger of this ShowTriggerRequest.
        Trigger name.
:param trigger: The trigger of this ShowTriggerRequest.
:type: str
"""
self._trigger = trigger
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
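        # Recursively serialize nested models, lists and dicts; mask attributes listed in sensitive_list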
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowTriggerRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 26.481928
| 94
| 0.569836
|
17b258bf2c7b3d5382606461e25cf35dd4548645
| 1,672
|
py
|
Python
|
tests/test_sysbottle.py
|
arvy/sperf
|
c047ae5f3b1daf70cc227784197e4ef37caaf556
|
[
"Apache-2.0"
] | null | null | null |
tests/test_sysbottle.py
|
arvy/sperf
|
c047ae5f3b1daf70cc227784197e4ef37caaf556
|
[
"Apache-2.0"
] | null | null | null |
tests/test_sysbottle.py
|
arvy/sperf
|
c047ae5f3b1daf70cc227784197e4ef37caaf556
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 DataStax, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tests the sysbottle module """
import os
from datetime import datetime
from pysper.sysbottle import SysbottleReport, IOStatParser
def test_sysbottle_analyze():
""" test sysbottle report """
iostat = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'testdata', 'iostat')
report = SysbottleReport(iostat)
report.analyze()
assert report.analyzed
assert report.count == 10
assert len(report.devices) == 1
assert len(report.cpu_stats['total']) == 10
report.print_report()
def test_date_format():
"""makes sure parsing works with all formats"""
#pylint: disable=protected-access
real_time = datetime.strptime("2011-09-16 19:01:10", "%Y-%m-%d %H:%M:%S")
parser = IOStatParser()
eu_format = "16/09/11 19:01:10"
parser._parse_date(eu_format)
assert parser.iostat['date'] == real_time
sky_format = "09/16/11 19:01:10"
parser._parse_date(sky_format)
assert parser.iostat['date'] == real_time
us_format = "09/16/2011 07:01:10 PM"
parser._parse_date(us_format)
assert parser.iostat['date'] == real_time
| 34.833333
| 91
| 0.713517
|
96479f981eab8c08ccf8cabb90dfcca0e22630b4
| 741
|
py
|
Python
|
peerplaysbase/asset_permissions.py
|
farisshajahan/python-peerplays
|
260e43d39307554f5c35574fe7a6f5451a51697f
|
[
"MIT"
] | 10
|
2019-03-14T03:09:42.000Z
|
2021-03-17T10:20:05.000Z
|
peerplaysbase/asset_permissions.py
|
farisshajahan/python-peerplays
|
260e43d39307554f5c35574fe7a6f5451a51697f
|
[
"MIT"
] | 8
|
2019-04-02T17:07:20.000Z
|
2020-04-30T08:24:01.000Z
|
peerplaysbase/asset_permissions.py
|
farisshajahan/python-peerplays
|
260e43d39307554f5c35574fe7a6f5451a51697f
|
[
"MIT"
] | 4
|
2019-04-02T17:00:22.000Z
|
2021-08-09T11:28:20.000Z
|
asset_permissions = {}
asset_permissions["charge_market_fee"] = 0x01
asset_permissions["white_list"] = 0x02
asset_permissions["override_authority"] = 0x04
asset_permissions["transfer_restricted"] = 0x08
asset_permissions["disable_force_settle"] = 0x10
asset_permissions["global_settle"] = 0x20
asset_permissions["disable_confidential"] = 0x40
asset_permissions["witness_fed_asset"] = 0x80
asset_permissions["committee_fed_asset"] = 0x100
def toint(permissions):
permissions_int = 0
for p in permissions:
if permissions[p]:
permissions_int += asset_permissions[p]
return permissions_int
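# Expand an integer bitmask back into a {permission_name: bool} dict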
def todict(number):
r = {}
for k, v in asset_permissions.items():
r[k] = bool(number & v)
return r
| 28.5
| 51
| 0.735493
|
2359c04d36d4e545d33150d496958dbbd1c9ea61
| 5,306
|
py
|
Python
|
src/runners/alert_suppressions_runner.py
|
ttracx/SnowAlert
|
2df7938ca8a5058b4d9b4f36bdf67ea638b9ff4c
|
[
"Apache-2.0"
] | null | null | null |
src/runners/alert_suppressions_runner.py
|
ttracx/SnowAlert
|
2df7938ca8a5058b4d9b4f36bdf67ea638b9ff4c
|
[
"Apache-2.0"
] | null | null | null |
src/runners/alert_suppressions_runner.py
|
ttracx/SnowAlert
|
2df7938ca8a5058b4d9b4f36bdf67ea638b9ff4c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import json
import uuid
import datetime
from typing import List
from config import ALERTS_TABLE, METADATA_TABLE, RULES_SCHEMA, ALERT_SQUELCH_POSTFIX, CLOUDWATCH_METRICS
from helpers import log
from helpers.db import connect, load_rules
RUN_METADATA = {'QUERY_HISTORY': [], 'RUN_TYPE': 'ALERT SUPPRESSIONS'} # Contains metadata about this run
def log_alerts(ctx, alerts):
"""We don't usually log alerts in the suppression runner, but we want the runner to create an alert if a
suppression fails to execute.
"""
if len(alerts):
print("Recording alerts.")
format_string = ", ".join(["(%s)"] * len(alerts))
try:
ctx.cursor().execute(
f'''
INSERT INTO {ALERTS_TABLE} (alert_time, alert)
SELECT PARSE_JSON(column1):ALERT_TIME,
PARSE_JSON(column1)
FROM VALUES {format_string};
''',
alerts
)
except Exception as e:
log.fatal("Failed to log alert", e)
else:
print("No alerts to log.")
def log_failure(ctx, suppression_name, e, event_data=None, description=None):
if event_data is None:
event_data = f"The suppression '{suppression_name}' failed to execute with error: {e}"
if description is None:
description = f"The suppression '{suppression_name}' failed to execute with error: {e}"
alert = {}
alert['ALERT_ID'] = uuid.uuid4().hex
alert['QUERY_ID'] = 'b1d02051dd2c4d62bb75274f2ee5996a'
alert['QUERY_NAME'] = 'Suppression Runner Failure'
alert['ENVIRONMENT'] = 'Suppressions'
alert['SOURCES'] = 'Suppression Runner'
alert['ACTOR'] = 'Suppression Runner'
alert['OBJECT'] = suppression_name
alert['ACTION'] = 'Suppression Execution'
alert['TITLE'] = 'Suppression Runner Failure'
alert['EVENT_TIME'] = str(datetime.datetime.utcnow())
alert['ALERT_TIME'] = str(datetime.datetime.utcnow())
alert['DESCRIPTION'] = description
alert['DETECTOR'] = 'Suppression Runner'
alert['EVENT_DATA'] = event_data
alert['SEVERITY'] = 'High'
alerts = []
alerts.append(json.dumps(alert))
try:
log_alerts(ctx, alerts)
log.fatal(f"Suppression {suppression_name} failure successfully logged", e)
except Exception as e:
print(f"Failed to log suppression failure")
log.fatal("Failed to log suppression failure", e)
def do_suppression(suppression_name, ctx):
# set alert instances matching the suppression to suppressed
query = f"""
MERGE INTO {ALERTS_TABLE} t
USING({RULES_SCHEMA}.{suppression_name}) s
ON t.alert:ALERT_ID = s.alert:ALERT_ID
WHEN MATCHED THEN UPDATE
SET t.SUPPRESSED = 'true', t.SUPPRESSION_RULE = '{suppression_name}';
"""
ctx.cursor().execute(query)
def run_suppressions(squelch_name):
print(f"Received suppression {squelch_name}")
metadata = {}
metadata['NAME'] = squelch_name
ctx = connect()
metadata['START_TIME'] = datetime.datetime.utcnow()
try:
do_suppression(squelch_name, ctx)
except Exception as e:
log_failure(ctx, squelch_name, e)
log.metadata_fill(metadata, status='failure', rows=0)
RUN_METADATA['QUERY_HISTORY'].append(metadata)
        return
log.metadata_fill(metadata, status='success', rows=ctx.cursor().rowcount)
RUN_METADATA['QUERY_HISTORY'].append(metadata)
print(f"Suppression query {squelch_name} executed. ")
def flag_remaining_alerts(ctx) -> List[str]:
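    # Any alert not matched by a suppression rule is explicitly marked as unsuppressed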
try:
query = f"UPDATE {ALERTS_TABLE} SET suppressed=FALSE WHERE suppressed IS NULL;"
suppression_view_list = ctx.cursor().execute(query)
except Exception as e:
log.fatal("Failed to flag remaining alerts as unsuppressed", e)
return [name[1] for name in suppression_view_list]
def record_metadata(ctx, metadata):
    metadata['RUN_START_TIME'] = str(metadata['RUN_START_TIME']) # We wanted them to be datetime objects for date math,
metadata['RUN_END_TIME'] = str(metadata['RUN_END_TIME']) # then convert to string for json serializing
metadata['RUN_DURATION'] = str(metadata['RUN_DURATION'])
statement = f'''
INSERT INTO {METADATA_TABLE}
(event_time, v) select '{metadata['RUN_START_TIME']}',
PARSE_JSON(column1) from values('{json.dumps(metadata)}')
'''
try:
log.info("Recording run metadata.")
ctx.cursor().execute(statement)
except Exception as e:
log.fatal("Metadata failed to log", e)
log_failure(ctx, "Metadata Logging", e, event_data=metadata, description="The run metadata failed to log")
def main():
RUN_METADATA['RUN_START_TIME'] = datetime.datetime.utcnow()
ctx = connect()
for squelch_name in load_rules(ctx, ALERT_SQUELCH_POSTFIX):
run_suppressions(squelch_name)
flag_remaining_alerts(ctx)
RUN_METADATA['RUN_END_TIME'] = datetime.datetime.utcnow()
RUN_METADATA['RUN_DURATION'] = RUN_METADATA['RUN_END_TIME'] - RUN_METADATA['RUN_START_TIME']
record_metadata(ctx, RUN_METADATA)
if CLOUDWATCH_METRICS:
log.metric('Run', 'SnowAlert', [{'Name': 'Component', 'Value': 'Alert Suppression Runner'}], 1)
if __name__ == '__main__':
main()
| 35.610738
| 114
| 0.6634
|
619a20a42f57970c076e0791abbfb5630a6b5254
| 2,279
|
py
|
Python
|
fastFood/test.py
|
wangteng200000318/-DataMiningMeiTuan
|
eb152a090c3025726bcb793484d4a88f2072b744
|
[
"MIT"
] | 4
|
2020-11-23T04:50:41.000Z
|
2021-03-12T06:19:59.000Z
|
fastFood/test.py
|
wangteng200000318/-DataMiningMeiTuan
|
eb152a090c3025726bcb793484d4a88f2072b744
|
[
"MIT"
] | null | null | null |
fastFood/test.py
|
wangteng200000318/-DataMiningMeiTuan
|
eb152a090c3025726bcb793484d4a88f2072b744
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
a = [['108.937847', '34.132887'], ['108.868828', '34.199134'], ['109.015041', '34.211159'], ['108.944062', '34.352765'],
['108.919294', '34.180144'], ['108.892008', '34.283387'], ['108.871430', '34.261104'], ['108.961922', '34.276979'],
['108.909284', '34.247895'], ['108.955792', '34.211985'], ['108.846932', '34.238143'], ['108.939899', '34.358630'],
['108.963657', '34.268000'], ['108.943989', '34.260542'], ['108.981368', '34.269157'], ['108.950599', '34.198392'],
['108.902604', '34.236490'], ['108.825070', '34.253920'], ['108.972604', '34.287695'], ['108.965999', '34.351895']]
b = [['108.946641', '34.261887'], ['108.936903', '34.229282'], ['108.881699', '34.194321'], ['108.996278', '34.302895'],
['108.893550', '34.219525'], ['109.056958', '34.273964'], ['108.912227', '34.157825'], ['108.983815', '34.268932'],
['108.892709', '34.236872'], ['108.948727', '34.259112'], ['108.929258', '34.197846'], ['108.964078', '34.212422'],
['108.963703', '34.341407'], ['108.956981', '34.172927'], ['108.873426', '34.237753'], ['108.868778', '34.170026'],
['108.988853', '34.203912'], ['108.839449', '34.210518'], ['108.938550', '34.217573'], ['108.963224', '34.235112']]
c = [['109.044480', '34.343991'], ['109.072264', '34.279830'], ['108.937572', '34.342601'], ['108.992365', '34.216228'],
['108.972944', '34.212982'], ['108.947300', '34.257908'], ['108.898460', '34.280834'], ['108.937288', '34.342642'],
['108.974174', '34.269576'], ['108.881477', '34.245412'], ['108.919671', '34.286006'], ['108.946789', '34.315457'],
['108.909587', '34.247317'], ['108.932731', '34.156164'], ['108.946330', '34.242104'], ['108.948834', '34.330365'],
['108.954630', '34.241640'], ['108.964571', '34.247390'], ['109.233848', '34.665397'], ['108.995060', '34.278243']]
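# Convert each coordinate string pair to floats so the three clusters can be scattered in different colors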
ax, ay = [], []
bx, by = [], []
cx, cy = [], []
for i in range(len(a)):
ax.append(float(a[i][0]))
ay.append(float(a[i][1]))
for i in range(len(b)):
bx.append(float(b[i][0]))
by.append(float(b[i][1]))
for i in range(len(c)):
cx.append(float(c[i][0]))
cy.append(float(c[i][1]))
plt.scatter(ax,ay,c="red")
plt.scatter(bx,by,c='blue')
plt.scatter(cx,cy,c='green')
plt.show()
| 65.114286
| 121
| 0.555946
|
69dac8dbc21bf585da7a33009285ba81ce8e9ce4
| 76
|
py
|
Python
|
test/data/lambda_function.py
|
ToQoz/kumogata-template
|
f7766a1cda27c30f01e743bf80b6465c7f55f2a4
|
[
"MIT"
] | 2
|
2017-06-12T13:16:58.000Z
|
2017-06-29T03:01:32.000Z
|
test/data/lambda_function.py
|
ToQoz/kumogata-template
|
f7766a1cda27c30f01e743bf80b6465c7f55f2a4
|
[
"MIT"
] | 7
|
2018-03-20T07:45:30.000Z
|
2019-03-27T10:29:28.000Z
|
test/data/lambda_function.py
|
ToQoz/kumogata-template
|
f7766a1cda27c30f01e743bf80b6465c7f55f2a4
|
[
"MIT"
] | 8
|
2017-05-02T07:40:18.000Z
|
2018-08-09T02:33:11.000Z
|
import boto3
def lambda_handler(event, context):
print('hello lambda')
| 15.2
| 35
| 0.736842
|
2c32b3dcddb1ba65e595c05df4bb79d476a45044
| 595
|
py
|
Python
|
experiments/2013-08-19-step-burst.py
|
jaesikchoi/gpss-research
|
2a64958a018f1668f7b8eedf33c4076a63af7868
|
[
"MIT"
] | 151
|
2015-01-09T19:25:05.000Z
|
2022-01-05T02:05:52.000Z
|
experiments/2013-08-19-step-burst.py
|
jaesikchoi/gpss-research
|
2a64958a018f1668f7b8eedf33c4076a63af7868
|
[
"MIT"
] | 1
|
2016-08-04T13:12:51.000Z
|
2016-08-04T13:12:51.000Z
|
experiments/2013-08-19-step-burst.py
|
jaesikchoi/gpss-research
|
2a64958a018f1668f7b8eedf33c4076a63af7868
|
[
"MIT"
] | 59
|
2015-02-04T19:13:58.000Z
|
2021-07-28T23:36:09.000Z
|
Experiment(description='Trying out the step and burst kernels',
data_dir='../data/temp/',
max_depth=8,
random_order=False,
k=1,
debug=False,
local_computation=False,
n_rand=5,
sd=4,
max_jobs=400,
verbose=False,
make_predictions=False,
skip_complete=True,
results_dir='../results/2013-08-19-step-burst/',
iters=500,
base_kernels='SE,Step,BurstSE',
zero_mean=True,
random_seed=0)
| 27.045455
| 63
| 0.492437
|
a25e68a27f657aee09db0e2871696ab1babf3408
| 185
|
py
|
Python
|
setup.py
|
pieterb/aiopluggy
|
40a28c38bed35cc8df8703d99c25c75b3fe82151
|
[
"MIT"
] | 1
|
2018-01-12T20:36:37.000Z
|
2018-01-12T20:36:37.000Z
|
setup.py
|
pieterb/aiopluggy
|
40a28c38bed35cc8df8703d99c25c75b3fe82151
|
[
"MIT"
] | null | null | null |
setup.py
|
pieterb/aiopluggy
|
40a28c38bed35cc8df8703d99c25c75b3fe82151
|
[
"MIT"
] | 2
|
2020-01-28T19:33:20.000Z
|
2022-01-17T00:13:28.000Z
|
#!/usr/bin/env python
""" See <https://setuptools.readthedocs.io/en/latest/>.
"""
from setuptools import setup
def main():
return setup()
if __name__ == '__main__':
main()
| 13.214286
| 55
| 0.643243
|
5ccf8a50dbfb891cae7d2dfbedc3df8aaff6ecec
| 8,478
|
py
|
Python
|
train_custom.py
|
eirene-aisa/glow-tts-practice
|
9210bf18b92ba5cd95a3d5fe2a725f6acd15e29f
|
[
"MIT"
] | null | null | null |
train_custom.py
|
eirene-aisa/glow-tts-practice
|
9210bf18b92ba5cd95a3d5fe2a725f6acd15e29f
|
[
"MIT"
] | null | null | null |
train_custom.py
|
eirene-aisa/glow-tts-practice
|
9210bf18b92ba5cd95a3d5fe2a725f6acd15e29f
|
[
"MIT"
] | null | null | null |
import os
import json
import argparse
import math
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torch.multiprocessing as mp
import torch.distributed as dist
from apex.parallel import DistributedDataParallel as DDP
from apex import amp
from data_utils_custom import TextMelLoader, TextMelCollate
import models
import commons
import utils
from korean_text.symbols import kor_symbols
from setproctitle import setproctitle
global_step = 0
def main():
"""Assume Single Node Multi GPUs Training Only"""
assert torch.cuda.is_available(), "CPU training is not allowed."
n_gpus = torch.cuda.device_count()
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '80000'
hps = utils.get_hparams()
mp.spawn(train_and_eval, nprocs=n_gpus, args=(n_gpus, hps,))
def train_and_eval(rank, n_gpus, hps):
setproctitle("Glow-kor-batch-{}".format(hps.train.batch_size))
global global_step
if rank == 0:
logger = utils.get_logger(hps.model_dir)
logger.info(hps)
utils.check_git_hash(hps.model_dir)
writer = SummaryWriter(log_dir=hps.model_dir)
writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
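    # One process per GPU: join the NCCL process group and pin this process to its device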
dist.init_process_group(backend='nccl', init_method='env://', world_size=n_gpus, rank=rank)
torch.manual_seed(hps.train.seed)
torch.cuda.set_device(rank)
train_dataset = TextMelLoader(hps.data.training_files, hps.data)
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset,
num_replicas=n_gpus,
rank=rank,
shuffle=True)
collate_fn = TextMelCollate(1)
train_loader = DataLoader(train_dataset, num_workers=8, shuffle=False,
batch_size=hps.train.batch_size, pin_memory=True,
drop_last=True, collate_fn=collate_fn, sampler=train_sampler)
if rank == 0:
val_dataset = TextMelLoader(hps.data.validation_files, hps.data)
val_loader = DataLoader(val_dataset, num_workers=8, shuffle=False,
batch_size=hps.train.batch_size, pin_memory=True,
drop_last=True, collate_fn=collate_fn)
generator = models.FlowGenerator(
n_vocab=len(kor_symbols) + getattr(hps.data, "add_blank", False),
out_channels=hps.data.n_mel_channels,
**hps.model).cuda(rank)
optimizer_g = commons.Adam(generator.parameters(), scheduler=hps.train.scheduler, dim_model=hps.model.hidden_channels, warmup_steps=hps.train.warmup_steps, lr=hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps)
if hps.train.fp16_run:
generator, optimizer_g._optim = amp.initialize(generator, optimizer_g._optim, opt_level="O1")
generator = DDP(generator)
epoch_str = 1
global_step = 0
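    # Resume from the latest checkpoint if one exists; otherwise fall back to data-dependent init (ddi) weights when available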
try:
_, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), generator, optimizer_g)
epoch_str += 1
optimizer_g.step_num = (epoch_str - 1) * len(train_loader)
optimizer_g._update_learning_rate()
global_step = (epoch_str - 1) * len(train_loader)
except:
if hps.train.ddi and os.path.isfile(os.path.join(hps.model_dir, "ddi_G.pth")):
_ = utils.load_checkpoint(os.path.join(hps.model_dir, "ddi_G.pth"), generator, optimizer_g)
for epoch in range(epoch_str, hps.train.epochs + 1):
if rank == 0:
train(rank, epoch, hps, generator, optimizer_g, train_loader, logger, writer)
evaluate(rank, epoch, hps, generator, optimizer_g, val_loader, logger, writer_eval)
if epoch % 10 == 0: # nayoun edited
utils.save_checkpoint(generator, optimizer_g, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "G_{}.pth".format(epoch)))
else:
train(rank, epoch, hps, generator, optimizer_g, train_loader, None, None)
def train(rank, epoch, hps, generator, optimizer_g, train_loader, logger, writer):
train_loader.sampler.set_epoch(epoch)
global global_step
generator.train()
for batch_idx, (x, x_lengths, y, y_lengths) in enumerate(train_loader):
x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True)
y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)
# Train Generator
optimizer_g.zero_grad()
(z, z_m, z_logs, logdet, z_mask), (x_m, x_logs, x_mask), (attn, logw, logw_) = generator(x, x_lengths, y, y_lengths, gen=False)
l_mle = commons.mle_loss(z, z_m, z_logs, logdet, z_mask)
l_length = commons.duration_loss(logw, logw_, x_lengths)
loss_gs = [l_mle, l_length]
loss_g = sum(loss_gs)
if hps.train.fp16_run:
with amp.scale_loss(loss_g, optimizer_g._optim) as scaled_loss:
scaled_loss.backward()
grad_norm = commons.clip_grad_value_(amp.master_params(optimizer_g._optim), 5)
else:
loss_g.backward()
grad_norm = commons.clip_grad_value_(generator.parameters(), 5)
optimizer_g.step()
if rank==0:
if batch_idx % hps.train.log_interval == 0:
(y_gen, *_), *_ = generator.module(x[:1], x_lengths[:1], gen=True)
logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(x), len(train_loader.dataset),
100. * batch_idx / len(train_loader),
loss_g.item()))
logger.info([x.item() for x in loss_gs] + [global_step, optimizer_g.get_lr()])
scalar_dict = {"loss/g/total": loss_g, "learning_rate": optimizer_g.get_lr(), "grad_norm": grad_norm}
scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(loss_gs)})
utils.summarize(
writer=writer,
global_step=global_step,
images={"y_org": utils.plot_spectrogram_to_numpy(y[0].data.cpu().numpy()),
"y_gen": utils.plot_spectrogram_to_numpy(y_gen[0].data.cpu().numpy()),
"attn": utils.plot_alignment_to_numpy(attn[0,0].data.cpu().numpy()),
},
scalars=scalar_dict)
global_step += 1
if rank == 0:
logger.info('====> Epoch: {}'.format(epoch))
def evaluate(rank, epoch, hps, generator, optimizer_g, val_loader, logger, writer_eval):
if rank == 0:
global global_step
generator.eval()
losses_tot = []
with torch.no_grad():
for batch_idx, (x, x_lengths, y, y_lengths) in enumerate(val_loader):
x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True)
y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True)
(z, z_m, z_logs, logdet, z_mask), (x_m, x_logs, x_mask), (attn, logw, logw_) = generator(x, x_lengths, y, y_lengths, gen=False)
l_mle = commons.mle_loss(z, z_m, z_logs, logdet, z_mask)
l_length = commons.duration_loss(logw, logw_, x_lengths)
loss_gs = [l_mle, l_length]
loss_g = sum(loss_gs)
if batch_idx == 0:
losses_tot = loss_gs
else:
losses_tot = [x + y for (x, y) in zip(losses_tot, loss_gs)]
if batch_idx % hps.train.log_interval == 0:
logger.info('Eval Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(x), len(val_loader.dataset),
100. * batch_idx / len(val_loader),
loss_g.item()))
logger.info([x.item() for x in loss_gs])
losses_tot = [x / len(val_loader) for x in losses_tot]
loss_tot = sum(losses_tot)
scalar_dict = {"loss/g/total": loss_tot}
scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_tot)})
utils.summarize(
writer=writer_eval,
global_step=global_step,
scalars=scalar_dict)
logger.info('====> Epoch: {}'.format(epoch))
if __name__ == "__main__":
main()
| 43.035533
| 229
| 0.625383
|
d28c0a7e917796aa60246f8ff298b70e9465d095
| 20,322
|
py
|
Python
|
tests/test_torch.py
|
KeAWang/client
|
45af52a31db0b63118c7f2b15c7c8b40f1f05f29
|
[
"MIT"
] | null | null | null |
tests/test_torch.py
|
KeAWang/client
|
45af52a31db0b63118c7f2b15c7c8b40f1f05f29
|
[
"MIT"
] | null | null | null |
tests/test_torch.py
|
KeAWang/client
|
45af52a31db0b63118c7f2b15c7c8b40f1f05f29
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import wandb
import pytest
import json
import os
import sys
import time
from pprint import pprint
from torchvision import models
from torch.autograd import Variable
from pkg_resources import parse_version
# Tests which rely on row history in memory should set `History.keep_rows = True`
from wandb.history import History
History.keep_rows = True
def dummy_torch_tensor(size, requires_grad=True):
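    # torch >= 0.4 merged Variable into Tensor; older versions still need an explicit Variable wrapper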
if parse_version(torch.__version__) >= parse_version('0.4'):
return torch.ones(size, requires_grad=requires_grad)
else:
return torch.autograd.Variable(torch.ones(size), requires_grad=requires_grad)
class DynamicModule(nn.Module):
def __init__(self):
super(DynamicModule, self).__init__()
self.choices = nn.ModuleDict({
'conv': nn.Conv2d(10, 10, 3),
'pool': nn.MaxPool2d(3)
})
self.activations = nn.ModuleDict([
['lrelu', nn.LeakyReLU()],
['prelu', nn.PReLU()]
])
def forward(self, x, choice, act):
x = self.choices[choice](x)
x = self.activations[act](x)
return x
class Discrete(nn.Module):
def __init__(self, num_outputs):
super(Discrete, self).__init__()
def forward(self, x):
probs = nn.functional.softmax(x, dim=0)
dist = torch.distributions.Categorical(probs=probs)
        # TODO: if we don't call entropy here, PyTorch blows up because we added hooks...
return dist.entropy()
class DiscreteModel(nn.Module):
def __init__(self, num_outputs=2):
super(DiscreteModel, self).__init__()
self.linear1 = nn.Linear(1, 10)
self.linear2 = nn.Linear(10, num_outputs)
self.dist = Discrete(num_outputs)
def forward(self, x):
x = self.linear1(x)
x = self.linear2(x)
return self.dist(x)
class ParameterModule(nn.Module):
def __init__(self):
super(ParameterModule, self).__init__()
self.params = nn.ParameterList(
[nn.Parameter(torch.ones(10, 10)) for i in range(10)])
self.otherparam = nn.Parameter(torch.Tensor(5))
def forward(self, x):
# ParameterList can act as an iterable, or be indexed using ints
for i, p in enumerate(self.params):
x = self.params[i // 2].mm(x) + p.mm(x)
return x
def init_conv_weights(layer, weights_std=0.01, bias=0):
'''Initialize weights for subnet convolution'''
if parse_version(torch.__version__) >= parse_version('0.4'):
nn.init.normal_(layer.weight.data, std=weights_std)
nn.init.constant_(layer.bias.data, val=bias)
else:
nn.init.normal(layer.weight.data, std=weights_std)
nn.init.constant(layer.bias.data, val=bias)
return layer
def conv3x3(in_channels, out_channels, **kwargs):
'''Return a 3x3 convolutional layer for SubNet'''
layer = nn.Conv2d(in_channels, out_channels, kernel_size=3, **kwargs)
layer = init_conv_weights(layer)
return layer
class SubNet(nn.Module):
def __init__(self, mode, anchors=9, classes=80, depth=4,
base_activation=F.relu,
output_activation=F.sigmoid):
super(SubNet, self).__init__()
self.anchors = anchors
self.classes = classes
self.depth = depth
self.base_activation = base_activation
self.output_activation = output_activation
self.subnet_base = nn.ModuleList([conv3x3(256, 256, padding=1)
for _ in range(depth)])
if mode == 'boxes':
self.subnet_output = conv3x3(256, 4 * self.anchors, padding=1)
elif mode == 'classes':
# add an extra dim for confidence
self.subnet_output = conv3x3(
256, (1 + self.classes) * self.anchors, padding=1)
self._output_layer_init(self.subnet_output.bias.data)
def _output_layer_init(self, tensor, pi=0.01):
fill_constant = 4.59 # - np.log((1 - pi) / pi)
return tensor.fill_(fill_constant)
def forward(self, x):
for layer in self.subnet_base:
x = self.base_activation(layer(x))
x = self.subnet_output(x)
x = x.permute(0, 2, 3, 1).contiguous().view(x.size(0),
x.size(2) * x.size(3) * self.anchors, -1)
return x
class ConvNet(nn.Module):
def __init__(self):
super(ConvNet, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
class LSTMModel(torch.nn.Module):
def __init__(self, embedding_dim, hidden_dim):
super(LSTMModel, self).__init__()
vocabLimit = 100
self.hidden_dim = hidden_dim
self.embeddings = nn.Embedding(vocabLimit + 1, embedding_dim)
self.lstm = nn.LSTM(embedding_dim, hidden_dim)
self.linearOut = nn.Linear(hidden_dim, 2)
def forward(self, inputs, hidden):
x = self.embeddings(inputs).view(len(inputs), 1, -1)
lstm_out, lstm_h = self.lstm(x, hidden)
x = lstm_out[-1]
x = self.linearOut(x)
x = F.log_softmax(x, dim=1)
return x, lstm_h
def init_hidden(self):
return (
Variable(torch.zeros(1, 1, self.hidden_dim)), Variable(torch.zeros(1, 1, self.hidden_dim)))
class Sequence(nn.Module):
def __init__(self):
super(Sequence, self).__init__()
self.lstm1 = nn.LSTMCell(1, 51)
self.lstm2 = nn.LSTMCell(51, 51)
self.linear = nn.Linear(51, 1)
def forward(self, input, future=0):
outputs = []
h_t = dummy_torch_tensor((input.size(0), 51))
c_t = dummy_torch_tensor((input.size(0), 51))
h_t2 = dummy_torch_tensor((input.size(0), 51))
c_t2 = dummy_torch_tensor((input.size(0), 51))
for i, input_t in enumerate(input.chunk(input.size(1), dim=1)):
h_t, c_t = self.lstm1(input_t, (h_t, c_t))
h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
output = self.linear(h_t2)
outputs += [output]
for i in range(future): # if we should predict the future
h_t, c_t = self.lstm1(output, (h_t, c_t))
h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
output = self.linear(h_t2)
outputs += [output]
outputs = torch.stack(outputs, 1).squeeze(2)
return outputs
class FCLayer(nn.Module):
"""FC Layer + Activation"""
def __init__(self, dims, batchnorm_dim=0, act='ReLU', dropout=0):
super(FCLayer, self).__init__()
layers = []
for i in range(len(dims) - 2):
in_dim = dims[i] # input
out_dim = dims[i + 1] # output
if 0 < dropout:
layers.append(nn.Dropout(dropout))
print("BOOM", in_dim, out_dim)
layers.append(nn.Linear(in_dim, out_dim))
if '' != act:
layers.append(getattr(nn, act)())
if batchnorm_dim > 0:
layers.append(nn.BatchNorm1d(batchnorm_dim))
if 0 < dropout:
layers.append(nn.Dropout(dropout))
layers.append(nn.Linear(dims[-2], dims[-1]))
if '' != act:
layers.append(getattr(nn, act)())
self.net = nn.Sequential(*layers)
def forward(self, x):
return self.net(x)
class VGGConcator(nn.Module):
"""
Extracts feature of each four panels, concatenates 4 vgg features panel-wise.
"""
def __init__(self):
super(VGGConcator, self).__init__()
self.vgg = models.vgg16(pretrained=False)
self.vgg.classifier = nn.Sequential(
*list(self.vgg.classifier.children())[:-1])
def forward(self, panels, num=1):
if num == 1:
features = self.vgg(panels)
else:
img0 = panels[:, 0, :, :, :]
img1 = panels[:, 1, :, :, :]
img2 = panels[:, 2, :, :, :]
img3 = panels[:, 3, :, :, :]
feature0 = self.vgg(img0)
feature1 = self.vgg(img1)
feature2 = self.vgg(img2)
feature3 = self.vgg(img3)
features = torch.cat((feature0[:, None, :], feature1[:, None, :],
feature2[:, None, :], feature3[:, None, :]), dim=1)
return features
class Embedding(nn.Module):
def __init__(self, d_embedding, d_word, d_hidden, word_dim, dropout):
super(Embedding, self).__init__()
glove = torch.ones((10, 300))
self.vgg = VGGConcator()
# self.fine_tuning()
self.word_dim = word_dim
glove = glove[:self.word_dim]
self.d_word = d_word
self.emb = nn.Embedding(word_dim, 300, padding_idx=0)
self.emb.weight.data = glove
# consts
self.d_img = 4096
self.num_panels = 4
self.num_max_sentences = 3
self.num_max_words = 20
self.img_fc0 = FCLayer([self.d_img, d_hidden], dropout=dropout)
self.box_lstm = nn.LSTM(
300, d_word, 1, batch_first=True, bidirectional=False)
self.fc0 = FCLayer([d_word + d_hidden, d_embedding])
def forward(self, images, words):
words = words.long()
batch_size = words.size(0)
box_hidden = self.init_hidden(batch_size, self.d_word, 1, 4)
words = words.view(-1, words.size(-1))
emb_word = self.emb(words)
print("Shapes: ", emb_word.shape, words.shape)
emb_word = emb_word.view(-1, self.num_panels,
self.num_max_sentences, self.num_max_words, self.d_word)
emb_sentence = torch.sum(emb_word, dim=3)
emb_sentence = emb_sentence.view(-1,
self.num_max_sentences, self.d_word)
lstmed_sentence, _ = self.box_lstm(emb_sentence, box_hidden)
emb_panel_sentence = lstmed_sentence[:, -1, :]
emb_panel_sentence = emb_panel_sentence.view(
-1, self.num_panels, self.d_word)
img_feature = self.vgg(images, num=4)
img_feature = self.img_fc0(img_feature)
fusion = torch.cat((img_feature, emb_panel_sentence), dim=-1)
fusion = self.fc0(fusion)
return fusion
def init_hidden(self, batch, out, direction=1, n=1):
dims = (direction, batch * n, out)
hiddens = (Variable(torch.zeros(*dims)),
Variable(torch.zeros(*dims)))
return hiddens
@pytest.mark.skipif(sys.version_info < (3, 6), reason="Timeouts in older python versions")
def test_embedding(wandb_init_run):
net = Embedding(d_embedding=300, d_word=300,
d_hidden=300, word_dim=100, dropout=0)
wandb.watch(net, log="all", log_freq=1)
for i in range(2):
output = net(torch.ones((1, 4, 3, 224, 224)),
torch.ones((1, 4, 3, 20)))
output.backward(torch.ones(1, 4, 300))
wandb.log({"loss": 1})
assert len(wandb_init_run.history.rows[0]) == 82
def test_categorical(wandb_init_run):
net = DiscreteModel(num_outputs=2)
wandb.watch(net, log="all", log_freq=1)
for i in range(2):
output = net(torch.ones((1)))
samp = output.backward(torch.ones((2)))
wandb.log({"loss": samp})
assert wandb_init_run.summary["graph_0"].to_json()
assert len(wandb_init_run.history.rows[0]) == 12
def test_double_log(wandb_init_run):
net = ConvNet()
wandb.watch(net)
with pytest.raises(ValueError):
wandb.watch(net)
def test_gradient_logging(wandb_init_run):
net = ConvNet()
wandb.watch(net, log_freq=1)
for i in range(3):
output = net(dummy_torch_tensor((64, 1, 28, 28)))
grads = torch.ones(64, 10)
output.backward(grads)
assert(len(wandb_init_run.history.row) == 8)
assert(
wandb_init_run.history.row['gradients/fc2.bias'].histogram[0] > 0)
wandb.log({"a": 2})
assert(len(wandb_init_run.history.rows) == 3)
@pytest.mark.skipif(sys.version_info < (3, 6), reason="Timeouts in older python versions")
def test_gradient_logging_freq(wandb_init_run):
net = ConvNet()
log_freq = 50
wandb.watch(net, log_freq=log_freq)
for i in range(110):
output = net(dummy_torch_tensor((64, 1, 28, 28)))
grads = torch.ones(64, 10)
output.backward(grads)
if (i + 1) % log_freq == 0:
assert(len(wandb_init_run.history.row) == 8)
assert(
wandb_init_run.history.row['gradients/fc2.bias'].histogram[0] > 0)
else:
assert(len(wandb_init_run.history.row) == 0)
wandb.log({"a": 2})
assert(len(wandb_init_run.history.rows) == 110)
def test_all_logging(wandb_init_run):
net = ConvNet()
wandb.watch(net, log="all", log_freq=1)
for i in range(3):
output = net(dummy_torch_tensor((64, 1, 28, 28)))
grads = torch.ones(64, 10)
output.backward(grads)
assert(len(wandb_init_run.history.row) == 16)
assert(
wandb_init_run.history.row['parameters/fc2.bias'].histogram[0] > 0)
assert(
wandb_init_run.history.row['gradients/fc2.bias'].histogram[0] > 0)
wandb.log({"a": 2})
assert(len(wandb_init_run.history.rows) == 3)
@pytest.mark.skipif(sys.version_info < (3, 6), reason="Timeouts in older python versions")
def test_all_logging_freq(wandb_init_run):
net = ConvNet()
log_freq = 50
wandb.watch(net, log="all", log_freq=log_freq)
for i in range(110):
output = net(dummy_torch_tensor((64, 1, 28, 28)))
grads = torch.ones(64, 10)
output.backward(grads)
if (i + 1) % log_freq == 0:
assert(len(wandb_init_run.history.row) == 16)
assert(
wandb_init_run.history.row['parameters/fc2.bias'].histogram[0] > 0)
assert(
wandb_init_run.history.row['gradients/fc2.bias'].histogram[0] > 0)
else:
assert(len(wandb_init_run.history.row) == 0)
wandb.log({"a": 2})
assert(len(wandb_init_run.history.rows) == 110)
# These were timing out in old python
@pytest.mark.skipif(sys.version_info < (3, 6), reason="Timeouts in older python versions")
def test_parameter_logging(wandb_init_run):
net = ConvNet()
wandb.watch(net, log="parameters", log_freq=1)
for i in range(3):
output = net(dummy_torch_tensor((64, 1, 28, 28)))
grads = torch.ones(64, 10)
output.backward(grads)
assert(len(wandb_init_run.history.row) == 8)
assert(
wandb_init_run.history.row['parameters/fc2.bias'].histogram[0] > 0)
wandb.log({"a": 2})
assert wandb_init_run.summary["graph_0"]
file_summary = json.loads(
open(os.path.join(wandb_init_run.dir, "wandb-summary.json")).read())
assert file_summary["graph_0"]
assert(len(wandb_init_run.history.rows) == 3)
@pytest.mark.skipif(sys.version_info < (3, 6), reason="Timeouts in older python versions")
def test_parameter_logging_freq(wandb_init_run):
net = ConvNet()
log_freq = 20
wandb.hook_torch(net, log="parameters", log_freq=log_freq)
for i in range(50):
        # To debug timeouts
print("i: %i, time: %s" % (i, time.time()))
output = net(dummy_torch_tensor((64, 1, 28, 28)))
grads = torch.ones(64, 10)
output.backward(grads)
if (i + 1) % log_freq == 0:
assert(len(wandb_init_run.history.row) == 8)
assert(
wandb_init_run.history.row['parameters/fc2.bias'].histogram[0] > 0)
else:
assert(len(wandb_init_run.history.row) == 0)
wandb.log({"a": 2})
assert(len(wandb_init_run.history.rows) == 50)
@pytest.mark.skipif(sys.version_info == (3, 6), reason="Timeouts in 3.6 for some reason...")
def test_simple_net():
net = ConvNet()
graph = wandb.wandb_torch.TorchGraph.hook_torch(net)
output = net.forward(dummy_torch_tensor((64, 1, 28, 28)))
grads = torch.ones(64, 10)
output.backward(grads)
graph = graph.to_json()
assert len(graph["nodes"]) == 5
assert graph["nodes"][0]['class_name'] == "Conv2d(1, 10, kernel_size=(5, 5), stride=(1, 1))"
assert graph["nodes"][0]['name'] == "conv1"
def test_sequence_net():
net = Sequence()
graph = wandb.wandb_torch.TorchGraph.hook_torch(net)
output = net.forward(dummy_torch_tensor(
(97, 100)))
output.backward(torch.zeros((97, 100)))
graph = graph.to_json()
assert len(graph["nodes"]) == 3
assert len(graph["nodes"][0]['parameters']) == 4
assert graph["nodes"][0]['class_name'] == "LSTMCell(1, 51)"
assert graph["nodes"][0]['name'] == "lstm1"
def test_multi_net(wandb_init_run):
net = ConvNet()
graphs = wandb.watch((net, net))
output = net.forward(dummy_torch_tensor((64, 1, 28, 28)))
grads = torch.ones(64, 10)
output.backward(grads)
graph1 = graphs[0].to_json()
graph2 = graphs[1].to_json()
assert len(graph1["nodes"]) == 5
assert len(graph2["nodes"]) == 5
def test_alex_net():
alex = models.AlexNet()
graph = wandb.wandb_torch.TorchGraph.hook_torch(alex)
output = alex.forward(dummy_torch_tensor((2, 3, 224, 224)))
grads = torch.ones(2, 1000)
output.backward(grads)
graph = graph.to_json()
# This was failing in CI with 21 nodes?!?
print(graph["nodes"])
assert len(graph["nodes"]) >= 20
assert graph["nodes"][0]['class_name'] == "Conv2d(3, 64, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))"
assert graph["nodes"][0]['name'] == "features.0"
def test_lstm(wandb_init_run):
if parse_version(torch.__version__) < parse_version('0.4'):
return
net = LSTMModel(2, 2)
graph = wandb.wandb_torch.TorchGraph.hook_torch(net)
hidden = net.init_hidden()
input_data = torch.ones((100)).type(torch.LongTensor)
output = net.forward(input_data, hidden)
grads = torch.ones(2, 1000)
graph = graph.to_json()
assert len(graph["nodes"]) == 3
assert graph["nodes"][2]['output_shape'] == [[1, 2]]
def test_resnet18():
resnet = models.resnet18()
graph = wandb.wandb_torch.TorchGraph.hook_torch(resnet)
output = resnet.forward(dummy_torch_tensor((2, 3, 224, 224)))
grads = torch.ones(2, 1000)
output.backward(grads)
graph = graph.to_json()
assert graph["nodes"][0]['class_name'] == "Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)"
def test_subnet():
subnet = SubNet("boxes")
graph = wandb.wandb_torch.TorchGraph.hook_torch(subnet)
output = subnet.forward(dummy_torch_tensor((256, 256, 3, 3)))
grads = torch.ones(256, 81, 4)
output.backward(grads)
graph = graph.to_json()
assert graph["nodes"][0]['class_name'] == "Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))"
def test_false_requires_grad(wandb_init_run):
"""When we set requires_grad to False, wandb must not
add a hook to the variable"""
net = ConvNet()
net.fc1.weight.requires_grad = False
wandb.watch(net, log_freq=1)
output = net(dummy_torch_tensor((64, 1, 28, 28)))
grads = torch.ones(64, 10)
output.backward(grads)
# 7 gradients are logged because fc1.weight is fixed
assert(len(wandb_init_run.history.row) == 7)
def test_nested_shape():
shape = wandb.wandb_torch.nested_shape([2,4,5])
assert shape == [[],[],[]]
shape = wandb.wandb_torch.nested_shape([dummy_torch_tensor((2,3)),dummy_torch_tensor((4,5))])
assert shape == [[2,3],[4,5]]
# create recursive lists of tensors (t3 includes itself)
t1 = dummy_torch_tensor((2,3))
t2 = dummy_torch_tensor((4,5))
t3 = [t1, t2]
t3.append(t3)
t3.append(t2)
shape = wandb.wandb_torch.nested_shape([t1, t2, t3])
assert shape == [[2, 3], [4, 5], [[2, 3], [4, 5], 0, [4, 5]]]
| 34.797945
| 124
| 0.607765
|
49d34e437b492dbe7d5892209a69062c98135c27
| 584
|
py
|
Python
|
loadMat.py
|
xvlab/pupil-extraction
|
38e6f3c93ea073dac8a74e41c0f230552a9a927b
|
[
"MIT"
] | null | null | null |
loadMat.py
|
xvlab/pupil-extraction
|
38e6f3c93ea073dac8a74e41c0f230552a9a927b
|
[
"MIT"
] | null | null | null |
loadMat.py
|
xvlab/pupil-extraction
|
38e6f3c93ea073dac8a74e41c0f230552a9a927b
|
[
"MIT"
] | null | null | null |
import scipy.io
import numpy as np
path = "Thy1-GCaMP6s-M5-K-airpuff-0707"
data = scipy.io.loadmat('\\\\192.168.3.146\\public\\临时文件\\xpy\\' + path + '\\info.mat') # load the .mat file
strengthData = scipy.io.loadmat('\\\\192.168.3.146\\public\\临时文件\\xpy\\' + path + '\\strength.mat')
print(strengthData.keys()) # list all variables in the .mat file
print(data.keys()) # list all variables in the .mat file
# print(float(data['sync1'][0][0]))
# print(data['brainState_01'][int(682 / 5)][0])
print(strengthData['puff_list'][0])
# offset = 682 # FOV recording starts after this frame, i.e. the first sync1 sample corresponds to frame 682
strengthSeries = np.array(strengthData['puff_list'][0])
| 36.5
| 99
| 0.686644
|
5d0416a6d2705942f55bc7540b68c517909b75b8
| 12,425
|
py
|
Python
|
src/m2ee/munin.py
|
pommi/m2ee-tools
|
20f1cd2ca4cf2bf789d0f47d87d873299f8dff31
|
[
"BSD-3-Clause"
] | null | null | null |
src/m2ee/munin.py
|
pommi/m2ee-tools
|
20f1cd2ca4cf2bf789d0f47d87d873299f8dff31
|
[
"BSD-3-Clause"
] | null | null | null |
src/m2ee/munin.py
|
pommi/m2ee-tools
|
20f1cd2ca4cf2bf789d0f47d87d873299f8dff31
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Copyright (c) 2009-2013, Mendix bv
# All Rights Reserved.
#
# http://www.mendix.com/
#
import pwd
import os
import string
from m2ee.log import logger
# Use json if available. If not (python 2.5) we need to import the simplejson
# module instead, which has to be available.
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError, ie:
logger.critical("Failed to import json as well as simplejson. If "
"using python 2.5, you need to provide the simplejson "
"module in your python library path.")
raise
config_funcs = {}
values_funcs = {}
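# Fallback statistics skeleton used for munin 'config' output when no cached statistics can be read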
default_stats = {
"languages": ["en_US"],
"entities": 0,
"threadpool": {
"threads_priority": 0,
"max_threads": 0,
"min_threads": 0,
"max_idle_time_s": 0,
"max_queued": -0,
"threads": 0,
"idle_threads": 0,
"max_stop_time_s": 0
},
"memory": {
"init_heap": 0,
"code": 0,
"used_heap": 0,
"survivor": 0,
"max_nonheap": 0,
"committed_heap": 0,
"tenured": 0,
"permanent": 0,
"used_nonheap": 0,
"eden": 0,
"init_nonheap": 0,
"committed_nonheap": 0,
"max_heap": 0
},
"sessions": {
"named_users": 0,
"anonymous_sessions": 0,
"named_user_sessions": 0,
"user_sessions": {}
},
"requests": {
"": 0,
"debugger/": 0,
"ws/": 0,
"xas/": 0,
"ws-doc/": 0,
"file": 0
},
"cache": {
"total_count": 0,
"disk_count": 0,
"memory_count": 0
},
"jetty": {
"max_idle_time_s": 0,
"current_connections": 0,
"max_connections": 0,
"max_idle_time_s_low_resources": 0
},
"connectionbus": {
"insert": 0,
"transaction": 0,
"update": 0,
"select": 0,
"delete": 0
}
}
def print_all(client, config, options, name, print_config=False):
if name == "":
name = pwd.getpwuid(os.getuid())[0]
if print_config:
funcs = config_funcs
else:
funcs = values_funcs
if options is None:
options = {}
# place to store last known good statistics result to be used for munin
# config when the app is down or b0rked
config_cache = options.get('config_cache', os.path.join(
config.get_default_dotm2ee_directory(), 'munin-cache.json'))
graph_total_named_users = options.get('graph_total_named_users', True)
# TODO: even better error/exception handling
stats = {}
try:
logger.debug("trying to fetch runtime/server statistics")
m2eeresponse = client.runtime_statistics()
if not m2eeresponse.has_error():
stats.update(m2eeresponse.get_feedback())
m2eeresponse = client.server_statistics()
if not m2eeresponse.has_error():
stats.update(m2eeresponse.get_feedback())
if type(stats['requests']) == list:
# convert back to normal, whraagh
bork = {}
for x in stats['requests']:
bork[x['name']] = x['value']
stats['requests'] = bork
# write last-known-good stats to cache
try:
file(config_cache, 'w+').write(json.dumps(stats))
except Exception, e:
logger.error("Error writing munin config cache to %s: %s",
(config_cache, e))
except Exception, e:
# assume something bad happened, like
# socket.error: [Errno 111] Connection refused
logger.error("Error fetching runtime/server statstics: %s", e)
if print_config:
logger.debug("Loading munin cache from %s" % config_cache)
try:
fd = open(config_cache)
stats = json.loads(fd.read())
fd.close()
except IOError, e:
logger.error("Error reading munin cache file %s: %s" %
(config_cache, e))
stats = default_stats
except ValueError, e:
logger.error("Error parsing munin cache file %s: %s" %
(config_cache, e))
return
else:
return
# requests
print("multigraph mxruntime_requests_%s" % name)
funcs['requests'](name, stats)
print
# connectionbus
if "connectionbus" in stats:
print("multigraph mxruntime_connectionbus_%s" % name)
funcs['connectionbus'](name, stats)
print
# concurrent user sessions
print("multigraph mxruntime_sessions_%s" % name)
if type(stats['sessions']) != dict:
funcs['sessions_pre254'](name, stats)
else:
funcs['sessions'](name, stats, graph_total_named_users)
print
# jvmheap
print("multigraph mxruntime_jvmheap_%s" % name)
funcs['jvmheap'](name, stats)
print
# threadpool
if "threadpool" in stats:
print("multigraph m2eeserver_threadpool_%s" % name)
funcs['threadpool'](name, stats)
print
def print_requests_config(name, stats):
print("""graph_args --base 1000 -l 0
graph_vlabel Requests per second
graph_title %s - MxRuntime Requests
graph_category Mendix
graph_info This graph shows the amount of requests this MxRuntime handles""" %
name)
for sub in stats['requests'].iterkeys():
substrip = '_' + string.strip(sub, '/').replace('-', '_')
if sub != '':
subname = sub
else:
subname = '/'
print("""%s.label %s
%s.draw LINE2
%s.info amount of requests this MxRuntime handles on %s
%s.type DERIVE
%s.min 0""" % (substrip, subname, substrip, substrip, subname, substrip,
substrip))
def print_requests_values(name, stats):
for sub, count in stats['requests'].iteritems():
substrip = '_' + string.strip(sub, '/').replace('-', '_')
print("%s.value %s" % (substrip, count))
config_funcs['requests'] = print_requests_config
values_funcs['requests'] = print_requests_values
def print_connectionbus_config(name, stats):
print("""graph_args --base 1000 -l 0
graph_vlabel Statements per second
graph_title %s - Database Queries
graph_category Mendix
graph_info This graph shows the amount of executed transactions and queries"""
% name)
for s in stats['connectionbus'].iterkeys():
print("""%s.label %ss
%s.draw LINE2
%s.info amount of %ss
%s.type DERIVE
%s.min 0""" % (s, s, s, s, s, s, s))
def print_connectionbus_values(name, stats):
for s, count in stats['connectionbus'].iteritems():
print("%s.value %s" % (s, count))
config_funcs['connectionbus'] = print_connectionbus_config
values_funcs['connectionbus'] = print_connectionbus_values
def print_sessions_pre254_config(name, stats):
"""
concurrent user sessions for mxruntime < 2.5.4
named_user_sessions counts names as well as anonymous sessions
!! you stil need to rename the rrd files in /var/lib/munin/ !!
"""
print("""graph_args --base 1000 -l 0
graph_vlabel Concurrent user sessions
graph_title %s - MxRuntime Users
graph_category Mendix
graph_info This graph shows the amount of concurrent user sessions
named_user_sessions.label concurrent user sessions
named_user_sessions.draw LINE2
named_user_sessions.info amount of concurrent user sessions""" % name)
def print_sessions_pre254_values(options, stats):
print("named_user_sessions.value %s" % stats['sessions'])
config_funcs['sessions_pre254'] = print_sessions_pre254_config
values_funcs['sessions_pre254'] = print_sessions_pre254_values
def print_sessions_config(name, stats, graph_total_named_users):
print("""graph_args --base 1000 -l 0
graph_vlabel Concurrent user sessions
graph_title %s - MxRuntime Users
graph_category Mendix
graph_info This graph shows the amount of user accounts and sessions""" % name)
if graph_total_named_users:
print("""named_users.label named users
named_users.draw LINE1
named_users.info total amount of named users in the application""")
print("""named_user_sessions.label concurrent named user sessions
named_user_sessions.draw LINE2
named_user_sessions.info amount of concurrent named user sessions
anonymous_sessions.label concurrent anonymous user sessions
anonymous_sessions.draw LINE2
anonymous_sessions.info amount of concurrent anonymous user sessions""")
def print_sessions_values(name, stats, graph_total_named_users):
if graph_total_named_users:
print("named_users.value %s" % stats['sessions']['named_users'])
print("named_user_sessions.value %s" %
stats['sessions']['named_user_sessions'])
print("anonymous_sessions.value %s" %
stats['sessions']['anonymous_sessions'])
config_funcs['sessions'] = print_sessions_config
values_funcs['sessions'] = print_sessions_values
def print_jvmheap_config(name, stats):
print("""graph_args --base 1024 -l 0
graph_vlabel Bytes
graph_title %s - JVM Memory Usage
graph_category Mendix
graph_info This graph shows memory pool information on the Java JVM
permanent.label permanent generation
permanent.draw AREA
permanent.info Non-heap memory used to store bytecode versions of classes
code.label code cache
code.draw STACK
code.info Non-heap memory used for compilation and storage of native code
tenured.label tenured generation
tenured.draw STACK
tenured.info Old generation of the heap that holds long living objects
survivor.label survivor space
survivor.draw STACK
survivor.info Survivor Space of the Young Generation
eden.label eden space
eden.draw STACK
eden.info Objects are created in Eden
free.label unused
free.draw STACK
free.info Unused memory allocated for use by this JVM
committed.label allocated memory
committed.draw LINE2
committed.info Allocated size of memory for all memory pools
max.label max memory
max.draw LINE1
max.info Total maximum size of memory that could be allocated for this JVM""" %
name)
def print_jvmheap_values(name, stats):
memory = stats['memory']
used = 0
for k in ['permanent', 'code', 'tenured', 'survivor', 'eden']:
used = used + memory[k]
print('%s.value %s' % (k, memory[k]))
committed = 0
free = 0
maxx = 0
committed = memory['committed_nonheap'] + memory['committed_heap']
free = committed - used
maxx = memory['max_nonheap'] + memory['max_heap']
print("free.value %s" % free)
print("committed.value %s" % committed)
print("max.value %s" % maxx)
config_funcs['jvmheap'] = print_jvmheap_config
values_funcs['jvmheap'] = print_jvmheap_values
def print_threadpool_config(name, stats):
print("""graph_args --base 1000 -l 0
graph_vlabel Jetty Threadpool
graph_title %s - Jetty Threadpool
graph_category Mendix
graph_info This graph shows threadpool usage information on Jetty
min_threads.label min threads
min_threads.draw LINE1
min_threads.info Minimum number of threads
max_threads.label max threads
max_threads.draw LINE1
max_threads.info Maximum number of threads
queue_size.label queue size
queue_size.draw STACK
queue_size.info Job queue size
threadpool_size.label threadpool size
threadpool_size.draw LINE2
threadpool_size.info Current threadpool size
active_threads.label active threads
active_threads.draw LINE2
active_threads.info Active thread count
max_threads_and_queue.label max threads and queue
max_threads_and_queue.draw LINE1
max_threads_and_queue.info Maximum number of threads and queue size""" % name)
def print_threadpool_values(name, stats):
min_threads = stats['threadpool']['min_threads']
max_threads = stats['threadpool']['max_threads']
#queue_size = stats['threadpool']['??']
threadpool_size = stats['threadpool']['threads']
idle_threads = stats['threadpool']['idle_threads']
max_queued = stats['threadpool']['max_queued']
active_threads = threadpool_size - idle_threads
print("min_threads.value %s" % min_threads)
print("max_threads.value %s" % max_threads)
#print("queue_size.value %s" % queue_size)
print("threadpool_size.value %s" % threadpool_size)
print("active_threads.value %s" % active_threads)
if max_queued != -1:
        print("max_threads_and_queue.value %s" % (max_threads + max_queued))
config_funcs['threadpool'] = print_threadpool_config
values_funcs['threadpool'] = print_threadpool_values
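# Illustrative sketch (not part of the original plugin): how the config_funcs and
# values_funcs registries above might be driven once 'stats' has been fetched; the
# plugin name 'myapp' and the contents of 'stats' are assumptions for demonstration.
def render(graph, action, name, stats):
    # Most printers take (name, stats); the 'sessions' printers additionally take a
    # graph_total_named_users flag and would need special-casing here.
    funcs = config_funcs if action == 'config' else values_funcs
    funcs[graph](name, stats)
# e.g. render('jvmheap', 'config', 'myapp', stats) followed by render('jvmheap', 'values', 'myapp', stats)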
| 31.455696
| 79
| 0.666962
|
4ffe23f99deadf3fb9a441741a1dc3656e5b7f04
| 1,688
|
py
|
Python
|
app/users/serializers.py
|
kmnkit/recipe-app-API
|
50189cf19e6de40b7323055b73d427339a8f06a4
|
[
"MIT"
] | null | null | null |
app/users/serializers.py
|
kmnkit/recipe-app-API
|
50189cf19e6de40b7323055b73d427339a8f06a4
|
[
"MIT"
] | null | null | null |
app/users/serializers.py
|
kmnkit/recipe-app-API
|
50189cf19e6de40b7323055b73d427339a8f06a4
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model, authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers as sz
class UserSerializer(sz.ModelSerializer):
"""Serializer for the users object"""
class Meta:
model = get_user_model()
fields = ('email', 'password', 'name')
extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}
def create(self, validated_data):
"""Create a new user with encrypted password and return it"""
return get_user_model().objects.create_user(**validated_data)
def update(self, instance, validated_data):
"""Update a user, setting the password correctly and return it"""
password = validated_data.pop('password', None)
user = super().update(instance, validated_data)
if password:
user.set_password(password)
user.save()
return user
class AuthTokenSerializer(sz.Serializer):
"""Serializer for the user authentication object"""
email = sz.CharField()
password = sz.CharField(
style={'input_type': 'password'},
trim_whitespace=False
)
def validate(self, attrs):
"""Validate and authenticate the user"""
email = attrs.get('email')
password = attrs.get('password')
user = authenticate(
request=self.context.get('request'),
username=email,
password=password
)
if not user:
msg = _('Unable to authenticate with provided credentials')
raise sz.ValidationError(msg, code='authentication')
attrs['user'] = user
return attrs
| 31.259259
| 74
| 0.637441
|
85a2ac784cbbb754006c78b62d1deea41e4130e1
| 593
|
py
|
Python
|
share/extended-cpt/analysis/trajectories/plot_trajectories.py
|
ucla-pbpl/pbpl-compton
|
a5afcdffc778f61a4726d7c5a231af2bca466900
|
[
"MIT"
] | 2
|
2019-09-24T23:52:58.000Z
|
2020-06-03T20:59:33.000Z
|
share/extended-cpt/analysis/trajectories/plot_trajectories.py
|
ucla-pbpl/pbpl-compton
|
a5afcdffc778f61a4726d7c5a231af2bca466900
|
[
"MIT"
] | null | null | null |
share/extended-cpt/analysis/trajectories/plot_trajectories.py
|
ucla-pbpl/pbpl-compton
|
a5afcdffc778f61a4726d7c5a231af2bca466900
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import sys
import argparse
import numpy as np
from pbpl.common.units import *
import h5py
from scipy.linalg import norm
from collections import namedtuple
import ezdxf
def dump_dxf():
A = np.loadtxt(
        'trajectories-12600.txt', usecols=(0,2,10), dtype=float, skiprows=7)
dwg = ezdxf.new('R2000')
msp = dwg.modelspace()
for i in range(int(A[:,2].max())+1):
mask = A[:,2] == i
x = A[mask,0]*meter
y = A[mask,1]*meter
msp.add_lwpolyline(np.array((x/mm, y/mm)).T)
dwg.saveas('trajectories-12600.dxf')
dump_dxf()
| 24.708333
| 79
| 0.639123
|
307adb503d9917cab1433dc6aebd1db2667c26cb
| 5,001
|
py
|
Python
|
nipype/interfaces/niftyseg/tests/test_maths.py
|
sebastientourbier/nipype
|
99c5904176481520c5bf42a501aae1a12184e672
|
[
"Apache-2.0"
] | null | null | null |
nipype/interfaces/niftyseg/tests/test_maths.py
|
sebastientourbier/nipype
|
99c5904176481520c5bf42a501aae1a12184e672
|
[
"Apache-2.0"
] | null | null | null |
nipype/interfaces/niftyseg/tests/test_maths.py
|
sebastientourbier/nipype
|
99c5904176481520c5bf42a501aae1a12184e672
|
[
"Apache-2.0"
] | null | null | null |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import pytest
from nipype.interfaces.niftyreg import no_nifty_package, get_custom_path
from nipype.interfaces.niftyseg import (UnaryMaths, BinaryMaths,
BinaryMathsInteger, TupleMaths,
Merge)
from nipype.testing import example_data
@pytest.mark.skipif(no_nifty_package(cmd='seg_maths'),
reason="niftyseg is not installed")
def test_unary_maths():
# Create a node object
unarym = UnaryMaths()
# Check if the command is properly defined
cmd = get_custom_path('seg_maths', env_dir='NIFTYSEGDIR')
assert unarym.cmd == cmd
# test raising error with mandatory args absent
with pytest.raises(ValueError):
unarym.run()
# Assign some input data
in_file = example_data('im1.nii')
unarym.inputs.in_file = in_file
unarym.inputs.operation = 'otsu'
unarym.inputs.output_datatype = 'float'
expected_cmd = '{cmd} {in_file} -otsu -odt float {out_file}'.format(
cmd=cmd,
in_file=in_file,
out_file='im1_otsu.nii')
assert unarym.cmdline == expected_cmd
@pytest.mark.skipif(no_nifty_package(cmd='seg_maths'),
reason="niftyseg is not installed")
def test_binary_maths():
# Create a node object
binarym = BinaryMaths()
# Check if the command is properly defined
cmd = get_custom_path('seg_maths', env_dir='NIFTYSEGDIR')
assert binarym.cmd == cmd
# test raising error with mandatory args absent
with pytest.raises(ValueError):
binarym.run()
# Assign some input data
in_file = example_data('im1.nii')
binarym.inputs.in_file = in_file
binarym.inputs.operand_value = 2.0
binarym.inputs.operation = 'sub'
binarym.inputs.output_datatype = 'float'
cmd_tmp = '{cmd} {in_file} -sub 2.00000000 -odt float {out_file}'
expected_cmd = cmd_tmp.format(
cmd=cmd,
in_file=in_file,
out_file='im1_sub.nii')
assert binarym.cmdline == expected_cmd
@pytest.mark.skipif(no_nifty_package(cmd='seg_maths'),
reason="niftyseg is not installed")
def test_int_binary_maths():
# Create a node object
ibinarym = BinaryMathsInteger()
# Check if the command is properly defined
cmd = get_custom_path('seg_maths', env_dir='NIFTYSEGDIR')
assert ibinarym.cmd == cmd
# test raising error with mandatory args absent
with pytest.raises(ValueError):
ibinarym.run()
# Assign some input data
in_file = example_data('im1.nii')
ibinarym.inputs.in_file = in_file
ibinarym.inputs.operand_value = 2
ibinarym.inputs.operation = 'dil'
ibinarym.inputs.output_datatype = 'float'
expected_cmd = '{cmd} {in_file} -dil 2 -odt float {out_file}'.format(
cmd=cmd,
in_file=in_file,
out_file='im1_dil.nii')
assert ibinarym.cmdline == expected_cmd
@pytest.mark.skipif(no_nifty_package(cmd='seg_maths'),
reason="niftyseg is not installed")
def test_tuple_maths():
# Create a node object
tuplem = TupleMaths()
# Check if the command is properly defined
cmd = get_custom_path('seg_maths', env_dir='NIFTYSEGDIR')
assert tuplem.cmd == cmd
# test raising error with mandatory args absent
with pytest.raises(ValueError):
tuplem.run()
# Assign some input data
in_file = example_data('im1.nii')
op_file = example_data('im2.nii')
tuplem.inputs.in_file = in_file
tuplem.inputs.operation = 'lncc'
tuplem.inputs.operand_file1 = op_file
tuplem.inputs.operand_value2 = 2.0
tuplem.inputs.output_datatype = 'float'
cmd_tmp = '{cmd} {in_file} -lncc {op} 2.00000000 -odt float {out_file}'
expected_cmd = cmd_tmp.format(
cmd=cmd,
in_file=in_file,
op=op_file,
out_file='im1_lncc.nii')
assert tuplem.cmdline == expected_cmd
@pytest.mark.skipif(no_nifty_package(cmd='seg_maths'),
reason="niftyseg is not installed")
def test_merge():
# Create a node object
merge = Merge()
# Check if the command is properly defined
cmd = get_custom_path('seg_maths', env_dir='NIFTYSEGDIR')
assert merge.cmd == cmd
# test raising error with mandatory args absent
with pytest.raises(ValueError):
merge.run()
# Assign some input data
in_file = example_data('im1.nii')
file1 = example_data('im2.nii')
file2 = example_data('im3.nii')
merge.inputs.in_file = in_file
merge.inputs.merge_files = [file1, file2]
merge.inputs.dimension = 2
merge.inputs.output_datatype = 'float'
cmd_tmp = '{cmd} {in_file} -merge 2 2 {f1} {f2} -odt float {out_file}'
expected_cmd = cmd_tmp.format(
cmd=cmd,
in_file=in_file,
f1=file1,
f2=file2,
out_file='im1_merged.nii')
assert merge.cmdline == expected_cmd
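# Illustrative note (not part of the original test module): with niftyseg installed
# and NIFTYSEGDIR set, these checks can be run with pytest, e.g.
#   pytest nipype/interfaces/niftyseg/tests/test_maths.py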
| 29.417647
| 75
| 0.658868
|
5728e9e08bb9b0c6002a5b6bb8415e78ef4fdd11
| 4,552
|
py
|
Python
|
detect.py
|
Promech2020/COVID_19_SDC_YOLOV4
|
7ba3d110f72f315d8ee2ec68db773d3d7d399774
|
[
"MIT"
] | null | null | null |
detect.py
|
Promech2020/COVID_19_SDC_YOLOV4
|
7ba3d110f72f315d8ee2ec68db773d3d7d399774
|
[
"MIT"
] | null | null | null |
detect.py
|
Promech2020/COVID_19_SDC_YOLOV4
|
7ba3d110f72f315d8ee2ec68db773d3d7d399774
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
from absl import app, flags, logging
from absl.flags import FLAGS
import core.utils as utils
from core.yolov4 import filter_boxes
from tensorflow.python.saved_model import tag_constants
from PIL import Image
import cv2
import numpy as np
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
from helping_functions import get_centroids, get_human_box_detection, get_points_from_box
flags.DEFINE_string('framework', 'tf', '(tf, tflite, trt)')
flags.DEFINE_string('weights', './checkpoints/yolov4-416',
'path to weights file')
flags.DEFINE_integer('size', 416, 'resize images to')
flags.DEFINE_boolean('tiny', False, 'yolo or yolo-tiny')
flags.DEFINE_string('model', 'yolov4', 'yolov3 or yolov4')
flags.DEFINE_string('image', './data/kite.jpg', 'path to input image')
flags.DEFINE_string('output', 'result.png', 'path to output image')
flags.DEFINE_float('iou', 0.45, 'iou threshold')
flags.DEFINE_float('score', 0.25, 'score threshold')
def main(_argv):
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)
input_size = FLAGS.size
image_path = FLAGS.image
original_image = cv2.imread(image_path)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
# image_data = utils.image_preprocess(np.copy(original_image), [input_size, input_size])
image_data = cv2.resize(original_image, (input_size, input_size))
image_data = image_data / 255.
# image_data = image_data[np.newaxis, ...].astype(np.float32)
images_data = []
for i in range(1):
images_data.append(image_data)
images_data = np.asarray(images_data).astype(np.float32)
if FLAGS.framework == 'tflite':
interpreter = tf.lite.Interpreter(model_path=FLAGS.weights)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
print(input_details)
print(output_details)
interpreter.set_tensor(input_details[0]['index'], images_data)
interpreter.invoke()
pred = [interpreter.get_tensor(output_details[i]['index']) for i in range(len(output_details))]
if FLAGS.model == 'yolov3' and FLAGS.tiny == True:
boxes, pred_conf = filter_boxes(pred[1], pred[0], score_threshold=0.25, input_shape=tf.constant([input_size, input_size]))
else:
boxes, pred_conf = filter_boxes(pred[0], pred[1], score_threshold=0.25, input_shape=tf.constant([input_size, input_size]))
else:
saved_model_loaded = tf.saved_model.load(FLAGS.weights, tags=[tag_constants.SERVING])
infer = saved_model_loaded.signatures['serving_default']
batch_data = tf.constant(images_data)
pred_bbox = infer(batch_data)
for key, value in pred_bbox.items():
boxes = value[:, :, 0:4]
pred_conf = value[:, :, 4:]
boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
scores=tf.reshape(
pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
max_output_size_per_class=50,
max_total_size=50,
iou_threshold=FLAGS.iou,
score_threshold=FLAGS.score
)
final_boxes = boxes.numpy()
final_scores = scores.numpy()
final_classes = classes.numpy()
array_boxes_detected = get_human_box_detection(final_boxes,final_scores[0].tolist(),final_classes[0].tolist(),original_image.shape[0],original_image.shape[1])
print(array_boxes_detected)
#Defining red color rgb value
COLOR_RED = (0, 0, 255)
for i,items in enumerate(array_boxes_detected):
first_point = array_boxes_detected[i][0]
second_point = array_boxes_detected[i][1]
third_point = array_boxes_detected[i][2]
fourth_point = array_boxes_detected[i][3]
cv2.rectangle(original_image,(second_point,first_point),(fourth_point,third_point),COLOR_RED,2)
image = cv2.cvtColor(np.array(original_image), cv2.COLOR_BGR2RGB)
cv2.imshow("final", image)
cv2.waitKey(0)
if __name__ == '__main__':
try:
app.run(main)
except SystemExit:
pass
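# Illustrative invocation (not part of the original script); the paths follow the
# flag defaults declared above and are assumptions about the local checkout:
#   python detect.py --weights ./checkpoints/yolov4-416 --size 416 --model yolov4 \
#       --image ./data/kite.jpg --output result.png --score 0.25 --iou 0.45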
| 43.352381
| 162
| 0.707162
|
c08effc69b8386306a538fb0d8e135253d8c6f6b
| 30,081
|
py
|
Python
|
character_ui.py
|
nextr3d/Character-UI
|
ae15dcd2c83d912f8aa465ec82ea89b8c7eaf43f
|
[
"Apache-2.0"
] | 26
|
2020-12-02T09:16:53.000Z
|
2022-02-13T13:05:59.000Z
|
character_ui.py
|
nextr3d/Character-UI
|
ae15dcd2c83d912f8aa465ec82ea89b8c7eaf43f
|
[
"Apache-2.0"
] | 15
|
2021-04-14T21:10:42.000Z
|
2022-02-21T22:33:03.000Z
|
character_ui.py
|
nextr3d/Character-UI
|
ae15dcd2c83d912f8aa465ec82ea89b8c7eaf43f
|
[
"Apache-2.0"
] | 4
|
2021-04-13T07:17:50.000Z
|
2022-01-13T15:15:22.000Z
|
import bpy, time, re
from bpy.types import Operator, Panel, PropertyGroup
from bpy.utils import register_class, unregister_class
from bpy.props import EnumProperty, BoolProperty, StringProperty, IntProperty, FloatVectorProperty
"""
available variables
character_id
character_id_key
rig_layers_key
links_key
custom_label
"""
#script variables
custom_prefix = "CharacterUI_"
attributes_key = "%satt_%s"%(custom_prefix, character_id)
bl_info = {
"name": "Character UI",
"description": "Script rendering UI for your character",
"author": "Nextr3D",
"version": (5, 0, 1),
"blender": (3, 0, 0)
}
class CharacterUI(PropertyGroup):
@staticmethod
def initialize():
ch = CharacterUIUtils.get_character()
key = "%s%s"%(custom_prefix, character_id)
if ch:
if key not in ch:
ch[key] = {}
if "body_object" in ch.data and ch.data["body_object"]:
CharacterUI.remove_body_modifiers_drivers(ch)
CharacterUI.remove_body_shape_keys_drivers(ch)
if "hair_collection" in ch.data and ch.data["hair_collection"]:
CharacterUI.build_hair(ch, key)
if "outfits_collection" in ch.data and ch.data["outfits_collection"]:
CharacterUI.build_outfits(ch, key)
@classmethod
def build_outfits(self, ch, key):
"Builds outfit selector for the UI"
        data = getattr(ch, key)
outfits = ch.data["outfits_collection"].children.keys()
options = self.create_enum_options(outfits, "Show outfit: ")
default = 0
if "outfits_enum" in data:
default = data["outfits_enum"]
try:
self.ui_setup_enum("outfits_enum", CharacterUI.update_hair_by_outfit, "Outfits", "Changes outfits", options, default)
except:
pass
self.ui_build_outfit_buttons(ch, key)
@classmethod
def remove_body_modifiers_drivers(self, ch):
"removes drivers from modifiers"
if "character_ui_masks" in ch.data:
for m in ch.data["character_ui_masks"]:
if m["modifier"] in ch.data["body_object"].modifiers:
ch.data["body_object"].modifiers[m["modifier"]].driver_remove("show_viewport")
ch.data["body_object"].modifiers[m["modifier"]].driver_remove("show_render")
@classmethod
def remove_body_shape_keys_drivers(self, ch):
"removes drivers from shape keys"
if "character_ui_shape_keys" in ch.data:
for s in ch.data["character_ui_shape_keys"]:
if s["shape_key"] in ch.data["body_object"].data.shape_keys.key_blocks:
ch.data["body_object"].data.shape_keys.key_blocks[s["shape_key"]].driver_remove("value")
@classmethod
def ui_build_outfit_buttons(self, ch, key):
"Builds individual button for outfit pieces, their locks and creates drivers"
        data = getattr(ch, key)
index = 0
for collection in ch.data["outfits_collection"].children:
objects = collection.objects
for o in objects:
default = False
default_lock = False
name = o.name_full.replace(" ", "_")+"_outfit_toggle"
if name in data and name+"_lock" in data:
default = data[name]
default_lock = data[name+"_lock"]
self.ui_setup_toggle(name, None, o.name_full, "Toggles outfit piece on and off", default)
self.ui_setup_toggle(name+"_lock", None, "", "Locks the outfit piece to be visible even when changing outfits", default_lock)
variables = [{"name": "chui_outfit", "path": "%s.outfits_enum"%(key)},{"name": "chui_object", "path": "%s.%s"%(key,name)}]
lock_expression = "chui_lock==1"
expression = "not (chui_object == 1 and (chui_outfit ==%i or chui_lock==1))"%(index)
is_top_child = False
if o.parent:
                if not o.users_collection[0] == o.parent.users_collection[0]: # parent is in a different collection, so this object is the top child
is_top_child = True
else:
is_top_child = True
if is_top_child:
variables.append({"name": "chui_lock", "path": "%s.%s_lock"%(key,name)})
else:
expression = "not (chui_object == 1 and chui_parent == 0)"
variables.append({"name": "chui_parent", "path": "hide_viewport", "driver_id": o.parent})
CharacterUIUtils.create_driver(ch, o, 'hide_viewport', expression, variables)
CharacterUIUtils.create_driver(ch, o, 'hide_render', expression, variables)
if "character_ui_masks" in ch.data and "body_object" in ch.data:
if ch.data["body_object"]:
body = ch.data["body_object"]
for mask in ch.data["character_ui_masks"]:
if mask["driver_id"] == o and mask["modifier"] in body.modifiers:
CharacterUIUtils.create_driver(o, body.modifiers[mask["modifier"]], "show_viewport", "chui_object==0", [{"name": "chui_object", "path":"hide_viewport" }])
CharacterUIUtils.create_driver(o, body.modifiers[mask["modifier"]], "show_render", "chui_object==0", [{"name": "chui_object", "path":"hide_render" }])
if "character_ui_shape_keys" in ch.data and "body_object" in ch.data:
if ch.data["body_object"]:
body = ch.data["body_object"]
for shape_key in ch.data["character_ui_shape_keys"]:
if shape_key["driver_id"] == o and shape_key["shape_key"] in body.data.shape_keys.key_blocks:
CharacterUIUtils.create_driver(o, body.data.shape_keys.key_blocks[shape_key["shape_key"]], "value", "chui_object==0", [{"name": "chui_object", "path":"hide_render" }] )
index += 1
@classmethod
def build_hair(self, ch, key):
        data = getattr(ch, key)
default_value = 0
if 'hair_lock' in data:
default_value = data['hair_lock']
self.ui_setup_toggle("hair_lock", None, "", "Locks hair so it's not changed by the outfit", default_value)
hair_collection = ch.data["hair_collection"]
items = [*hair_collection.children, *hair_collection.objects]
names = [o.name for o in items]
def create_hair_drivers(target, index):
CharacterUIUtils.create_driver(ch, target, 'hide_viewport', "characterui_hair!=%i"%(index), [{"name": "characterui_hair", "path": "%s.hair_enum"%(key)}])
CharacterUIUtils.create_driver(ch, target, 'hide_render', "characterui_hair!=%i"%(index), [{"name": "characterui_hair", "path": "%s.hair_enum"%(key)}])
def recursive_hair(hair_items, index = -1):
for i in enumerate(hair_items):
if hasattr(i[1], "type"):
create_hair_drivers(i[1], i[0] if index < 0 else index)
else:
recursive_hair([*i[1].children, *i[1].objects], i[0])
recursive_hair(items)
default = 0
if "hair_enum" in data:
default = data["hair_enum"]
try:
self.ui_setup_enum('hair_enum', None, "Hairstyle", "Switch between different hairdos", self.create_enum_options(names, "Enables: "), default)
except:
pass
@classmethod
def ui_setup_toggle(self, property_name, update_function, name='Name', description='Empty description', default=False):
"method for easier creation of toggles (buttons)"
props = CharacterUIUtils.get_props_from_character()
props[property_name] = default
prop = BoolProperty(
name=name,
description=description,
update=update_function,
default=default
)
setattr(self, property_name, prop)
@classmethod
def ui_setup_enum(self, property_name, update_function, name="Name", description="Empty description", items=[], default=0):
"method for easier creation of enums (selects)"
props = CharacterUIUtils.get_props_from_character()
props[property_name] = default
prop = EnumProperty(
name=name,
description=description,
items=items,
update=update_function,
default='OP'+str(default)
)
setattr(self, property_name, prop)
@staticmethod
def create_enum_options(array, description_prefix="Empty description for:"):
"method for creating options for blender UI enums"
items = []
for array_item in array:
items.append(("OP"+str(array.index(array_item)),array_item, description_prefix+" "+array_item))
return items
@staticmethod
def update_hair_by_outfit(self, context):
ch = CharacterUIUtils.get_character()
if ch:
props = CharacterUIUtils.get_props_from_character()
outfit_name = ch.data["outfits_collection"].children[props["outfits_enum"]].name
if "hair_collection" in ch.data and ch.data["hair_collection"]:
if not props["hair_lock"]:
hairstyles = [*ch.data["hair_collection"].children, *ch.data["hair_collection"].objects]
for hairstyle in enumerate(hairstyles):
if outfit_name in hairstyle[1].name:
props["hair_enum"] = hairstyle[0]
class CharacterUIUtils:
@staticmethod
def get_character():
for o in bpy.data.objects:
            if o.data is not None: # skip empties (objects without a data block)
if character_id_key in o.data:
if o.data[character_id_key] == character_id:
return o
return False
@staticmethod
def get_props_from_character():
ch = CharacterUIUtils.get_character()
return getattr(ch, "%s%s"%(custom_prefix, character_id))
@staticmethod
    def create_driver(driver_id,driver_target, driver_path, driver_expression, variables):
        "TODO: the same exact code is in the add-on; make sure it exists in only one place in the codebase"
driver_target.driver_remove(driver_path)
driver = driver_target.driver_add(driver_path)
def setup_driver(driver, addition_path = ""):
driver.type = "SCRIPTED"
driver.expression = driver_expression
for variable in variables:
var = driver.variables.new()
var.name = variable["name"]
var.targets[0].id_type = driver_id.rna_type.name.upper()
var.targets[0].id = variable["driver_id"] if "driver_id" in variable else driver_id
var.targets[0].data_path = "%s%s"%(variable["path"], addition_path)
if type(driver) == list:
for d in enumerate(driver):
setup_driver(d[1].driver,"[%i]"%(d[0]))
else:
setup_driver(driver.driver)
@staticmethod
def safe_render(parent, data, prop, **kwargs):
if hasattr(data, prop):
parent.prop(data, prop, **kwargs)
@staticmethod
def render_outfit_piece(o, element, props, is_child = False):
"recursively render outfit piece buttons"
row = element.row(align=True)
name = o.name.replace(" ", "_")+"_outfit_toggle"
if o.data:
CharacterUIUtils.safe_render(row, props, name, toggle=True, icon="TRIA_DOWN" if (props[name] and ("settings" in o.data or len(o.children))) else ("TRIA_RIGHT" if not props[name] and ("settings" in o.data or len(o.children)) else "NONE" ))
else:
CharacterUIUtils.safe_render(row, props, name, toggle=True, icon="TRIA_DOWN" if (props[name] and (len(o.children))) else ("TRIA_RIGHT" if not props[name] and (len(o.children)) else "NONE" ))
if not is_child:
CharacterUIUtils.safe_render(row, props, name+"_lock",icon="LOCKED" if props[name+"_lock"] else "UNLOCKED")
if not o.data:
if len(o.children) and props[name]:
settings_box = element.box()
settings_box.label(text="Items", icon="MOD_CLOTH")
for child in o.children:
child_name = child.name.replace(" ", "_")+"_outfit_toggle"
if hasattr(props, child_name):
CharacterUIUtils.render_outfit_piece(child, settings_box, props, True)
return
if (len(o.children) or "settings" in o.data) and props[name]:
if len(o.children):
settings_box = element.box()
settings_box.label(text="Items", icon="MOD_CLOTH")
for child in o.children:
child_name = child.name.replace(" ", "_")+"_outfit_toggle"
if hasattr(props, child_name):
CharacterUIUtils.render_outfit_piece(child, settings_box, props, True)
@staticmethod
def render_attributes(layout, groups, panel_name):
for g in groups:
box = layout.box()
header_row = box.row(align=True)
expanded_op = header_row.operator("character_ui_script.expand_attribute_group_%s"%(character_id.lower()), emboss=False, text="",icon="TRIA_DOWN" if g["expanded"] else "TRIA_RIGHT" )
expanded_op.panel_name = panel_name
expanded_op.group_name = g["name"]
try:
header_row.label(text=g["name"].replace("_", " "), icon=g["icon"])
except:
header_row.label(text=g["name"].replace("_", " "))
if g["expanded"]:
for a in g["attributes"]:
row = box.row(align=True)
delimiter = '][' if '][' in a['path'] else '.'
offset = 1 if '][' in a['path'] else 0
prop = a['path'][a['path'].rindex(delimiter)+1:]
path = a['path'][:a['path'].rindex(delimiter)+offset]
if a['name']:
try:
row.prop(eval(path), prop, text=a['name'])
except:
print("couldn't render ", path, " prop")
else:
try:
row.prop(eval(path), prop)
except:
print("couldn't render ", path, " prop")
@staticmethod
def create_unique_ids(panels, operators):
for p in panels:
unique_panel = type(
"%s_%s"%(p.bl_idname, character_id)
,(p,)
,{'bl_idname': "%s_%s"%(p.bl_idname, character_id), 'bl_label': p.bl_label, 'bl_parent_id': "%s_%s"%(p.bl_parent_id, character_id) if hasattr(p, "bl_parent_id") else ""}
)
register_class(unique_panel)
for o in operators:
name = "%s_%s"%(o.bl_idname, character_id.lower())
unique_operator = type(name,(o,),{"bl_idname": name})
register_class(unique_operator)
@staticmethod
def render_cages(layout, cages, panel = 1):
for c in cages:
if c[1] == "OP%i"%(panel):
for m in c[0].modifiers:
if m.type == "CLOTH":
box = layout.box()
box.label(text=c[0].name)
row = box.row(align=True)
row.prop(m, "show_viewport")
row.prop(m, "show_render")
box.prop(m.point_cache, "frame_start")
box.prop(m.point_cache, "frame_end")
icon = "TRASH"
text = "Delete Bake"
if not m.point_cache.is_baked:
icon = "PHYSICS"
text = "Bake"
box.operator("character_ui.bake_%s"%(character_id.lower()), text=text, icon=icon).object_name = c[0].name
class VIEW3D_PT_characterUI(Panel):
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_category = custom_label
@classmethod
def poll(self, context):
ch = CharacterUIUtils.get_character()
if ch:
return ch == context.object
return False
class VIEW3D_PT_outfits(VIEW3D_PT_characterUI):
bl_label = "Outfits"
bl_idname = "VIEW3D_PT_outfits"
def draw(self, context):
layout = self.layout
ch = CharacterUIUtils.get_character()
props = CharacterUIUtils.get_props_from_character()
if ch and props:
if ch.data["outfits_collection"]:
outfits = ch.data["outfits_collection"]
if len(outfits.children) > 1:
CharacterUIUtils.safe_render(layout, props, "outfits_enum")
box = layout.box()
for o in outfits.children[props['outfits_enum']].objects:
                    is_top_child = True # True because if there is no parent then it's the top child
                    if not o.parent == None:
                        is_top_child = not o.users_collection[0] == o.parent.users_collection[0] # parent is in a different collection, so this object is the top child
if is_top_child:
CharacterUIUtils.render_outfit_piece(o,box, props)
locked_pieces = {}
for i, c in enumerate(outfits.children):
pieces = []
for o in c.objects:
if i != props["outfits_enum"]:
name = o.name.replace(" ", "_")+"_outfit_toggle"
if props[name+"_lock"]:
pieces.append(o)
if len(pieces):
locked_pieces[c.name] = pieces
for n,pcs in locked_pieces.items():
box.label(text=n)
for p in pcs:
CharacterUIUtils.render_outfit_piece(p, box, props)
if attributes_key in ch:
if "outfits" in ch[attributes_key]:
attributes_box = layout.box()
attributes_box.label(text="Attributes")
CharacterUIUtils.render_attributes(attributes_box, ch[attributes_key]["outfits"], "outfits")
class VIEW3D_PT_body(VIEW3D_PT_characterUI):
"Body panel"
bl_label = "Body"
bl_idname = "VIEW3D_PT_body"
def draw(self, context):
layout = self.layout
ch = CharacterUIUtils.get_character()
if ch:
props = CharacterUIUtils.get_props_from_character()
hair_row = layout.row(align=True)
CharacterUIUtils.safe_render(hair_row, props, "hair_enum")
if hasattr(props, "hair_lock") and hasattr(props, "hair_enum"):
CharacterUIUtils.safe_render(hair_row, props, "hair_lock", icon="LOCKED" if props.hair_lock else "UNLOCKED", toggle=True )
if attributes_key in ch:
if "body" in ch[attributes_key]:
attributes_box = layout.box()
attributes_box.label(text="Attributes")
CharacterUIUtils.render_attributes(attributes_box, ch[attributes_key]["body"], "body")
class VIEW3D_PT_physics_body_panel(VIEW3D_PT_characterUI):
"Physics Sub-Panel"
bl_label = "Physics"
bl_idname = "VIEW3D_PT_physics_body_panel"
bl_parent_id = "VIEW3D_PT_body"
@classmethod
def poll(self, context):
ch = CharacterUIUtils.get_character()
if ch:
if "character_ui_cages" in ch.data:
if "cages" in ch.data["character_ui_cages"]:
out = list(filter(lambda x: "OP2" in x, ch.data["character_ui_cages"]["cages"]))
return len(out) > 0
return False
def draw(self, context):
layout = self.layout
ch = CharacterUIUtils.get_character()
CharacterUIUtils.render_cages(layout, ch.data["character_ui_cages"]["cages"], 2)
class VIEW3D_PT_physics_outfits_panel(VIEW3D_PT_characterUI):
"Physics Sub-Panel"
bl_label = "Physics"
bl_idname = "VIEW3D_PT_physics_outfits_panel"
bl_parent_id = "VIEW3D_PT_outfits"
@classmethod
def poll(self, context):
ch = CharacterUIUtils.get_character()
if ch:
if "character_ui_cages" in ch.data:
if "cages" in ch.data["character_ui_cages"]:
out = list(filter(lambda x: "OP1" in x, ch.data["character_ui_cages"]["cages"]))
return len(out) > 0
return False
def draw(self, context):
layout = self.layout
ch = CharacterUIUtils.get_character()
CharacterUIUtils.render_cages(layout, ch.data["character_ui_cages"]["cages"], 1)
class VIEW3D_PT_physics_misc_panel(VIEW3D_PT_characterUI):
"Physics Sub-Panel"
bl_label = "Physics"
bl_idname = "VIEW3D_PT_physics_misc_panel"
bl_parent_id = "VIEW3D_PT_miscellaneous"
@classmethod
def poll(self, context):
ch = CharacterUIUtils.get_character()
if ch:
if "character_ui_cages" in ch.data:
if "cages" in ch.data["character_ui_cages"]:
out = list(filter(lambda x: "OP3" in x, ch.data["character_ui_cages"]["cages"]))
return len(out) > 0
return False
def draw(self, context):
layout = self.layout
ch = CharacterUIUtils.get_character()
CharacterUIUtils.render_cages(layout, ch.data["character_ui_cages"]["cages"], 3)
class VIEW3D_PT_rig_layers(VIEW3D_PT_characterUI):
bl_label = "Rig"
bl_idname = "VIEW3D_PT_rig_layers"
@classmethod
def poll(self, context):
ch = CharacterUIUtils.get_character()
if ch:
if ch == context.active_object:
if attributes_key in ch:
if "rig" in ch[attributes_key]:
if len(ch[attributes_key]["rig"]):
return True
if rig_layers_key in ch.data:
if type(ch.data[rig_layers_key]) == list:
return len(ch.data[rig_layers_key])
return False
def draw(self, context):
box = self.layout.column().box()
ch = CharacterUIUtils.get_character()
if ch:
if rig_layers_key in ch.data:
#sorting "stolen" from CloudRig https://gitlab.com/blender/CloudRig/-/blob/a16df00d5da51d19f720f3e5fe917a84a85883a0/generation/cloudrig.py
layer_data = ch.data[rig_layers_key]
if type(layer_data) == list:
box.label(text="Layers")
rig_layers = [dict(l) for l in layer_data]
for i, l in enumerate(rig_layers):
# When the Rigify addon is not enabled, finding the original index after sorting is impossible, so just store it.
l['index'] = i
if 'row' not in l:
l['row'] = 1
sorted_layers = sorted(rig_layers, key=lambda l: l['row'])
sorted_layers = [l for l in sorted_layers if 'name' in l and l['name']!=" "]
current_row_index = -1
row = box.row()
for rig_layer in sorted_layers:
if rig_layer['name'] in ["", " "]: continue
if rig_layer['name'].startswith("$"): continue
if rig_layer['row'] > current_row_index:
current_row_index = rig_layer['row']
row = box.row()
row.prop(ch.data, "layers", index=rig_layer['index'], toggle=True, text=rig_layer['name'])
if attributes_key in ch:
if "rig" in ch[attributes_key]:
attributes_box = self.layout.box()
attributes_box.label(text="Attributes")
CharacterUIUtils.render_attributes(attributes_box, ch[attributes_key]["rig"], "rig")
class VIEW3D_PT_miscellaneous(VIEW3D_PT_characterUI):
"Panel for things which don't belong anywhere"
bl_label = "Miscellaneous"
bl_idname = "VIEW3D_PT_miscellaneous"
@classmethod
def poll(self, context):
ch = CharacterUIUtils.get_character()
if ch:
if ch == context.active_object:
if attributes_key in ch:
if "misc" in ch[attributes_key]:
if len(ch[attributes_key]["misc"]):
return True
if "character_ui_cages" in ch.data:
if "cages" in ch.data["character_ui_cages"]:
out = list(filter(lambda x: "OP3" in x, ch.data["character_ui_cages"]["cages"]))
return len(out) > 0
return False
def draw(self, context):
layout = self.layout
ch = CharacterUIUtils.get_character()
if attributes_key in ch:
if "misc" in ch[attributes_key]:
attributes_box = self.layout.box()
attributes_box.label(text="Attributes")
CharacterUIUtils.render_attributes(attributes_box, ch[attributes_key]["misc"], "misc")
class VIEW3D_PT_links(VIEW3D_PT_characterUI):
bl_label = "Links"
bl_idname = "VIEW3D_PT_links"
def draw(self, context):
layout = self.layout
layout.separator()
col = layout.column()
data = CharacterUIUtils.get_character().data
if links_key in data:
for section in data[links_key].to_dict():
box = col.box()
box.label(text=section)
column = box.column(align=True)
for link in data[links_key][section].to_dict():
try:
column.operator("wm.url_open", text=link, icon=data[links_key][section][link][0]).url = data[links_key][section][link][1]
except:
column.operator("wm.url_open", text=link).url = data[links_key][section][link][1]
layout.label(text='Character-UI v%s'%(".".join(str(i) for i in bl_info["version"])), icon='SETTINGS')
layout.operator("wm.url_open", text="UI bugs/suggestions").url = "https://github.com/nextr3d/Character-UI/issues/new/choose"
layout.operator("wm.url_open", text="Download Character-UI add-on").url = "https://github.com/nextr3d/Character-UI"
class OPS_OT_ExpandAttributeGroup(Operator):
"Expands or Contracts attribute group"
bl_idname = "character_ui_script.expand_attribute_group"
bl_label = "Expand/Contract"
bl_description = "Expands or Contracts Attribute Group"
panel_name : StringProperty()
group_name : StringProperty()
def execute(self, context):
ch = CharacterUIUtils.get_character()
if ch:
if attributes_key in ch:
if self.panel_name in ch[attributes_key]:
for i in range(len(ch[attributes_key][self.panel_name])):
g = ch[attributes_key][self.panel_name][i]
if g["name"] == self.group_name:
g["expanded"] = not g["expanded"]
ch[attributes_key][self.panel_name][i] = g
return {'FINISHED'}
class OPS_PT_BakePhysics(bpy.types.Operator):
bl_idname = "character_ui.bake"
bl_description = "Bake Physics"
bl_label = "Bake"
object_name: bpy.props.StringProperty()
def execute(self, context ):
for m in bpy.data.objects[self.object_name].modifiers:
if m.type == "CLOTH" and not m.point_cache.is_baked:
if not m.show_viewport:
self.report({'WARNING'}, "Modifier is not visible in the viewport, baking will have no effect!")
else:
override = {'scene': context.scene, 'active_object': bpy.data.objects[self.object_name], 'point_cache': m.point_cache}
bpy.ops.ptcache.bake(override, bake=True)
self.report({'INFO'}, "Done baking physics for: "+self.object_name)
elif m.type == "CLOTH" and m.point_cache.is_baked:
override = {'scene': context.scene, 'active_object': bpy.data.objects[self.object_name], 'point_cache': m.point_cache}
bpy.ops.ptcache.free_bake(override)
self.report({'INFO'}, "Removed physics cache for: "+self.object_name)
return {'FINISHED'}
classes = [
CharacterUI
]
panels = [
VIEW3D_PT_outfits,
VIEW3D_PT_rig_layers,
VIEW3D_PT_body,
VIEW3D_PT_physics_body_panel,
VIEW3D_PT_physics_outfits_panel,
VIEW3D_PT_miscellaneous,
VIEW3D_PT_physics_misc_panel,
VIEW3D_PT_links
]
operators = [
OPS_OT_ExpandAttributeGroup,
OPS_PT_BakePhysics
]
def register():
for c in classes:
register_class(c)
bpy.app.handlers.load_post.append(CharacterUIUtils.create_unique_ids(panels, operators))
setattr(bpy.types.Object, "%s%s"%(custom_prefix, character_id), bpy.props.PointerProperty(type=CharacterUI))
CharacterUI.initialize()
def unregister():
for c in reversed(classes):
unregister_class(c)
delattr(bpy.types.Object, "%s%s"%(custom_prefix, character_id))
if __name__ in ['__main__', 'builtins']:
# __main__ when executed through the editor
#builtins when executed after generation of the script
register()
| 44.897015
| 250
| 0.57415
|
713a31111f124deeeed3850126d1247ae8b239c7
| 3,496
|
py
|
Python
|
simscale_sdk/models/unit_temperature.py
|
slainesimscale/simscale-python-sdk
|
db483eeabe558e55d020f5f829a3bf13c9c287a7
|
[
"MIT"
] | 8
|
2021-01-22T13:41:03.000Z
|
2022-01-03T09:00:10.000Z
|
simscale_sdk/models/unit_temperature.py
|
slainesimscale/simscale-python-sdk
|
db483eeabe558e55d020f5f829a3bf13c9c287a7
|
[
"MIT"
] | null | null | null |
simscale_sdk/models/unit_temperature.py
|
slainesimscale/simscale-python-sdk
|
db483eeabe558e55d020f5f829a3bf13c9c287a7
|
[
"MIT"
] | 3
|
2021-03-18T15:52:52.000Z
|
2022-01-03T08:59:30.000Z
|
# coding: utf-8
"""
SimScale API
The version of the OpenAPI document: 0.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from simscale_sdk.configuration import Configuration
class UnitTemperature(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'unit': 'str'
}
attribute_map = {
'unit': 'unit'
}
def __init__(self, unit=None, local_vars_configuration=None): # noqa: E501
"""UnitTemperature - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._unit = None
self.discriminator = None
if unit is not None:
self.unit = unit
@property
def unit(self):
"""Gets the unit of this UnitTemperature. # noqa: E501
:return: The unit of this UnitTemperature. # noqa: E501
:rtype: str
"""
return self._unit
@unit.setter
def unit(self, unit):
"""Sets the unit of this UnitTemperature.
:param unit: The unit of this UnitTemperature. # noqa: E501
:type: str
"""
allowed_values = ["°C", "°F", "K"] # noqa: E501
if self.local_vars_configuration.client_side_validation and unit not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `unit` ({0}), must be one of {1}" # noqa: E501
.format(unit, allowed_values)
)
self._unit = unit
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UnitTemperature):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, UnitTemperature):
return True
return self.to_dict() != other.to_dict()
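# Illustrative sketch (not part of the generated model): constructing the model and
# exercising its client-side validation; the unit values used here are examples only.
if __name__ == "__main__":
    temperature = UnitTemperature(unit="K")
    print(temperature.to_dict())   # -> {'unit': 'K'}
    try:
        temperature.unit = "R"     # not in allowed_values, so this raises ValueError
    except ValueError as error:
        print(error)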
| 27.968
| 109
| 0.561499
|
298f1c895dae0aad86500ee35a1875fa06e8313b
| 345
|
py
|
Python
|
scripts/figures/figure7/pipeswitch_inception_v3/remote_run_data.py
|
netx-repo/PipeSwitch
|
f321d399e501b79ad51da13074e2aecda36cb06a
|
[
"Apache-2.0"
] | 81
|
2020-11-05T16:15:58.000Z
|
2022-03-09T07:38:51.000Z
|
scripts/figures/figure7/pipeswitch_inception_v3/remote_run_data.py
|
baizh1994/PipeSwitch
|
cb7b03f0777cc59038a449e55ce1492f7ec973c6
|
[
"Apache-2.0"
] | 3
|
2020-12-21T13:00:09.000Z
|
2021-06-13T15:08:46.000Z
|
scripts/figures/figure7/pipeswitch_inception_v3/remote_run_data.py
|
baizh1994/PipeSwitch
|
cb7b03f0777cc59038a449e55ce1492f7ec973c6
|
[
"Apache-2.0"
] | 22
|
2020-11-06T07:51:35.000Z
|
2022-03-09T07:38:53.000Z
|
import os
import sys
from scripts.common.util import RunDocker
def main():
with RunDocker('pipeswitch:pipeswitch', 'figure7_pipeswitch_inception_v3') as rd:
# Start the server: pipeswitch
rd.run('python PipeSwitch/scripts/run_data.py')
# Get and return the data point
if __name__ == '__main__':
main()
| 24.642857
| 85
| 0.684058
|
112237a2e904de12d6ef5fe4b1f77d7fec1d6b99
| 65
|
py
|
Python
|
py/cap2/gauss.py
|
dacanizares/IntroCS-ES
|
1324b59a3bed86559117b01ad85384d593394d4a
|
[
"MIT"
] | 2
|
2020-03-21T19:12:10.000Z
|
2020-03-27T03:59:41.000Z
|
py/cap2/gauss.py
|
dacanizares/IntroCS-ES
|
1324b59a3bed86559117b01ad85384d593394d4a
|
[
"MIT"
] | 13
|
2020-03-20T01:27:57.000Z
|
2020-08-08T18:20:29.000Z
|
py/cap2/gauss.py
|
dacanizares/IntroCS-ES
|
1324b59a3bed86559117b01ad85384d593394d4a
|
[
"MIT"
] | null | null | null |
suma = 0
for i in range(1, 101):
suma = suma + i
print(suma)
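# Check (not part of the original file): the loop computes 1 + 2 + ... + 100,
# which by Gauss's closed form n*(n+1)/2 equals 100*101/2 = 5050.
assert suma == 100 * 101 // 2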
| 10.833333
| 23
| 0.584615
|
eae8cd4712150890bc06abfe188adca320795641
| 154
|
py
|
Python
|
data/typing/pandas.testing.py
|
vfdev-5/python-record-api
|
006faf0bba9cd4cb55fbacc13d2bbda365f5bf0b
|
[
"MIT"
] | null | null | null |
data/typing/pandas.testing.py
|
vfdev-5/python-record-api
|
006faf0bba9cd4cb55fbacc13d2bbda365f5bf0b
|
[
"MIT"
] | null | null | null |
data/typing/pandas.testing.py
|
vfdev-5/python-record-api
|
006faf0bba9cd4cb55fbacc13d2bbda365f5bf0b
|
[
"MIT"
] | null | null | null |
from typing import *
# usage.dask: 1
assert_frame_equal: object
# usage.dask: 1
assert_index_equal: object
# usage.dask: 1
assert_series_equal: object
| 14
| 27
| 0.772727
|
69ccb658c0171f9b4d41a0983037df70b8ff2846
| 3,611
|
py
|
Python
|
release/stubs.min/Autodesk/Revit/DB/__init___parts/ExternalDefinitionCreationOptions.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 182
|
2017-06-27T02:26:15.000Z
|
2022-03-30T18:53:43.000Z
|
release/stubs.min/Autodesk/Revit/DB/__init___parts/ExternalDefinitionCreationOptions.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 28
|
2017-06-27T13:38:23.000Z
|
2022-03-15T11:19:44.000Z
|
release/stubs.min/Autodesk/Revit/DB/__init___parts/ExternalDefinitionCreationOptions.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 67
|
2017-06-28T09:43:59.000Z
|
2022-03-20T21:17:10.000Z
|
class ExternalDefinitionCreationOptions(object,IDisposable):
"""
    An option class used for creating a new shared parameter definition, including options such as name, type, visibility,
    GUID, description and modifiable flag.
ExternalDefinitionCreationOptions(name: str,type: ParameterType)
"""
def Dispose(self):
""" Dispose(self: ExternalDefinitionCreationOptions) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: ExternalDefinitionCreationOptions,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,name,type):
""" __new__(cls: type,name: str,type: ParameterType) """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
Description=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The description of the parameter definition to be created. The description will be used as tooltip in the
Revit UI including in the properties palette.
The default is an empty string.
Get: Description(self: ExternalDefinitionCreationOptions) -> str
Set: Description(self: ExternalDefinitionCreationOptions)=value
"""
GUID=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The GUID to use for this parameter definition.
If not explicitly set,a random GUID is used.
Get: GUID(self: ExternalDefinitionCreationOptions) -> Guid
Set: GUID(self: ExternalDefinitionCreationOptions)=value
"""
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: ExternalDefinitionCreationOptions) -> bool
"""
Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The name of the parameter definition to be created.
Get: Name(self: ExternalDefinitionCreationOptions) -> str
Set: Name(self: ExternalDefinitionCreationOptions)=value
"""
Type=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The type of the parameter definition to be created.
Get: Type(self: ExternalDefinitionCreationOptions) -> ParameterType
Set: Type(self: ExternalDefinitionCreationOptions)=value
"""
UserModifiable=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""This property indicates whether this parameter can be modified by UI user or not.
True if the parameter will be modifiable by the user in the user interface,false if the parameter will display as read-only.
The default is true.
Get: UserModifiable(self: ExternalDefinitionCreationOptions) -> bool
Set: UserModifiable(self: ExternalDefinitionCreationOptions)=value
"""
Visible=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""True if the parameter is visible to the user,false if it is hidden and accessible only via the API.
The default is true.
Get: Visible(self: ExternalDefinitionCreationOptions) -> bool
Set: Visible(self: ExternalDefinitionCreationOptions)=value
"""
| 26.748148
| 215
| 0.725007
|
1fbafb37f5e74f270776fd341f08baaa937f98d0
| 281
|
py
|
Python
|
crawler.py
|
oursonvie/xcar
|
2bc52f2935e62823c589e9a9fe708f1dcd2cdb69
|
[
"Apache-2.0"
] | null | null | null |
crawler.py
|
oursonvie/xcar
|
2bc52f2935e62823c589e9a9fe708f1dcd2cdb69
|
[
"Apache-2.0"
] | null | null | null |
crawler.py
|
oursonvie/xcar
|
2bc52f2935e62823c589e9a9fe708f1dcd2cdb69
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# crawler module
from BeautifulSoup import BeautifulSoup
import urllib2
import codecs
def readlink(link):
response = urllib2.urlopen(link)
html = response.read()
pagecontent = BeautifulSoup(html, fromEncoding='gb18030')
return pagecontent
| 21.615385
| 61
| 0.729537
|
c8450935823b4a639bc84c771ac82b2a52444023
| 8,646
|
py
|
Python
|
devnet_gui_functions_ebo.py
|
ebarredo84/DEVNET_PREP_SW
|
de3548e49e4f625dc571c9e675066f9e1646a50e
|
[
"BSD-3-Clause"
] | null | null | null |
devnet_gui_functions_ebo.py
|
ebarredo84/DEVNET_PREP_SW
|
de3548e49e4f625dc571c9e675066f9e1646a50e
|
[
"BSD-3-Clause"
] | null | null | null |
devnet_gui_functions_ebo.py
|
ebarredo84/DEVNET_PREP_SW
|
de3548e49e4f625dc571c9e675066f9e1646a50e
|
[
"BSD-3-Clause"
] | null | null | null |
#------USER INTERFACE (VIEW)----------
import tkinter as tk
#------CONTROLLER--------------------
import json
import base64
#----------SECUNDARY WINDOW----------
class SECUNDARY_WINDOW:
def __init__(self,window2,msg):
        # WINDOW
window2.geometry("600x100")
window2.title("ASK PARAMETER")
        # TEXT BOX
        text_box2=tk.Text(window2,width=75,height=25) # create the text widget
text_box2.place(x=0,y=0)
text_box2.insert("1.0",str(msg))
        text_box2.config(state="disabled") # read-only
        # ENTRY FIELD
self.entry2=tk.Entry(window2,fg="blue",bg="white",
                             width=40) # create the input widget
self.entry2.place(x=5,y=35)
        # SEND (FORWARD) BUTTON
button_fw2=tk.Button(window2,text="Send",
fg="black",bg="grey",
width=5,height=1,
                             command=window2.quit) # create the button widget
button_fw2.place(x=5,y=60)
        # EXIT BUTTON
#button_exit2=tk.Button(window2,text="Exit",
# fg="black",bg="grey",
# width=5,height=1,
        #                      command=window2.destroy) # create the button widget
#button_exit2.place(x=55,y=60)
def SEND(self,window2):
param=self.entry2.get()
return param
def ASK_PARAMETER(msg):
app2=tk.Tk()
window2=SECUNDARY_WINDOW(app2,msg)
app2.mainloop()
parameter=window2.SEND(window2)
app2.destroy()
return parameter
#--------ERASE AND PRINT FUNCTIONS-------------
def PRINT_STATUS_CODE(response,text_box):
    text_box.config(state="normal") # enable editing
    text_box.delete("1.0", tk.END) # clear the text
    text_box.insert("1.0","Request status: "+str(response.status_code)+"\n")
    text_box.config(state="disabled") # read-only
def PRINT_HEADERS(headers,text_box):
    text_box.config(state="normal") # enable editing
    text_box.delete("1.0", tk.END) # clear the text
    text_box.insert("1.0","Headers: "+str(headers)+"\n")
    text_box.config(state="disabled") # read-only
#--------PRINT FUNCTIONS----------------------
def PRINT_RESPONSE_JSON(resp,text_box):
response_json = resp.json()
json_formatted_str = json.dumps(response_json, indent=4)
    # PRINT TO THE TEXT BOX
    text_box.config(state="normal") # enable editing
    text_box.insert(tk.END,json_formatted_str+"\n")
    text_box.config(state="disabled") # read-only
def PRINT_RESPONSE(resp,text_box):
json_formatted_str = json.dumps(resp, indent=4)
    # PRINT TO THE TEXT BOX
    text_box.config(state="normal") # enable editing
    text_box.insert(tk.END,json_formatted_str+"\n")
    text_box.config(state="disabled") # read-only
def PRINT_CONTENT_JSON(resp,text_box):
json_formatted_str=json.dumps(json.loads(resp.content),indent=4)
    # PRINT TO THE TEXT BOX
    text_box.config(state="normal") # enable editing
    text_box.insert(tk.END,json_formatted_str+"\n")
    text_box.config(state="disabled") # read-only
def PRINT_TABLE_IN_TEXT(text_box,dictionary,**kwargs):
num_arg=len(kwargs)
text_box.config(state="normal")
    #----------PRINTS 2 KEY-VALUE PAIRS
if num_arg==6:
print_header='{0:'+kwargs['size1']+'s}{1:1}{2:'+kwargs['size2']+'s}'
text_box.insert(tk.END,print_header.format(kwargs['name1'], "|",
kwargs['name2'])+"\n")
text_box.insert(tk.END,'-'*140+"\n")
d1=kwargs['data1']
d2=kwargs['data2']
for ITEM in dictionary:
text_box.insert(tk.END,print_header.format(str(ITEM[d1]),"|",
str(ITEM[d2]))+"\n")
    #----------PRINTS 3 KEY-VALUE PAIRS---------------
elif num_arg==9:
print_header='{0:'+kwargs['size1']+'s}{1:1}{2:'+kwargs['size2']+'s}{3:1}{4:'+kwargs['size3']+'s}'
text_box.insert(tk.END,print_header.format(kwargs['name1'], "|",
kwargs['name2'], "|",
kwargs['name3'])+"\n")
text_box.insert(tk.END,'-'*140+"\n")
d1=kwargs['data1']
d2=kwargs['data2']
d3=kwargs['data3']
for ITEM in dictionary:
text_box.insert(tk.END,print_header.format(str(ITEM[d1]),"|",
str(ITEM[d2]), "|",
str(ITEM[d3]))+"\n")
    #----------PRINTS 4 KEY-VALUE PAIRS---------------
elif num_arg==12:
print_header='{0:'+kwargs['size1']+'s}{1:1}{2:'+kwargs['size2']+'s}{3:1}{4:'+kwargs['size3']+'s}{5:1}{6:'+kwargs['size4']+'s}'
text_box.insert(tk.END,print_header.format(kwargs['name1'], "|",
kwargs['name2'], "|",
kwargs['name3'], "|",
kwargs['name4'])+"\n")
text_box.insert(tk.END,'-'*140+"\n")
d1=kwargs['data1']
d2=kwargs['data2']
d3=kwargs['data3']
d4=kwargs['data4']
for ITEM in dictionary:
text_box.insert(tk.END,print_header.format(str(ITEM[d1]),"|",
str(ITEM[d2]), "|",
str(ITEM[d3]), "|",
str(ITEM[d4]))+"\n")
    #----------PRINTS 5 KEY-VALUE PAIRS---------------
elif num_arg==15:
print_header='{0:'+kwargs['size1']+'s}{1:1}{2:'+kwargs['size2']+'s}{3:1}{4:'+kwargs['size3']+'s}{5:1}{6:'+kwargs['size4']+'s}{7:1}{8:'+kwargs['size5']+'s}'
text_box.insert(tk.END,print_header.format(kwargs['name1'], "|",
kwargs['name2'], "|",
kwargs['name3'], "|",
kwargs['name4'], "|",
kwargs['name5'])+"\n")
text_box.insert(tk.END,'-'*140+"\n")
d1=kwargs['data1']
d2=kwargs['data2']
d3=kwargs['data3']
d4=kwargs['data4']
d5=kwargs['data5']
for ITEM in dictionary:
text_box.insert(tk.END,print_header.format(str(ITEM[d1]),"|",
str(ITEM[d2]), "|",
str(ITEM[d3]), "|",
str(ITEM[d4]), "|",
str(ITEM[d5]))+"\n")
    #----------PRINTS 6 KEY-VALUE PAIRS---------------
elif num_arg==18:
print_header='{0:'+kwargs['size1']+'s}{1:1}{2:'+kwargs['size2']+'s}{3:1}{4:'+kwargs['size3']+'s}{5:1}{6:'+kwargs['size4']+'s}{7:1}{8:'+kwargs['size5']+'s}{9:1}{10:'+kwargs['size6']+'s}'
text_box.insert(tk.END,print_header.format(kwargs['name1'], "|",
kwargs['name2'], "|",
kwargs['name3'], "|",
kwargs['name4'], "|",
kwargs['name5'], "|",
kwargs['name6'])+"\n")
text_box.insert(tk.END,'-'*140+"\n")
d1=kwargs['data1']
d2=kwargs['data2']
d3=kwargs['data3']
d4=kwargs['data4']
d5=kwargs['data5']
d6=kwargs['data6']
for ITEM in dictionary:
text_box.insert(tk.END,print_header.format(str(ITEM[d1]),"|",
str(ITEM[d2]), "|",
str(ITEM[d3]), "|",
str(ITEM[d4]), "|",
str(ITEM[d5]), "|",
str(ITEM[d6]))+"\n")
text_box.config(state="disabled")
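# Illustrative usage sketch (not part of the original module): rendering a small
# two-column table with PRINT_TABLE_IN_TEXT; the widget, the rows and the column
# sizes below are assumptions for demonstration only.
if __name__ == "__main__":
    demo_root = tk.Tk()
    demo_box = tk.Text(demo_root, width=80, height=10)
    demo_box.pack()
    demo_rows = [{"id": "1", "state": "up"}, {"id": "2", "state": "down"}]
    # Six keyword arguments select the two-column branch of PRINT_TABLE_IN_TEXT.
    PRINT_TABLE_IN_TEXT(demo_box, demo_rows,
                        name1="ID", size1="10", data1="id",
                        name2="STATE", size2="10", data2="state")
    demo_root.mainloop()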
| 49.689655
| 194
| 0.445293
|
11d9031d1d62f5021dd640ce92b5b643fb94dad9
| 30,993
|
py
|
Python
|
tencentcloud/npp/v20190823/models.py
|
xuzixx/tencentcloud-sdk-python
|
98866ab9fd104cd6475b62fe78ff3fffd96d5ce0
|
[
"Apache-2.0"
] | null | null | null |
tencentcloud/npp/v20190823/models.py
|
xuzixx/tencentcloud-sdk-python
|
98866ab9fd104cd6475b62fe78ff3fffd96d5ce0
|
[
"Apache-2.0"
] | null | null | null |
tencentcloud/npp/v20190823/models.py
|
xuzixx/tencentcloud-sdk-python
|
98866ab9fd104cd6475b62fe78ff3fffd96d5ce0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class CallBackCdr(AbstractModel):
    """Call detail record (CDR) details
"""
def __init__(self):
"""
        :param CallId: Call ID of this call
        :type CallId: str
        :param Src: Caller (calling party) number
        :type Src: str
        :param Dst: Callee (called party) number
        :type Dst: str
        :param StartSrcCallTime: Time when the call to the caller started
        :type StartSrcCallTime: str
        :param StartSrcRingTime: Time when the caller's phone started ringing
        :type StartSrcRingTime: str
        :param SrcAcceptTime: Time when the caller answered
        :type SrcAcceptTime: str
        :param StartDstCallTime: Time when the call to the callee started
        :type StartDstCallTime: str
        :param StartDstRingTime: Time when the callee's phone started ringing
        :type StartDstRingTime: str
        :param DstAcceptTime: Time when the callee answered
        :type DstAcceptTime: str
        :param EndCallTime: Time when the call ended (user hung up)
        :type EndCallTime: str
        :param CallEndStatus: Final call status: 0: unknown 1: normal call 2: caller did not answer 3: caller answered, callee did not answer 4: call to the caller failed 5: call to the callee failed
        :type CallEndStatus: str
        :param Duration: Billable call duration
        :type Duration: str
        :param RecordUrl: Recording URL; empty if recording is disabled or fails
        :type RecordUrl: str
        :param CallType: Call type (1: VOIP 2: IP TO PSTN 3: PSTN TO PSTN); if the CDR does not carry this field, it defaults to callback, i.e. 3 (PSTN TO PSTN)
        Note: this field may return null, indicating that no valid value could be obtained.
        :type CallType: str
        :param BizId: Same as the bizId in the callback request; returned only when the callback request carried a bizId
        Note: this field may return null, indicating that no valid value could be obtained.
        :type BizId: str
        :param OrderId: Order ID, at most 64 bytes; for apps that track order state (such as the Darenbang app integration), this field is only carried in the bill and is not attached to other callbacks
        Note: this field may return null, indicating that no valid value could be obtained.
        :type OrderId: str
"""
self.CallId = None
self.Src = None
self.Dst = None
self.StartSrcCallTime = None
self.StartSrcRingTime = None
self.SrcAcceptTime = None
self.StartDstCallTime = None
self.StartDstRingTime = None
self.DstAcceptTime = None
self.EndCallTime = None
self.CallEndStatus = None
self.Duration = None
self.RecordUrl = None
self.CallType = None
self.BizId = None
self.OrderId = None
def _deserialize(self, params):
self.CallId = params.get("CallId")
self.Src = params.get("Src")
self.Dst = params.get("Dst")
self.StartSrcCallTime = params.get("StartSrcCallTime")
self.StartSrcRingTime = params.get("StartSrcRingTime")
self.SrcAcceptTime = params.get("SrcAcceptTime")
self.StartDstCallTime = params.get("StartDstCallTime")
self.StartDstRingTime = params.get("StartDstRingTime")
self.DstAcceptTime = params.get("DstAcceptTime")
self.EndCallTime = params.get("EndCallTime")
self.CallEndStatus = params.get("CallEndStatus")
self.Duration = params.get("Duration")
self.RecordUrl = params.get("RecordUrl")
self.CallType = params.get("CallType")
self.BizId = params.get("BizId")
self.OrderId = params.get("OrderId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
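# --- Illustrative sketch (not part of the Tencent Cloud SDK) ---------------
# Shows how these AbstractModel subclasses are filled from a response dict via
# _deserialize(); every literal value below is made up for illustration.
if __name__ == "__main__":
    _demo_cdr = CallBackCdr()
    _demo_cdr._deserialize({
        "CallId": "demo-call-id",            # hypothetical values
        "Src": "008613631686024",
        "Dst": "008613631686025",
        "Duration": "35",
        "UnknownField": "x",                 # unknown key: triggers the warning above
    })
    print(_demo_cdr.CallId, _demo_cdr.Duration)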
class CallBackPhoneCode(AbstractModel):
    """Callback number fields
    """
    def __init__(self):
        """
        :param Nation: Country code, always prefixed with 00
        :type Nation: str
        :param Phone: Number (landline numbers carry the 0-prefixed area code, e.g. 075586013388)
        :type Phone: str
        """
self.Nation = None
self.Phone = None
def _deserialize(self, params):
self.Nation = params.get("Nation")
self.Phone = params.get("Phone")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class CreateCallBackRequest(AbstractModel):
    """CreateCallBack request structure
    """
    def __init__(self):
        """
        :param BizAppId: Business appid
        :type BizAppId: str
        :param Src: Calling number (must be an 11-digit mobile number prefixed with 0086, e.g. 008613631686024)
        :type Src: str
        :param Dst: Called number (must be an 11-digit mobile or landline number prefixed with 0086, e.g. 008613631686024; landline e.g. 0086075586013388)
        :type Dst: str
        :param SrcDisplayNum: System-assigned landline number displayed to the caller; a randomly assigned number is shown if left empty
        :type SrcDisplayNum: str
        :param DstDisplayNum: System-assigned landline number displayed to the callee; a randomly assigned number is shown if left empty
        :type DstDisplayNum: str
        :param Record: Whether to record the call: 0 = no recording, 1 = record. Defaults to no recording
        :type Record: str
        :param MaxAllowTime: Maximum allowed call duration in minutes; defaults to 30 minutes if unset
        :type MaxAllowTime: str
        :param StatusFlag: Bitmask of call statuses to push: caller call initiated: 1; callee call initiated: 256; caller ringing: 2; callee ringing: 512; caller answered: 4; callee answered: 1024; caller rejected the call: 8; callee rejected the call: 2048; caller hung up normally: 16; callee hung up normally: 4096; caller call exception: 32; callee call exception: 8192
        For example: 0 means no status is pushed; 4 means only the caller-answered status is pushed; 16191 means every status is pushed (the sum of all values above)
        :type StatusFlag: str
        :param StatusUrl: Status callback notification URL; a default push URL can be configured for the production environment
        :type StatusUrl: str
        :param HangupUrl: CDR callback notification URL; a default push URL can be configured for the production environment
        :type HangupUrl: str
        :param RecordUrl: Recording URL callback notification address; a default push URL can be configured for the production environment
        :type RecordUrl: str
        :param BizId: Business application key used to distinguish internal businesses or customer products; it must be globally unique under the appId, at most 64 bytes, and may contain only digits and letters.
        :type BizId: str
        :param LastCallId: callId of the most recent call; when supplied, the platform takes it into account during line allocation and prefers not to assign the line used by that callId (note: use with care, as this affects line scheduling)
        :type LastCallId: str
        :param PreCallerHandle: Structure describing pre-processing of the caller leg; the configured operations decide whether the callee is then called. If used, this structure must be combined with the keyList structure, and both parameters become required
        :type PreCallerHandle: :class:`tencentcloud.npp.v20190823.models.RreCallerHandle`
        :param OrderId: Order ID, at most 64 bytes; for apps with order states (such as the Darenbang app integration), this field is carried only in the bill and is not attached to other callbacks
        :type OrderId: str
        """
self.BizAppId = None
self.Src = None
self.Dst = None
self.SrcDisplayNum = None
self.DstDisplayNum = None
self.Record = None
self.MaxAllowTime = None
self.StatusFlag = None
self.StatusUrl = None
self.HangupUrl = None
self.RecordUrl = None
self.BizId = None
self.LastCallId = None
self.PreCallerHandle = None
self.OrderId = None
def _deserialize(self, params):
self.BizAppId = params.get("BizAppId")
self.Src = params.get("Src")
self.Dst = params.get("Dst")
self.SrcDisplayNum = params.get("SrcDisplayNum")
self.DstDisplayNum = params.get("DstDisplayNum")
self.Record = params.get("Record")
self.MaxAllowTime = params.get("MaxAllowTime")
self.StatusFlag = params.get("StatusFlag")
self.StatusUrl = params.get("StatusUrl")
self.HangupUrl = params.get("HangupUrl")
self.RecordUrl = params.get("RecordUrl")
self.BizId = params.get("BizId")
self.LastCallId = params.get("LastCallId")
if params.get("PreCallerHandle") is not None:
self.PreCallerHandle = RreCallerHandle()
self.PreCallerHandle._deserialize(params.get("PreCallerHandle"))
self.OrderId = params.get("OrderId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class CreateCallBackResponse(AbstractModel):
    """CreateCallBack response structure
    """
    def __init__(self):
        """
        :param CallId: CDR ID
        Note: this field may return null, indicating that no valid value could be obtained.
        :type CallId: str
        :param SrcDisplayNum: System-assigned landline number displayed to the caller
        Note: this field may return null, indicating that no valid value could be obtained.
        :type SrcDisplayNum: str
        :param DstDisplayNum: System-assigned landline number displayed to the callee
        Note: this field may return null, indicating that no valid value could be obtained.
        :type DstDisplayNum: str
        :param ErrorCode: Error code
        :type ErrorCode: str
        :param Msg: Error message
        Note: this field may return null, indicating that no valid value could be obtained.
        :type Msg: str
        :param RequestId: Unique request ID, returned with every request; provide it when troubleshooting.
        :type RequestId: str
        """
self.CallId = None
self.SrcDisplayNum = None
self.DstDisplayNum = None
self.ErrorCode = None
self.Msg = None
self.RequestId = None
def _deserialize(self, params):
self.CallId = params.get("CallId")
self.SrcDisplayNum = params.get("SrcDisplayNum")
self.DstDisplayNum = params.get("DstDisplayNum")
self.ErrorCode = params.get("ErrorCode")
self.Msg = params.get("Msg")
self.RequestId = params.get("RequestId")
class DelVirtualNumRequest(AbstractModel):
    """DelVirtualNum request structure
    """
    def __init__(self):
        """
        :param BizAppId: Business appid
        :type BizAppId: str
        :param BindId: Binding ID of the two parties' numbers plus the virtual (middle) number; globally unique
        :type BindId: str
        :param BizId: Secondary business ID of the application; bizId must be globally unique under the appId and at most 16 bytes long.
        :type BizId: str
        """
self.BizAppId = None
self.BindId = None
self.BizId = None
def _deserialize(self, params):
self.BizAppId = params.get("BizAppId")
self.BindId = params.get("BindId")
self.BizId = params.get("BizId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DelVirtualNumResponse(AbstractModel):
    """DelVirtualNum response structure
    """
    def __init__(self):
        """
        :param ErrorCode: Error code
        :type ErrorCode: str
        :param Msg: Error message
        Note: this field may return null, indicating that no valid value could be obtained.
        :type Msg: str
        :param BindId: Binding ID; globally unique
        Note: this field may return null, indicating that no valid value could be obtained.
        :type BindId: str
        :param RefLeftNum: Remaining reference count on the virtual number; it is unbound once the count reaches 0
        Note: this field may return null, indicating that no valid value could be obtained.
        :type RefLeftNum: str
        :param RequestId: Unique request ID, returned with every request; provide it when troubleshooting.
        :type RequestId: str
        """
self.ErrorCode = None
self.Msg = None
self.BindId = None
self.RefLeftNum = None
self.RequestId = None
def _deserialize(self, params):
self.ErrorCode = params.get("ErrorCode")
self.Msg = params.get("Msg")
self.BindId = params.get("BindId")
self.RefLeftNum = params.get("RefLeftNum")
self.RequestId = params.get("RequestId")
class DeleteCallBackRequest(AbstractModel):
    """DeleteCallBack request structure
    """
    def __init__(self):
        """
        :param BizAppId: Business appid
        :type BizAppId: str
        :param CallId: callId returned in the callback request response
        :type CallId: str
        :param CancelFlag: 0: tear down immediately regardless of call state (default); 1: do not tear down once the caller is ringing; 2: do not tear down once the caller has answered; 3: do not tear down once the callee is ringing; 4: do not tear down once the callee has answered
        :type CancelFlag: str
        """
self.BizAppId = None
self.CallId = None
self.CancelFlag = None
def _deserialize(self, params):
self.BizAppId = params.get("BizAppId")
self.CallId = params.get("CallId")
self.CancelFlag = params.get("CancelFlag")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DeleteCallBackResponse(AbstractModel):
    """DeleteCallBack response structure
    """
    def __init__(self):
        """
        :param ErrorCode: Error code
        :type ErrorCode: str
        :param Msg: Error message
        Note: this field may return null, indicating that no valid value could be obtained.
        :type Msg: str
        :param CallId: CDR ID
        Note: this field may return null, indicating that no valid value could be obtained.
        :type CallId: str
        :param RequestId: Unique request ID, returned with every request; provide it when troubleshooting.
        :type RequestId: str
        """
self.ErrorCode = None
self.Msg = None
self.CallId = None
self.RequestId = None
def _deserialize(self, params):
self.ErrorCode = params.get("ErrorCode")
self.Msg = params.get("Msg")
self.CallId = params.get("CallId")
self.RequestId = params.get("RequestId")
class DescribeCallBackCdrRequest(AbstractModel):
    """DescribeCallBackCdr request structure
    """
    def __init__(self):
        """
        :param BizAppId: Business appid
        :type BizAppId: str
        :param CallId: callId returned in the callback request response; the CDR details are looked up by this callId
        :type CallId: str
        :param Src: Calling number whose CDRs are queried; if left empty, every CDR in the time range is fetched
        :type Src: str
        :param StartTimeStamp: CDR start timestamp
        :type StartTimeStamp: str
        :param EndTimeStamp: CDR end timestamp
        :type EndTimeStamp: str
        """
self.BizAppId = None
self.CallId = None
self.Src = None
self.StartTimeStamp = None
self.EndTimeStamp = None
def _deserialize(self, params):
self.BizAppId = params.get("BizAppId")
self.CallId = params.get("CallId")
self.Src = params.get("Src")
self.StartTimeStamp = params.get("StartTimeStamp")
self.EndTimeStamp = params.get("EndTimeStamp")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeCallBackCdrResponse(AbstractModel):
    """DescribeCallBackCdr response structure
    """
    def __init__(self):
        """
        :param Cdr: CDR list
        Note: this field may return null, indicating that no valid value could be obtained.
        :type Cdr: list of CallBackCdr
        :param Offset: Offset
        Note: this field may return null, indicating that no valid value could be obtained.
        :type Offset: str
        :param ErrorCode: Error code
        Note: this field may return null, indicating that no valid value could be obtained.
        :type ErrorCode: str
        :param Msg: Error message
        Note: this field may return null, indicating that no valid value could be obtained.
        :type Msg: str
        :param RequestId: Unique request ID, returned with every request; provide it when troubleshooting.
        :type RequestId: str
        """
self.Cdr = None
self.Offset = None
self.ErrorCode = None
self.Msg = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Cdr") is not None:
self.Cdr = []
for item in params.get("Cdr"):
obj = CallBackCdr()
obj._deserialize(item)
self.Cdr.append(obj)
self.Offset = params.get("Offset")
self.ErrorCode = params.get("ErrorCode")
self.Msg = params.get("Msg")
self.RequestId = params.get("RequestId")
class DescribeCallBackStatusRequest(AbstractModel):
    """DescribeCallBackStatus request structure
    """
    def __init__(self):
        """
        :param BizAppId: Business appid
        :type BizAppId: str
        :param CallId: callId returned in the callback request response
        :type CallId: str
        :param Src: Calling number
        :type Src: str
        :param Dst: Called number
        :type Dst: str
        :param CallStatus: Latest call status: 0: unknown; 1: caller ringing; 2: caller answered; 3: callee ringing; 4: call in progress; 5: call ended
        :type CallStatus: str
        """
self.BizAppId = None
self.CallId = None
self.Src = None
self.Dst = None
self.CallStatus = None
def _deserialize(self, params):
self.BizAppId = params.get("BizAppId")
self.CallId = params.get("CallId")
self.Src = params.get("Src")
self.Dst = params.get("Dst")
self.CallStatus = params.get("CallStatus")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeCallBackStatusResponse(AbstractModel):
    """DescribeCallBackStatus response structure
    """
    def __init__(self):
        """
        :param ErrorCode: Error code
        :type ErrorCode: str
        :param Msg: Error message
        :type Msg: str
        :param AppId: Business appid
        :type AppId: str
        :param CallId: callId returned in the callback request response
        :type CallId: str
        :param Src: Calling number
        :type Src: str
        :param Dst: Called number
        :type Dst: str
        :param CallStatus: Latest call status: 0: unknown; 1: caller ringing; 2: caller answered; 3: callee ringing; 4: call in progress; 5: call ended
        :type CallStatus: str
        :param RequestId: Unique request ID, returned with every request; provide it when troubleshooting.
        :type RequestId: str
        """
self.ErrorCode = None
self.Msg = None
self.AppId = None
self.CallId = None
self.Src = None
self.Dst = None
self.CallStatus = None
self.RequestId = None
def _deserialize(self, params):
self.ErrorCode = params.get("ErrorCode")
self.Msg = params.get("Msg")
self.AppId = params.get("AppId")
self.CallId = params.get("CallId")
self.Src = params.get("Src")
self.Dst = params.get("Dst")
self.CallStatus = params.get("CallStatus")
self.RequestId = params.get("RequestId")
class DescribeCallerDisplayListRequest(AbstractModel):
    """DescribeCallerDisplayList request structure
    """
    def __init__(self):
        """
        :param BizAppId: Business appid
        :type BizAppId: str
        """
self.BizAppId = None
def _deserialize(self, params):
self.BizAppId = params.get("BizAppId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeCallerDisplayListResponse(AbstractModel):
    """DescribeCallerDisplayList response structure
    """
    def __init__(self):
        """
        :param AppId: appid
        Note: this field may return null, indicating that no valid value could be obtained.
        :type AppId: str
        :param CodeList: Collection of caller display numbers, a codeList[0...*] structure array; empty if the business displays the caller and callee numbers to each other
        Note: this field may return null, indicating that no valid value could be obtained.
        :type CodeList: list of CallBackPhoneCode
        :param ErrorCode: Error code
        :type ErrorCode: str
        :param Msg: Error message
        Note: this field may return null, indicating that no valid value could be obtained.
        :type Msg: str
        :param RequestId: Unique request ID, returned with every request; provide it when troubleshooting.
        :type RequestId: str
        """
self.AppId = None
self.CodeList = None
self.ErrorCode = None
self.Msg = None
self.RequestId = None
def _deserialize(self, params):
self.AppId = params.get("AppId")
if params.get("CodeList") is not None:
self.CodeList = []
for item in params.get("CodeList"):
obj = CallBackPhoneCode()
obj._deserialize(item)
self.CodeList.append(obj)
self.ErrorCode = params.get("ErrorCode")
self.Msg = params.get("Msg")
self.RequestId = params.get("RequestId")
class Get400CdrRequest(AbstractModel):
    """Get400Cdr request structure
    """
    def __init__(self):
        """
        :param BizAppId: Business appid
        :type BizAppId: str
        :param CallId: Unique call identifier callId, i.e. the callId returned in the direct-dial call response
        :type CallId: str
        :param Src: Calling number (starting with 0086) whose CDRs are queried; if left empty, every CDR in the time range is fetched
        :type Src: str
        :param StartTimeStamp: CDR start timestamp
        :type StartTimeStamp: str
        :param EndTimeStamp: CDR end timestamp
        :type EndTimeStamp: str
        """
self.BizAppId = None
self.CallId = None
self.Src = None
self.StartTimeStamp = None
self.EndTimeStamp = None
def _deserialize(self, params):
self.BizAppId = params.get("BizAppId")
self.CallId = params.get("CallId")
self.Src = params.get("Src")
self.StartTimeStamp = params.get("StartTimeStamp")
self.EndTimeStamp = params.get("EndTimeStamp")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class Get400CdrResponse(AbstractModel):
    """Get400Cdr response structure
    """
    def __init__(self):
        """
        :param ErrorCode: Error code
        :type ErrorCode: str
        :param Msg: Error message
        Note: this field may return null, indicating that no valid value could be obtained.
        :type Msg: str
        :param Offset: Offset
        Note: this field may return null, indicating that no valid value could be obtained.
        :type Offset: str
        :param Cdr: CDR list
        Note: this field may return null, indicating that no valid value could be obtained.
        :type Cdr: list of VirturalNumCdr
        :param RequestId: Unique request ID, returned with every request; provide it when troubleshooting.
        :type RequestId: str
        """
self.ErrorCode = None
self.Msg = None
self.Offset = None
self.Cdr = None
self.RequestId = None
def _deserialize(self, params):
self.ErrorCode = params.get("ErrorCode")
self.Msg = params.get("Msg")
self.Offset = params.get("Offset")
if params.get("Cdr") is not None:
self.Cdr = []
for item in params.get("Cdr"):
obj = VirturalNumCdr()
obj._deserialize(item)
self.Cdr.append(obj)
self.RequestId = params.get("RequestId")
class GetVirtualNumRequest(AbstractModel):
    """GetVirtualNum request structure
    """
    def __init__(self):
        """
        :param BizAppId: Business appid
        :type BizAppId: str
        :param Dst: Called number (prefixed with 0086, e.g. 008613631686024)
        :type Dst: str
        :param Src: Calling number (prefixed with 0086, e.g. 008613631686024); not required in xb mode, required in axb mode
        :type Src: str
        :param AccreditList: {"accreditList":["008613631686024","008612345678910"]}; mainly used in the N-1 scenario where the number binding is exclusive rather than shared. With dst bound exclusively to a virtual number, accreditList lists the members allowed to dial the virtual number bound to dst. The default is empty, meaning any number may dial the exclusive binding; the set may hold at most 30 entries and applies only to xb mode
        :type AccreditList: list of str
        :param AssignVirtualNum: Specific virtual (middle) number to assign (format: 008617013541251); the binding fails if it is already in use. If omitted, Tencent automatically assigns one from the number pool
        :type AssignVirtualNum: str
        :param Record: Whether to record the call: 0 = no recording, 1 = record. Defaults to no recording. Note that if the recording callback is needed, wait a while after the call finishes and unbind the virtual number only after the recording callback has been received.
        :type Record: str
        :param CityId: Home city of the displayed caller/callee numbers; specifying it means the displayed number belongs to that city. If no number is available for the city, a random city is chosen or, depending on backend configuration, 107 is returned; see 《腾讯-中间号-城市id.xlsx》 for the city IDs
        :type CityId: str
        :param BizId: Secondary business ID of the application; bizId must be globally unique under the appId and at most 16 bytes long.
        :type BizId: str
        :param MaxAssignTime: Maximum binding duration in seconds; defaults to 24 hours if unset, with a maximum of 168 hours
        :type MaxAssignTime: str
        :param StatusFlag: Bitmask of call statuses to push:
        caller call initiated: 1
        callee call initiated: 256
        caller ringing: 2
        callee ringing: 512
        caller answered: 4
        callee answered: 1024
        caller rejected the call: 8
        callee rejected the call: 2048
        caller hung up normally: 16
        callee hung up normally: 4096
        caller call exception: 32
        callee call exception: 8192
        For example:
        0 means no status is pushed
        4 means only the caller-answered status is pushed
        16191 means every status is pushed (the sum of all values above)
        :type StatusFlag: str
        :param StatusUrl: Requires statusFlag to be set; status callback notification URL; a default push URL can be configured for the production environment
        :type StatusUrl: str
        :param HangupUrl: CDR callback notification URL; a default push URL can be configured for the production environment
        :type HangupUrl: str
        :param RecordUrl: Recording URL callback notification address; a default push URL can be configured for the production environment
        :type RecordUrl: str
        """
self.BizAppId = None
self.Dst = None
self.Src = None
self.AccreditList = None
self.AssignVirtualNum = None
self.Record = None
self.CityId = None
self.BizId = None
self.MaxAssignTime = None
self.StatusFlag = None
self.StatusUrl = None
self.HangupUrl = None
self.RecordUrl = None
def _deserialize(self, params):
self.BizAppId = params.get("BizAppId")
self.Dst = params.get("Dst")
self.Src = params.get("Src")
self.AccreditList = params.get("AccreditList")
self.AssignVirtualNum = params.get("AssignVirtualNum")
self.Record = params.get("Record")
self.CityId = params.get("CityId")
self.BizId = params.get("BizId")
self.MaxAssignTime = params.get("MaxAssignTime")
self.StatusFlag = params.get("StatusFlag")
self.StatusUrl = params.get("StatusUrl")
self.HangupUrl = params.get("HangupUrl")
self.RecordUrl = params.get("RecordUrl")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
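# --- Illustrative note (not part of the Tencent Cloud SDK) -----------------
# StatusFlag above is a bitmask of the listed event values, passed as a string.
# For instance, pushing only "caller answered" (4), "callee answered" (1024)
# and "callee hung up normally" (4096) means StatusFlag = str(4 | 1024 | 4096),
# i.e. "5124"; 16191 (the sum of every listed value) pushes all events.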
class GetVirtualNumResponse(AbstractModel):
    """GetVirtualNum response structure
    """
    def __init__(self):
        """
        :param ErrorCode: Error code
        :type ErrorCode: str
        :param BindId: Binding ID; globally unique
        Note: this field may return null, indicating that no valid value could be obtained.
        :type BindId: str
        :param RefNum: Remaining reference count on the virtual number; it is unbound once the count reaches 0
        Note: this field may return null, indicating that no valid value could be obtained.
        :type RefNum: str
        :param VirtualNum: Virtual (middle) number
        Note: this field may return null, indicating that no valid value could be obtained.
        :type VirtualNum: str
        :param Msg: Error message
        Note: this field may return null, indicating that no valid value could be obtained.
        :type Msg: str
        :param RequestId: Unique request ID, returned with every request; provide it when troubleshooting.
        :type RequestId: str
        """
self.ErrorCode = None
self.BindId = None
self.RefNum = None
self.VirtualNum = None
self.Msg = None
self.RequestId = None
def _deserialize(self, params):
self.ErrorCode = params.get("ErrorCode")
self.BindId = params.get("BindId")
self.RefNum = params.get("RefNum")
self.VirtualNum = params.get("VirtualNum")
self.Msg = params.get("Msg")
self.RequestId = params.get("RequestId")
class KeyList(AbstractModel):
    """Mapping from a key press to an operation; if a pressed key is not defined in the structure, playback restarts from interruptPrompt
    """
    def __init__(self):
        """
        :param Key: Key pressed by the user (0-9, *, #, A-D)
        :type Key: str
        :param Operate: 1: call the callee; 2: replay the interruptPrompt prompt; 3: tear down the call
        :type Operate: str
        """
self.Key = None
self.Operate = None
def _deserialize(self, params):
self.Key = params.get("Key")
self.Operate = params.get("Operate")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class RreCallerHandle(AbstractModel):
    """Structure describing pre-processing of the caller leg; the configured operations decide whether the callee is then called. If used, this structure must be combined with the keyList structure, and both parameters become required
    """
    def __init__(self):
        """
        :param ReadPrompt: Voice prompt played to the caller once the caller is reached; all key presses are ignored while it plays
        :type ReadPrompt: str
        :param InterruptPrompt: Interruptible prompt; the user may press keys while it plays
        :type InterruptPrompt: str
        :param KeyList: Mapping from key presses to operations; if a pressed key is not defined in the structure, playback restarts from interruptPrompt
        :type KeyList: list of KeyList
        :param RepeatTimes: Maximum number of replays; the call is torn down once this is exceeded
        :type RepeatTimes: str
        :param KeyPressUrl: Key-press callback notification URL; no callback is sent if empty
        :type KeyPressUrl: str
        :param PromptGender: Prompt voice gender: 1 = female, 2 = male. Defaults to female
        :type PromptGender: str
        """
self.ReadPrompt = None
self.InterruptPrompt = None
self.KeyList = None
self.RepeatTimes = None
self.KeyPressUrl = None
self.PromptGender = None
def _deserialize(self, params):
self.ReadPrompt = params.get("ReadPrompt")
self.InterruptPrompt = params.get("InterruptPrompt")
if params.get("KeyList") is not None:
self.KeyList = []
for item in params.get("KeyList"):
obj = KeyList()
obj._deserialize(item)
self.KeyList.append(obj)
self.RepeatTimes = params.get("RepeatTimes")
self.KeyPressUrl = params.get("KeyPressUrl")
self.PromptGender = params.get("PromptGender")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
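# --- Illustrative sketch (not part of the Tencent Cloud SDK) ---------------
# Demonstrates how a nested PreCallerHandle/KeyList payload is deserialized
# into a CreateCallBackRequest; all literal values are hypothetical.
if __name__ == "__main__":
    _demo_req = CreateCallBackRequest()
    _demo_req._deserialize({
        "BizAppId": "demo-app-id",
        "Src": "008613631686024",
        "Dst": "008613631686025",
        "PreCallerHandle": {
            "ReadPrompt": "prompt-1",
            "InterruptPrompt": "prompt-2",
            "KeyList": [{"Key": "1", "Operate": "1"}, {"Key": "#", "Operate": "3"}],
            "RepeatTimes": "3",
        },
    })
    print(_demo_req.PreCallerHandle.KeyList[1].Operate)   # -> 3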
class VirturalNumCdr(AbstractModel):
    """Direct-dial CDR details
    """
    def __init__(self):
        """
        :param CallId: Call ID
        :type CallId: str
        :param BindId: Binding ID of the two parties' numbers plus the virtual (middle) number; globally unique
        :type BindId: str
        :param Src: Calling number
        :type Src: str
        :param Dst: Called number
        :type Dst: str
        :param DstVirtualNum: Virtual protection number the caller dialed directly from the contact list
        :type DstVirtualNum: str
        :param CallCenterAcceptTime: Time when the virtual protection number platform received the call
        :type CallCenterAcceptTime: str
        :param StartDstCallTime: Time when the call to the callee started
        :type StartDstCallTime: str
        :param StartDstRingTime: Time when the callee's phone started ringing
        :type StartDstRingTime: str
        :param DstAcceptTime: Time when the callee answered
        :type DstAcceptTime: str
        :param EndCallTime: Time when the call ended (user hung up)
        :type EndCallTime: str
        :param CallEndStatus: Final call status: 0: unknown; 1: normal call; 2: error looking up the forwarded callee number; 3: not connected; 4: not answered; 5: rejected and hung up; 6: phone switched off; 7: vacant number; 8: line busy; 9: account in arrears; 10: carrier line or platform error
        :type CallEndStatus: str
        :param SrcDuration: Call duration from the caller connecting to the virtual protection number until the call ended
        :type SrcDuration: str
        :param DstDuration: Call duration from the forwarded callee connecting until the call ended
        :type DstDuration: str
        :param RecordUrl: Recording URL; empty if the call was not recorded or recording failed
        :type RecordUrl: str
        """
self.CallId = None
self.BindId = None
self.Src = None
self.Dst = None
self.DstVirtualNum = None
self.CallCenterAcceptTime = None
self.StartDstCallTime = None
self.StartDstRingTime = None
self.DstAcceptTime = None
self.EndCallTime = None
self.CallEndStatus = None
self.SrcDuration = None
self.DstDuration = None
self.RecordUrl = None
def _deserialize(self, params):
self.CallId = params.get("CallId")
self.BindId = params.get("BindId")
self.Src = params.get("Src")
self.Dst = params.get("Dst")
self.DstVirtualNum = params.get("DstVirtualNum")
self.CallCenterAcceptTime = params.get("CallCenterAcceptTime")
self.StartDstCallTime = params.get("StartDstCallTime")
self.StartDstRingTime = params.get("StartDstRingTime")
self.DstAcceptTime = params.get("DstAcceptTime")
self.EndCallTime = params.get("EndCallTime")
self.CallEndStatus = params.get("CallEndStatus")
self.SrcDuration = params.get("SrcDuration")
self.DstDuration = params.get("DstDuration")
self.RecordUrl = params.get("RecordUrl")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
| 31.528993 | 211 | 0.609105 |
| 6c4fb3cdb5c6804e94fd5ced175528a90fbbcbd8 | 16,364 | py | Python
| psdaq/psdaq/pyxpm/surf/protocols/pgp/_Pgp3AxiL.py | ZhenghengLi/lcls2 | 94e75c6536954a58c8937595dcac295163aa1cdf | ["BSD-3-Clause-LBNL"] | 134 | 2017-02-22T18:07:00.000Z | 2022-03-21T16:12:23.000Z
| python/surf/protocols/pgp/_Pgp3AxiL.py | a-panella/surf | b7c116c9f84760bda2c1ea9fa89fddef58dd831d | ["BSD-3-Clause-LBNL"] | 251 | 2017-04-26T23:42:42.000Z | 2022-03-03T18:48:43.000Z
| python/surf/protocols/pgp/_Pgp3AxiL.py | a-panella/surf | b7c116c9f84760bda2c1ea9fa89fddef58dd831d | ["BSD-3-Clause-LBNL"] | 38 | 2017-02-21T21:15:03.000Z | 2022-02-06T00:22:37.000Z
#-----------------------------------------------------------------------------
# Title : PyRogue Pgp3AxiL Module
#-----------------------------------------------------------------------------
# Description:
# PyRogue Pgp3AxiL Module
#-----------------------------------------------------------------------------
# This file is part of 'SLAC Firmware Standard Library'.
# It is subject to the license terms in the LICENSE.txt file found in the
# top-level directory of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of 'SLAC Firmware Standard Library', including this file,
# may be copied, modified, propagated, or distributed except according to
# the terms contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import pyrogue as pr
import surf.xilinx
class Pgp3AxiL(pr.Device):
def __init__(self,
description = "Configuration and status a PGP 3 link",
numVc = 4,
statusCountBits = 16,
errorCountBits = 8,
writeEn = False,
**kwargs):
super().__init__(description=description, **kwargs)
def addErrorCountVar(**ecvkwargs):
self.add(pr.RemoteVariable(
bitSize = errorCountBits,
mode = 'RO',
bitOffset = 0,
base = pr.UInt,
disp = '{:d}',
pollInterval = 1,
**ecvkwargs))
self.add(pr.RemoteCommand(
name = 'CountReset',
offset = 0x00,
bitSize = 1,
bitOffset = 0,
function = pr.BaseCommand.toggle,
))
self.add(pr.RemoteVariable(
name = "AutoStatus",
description = "Auto Status Send Enable (PPI)",
offset = 0x04,
bitSize = 1,
bitOffset = 0,
mode = 'RW',
base = pr.Bool,
))
self.add(pr.RemoteVariable(
name = "Loopback",
description = "GT Loopback Mode",
offset = 0x08,
bitSize = 3,
bitOffset = 0,
mode = 'RW' if writeEn else 'RO',
base = pr.UInt,
))
self.add(pr.RemoteVariable(
name = 'SkipInterval',
mode = 'RW',
offset = 0xC,
disp = '{:d}',
))
####################
# RX
###################
self.add(pr.RemoteVariable(
name = "RxPhyActive",
offset = 0x10,
bitSize = 1,
bitOffset = 0,
mode = 'RO',
base = pr.Bool,
description = "RX Phy is Ready",
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = "RxLocalLinkReady",
offset = 0x10,
bitSize = 1,
bitOffset = 1,
mode = 'RO',
base = pr.Bool,
description = "Rx Local Link Ready",
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = "RxRemLinkReady",
offset = 0x10,
bitSize = 1,
bitOffset = 2,
mode = 'RO',
base = pr.Bool,
description = "Rx Remote Link Ready",
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = "RxRemPause",
offset = 0x20,
bitSize = numVc,
bitOffset = 16,
mode = 'RO',
base = pr.UInt,
disp = '{:#_b}',
description = "RX Remote Pause Asserted",
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = "RxRemOverflow",
offset = 0x20,
bitSize = numVc,
bitOffset = 0,
mode = 'RO',
base = pr.UInt,
disp = '{:#_b}',
description = "Received remote overflow flag",
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = "RxClockFreqRaw",
offset = 0x2C,
bitSize = 32,
bitOffset = 0,
mode = 'RO',
base = pr.UInt,
hidden = True,
pollInterval = 1,
))
self.add(pr.LinkVariable(
name = "RxClockFrequency",
units = "MHz",
mode = 'RO',
dependencies = [self.RxClockFreqRaw],
linkedGet = lambda: self.RxClockFreqRaw.value() * 1.0e-6,
disp = '{:0.3f}',
))
self.add(pr.RemoteVariable(
name = "RxFrameCount",
offset = 0x24,
bitSize = statusCountBits,
bitOffset = 0,
mode = 'RO',
base = pr.UInt,
pollInterval = 1,
))
addErrorCountVar(
name = 'RxFrameErrorCount',
offset = 0x28,
)
addErrorCountVar(
name = "RxCellErrorCount",
offset = 0x14,
)
addErrorCountVar(
name = "RxLinkDownCount",
offset = 0x18
)
addErrorCountVar(
name = 'RxLinkErrorCount',
offset = 0x1C,
)
for i in range(16):
addErrorCountVar(
name = f'RxRemOverflowCount[{i}]',
offset = 0x40+(i*4),
hidden = (i >= numVc),
)
addErrorCountVar(
name = 'RxOpCodeCount',
offset = 0x30,
)
self.add(pr.RemoteVariable(
name = 'RxOpCodeDataLastRaw',
mode = 'RO',
offset = 0x34,
bitSize = 56,
base = pr.UInt,
hidden = True,
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = 'RxOpCodeNumLastRaw',
mode = 'RO',
offset = 0x34,
bitOffset = 56,
bitSize = 3,
hidden = True,
pollInterval = 1,
))
self.add(pr.LinkVariable(
name = 'RxOpCodeLast',
mode = 'RO',
dependencies = [self.RxOpCodeDataLastRaw, self.RxOpCodeNumLastRaw],
linkedGet = lambda: f'{self.RxOpCodeNumLastRaw.value()} - {self.RxOpCodeDataLastRaw.value():x}',
))
self.add(pr.RemoteVariable(
name = 'PhyRxValid',
mode = 'RO',
offset = 0x108,
bitOffset = 2,
bitSize = 1,
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = 'PhyRxData',
mode = 'RO',
offset = 0x100,
bitOffset = 0,
bitSize = 64,
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = 'PhyRxHeader',
mode = 'RO',
offset = 0x108,
bitOffset = 0,
bitSize = 2,
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = 'EbRxValid',
mode = 'RO',
offset = 0x118,
bitOffset = 2,
bitSize = 1,
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = 'EbRxData',
mode = 'RO',
offset = 0x110,
bitOffset = 0,
bitSize = 64,
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = 'EbRxHeader',
mode = 'RO',
offset = 0x118,
bitOffset = 0,
bitSize = 2,
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = 'EbRxStatus',
mode = 'RO',
offset = 0x118,
bitOffset = 3,
bitSize = 9,
disp = '{:d}',
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = 'EbRxOverflow',
mode = 'RO',
offset = 0x11C,
bitOffset = 0,
bitSize = 1,
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = 'EbRxOverflowCnt',
mode = 'RO',
offset = 0x11C,
bitOffset = 1,
bitSize = errorCountBits,
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = 'GearboxAligned',
mode = 'RO',
offset = 0x120,
bitOffset = 0,
bitSize = 1,
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = 'GearboxAlignCnt',
mode = 'RO',
offset = 0x120,
bitOffset = 8,
bitSize = 8,
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = 'PhyRxInitCnt',
mode = 'RO',
offset = 0x130,
bitOffset = 0,
bitSize = 4,
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = 'RemLinkData',
mode = 'RO',
offset = 0x138,
bitSize = 56,
pollInterval = 1,
))
################
# TX
################
self.add(pr.RemoteVariable(
name = 'FlowControlDisable',
offset = 0x80,
bitOffset = 0,
bitSize = 1,
base = pr.Bool,
mode = 'RW' if writeEn else 'RO'
))
self.add(pr.RemoteVariable(
name = 'TxDisable',
offset = 0x80,
bitOffset = 1,
bitSize = 1,
base = pr.Bool,
mode = 'RW' if writeEn else 'RO'
))
self.add(pr.RemoteVariable(
name = "TxPhyActive",
offset = 0x84,
bitSize = 1,
bitOffset = 1,
mode = 'RO',
base = pr.Bool,
description = "TX Phy is Ready",
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = 'TxLinkReady',
offset = 0x84,
bitOffset = 0,
bitSize = 1,
mode = 'RO',
base = pr.Bool,
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = "TxLocPause",
offset = 0x8C,
bitSize = numVc,
bitOffset = 16,
mode = 'RO',
base = pr.UInt,
disp = '{:#_b}',
description = "Tx Local Pause Asserted",
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = "TxLocOverflow",
offset = 0x8C,
bitSize = numVc,
bitOffset = 0,
mode = 'RO',
base = pr.UInt,
disp = '{:#_b}',
description = "Received local overflow flag",
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = "TxClockFreqRaw",
offset = 0x9C,
bitSize = 32,
bitOffset = 0,
mode = 'RO',
base = pr.UInt,
hidden = True,
pollInterval = 1,
))
self.add(pr.LinkVariable(
name = "TxClockFrequency",
units = "MHz",
mode = 'RO',
dependencies = [self.TxClockFreqRaw],
linkedGet = lambda: self.TxClockFreqRaw.value() * 1.0e-6,
disp = '{:0.3f}',
))
self.add(pr.RemoteVariable(
name = "TxFrameCount",
offset = 0x90,
bitSize = statusCountBits,
bitOffset = 0,
mode = 'RO',
base = pr.UInt,
pollInterval = 1,
))
addErrorCountVar(
name = 'TxFrameErrorCount',
offset = 0x94,
)
for i in range(16):
addErrorCountVar(
name = f'TxLocOverflowCount[{i}]',
offset = 0xB0 + (i*4),
hidden = (i >= numVc),
)
addErrorCountVar(
name = 'TxOpCodeCount',
offset = 0xA0,
)
self.add(pr.RemoteVariable(
name = 'TxOpCodeDataLastRaw',
mode = 'RO',
offset = 0xA4,
bitSize = 56,
base = pr.UInt,
hidden = True,
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = 'TxOpCodeNumLastRaw',
mode = 'RO',
offset = 0xA4,
bitOffset = 56,
bitSize = 3,
hidden = True,
pollInterval = 1,
))
self.add(pr.RemoteVariable(
name = 'TxDiffCtrl',
mode = 'RW',
offset = 0xAC,
bitOffset = 0,
bitSize = 5,
))
self.add(pr.RemoteVariable(
name = 'TxPreCursor',
mode = 'RW',
offset = 0xAC,
bitOffset = 8,
bitSize = 5,
))
self.add(pr.RemoteVariable(
name = 'TxPostCursor',
mode = 'RW',
offset = 0xAC,
bitOffset = 16,
bitSize = 5,
))
self.add(pr.LinkVariable(
name = 'TxOpCodeLast',
mode = 'RO',
dependencies = [self.TxOpCodeDataLastRaw, self.TxOpCodeNumLastRaw],
            linkedGet = lambda: f'{self.TxOpCodeNumLastRaw.value()} - {self.TxOpCodeDataLastRaw.value():x}',
        ))
def countReset(self):
self.CountReset()
class Pgp3GthUs(pr.Device):
def __init__(self, *,
enDrp = True,
enMon = True,
numVc = 4,
monWriteEn = False,
monErrorCountBits = 4,
monStatusCountBits = 32,
**kwargs):
        super().__init__(**kwargs)
if enMon:
self.add(Pgp3AxiL(
offset = 0x0,
numVc = numVc,
writeEn = monWriteEn,
errorCountBits = monErrorCountBits,
statusCountBits = monStatusCountBits,
))
if enDrp:
self.add(surf.xilinx.Gthe3Channel(
offset = 0x1000,
expand = False,
))
class Pgp3GthUsWrapper(pr.Device):
def __init__(self, *,
lanes = 1,
enGtDrp = True,
enQpllDrp = False,
enMon = True,
**kwargs):
        super().__init__(**kwargs)
self.addNodes(
nodeClass = Pgp3GthUs,
number = lanes,
stride = 0x2000,
name = 'Pgp3GthUs',
offset = 0x0,
enMon = enMon,
enDrp = enGtDrp
)
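# --- Illustrative wiring sketch (not part of the surf library) -------------
# Rough idea of how the wrapper above is attached to a pyrogue tree; the Root
# subclass, its names, and the memBase argument are assumptions made up for
# illustration (a real design supplies a concrete rogue memory bridge).
class _ExamplePgp3Root(pr.Root):
    def __init__(self, memBase=None, **kwargs):
        super().__init__(name='ExamplePgp3Root', description='PGP3 monitor example', **kwargs)
        self.add(Pgp3GthUsWrapper(
            name    = 'Pgp3',
            memBase = memBase,   # assumed to be a valid rogue memory interface
            lanes   = 2,
            enMon   = True,
        ))
# With such a root running, a per-lane status read would look roughly like:
#   root.Pgp3.Pgp3GthUs[0].Pgp3AxiL.RxLocalLinkReady.get()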
| 29.59132 | 112 | 0.385847 |
| 9c2c8be8670918f507f524f88126bed83efdaceb | 1,587 | py | Python
| logging_utils/context/adapter.py | michalbachowski/pylogging_utils | 0fe12fbea940fe4c9ae0eab76988d47bcda75213 | ["MIT"] | null | null | null
| logging_utils/context/adapter.py | michalbachowski/pylogging_utils | 0fe12fbea940fe4c9ae0eab76988d47bcda75213 | ["MIT"] | null | null | null
| logging_utils/context/adapter.py | michalbachowski/pylogging_utils | 0fe12fbea940fe4c9ae0eab76988d47bcda75213 | ["MIT"] | null | null | null
# -*- coding: utf-8 -*-
""" Package contains logging adapter """
import logging
from contextlib import contextmanager
class LoggerAdapterWithContext(logging.LoggerAdapter):
""" Logger subclass that allows to define context.
IT IS NEITHER THREAD NOR ASYNC SAFE
"""
    def __init__(self, stack, logger):
        """ Object initializer
:param stack: context stack to be used
:type stack: logging_utils.context.stack
:param logger: inner (decoratee) logger instance
:type logger: logging.Logger
:returns: new instance of self
:rtype: logging_utils.context.LoggerContextual
"""
self._stack = stack
        super(LoggerAdapterWithContext, self).__init__(logger, None)
@contextmanager
    def context(self, **kwargs):
        """ Context manager that pushes the given values onto the context
            stack for the duration of the block
        :returns: this adapter with the pushed context applied
        :rtype: logging_utils.context.adapter.LoggerAdapterWithContext
        """
self._stack.push(kwargs)
try:
yield self
finally:
self._stack.pop()
def process(self, msg, kwargs):
""" Process log message before write them into log
:param msg: message to be written into log
:type msg: str
:param kwargs: additional arguments to be logged along with message
:type kwargs: dict
:returns: tuple containing pre-processed msg and kwargs
:rtype: tuple
"""
return super(LoggerAdapterWithContext, self).process(
'; '.join([msg, str(self._stack)]), kwargs)
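# --- Illustrative usage sketch (not part of the package) -------------------
# The stack import below is an assumption: the constructor docstring only says
# the stack lives in logging_utils.context.stack and must support push(),
# pop() and str(); adjust the name to the real class.
if __name__ == "__main__":
    from logging_utils.context.stack import Stack  # assumed class name

    logging.basicConfig(level=logging.INFO)
    log = LoggerAdapterWithContext(Stack(), logging.getLogger(__name__))
    with log.context(request_id="abc123", user="alice"):
        log.info("handling request")  # the message is suffixed with the current context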
| 31.117647 | 75 | 0.637681 |
| c403bd42b7122fdfb40a4328debc6dcf328b2d50 | 5,101 | py | Python
| cctbx/geometry_restraints/lbfgs.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | ["BSD-3-Clause-LBNL"] | 2 | 2021-03-18T12:31:57.000Z | 2022-03-14T06:27:06.000Z
| cctbx/geometry_restraints/lbfgs.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | ["BSD-3-Clause-LBNL"] | null | null | null
| cctbx/geometry_restraints/lbfgs.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | ["BSD-3-Clause-LBNL"] | 1 | 2021-03-26T12:52:30.000Z | 2021-03-26T12:52:30.000Z
from __future__ import absolute_import, division, print_function
from cctbx import crystal
from cctbx.array_family import flex
import scitbx.lbfgs
class empty: pass
class lbfgs(object):
def __init__(self,
sites_cart,
geometry_restraints_manager,
riding_h_manager=None,
correct_special_position_tolerance=1.0,
geometry_restraints_flags=None,
lbfgs_termination_params=None,
lbfgs_core_params=None,
lbfgs_exception_handling_params=None,
disable_asu_cache=False,
sites_cart_selection=None,
site_labels=None,
states_collector=None):
if (lbfgs_termination_params is None):
lbfgs_termination_params = scitbx.lbfgs.termination_parameters(
max_iterations=1000)
self.riding_h_manager = riding_h_manager
self.correct_special_position_tolerance=correct_special_position_tolerance
self.site_labels = site_labels
self.states_collector = states_collector
self.tmp = empty()
self.rmsd_bonds, self.rmsd_angles = None, None
if sites_cart_selection:
self.sites_cart_selection = flex.bool(sites_cart_selection)
self.tmp.reduced_sites_cart=sites_cart.select(self.sites_cart_selection)
self.x = flex.double(self.tmp.reduced_sites_cart.size()*3, 0)
else:
self.sites_cart_selection = None
if(self.riding_h_manager is not None):
self.x = self.riding_h_manager.refinable_parameters_init()
else:
self.x = flex.double(sites_cart.size()*3, 0)
self.tmp.geometry_restraints_manager = geometry_restraints_manager
self.tmp.geometry_restraints_flags = geometry_restraints_flags
self.tmp.disable_asu_cache = disable_asu_cache
self.tmp.sites_cart = sites_cart
self.tmp.sites_shifted = sites_cart
self.first_target_result = None
self.minimizer = scitbx.lbfgs.run(
target_evaluator=self,
termination_params=lbfgs_termination_params,
core_params=lbfgs_core_params,
exception_handling_params=lbfgs_exception_handling_params)
self.apply_shifts()
self.compute_target(compute_gradients=True)
self.final_target_result = self.tmp.target_result
sites_cart.clear()
sites_cart.extend(self.tmp.sites_shifted)
del self.tmp
del self.x
self.first_target_value = self.first_target_result.target
self.final_target_value = self.final_target_result.target
def apply_shifts(self):
if self.sites_cart_selection:
shifted = self.tmp.reduced_sites_cart + flex.vec3_double(self.x)
self.tmp.sites_shifted = self.tmp.sites_cart.deep_copy()
self.tmp.sites_shifted.set_selected(self.sites_cart_selection, shifted)
else:
if(self.riding_h_manager is None):
self.tmp.sites_shifted = self.tmp.sites_cart + flex.vec3_double(self.x)
else:
new_sites = self.tmp.sites_cart.select(
self.riding_h_manager.not_hd_selection) + flex.vec3_double(self.x)
self.tmp.sites_shifted.set_selected(
self.riding_h_manager.not_hd_selection, new_sites)
self.riding_h_manager.idealize(sites_cart = self.tmp.sites_shifted)
if(self.states_collector is not None):
self.states_collector.add(sites_cart = self.tmp.sites_shifted)
if (self.tmp.geometry_restraints_manager.crystal_symmetry is not None):
crystal_symmetry = self.tmp.geometry_restraints_manager.crystal_symmetry
site_symmetry_table \
= self.tmp.geometry_restraints_manager.site_symmetry_table
assert site_symmetry_table is not None
for i_seq in site_symmetry_table.special_position_indices():
self.tmp.sites_shifted[i_seq] = crystal.correct_special_position(
crystal_symmetry=crystal_symmetry,
tolerance=self.correct_special_position_tolerance,
special_op=site_symmetry_table.get(i_seq).special_op(),
site_cart=self.tmp.sites_shifted[i_seq])
def compute_target(self, compute_gradients):
self.tmp.target_result = \
self.tmp.geometry_restraints_manager.energies_sites(
sites_cart=self.tmp.sites_shifted,
flags=self.tmp.geometry_restraints_flags,
compute_gradients=compute_gradients,
disable_asu_cache=self.tmp.disable_asu_cache,
site_labels=self.site_labels)
def compute_functional_and_gradients(self):
if (self.first_target_result is None):
assert self.x.all_eq(0)
else:
self.apply_shifts()
self.compute_target(compute_gradients=True)
self.f = self.tmp.target_result.target
if (self.first_target_result is None):
self.first_target_result = self.tmp.target_result
if self.sites_cart_selection:
ptr = self.tmp.target_result.gradients
self.g = ptr.select(self.sites_cart_selection).as_double()
else:
if(self.riding_h_manager is None):
self.g = self.tmp.target_result.gradients.as_double()
else:
self.g = self.riding_h_manager.gradients_reduced_cpp(
gradients = self.tmp.target_result.gradients,
sites_cart = self.tmp.sites_shifted,
hd_selection = self.riding_h_manager.hd_selection)
return self.f, self.g
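# --- Illustrative call pattern (not part of cctbx) --------------------------
# How `grm` (a geometry restraints manager) and `xray_structure` are built is
# assumed here and depends on the surrounding cctbx/mmtbx model setup; the
# minimizer call itself mirrors the constructor above.
def _example_minimize(xray_structure, grm):
    sites_cart = xray_structure.sites_cart()
    minimized = lbfgs(
        sites_cart=sites_cart,
        geometry_restraints_manager=grm,
        lbfgs_termination_params=scitbx.lbfgs.termination_parameters(max_iterations=500))
    # sites_cart is updated in place; the two targets show the improvement.
    return minimized.first_target_value, minimized.final_target_value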
| 42.157025 | 79 | 0.743972 |
| 7f42eb7abf0824bfcfe27ab977b04d9f8fd2f53e | 2,144 | py | Python
| 8puzzle/Solver.py | RobMcZag/python-algorithms | 307f243dfcbc6d08e55e0186df8606d3ed34be9b | ["Apache-2.0"] | null | null | null
| 8puzzle/Solver.py | RobMcZag/python-algorithms | 307f243dfcbc6d08e55e0186df8606d3ed34be9b | ["Apache-2.0"] | null | null | null
| 8puzzle/Solver.py | RobMcZag/python-algorithms | 307f243dfcbc6d08e55e0186df8606d3ed34be9b | ["Apache-2.0"] | null | null | null
import Board
import heapq
import sys
class Node:
def __init__(self, board, move, prev):
self.board = board # Board object, not only the tiles
self.move = move
self.prev = prev
def manhattan(self):
return self.move + self.board.manhattan()
class Solver:
def __init__(self, board):
startNode = Node(board, 0, None)
startTwinNode = Node(board.twin(), 0, None)
solNode = self.solveManhattan(startNode, startTwinNode)
node = solNode
self.solution = []
while node != None:
self.solution.insert(0, node.board) # the Board object
node = node.prev
def solveManhattan(self, startNode, startTwinNode):
pq = []
heapq.heappush(pq, (startNode.manhattan(), startNode))
pqTwin = []
heapq.heappush(pqTwin, (startTwinNode.manhattan(), startTwinNode))
while True:
(m, node) = heapq.heappop(pq)
(mt, nodeTwin) = heapq.heappop(pqTwin)
if node.board.isgoal() or nodeTwin.board.isgoal():
break
self.addNeighbours(node, pq)
self.addNeighbours(nodeTwin, pqTwin)
if node.board.isgoal(): return node
return None
def addNeighbours(self, node, pq):
for board in node.board.neighbors():
if node.prev == None or board != node.prev.board:
n = Node(board, node.move + 1, node)
heapq.heappush(pq, (n.manhattan(), n))
def isSolvable(self):
return len(self.solution) > 0
def moves(self):
return len(self.solution) -1
def main(args):
fh = open(args[1])
l = fh.readline()
N = int(l.strip())
board = []
for line in fh:
for word in line.split():
board.append(int(word))
solver = Solver(Board.Board(board, N))
if not solver.isSolvable():
        print("No solution possible")
    else:
        print("Minimum number of moves =", solver.moves())
        for board in solver.solution:
            print(board.toString())
        print("Minimum number of moves =", solver.moves())


if __name__ == '__main__':
    main(sys.argv)
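# --- Illustrative input format (not from the repository) --------------------
# main() expects a file whose first line is the board dimension N, followed by
# N*N tile values with 0 marking the blank, e.g. a solvable 3x3 instance:
#
#   3
#   0 1 3
#   4 2 5
#   7 8 6
#
# Run it as:  python Solver.py puzzle.txt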
| 24.930233 | 74 | 0.58069 |
| 6691fa2e04b77f521ef276505619ed6ab46f9efe | 412 | py | Python
| restaurants/migrations/0011_remove_restaurantlocations_overview.py | sunilsm7/django_resto | b7698653093af7e6f26dd0d0c7b8d6046b402ea4 | ["MIT"] | 1 | 2017-08-03T01:40:12.000Z | 2017-08-03T01:40:12.000Z
| restaurants/migrations/0011_remove_restaurantlocations_overview.py | sunilsm7/django_resto | b7698653093af7e6f26dd0d0c7b8d6046b402ea4 | ["MIT"] | null | null | null
| restaurants/migrations/0011_remove_restaurantlocations_overview.py | sunilsm7/django_resto | b7698653093af7e6f26dd0d0c7b8d6046b402ea4 | ["MIT"] | null | null | null
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-05 12:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('restaurants', '0010_auto_20170804_1749'),
]
operations = [
migrations.RemoveField(
model_name='restaurantlocations',
name='overview',
),
]
| 20.6 | 51 | 0.631068 |
| d4862bf89df29693687cff0b156df7b1384762f6 | 2,216 | py | Python
| py/testdir_multi_jvm/test_rf_1ktrees_job_cancel_3_fvec.py | gigliovale/h2o | be350f3f2c2fb6f135cc07c41f83fd0e4f521ac1 | ["Apache-2.0"] | 882 | 2015-05-22T02:59:21.000Z | 2022-02-17T05:02:48.000Z
| py/testdir_multi_jvm/test_rf_1ktrees_job_cancel_3_fvec.py | VonRosenchild/h2o-2 | be350f3f2c2fb6f135cc07c41f83fd0e4f521ac1 | ["Apache-2.0"] | 1 | 2022-02-22T12:15:02.000Z | 2022-02-22T12:15:02.000Z
| py/testdir_multi_jvm/test_rf_1ktrees_job_cancel_3_fvec.py | VonRosenchild/h2o-2 | be350f3f2c2fb6f135cc07c41f83fd0e4f521ac1 | ["Apache-2.0"] | 392 | 2015-05-22T17:04:11.000Z | 2022-02-22T09:04:39.000Z
import unittest, time, sys
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(3)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_rf_1ktrees_job_cancel_3_fvec(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
# always match the run below!
# just using one file for now
for x in [1000]:
shCmdString = "perl " + h2o.find_file("syn_scripts/parity.pl") + " 128 4 "+ str(x) + " quad " + SYNDATASETS_DIR
h2o.spawn_cmd_and_wait('parity.pl', shCmdString.split(),4)
csvFilename = "parity_128_4_" + str(x) + "_quad.data"
# always match the gen above!
for trial in range (1,20):
sys.stdout.write('.')
sys.stdout.flush()
csvFilename = "parity_128_4_" + str(1000) + "_quad.data"
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
hex_key = csvFilename + "_" + str(trial) + ".hex"
parseResult = h2o_cmd.parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=30)
h2o.verboseprint("Trial", trial)
start = time.time()
h2o_cmd.runRF(parseResult=parseResult, trees=trial, max_depth=2, rfView=False,
timeoutSecs=600, retryDelaySecs=3)
            print("RF #", trial, "started on ", csvFilename, 'took', time.time() - start, 'seconds')
# FIX! need to get more intelligent here
time.sleep(1)
a = h2o.nodes[0].jobs_admin()
            print("jobs_admin():", h2o.dump_json(a))
# "destination_key": "pytest_model",
# FIX! using 'key': 'pytest_model" with no time delay causes a failure
time.sleep(1)
jobsList = a['jobs']
for j in jobsList:
b = h2o.nodes[0].jobs_cancel(key=j['key'])
                print("jobs_cancel():", h2o.dump_json(b))
# redirects to jobs, but we just do it directly.
if __name__ == '__main__':
h2o.unit_main()
| 36.933333 | 129 | 0.579422 |
| 4276cc0a2d671b7a868ed75b6b36b5a97e8db12d | 7,618 | py | Python
| lte/gateway/python/magma/pipelined/tests/test_service_manager.py | paboldin/magma | e6796e0be7b34ccdb3b61a0e8c5de309f78e5fe4 | ["BSD-3-Clause"] | null | null | null
| lte/gateway/python/magma/pipelined/tests/test_service_manager.py | paboldin/magma | e6796e0be7b34ccdb3b61a0e8c5de309f78e5fe4 | ["BSD-3-Clause"] | 10 | 2021-03-31T20:19:00.000Z | 2022-02-19T07:09:57.000Z
| lte/gateway/python/magma/pipelined/tests/test_service_manager.py | 119Vik/magma-1 | 107a7b374466a837fc0a49b283ba9d6ff1d702e3 | ["BSD-3-Clause"] | 3 | 2020-08-20T18:45:34.000Z | 2020-08-20T20:18:42.000Z
|
"""
Copyright (c) 2019-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
import unittest
from collections import OrderedDict
from unittest.mock import MagicMock
from magma.pipelined.app.base import ControllerType
from magma.pipelined.app.access_control import AccessControlController
from magma.pipelined.app.arp import ArpController
from lte.protos.mconfig.mconfigs_pb2 import PipelineD
from magma.pipelined.app.dpi import DPIController
from magma.pipelined.app.gy import GYController
from magma.pipelined.app.enforcement import EnforcementController
from magma.pipelined.app.enforcement_stats import EnforcementStatsController
from magma.pipelined.app.inout import INGRESS, EGRESS, PHYSICAL_TO_LOGICAL
from magma.pipelined.app.ipfix import IPFIXController
from magma.pipelined.service_manager import (
ServiceManager,
TableNumException,
Tables,
)
class ServiceManagerTest(unittest.TestCase):
def setUp(self):
magma_service_mock = MagicMock()
magma_service_mock.mconfig = PipelineD()
magma_service_mock.mconfig.services.extend(
[PipelineD.ENFORCEMENT, PipelineD.DPI])
magma_service_mock.config = {
'static_services': ['arpd', 'access_control', 'ipfix']
}
self.service_manager = ServiceManager(magma_service_mock)
def test_get_table_num(self):
self.assertEqual(self.service_manager.get_table_num(INGRESS), 1)
self.assertEqual(self.service_manager.get_table_num(EGRESS), 20)
self.assertEqual(
self.service_manager.get_table_num(ArpController.APP_NAME), 2)
self.assertEqual(
self.service_manager.get_table_num(
AccessControlController.APP_NAME), 3)
self.assertEqual(
self.service_manager.get_table_num(DPIController.APP_NAME),
11)
self.assertEqual(
self.service_manager.get_table_num(GYController.APP_NAME),
12)
self.assertEqual(
self.service_manager.get_table_num(EnforcementController.APP_NAME),
13)
self.assertEqual(
self.service_manager.get_table_num(EnforcementStatsController.APP_NAME),
14)
self.assertEqual(
self.service_manager.get_table_num(IPFIXController.APP_NAME),
15)
self.assertEqual(
self.service_manager.get_table_num(PHYSICAL_TO_LOGICAL),
10)
def test_get_next_table_num(self):
self.assertEqual(self.service_manager.get_next_table_num(INGRESS), 2)
self.assertEqual(
self.service_manager.get_next_table_num(ArpController.APP_NAME), 3)
self.assertEqual(
self.service_manager.get_next_table_num(
AccessControlController.APP_NAME), 10)
self.assertEqual(
self.service_manager.get_next_table_num(DPIController.APP_NAME),
12)
self.assertEqual(
self.service_manager.get_next_table_num(
GYController.APP_NAME),
13)
self.assertEqual(
self.service_manager.get_next_table_num(
EnforcementController.APP_NAME),
14)
self.assertEqual(
self.service_manager.get_next_table_num(
EnforcementStatsController.APP_NAME),
15)
self.assertEqual(
self.service_manager.get_next_table_num(IPFIXController.APP_NAME),
20)
self.assertEqual(
self.service_manager.get_next_table_num(PHYSICAL_TO_LOGICAL),
11)
with self.assertRaises(TableNumException):
self.service_manager.get_next_table_num(EGRESS)
def test_is_app_enabled(self):
self.assertTrue(self.service_manager.is_app_enabled(
EnforcementController.APP_NAME))
self.assertTrue(self.service_manager.is_app_enabled(
DPIController.APP_NAME))
self.assertTrue(self.service_manager.is_app_enabled(
EnforcementStatsController.APP_NAME))
self.assertFalse(
self.service_manager.is_app_enabled("Random name lol"))
def test_allocate_scratch_tables(self):
self.assertEqual(self.service_manager.allocate_scratch_tables(
EnforcementController.APP_NAME, 1), [21])
self.assertEqual(self.service_manager.allocate_scratch_tables(
EnforcementController.APP_NAME, 2), [22, 23])
# There are a total of 200 tables. First 20 tables are reserved as
# main tables and 3 scratch tables are allocated above.
with self.assertRaises(TableNumException):
self.service_manager.allocate_scratch_tables(
EnforcementController.APP_NAME, 200 - 20 - 3)
def test_get_scratch_table_nums(self):
enforcement_scratch = \
self.service_manager.allocate_scratch_tables(
EnforcementController.APP_NAME, 2) + \
self.service_manager.allocate_scratch_tables(
EnforcementController.APP_NAME, 3)
self.assertEqual(self.service_manager.get_scratch_table_nums(
EnforcementController.APP_NAME), enforcement_scratch)
def test_get_all_table_assignments(self):
self.service_manager.allocate_scratch_tables(
EnforcementController.APP_NAME, 1)
self.service_manager.allocate_scratch_tables(
EnforcementStatsController.APP_NAME, 2)
result = self.service_manager.get_all_table_assignments()
print(result)
expected = OrderedDict([
('mme', Tables(main_table=0, scratch_tables=[],
type=ControllerType.SPECIAL)),
('ingress', Tables(main_table=1, scratch_tables=[],
type=ControllerType.SPECIAL)),
('arpd', Tables(main_table=2, scratch_tables=[],
type=ControllerType.PHYSICAL)),
('access_control', Tables(main_table=3, scratch_tables=[],
type=ControllerType.PHYSICAL)),
('middle', Tables(main_table=10, scratch_tables=[], type=None)),
('dpi', Tables(main_table=11, scratch_tables=[],
type=ControllerType.LOGICAL)),
('gy', Tables(main_table=12, scratch_tables=[],
type=ControllerType.LOGICAL)),
('enforcement', Tables(main_table=13, scratch_tables=[21],
type=ControllerType.LOGICAL)),
('enforcement_stats', Tables(main_table=14, scratch_tables=[22, 23],
type=ControllerType.LOGICAL)),
('ipfix', Tables(main_table=15, scratch_tables=[],
type=ControllerType.LOGICAL)),
('egress', Tables(main_table=20, scratch_tables=[],
type=ControllerType.SPECIAL)),
])
self.assertEqual(len(result), len(expected))
for result_key, expected_key in zip(result, expected):
self.assertEqual(result_key, expected_key)
self.assertEqual(result[result_key].main_table,
expected[expected_key].main_table)
self.assertEqual(result[result_key].scratch_tables,
expected[expected_key].scratch_tables)
if __name__ == "__main__":
unittest.main()
| 43.284091
| 84
| 0.657784
|
e4077ef79544db7e9591a692fcd45ba677a3b942
| 16,625
|
py
|
Python
|
cltk/tests/test_tokenize.py
|
BananaNosh/cltk
|
92049088213ad1cd4b78f65edfb715b907038a33
|
[
"MIT"
] | null | null | null |
cltk/tests/test_tokenize.py
|
BananaNosh/cltk
|
92049088213ad1cd4b78f65edfb715b907038a33
|
[
"MIT"
] | null | null | null |
cltk/tests/test_tokenize.py
|
BananaNosh/cltk
|
92049088213ad1cd4b78f65edfb715b907038a33
|
[
"MIT"
] | null | null | null |
# -*-coding:utf-8-*-
"""Test cltk.tokenize.
TODO: Add tests for the Indian lang tokenizers: from cltk.tokenize.indian_tokenizer import indian_punctuation_tokenize_regex
"""
from cltk.corpus.utils.importer import CorpusImporter
from cltk.tokenize.sentence import TokenizeSentence
from cltk.tokenize.word import nltk_tokenize_words
from cltk.tokenize.word import WordTokenizer
from cltk.tokenize.line import LineTokenizer
import os
import unittest
__license__ = 'MIT License. See LICENSE.'
class TestSequenceFunctions(unittest.TestCase): # pylint: disable=R0904
"""Class for unittest"""
def setUp(self):
"""Clone Greek models in order to test pull function and other model
tests later.
"""
corpus_importer = CorpusImporter('greek')
corpus_importer.import_corpus('greek_models_cltk')
file_rel = os.path.join('~/cltk_data/greek/model/greek_models_cltk/README.md')
file = os.path.expanduser(file_rel)
file_exists = os.path.isfile(file)
self.assertTrue(file_exists)
corpus_importer = CorpusImporter('latin')
# corpus_importer.import_corpus('latin_models_cltk')
file_rel = os.path.join('~/cltk_data/latin/model/latin_models_cltk/README.md')
file = os.path.expanduser(file_rel)
file_exists = os.path.isfile(file)
        if file_exists:
            self.assertTrue(file_exists)
        else:
            corpus_importer.import_corpus('latin_models_cltk')
            file_exists = os.path.isfile(file)
            self.assertTrue(file_exists)
def test_sentence_tokenizer_latin(self):
"""Test tokenizing Latin sentences."""
text = "O di inmortales! ubinam gentium sumus? in qua urbe vivimus? quam rem publicam habemus? Hic, hic sunt in nostro numero, patres conscripti, in hoc orbis terrae sanctissimo gravissimoque consilio, qui de nostro omnium interitu, qui de huius urbis atque adeo de orbis terrarum exitio cogitent! Hos ego video consul et de re publica sententiam rogo et, quos ferro trucidari oportebat, eos nondum voce volnero! Fuisti igitur apud Laecam illa nocte, Catilina, distribuisti partes Italiae, statuisti, quo quemque proficisci placeret, delegisti, quos Romae relinqueres, quos tecum educeres, discripsisti urbis partes ad incendia, confirmasti te ipsum iam esse exiturum, dixisti paulum tibi esse etiam nunc morae, quod ego viverem." # pylint: disable=line-too-long
target = ['O di inmortales!', 'ubinam gentium sumus?', 'in qua urbe vivimus?', 'quam rem publicam habemus?', 'Hic, hic sunt in nostro numero, patres conscripti, in hoc orbis terrae sanctissimo gravissimoque consilio, qui de nostro omnium interitu, qui de huius urbis atque adeo de orbis terrarum exitio cogitent!', 'Hos ego video consul et de re publica sententiam rogo et, quos ferro trucidari oportebat, eos nondum voce volnero!', 'Fuisti igitur apud Laecam illa nocte, Catilina, distribuisti partes Italiae, statuisti, quo quemque proficisci placeret, delegisti, quos Romae relinqueres, quos tecum educeres, discripsisti urbis partes ad incendia, confirmasti te ipsum iam esse exiturum, dixisti paulum tibi esse etiam nunc morae, quod ego viverem.'] # pylint: disable=line-too-long
tokenizer = TokenizeSentence('latin')
tokenized_sentences = tokenizer.tokenize_sentences(text)
self.assertEqual(tokenized_sentences, target)
'''
def test_sentence_tokenizer_greek(self):
"""Test tokenizing Greek sentences.
TODO: Re-enable this. Test & code are good, but now fail on Travis CI for some reason.
"""
sentences = 'εἰ δὲ καὶ τῷ ἡγεμόνι πιστεύσομεν ὃν ἂν Κῦρος διδῷ, τί κωλύει καὶ τὰ ἄκρα ἡμῖν κελεύειν Κῦρον προκαταλαβεῖν; ἐγὼ γὰρ ὀκνοίην μὲν ἂν εἰς τὰ πλοῖα ἐμβαίνειν ἃ ἡμῖν δοίη, μὴ ἡμᾶς ταῖς τριήρεσι καταδύσῃ, φοβοίμην δ᾽ ἂν τῷ ἡγεμόνι ὃν δοίη ἕπεσθαι, μὴ ἡμᾶς ἀγάγῃ ὅθεν οὐκ ἔσται ἐξελθεῖν· βουλοίμην δ᾽ ἂν ἄκοντος ἀπιὼν Κύρου λαθεῖν αὐτὸν ἀπελθών· ὃ οὐ δυνατόν ἐστιν.' # pylint: disable=line-too-long
good_tokenized_sentences = ['εἰ δὲ καὶ τῷ ἡγεμόνι πιστεύσομεν ὃν ἂν Κῦρος διδῷ, τί κωλύει καὶ τὰ ἄκρα ἡμῖν κελεύειν Κῦρον προκαταλαβεῖν;', 'ἐγὼ γὰρ ὀκνοίην μὲν ἂν εἰς τὰ πλοῖα ἐμβαίνειν ἃ ἡμῖν δοίη, μὴ ἡμᾶς ταῖς τριήρεσι καταδύσῃ, φοβοίμην δ᾽ ἂν τῷ ἡγεμόνι ὃν δοίη ἕπεσθαι, μὴ ἡμᾶς ἀγάγῃ ὅθεν οὐκ ἔσται ἐξελθεῖν· βουλοίμην δ᾽ ἂν ἄκοντος ἀπιὼν Κύρου λαθεῖν αὐτὸν ἀπελθών· ὃ οὐ δυνατόν ἐστιν.'] # pylint: disable=line-too-long
tokenizer = TokenizeSentence('greek')
tokenized_sentences = tokenizer.tokenize_sentences(sentences)
self.assertEqual(len(tokenized_sentences), len(good_tokenized_sentences))
'''
def test_greek_word_tokenizer(self):
"""Test Latin-specific word tokenizer."""
word_tokenizer = WordTokenizer('greek')
# Test sources:
# - Thuc. 1.1.1
test = "Θουκυδίδης Ἀθηναῖος ξυνέγραψε τὸν πόλεμον τῶν Πελοποννησίων καὶ Ἀθηναίων, ὡς ἐπολέμησαν πρὸς ἀλλήλους, ἀρξάμενος εὐθὺς καθισταμένου καὶ ἐλπίσας μέγαν τε ἔσεσθαι καὶ ἀξιολογώτατον τῶν προγεγενημένων, τεκμαιρόμενος ὅτι ἀκμάζοντές τε ᾖσαν ἐς αὐτὸν ἀμφότεροι παρασκευῇ τῇ πάσῃ καὶ τὸ ἄλλο Ἑλληνικὸν ὁρῶν ξυνιστάμενον πρὸς ἑκατέρους, τὸ μὲν εὐθύς, τὸ δὲ καὶ διανοούμενον."
target = ['Θουκυδίδης', 'Ἀθηναῖος', 'ξυνέγραψε', 'τὸν', 'πόλεμον', 'τῶν', 'Πελοποννησίων', 'καὶ', 'Ἀθηναίων', ',', 'ὡς', 'ἐπολέμησαν', 'πρὸς', 'ἀλλήλους', ',', 'ἀρξάμενος', 'εὐθὺς', 'καθισταμένου', 'καὶ', 'ἐλπίσας', 'μέγαν', 'τε', 'ἔσεσθαι', 'καὶ', 'ἀξιολογώτατον', 'τῶν', 'προγεγενημένων', ',', 'τεκμαιρόμενος', 'ὅτι', 'ἀκμάζοντές', 'τε', 'ᾖσαν', 'ἐς', 'αὐτὸν', 'ἀμφότεροι', 'παρασκευῇ', 'τῇ', 'πάσῃ', 'καὶ', 'τὸ', 'ἄλλο', 'Ἑλληνικὸν', 'ὁρῶν', 'ξυνιστάμενον', 'πρὸς', 'ἑκατέρους', ',', 'τὸ', 'μὲν', 'εὐθύς', ',', 'τὸ', 'δὲ', 'καὶ', 'διανοούμενον', '.']
result = word_tokenizer.tokenize(test)
self.assertEqual(result, target)
def test_latin_word_tokenizer(self):
"""Test Latin-specific word tokenizer."""
word_tokenizer = WordTokenizer('latin')
#Test sources:
# - V. Aen. 1.1
# - Prop. 2.5.1-2
# - Ov. Am. 1.8.65-66
# - Cic. Phillip. 13.14
# - Plaut. Capt. 937
# - Lucr. DRN. 5.1351-53
# - Plaut. Bacch. 837-38
# - Plaut. Amph. 823
tests = ['Arma virumque cano, Troiae qui primus ab oris.',
'Hoc verumst, tota te ferri, Cynthia, Roma, et non ignota vivere nequitia?',
'Nec te decipiant veteres circum atria cerae. Tolle tuos tecum, pauper amator, avos!',
'Neque enim, quod quisque potest, id ei licet, nec, si non obstatur, propterea etiam permittitur.',
'Quid opust verbis? lingua nullast qua negem quidquid roges.',
'Textile post ferrumst, quia ferro tela paratur, nec ratione alia possunt tam levia gigni insilia ac fusi, radii, scapique sonantes.', # pylint: disable=line-too-long
'Dic sodes mihi, bellan videtur specie mulier?',
'Cenavin ego heri in navi in portu Persico?'
]
results = []
for test in tests:
result = word_tokenizer.tokenize(test)
results.append(result)
target = [['Arma', 'virum', '-que', 'cano', ',', 'Troiae', 'qui', 'primus', 'ab', 'oris', '.'],
['Hoc', 'verum', 'est', ',', 'tota', 'te', 'ferri', ',', 'Cynthia', ',', 'Roma', ',', 'et', 'non', 'ignota', 'vivere', 'nequitia', '?'], # pylint: disable=line-too-long
['Nec', 'te', 'decipiant', 'veteres', 'circum', 'atria', 'cerae', '.', 'Tolle', 'tuos', 'cum', 'te', ',', 'pauper', 'amator', ',', 'avos', '!'], # pylint: disable=line-too-long
['Neque', 'enim', ',', 'quod', 'quisque', 'potest', ',', 'id', 'ei', 'licet', ',', 'nec', ',', 'si', 'non', 'obstatur', ',', 'propterea', 'etiam', 'permittitur', '.'], # pylint: disable=line-too-long
['Quid', 'opus', 'est', 'verbis', '?', 'lingua', 'nulla', 'est', 'qua', 'negem', 'quidquid', 'roges', '.'], # pylint: disable=line-too-long
['Textile', 'post', 'ferrum', 'est', ',', 'quia', 'ferro', 'tela', 'paratur', ',', 'nec', 'ratione', 'alia', 'possunt', 'tam', 'levia', 'gigni', 'insilia', 'ac', 'fusi', ',', 'radii', ',', 'scapi', '-que', 'sonantes', '.'], # pylint: disable=line-too-long
['Dic', 'si', 'audes', 'mihi', ',', 'bella', '-ne', 'videtur', 'specie', 'mulier', '?'],
['Cenavi', '-ne', 'ego', 'heri', 'in', 'navi', 'in', 'portu', 'Persico', '?']
]
self.assertEqual(results, target)
def test_tokenize_arabic_words(self):
word_tokenizer = WordTokenizer('arabic')
tests = ['اللُّغَةُ الْعَرَبِيَّةُ جَمِيلَةٌ.',
'انما الْمُؤْمِنُونَ اخوه فاصلحوا بَيْنَ اخويكم',
'الْعَجُزُ عَنِ الْإِدْرَاكِ إِدْرَاكٌ، وَالْبَحْثَ فِي ذاتِ اللَّه اشراك.',
'اللَّهُمُّ اُسْتُرْ عُيُوبَنَا وَأَحْسَنَ خَوَاتِيمَنَا الْكَاتِبِ: نَبِيلُ جلهوم',
'الرَّأْي قَبْلَ شَجَاعَة الشّجعَانِ',
'فَأَنْزَلْنَا مِنْ السَّمَاء مَاء فَأَسْقَيْنَاكُمُوهُ',
'سُئِلَ بَعْضُ الْكُتَّابِ عَنِ الْخَطّ، مَتَى يَسْتَحِقُّ أَنْ يُوصَفَ بِالْجَوْدَةِ ؟'
]
results = []
for test in tests:
result = word_tokenizer.tokenize(test)
results.append(result)
target = [['اللُّغَةُ', 'الْعَرَبِيَّةُ', 'جَمِيلَةٌ', '.'],
['انما', 'الْمُؤْمِنُونَ', 'اخوه', 'فاصلحوا', 'بَيْنَ', 'اخويكم'],
['الْعَجُزُ', 'عَنِ', 'الْإِدْرَاكِ', 'إِدْرَاكٌ', '،', 'وَالْبَحْثَ', 'فِي', 'ذاتِ', 'اللَّه', 'اشراك', '.'], # pylint: disable=line-too-long
['اللَّهُمُّ', 'اُسْتُرْ', 'عُيُوبَنَا', 'وَأَحْسَنَ', 'خَوَاتِيمَنَا', 'الْكَاتِبِ', ':', 'نَبِيلُ', 'جلهوم'], # pylint: disable=line-too-long
['الرَّأْي', 'قَبْلَ', 'شَجَاعَة', 'الشّجعَانِ'],
['فَأَنْزَلْنَا', 'مِنْ', 'السَّمَاء', 'مَاء', 'فَأَسْقَيْنَاكُمُوهُ'],
['سُئِلَ', 'بَعْضُ', 'الْكُتَّابِ', 'عَنِ', 'الْخَطّ', '،', 'مَتَى', 'يَسْتَحِقُّ', 'أَنْ', 'يُوصَفَ', 'بِالْجَوْدَةِ', '؟'] # pylint: disable=line-too-long
]
self.assertEqual(results, target)
def test_word_tokenizer_french(self):
word_tokenizer = WordTokenizer('french')
tests = ["S'a table te veulz maintenir, Honnestement te dois tenir Et garder les enseignemens Dont cilz vers sont commancemens."] # pylint: disable=line-too-long
results = []
for test in tests:
result = word_tokenizer.tokenize(test)
results.append(result)
target = [["S'", 'a', 'table', 'te', 'veulz', 'maintenir', ',', 'Honnestement', 'te', 'dois', 'tenir', 'Et', 'garder', 'les', 'enseignemens', 'Dont', 'cilz', 'vers', 'sont', 'commancemens', '.']] # pylint: disable=line-too-long
self.assertEqual(results, target)
def test_nltk_tokenize_words(self):
"""Test wrapper for NLTK's PunktLanguageVars()"""
tokens = nltk_tokenize_words("Sentence 1. Sentence 2.", attached_period=False)
target = ['Sentence', '1', '.', 'Sentence', '2', '.']
self.assertEqual(tokens, target)
def test_nltk_tokenize_words_attached(self):
"""Test wrapper for NLTK's PunktLanguageVars(), returning unaltered output."""
tokens = nltk_tokenize_words("Sentence 1. Sentence 2.", attached_period=True)
target = ['Sentence', '1.', 'Sentence', '2.']
self.assertEqual(tokens, target)
def test_sanskrit_nltk_tokenize_words(self):
"""Test wrapper for NLTK's PunktLanguageVars()"""
tokens = nltk_tokenize_words("कृपया।", attached_period=False, language='sanskrit')
target = ['कृपया', '।']
self.assertEqual(tokens, target)
def test_sanskrit_nltk_tokenize_words_attached(self):
"""Test wrapper for NLTK's PunktLanguageVars(), returning unaltered output."""
tokens = nltk_tokenize_words("कृपया।", attached_period=True, language='sanskrit')
target = ['कृपया।']
self.assertEqual(tokens, target)
def test_nltk_tokenize_words_assert(self):
"""Test assert error for CLTK's word tokenizer."""
with self.assertRaises(AssertionError):
nltk_tokenize_words(['Sentence', '1.'])
def test_line_tokenizer(self):
"""Test LineTokenizer"""
text = """49. Miraris verbis nudis me scribere versus?\nHoc brevitas fecit, sensus coniungere binos."""
target = ['49. Miraris verbis nudis me scribere versus?','Hoc brevitas fecit, sensus coniungere binos.']
tokenizer = LineTokenizer('latin')
tokenized_lines = tokenizer.tokenize(text)
self.assertTrue(tokenized_lines == target)
def test_line_tokenizer_include_blanks(self):
"""Test LineTokenizer"""
text = """48. Cum tibi contigerit studio cognoscere multa,\nFac discas multa, vita nil discere velle.\n\n49. Miraris verbis nudis me scribere versus?\nHoc brevitas fecit, sensus coniungere binos.""" # pylint: disable=line-too-long
target = ['48. Cum tibi contigerit studio cognoscere multa,','Fac discas multa, vita nil discere velle.','','49. Miraris verbis nudis me scribere versus?','Hoc brevitas fecit, sensus coniungere binos.'] # pylint: disable=line-too-long
tokenizer = LineTokenizer('latin')
tokenized_lines = tokenizer.tokenize(text, include_blanks=True)
self.assertTrue(tokenized_lines == target)
def test_french_line_tokenizer(self):
"""Test LineTokenizer"""
text = """Ki de bone matire traite,\nmult li peise, se bien n’est faite.\nOëz, seignur, que dit Marie,\nki en sun tens pas ne s’oblie. """ # pylint: disable=line-too-long
target = ['Ki de bone matire traite,', 'mult li peise, se bien n’est faite.','Oëz, seignur, que dit Marie,', 'ki en sun tens pas ne s’oblie. '] # pylint: disable=line-too-long
tokenizer = LineTokenizer('french')
tokenized_lines = tokenizer.tokenize(text)
self.assertTrue(tokenized_lines == target)
def test_french_line_tokenizer_include_blanks(self):
"""Test LineTokenizer"""
text = """Ki de bone matire traite,\nmult li peise, se bien n’est faite.\nOëz, seignur, que dit Marie,\nki en sun tens pas ne s’oblie.\n\nLes contes que jo sai verais,\ndunt li Bretun unt fait les lais,\nvos conterai assez briefment.""" # pylint: disable=line-too-long
target = ['Ki de bone matire traite,', 'mult li peise, se bien n’est faite.', 'Oëz, seignur, que dit Marie,', 'ki en sun tens pas ne s’oblie.','','Les contes que jo sai verais,','dunt li Bretun unt fait les lais,','vos conterai assez briefment.'] # pylint: disable=line-too-long
tokenizer = LineTokenizer('french')
tokenized_lines = tokenizer.tokenize(text, include_blanks=True)
self.assertTrue(tokenized_lines == target)
def test_old_norse_word_tokenizer(self):
text = "Gylfi konungr var maðr vitr ok fjölkunnigr. " \
"Hann undraðist þat mjök, er ásafólk var svá kunnigt, at allir hlutir gengu at vilja þeira."
target = ['Gylfi', 'konungr', 'var', 'maðr', 'vitr', 'ok', 'fjölkunnigr', '.', 'Hann', 'undraðist', 'þat',
'mjök', ',', 'er', 'ásafólk', 'var', 'svá', 'kunnigt', ',', 'at', 'allir', 'hlutir', 'gengu', 'at',
'vilja', 'þeira', '.']
word_tokenizer = WordTokenizer('old_norse')
result = word_tokenizer.tokenize(text)
#print(result)
self.assertTrue(result == target)
def test_middle_english_tokenizer(self):
text = " Fers am I ferd of oure fare;\n Fle we ful fast þer-fore. \n Can Y no cownsel bot care.\n\n"
target = ['Fers', 'am', 'I', 'ferd', 'of', 'oure', 'fare', ';', 'Fle', 'we', 'ful', 'fast', 'þer', '-', 'fore', '.',
'Can', 'Y', 'no', 'cownsel', 'bot', 'care', '.']
tokenizer = WordTokenizer('middle_english')
tokenized = tokenizer.tokenize(text)
self.assertTrue(tokenized == target)
def test_middle_high_german_tokenizer(self):
text = "Gâwân het êre unde heil,\nieweders volleclîchen teil:\nnu nâht och sînes kampfes zît."
target = ['Gâwân', 'het', 'êre', 'unde', 'heil', ',', 'ieweders', 'volleclîchen', 'teil', ':', 'nu', 'nâht', 'och', 'sînes', 'kampfes', 'zît', '.']
tokenizer = WordTokenizer('middle_high_german')
tokenized_lines = tokenizer.tokenize(text)
self.assertTrue(tokenized_lines == target)
if __name__ == '__main__':
unittest.main()
| 65.711462
| 793
| 0.624962
|
40724a822382ad4babd8267e28fc829e620fb619
| 6,030
|
py
|
Python
|
optimizer/optimize.py
|
emitch/SEAMLeSS
|
cae21c67316ed36529fdc2e470a105a9f847975c
|
[
"MIT"
] | 4
|
2018-12-17T18:45:57.000Z
|
2021-04-29T16:30:42.000Z
|
optimizer/optimize.py
|
emitch/SEAMLeSS
|
cae21c67316ed36529fdc2e470a105a9f847975c
|
[
"MIT"
] | 19
|
2019-01-02T19:09:12.000Z
|
2020-12-14T18:50:47.000Z
|
optimizer/optimize.py
|
emitch/SEAMLeSS
|
cae21c67316ed36529fdc2e470a105a9f847975c
|
[
"MIT"
] | 2
|
2020-03-18T01:24:03.000Z
|
2022-01-06T06:19:58.000Z
|
import os
import sys
import time
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.optim import lr_scheduler
from torch.autograd import Variable
import numpy as np
import collections
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import random
class Optimizer():
def __init__(self, ndownsamples=4, currn=5, avgn=20, lambda1=0.4, lr=0.2, eps=0.01, min_iter=20, max_iter=1000):
self.ndownsamples = ndownsamples
self.currn = currn
self.avgn = avgn
self.lambda1 = lambda1
self.lr = lr
self.eps = eps
self.identities = {}
self.min_iter = min_iter
self.max_iter = max_iter
@staticmethod
def center(var, dims, d):
if not isinstance(d, collections.Sequence):
d = [d for i in range(len(dims))]
for idx, dim in enumerate(dims):
if d[idx] == 0:
continue
var = var.narrow(dim, int(d[idx]/2), int(var.size()[dim] - d[idx]))
return var
def get_identity_grid(self, dim, cache=True):
if dim not in self.identities:
gx, gy = np.linspace(-1, 1, dim), np.linspace(-1, 1, dim)
I = np.stack(np.meshgrid(gx, gy))
I = np.expand_dims(I, 0)
I = torch.FloatTensor(I)
I = torch.autograd.Variable(I, requires_grad=False)
I = I.permute(0,2,3,1).cuda()
self.identities[dim] = I
if cache:
return self.identities[dim]
else:
return self.identities[dim].clone()
def jacob(self, fields):
def dx(f):
p = Variable(torch.zeros((1,1,f.size(1),2))).cuda()
return torch.cat((p, f[:,2:,:,:] - f[:,:-2,:,:], p), 1)
def dy(f):
p = Variable(torch.zeros((1,f.size(1),1,2))).cuda()
return torch.cat((p, f[:,:,2:,:] - f[:,:,:-2,:], p), 2)
fields = sum(map(lambda f: [dx(f), dy(f)], fields), [])
field = torch.sum(torch.cat(fields, -1) ** 2, -1)
return field
def penalty(self, fields, mask=1):
jacob = self.jacob(fields)
jacob = torch.mul(jacob, mask)
return torch.sum(jacob)
def render(self, src, field):
src, field = torch.FloatTensor(src).cuda(), torch.FloatTensor(field).cuda()
src, field = Variable(src).unsqueeze(0).unsqueeze(0), Variable(field).unsqueeze(0)
#print(src, field)
y = F.grid_sample(src, field + self.get_identity_grid(field.size(2)), mode='bilinear')
return y.data.cpu().numpy()
def process(self, s, t, crop=0, mask=1):
print(s.shape, t.shape)
downsample = lambda x: nn.AvgPool2d(2**x,2**x, count_include_pad=False) if x > 0 else (lambda y: y)
upsample = nn.Upsample(scale_factor=2, mode='bilinear')
s, t = torch.FloatTensor(s), torch.FloatTensor(t)
src = Variable((s - torch.mean(s)) / torch.std(s)).cuda().unsqueeze(0).unsqueeze(0)
target = Variable((t - torch.mean(t)) / torch.std(t)).cuda().unsqueeze(0).unsqueeze(0)
mask = Variable(torch.FloatTensor(mask)).cuda().unsqueeze(0)
dim = int(src.size()[-1] / (2 ** (self.ndownsamples - 1)))
field = Variable(torch.zeros((1,dim,dim,2))).cuda().detach()
field.requires_grad = True
updates = 0
masking = not list(mask.shape)[-1] == 1
for downsamples in reversed(range(self.ndownsamples)):
src_, target_ = downsample(downsamples)(src).detach(), downsample(downsamples)(target).detach()
mask_ = downsample(downsamples)(mask).detach() if masking else mask.detach()
mask_.requires_grad = False
src_.requires_grad = False
target_.requires_grad = False
field = field.detach()
field.requires_grad = True
opt = torch.optim.SGD([field], lr=self.lr/(downsamples+1))
#sched = lr_scheduler.StepLR(opt, step_size=1, gamma=0.995)
costs = []
start_updates = updates
print(downsamples)
while True:
updates += 1
pred = F.grid_sample(src_, field + self.get_identity_grid(field.size(2)), mode='bilinear')
if masking:
penalty1 = self.penalty([self.center(field, (1,2), 128 / (2**downsamples))], self.center(mask_, (1,2), 128 / (2**downsamples)))
else:
penalty1 = self.penalty([self.center(field, (1,2), 128 / (2**downsamples))])
diff = torch.mean(self.center((pred - target_)**2, (-1,-2), 128 / (2**downsamples)))
cost = diff + penalty1 * self.lambda1/(downsamples+1)
print(cost.data.cpu().numpy())
costs.append(cost)
cost.backward()
opt.step()
                #sched.step()
opt.zero_grad()
if len(costs) > self.avgn + self.currn and len(costs)>self.min_iter:
hist = sum(costs[-(self.avgn+self.currn):-self.currn]).data[0] / self.avgn
curr = sum(costs[-self.currn:]).data[0] / self.currn
if abs((hist-curr)/hist) < self.eps/(2**downsamples) or len(costs)>self.max_iter:
break
#print downsamples, updates - start_updates
if downsamples > 0:
field = upsample(field.permute(0,3,1,2)).permute(0,2,3,1)
#print(cost.data[0], diff.data[0], penalty1.data[0])
print('done:', updates)
print(field.shape)
return self.center(field, (1,2), crop*2).data.cpu().numpy()[0]
if __name__ == '__main__':
o = Optimizer()
print('Testing...')
s = np.random.uniform(0, 1, (256,256)).astype(np.float32)
t = np.random.uniform(0, 1, (256,256)).astype(np.float32)
flow = o.process(s, t)
print(flow.shape)
assert flow.shape == (1,256,256,2)
flow = o.process(s, t, crop=10)
assert flow.shape == (1,236,236,2)
print ('All tests passed.')
| 42.167832
| 147
| 0.564013
|
7d4770879de082fb5b5e698b2872bd2c50c5b9d4
| 597
|
py
|
Python
|
apppeluqueria/admin.py
|
samuelgmar/django_peluqueria_app_brooklyn
|
80d17c64ea605c20d4caa75201ad3767681e517b
|
[
"Unlicense"
] | null | null | null |
apppeluqueria/admin.py
|
samuelgmar/django_peluqueria_app_brooklyn
|
80d17c64ea605c20d4caa75201ad3767681e517b
|
[
"Unlicense"
] | null | null | null |
apppeluqueria/admin.py
|
samuelgmar/django_peluqueria_app_brooklyn
|
80d17c64ea605c20d4caa75201ad3767681e517b
|
[
"Unlicense"
] | null | null | null |
from django.contrib import admin
from .models import cliente, direccion, categoria,servicio, jefe, trabajador, reserva, hora, fecha, fechaTrabajador, galeria
from django.contrib.auth.models import Permission
# Register your models here.
admin.site.register(cliente)
admin.site.register(direccion)
admin.site.register(Permission)
admin.site.register(categoria)
admin.site.register(servicio)
admin.site.register(jefe)
admin.site.register(trabajador)
admin.site.register(reserva)
admin.site.register(hora)
admin.site.register(fecha)
admin.site.register(fechaTrabajador)
admin.site.register(galeria)
| 33.166667
| 124
| 0.825796
|
c2667d54a1b20992b870fcc90e28a2ea50199393
| 18,295
|
py
|
Python
|
pybind/slxos/v16r_1_00b/brocade_vswitch_rpc/get_vmpolicy_macaddr/output/vmpolicy_macaddr/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/brocade_vswitch_rpc/get_vmpolicy_macaddr/output/vmpolicy_macaddr/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/brocade_vswitch_rpc/get_vmpolicy_macaddr/output/vmpolicy_macaddr/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class vmpolicy_macaddr(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-vswitch - based on the path /brocade_vswitch_rpc/get-vmpolicy-macaddr/output/vmpolicy-macaddr. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__mac','__name','__datacenter','__dvpg_nn','__port_nn','__port_prof',)
_yang_name = 'vmpolicy-macaddr'
_rest_name = 'vmpolicy-macaddr'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__datacenter = YANGDynClass(base=unicode, is_leaf=True, yang_name="datacenter", rest_name="datacenter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)
self.__name = YANGDynClass(base=unicode, is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)
self.__port_prof = YANGDynClass(base=unicode, is_leaf=True, yang_name="port-prof", rest_name="port-prof", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)
self.__dvpg_nn = YANGDynClass(base=unicode, is_leaf=True, yang_name="dvpg-nn", rest_name="dvpg-nn", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)
self.__mac = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'}), is_leaf=True, yang_name="mac", rest_name="mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='yang:mac-address', is_config=True)
self.__port_nn = YANGDynClass(base=unicode, is_leaf=True, yang_name="port-nn", rest_name="port-nn", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'brocade_vswitch_rpc', u'get-vmpolicy-macaddr', u'output', u'vmpolicy-macaddr']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'get-vmpolicy-macaddr', u'output', u'vmpolicy-macaddr']
def _get_mac(self):
"""
Getter method for mac, mapped from YANG variable /brocade_vswitch_rpc/get_vmpolicy_macaddr/output/vmpolicy_macaddr/mac (yang:mac-address)
YANG Description: vnic Mac address in HH:HH:HH:HH:HH:HH format
"""
return self.__mac
def _set_mac(self, v, load=False):
"""
Setter method for mac, mapped from YANG variable /brocade_vswitch_rpc/get_vmpolicy_macaddr/output/vmpolicy_macaddr/mac (yang:mac-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_mac is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mac() directly.
YANG Description: vnic Mac address in HH:HH:HH:HH:HH:HH format
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'}), is_leaf=True, yang_name="mac", rest_name="mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='yang:mac-address', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mac must be of a type compatible with yang:mac-address""",
'defined-type': "yang:mac-address",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'}), is_leaf=True, yang_name="mac", rest_name="mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='yang:mac-address', is_config=True)""",
})
self.__mac = t
if hasattr(self, '_set'):
self._set()
def _unset_mac(self):
self.__mac = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'}), is_leaf=True, yang_name="mac", rest_name="mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='yang:mac-address', is_config=True)
def _get_name(self):
"""
Getter method for name, mapped from YANG variable /brocade_vswitch_rpc/get_vmpolicy_macaddr/output/vmpolicy_macaddr/name (string)
    YANG Description: virtual machine name
"""
return self.__name
def _set_name(self, v, load=False):
"""
Setter method for name, mapped from YANG variable /brocade_vswitch_rpc/get_vmpolicy_macaddr/output/vmpolicy_macaddr/name (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_name() directly.
    YANG Description: virtual machine name
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """name must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)""",
})
self.__name = t
if hasattr(self, '_set'):
self._set()
def _unset_name(self):
self.__name = YANGDynClass(base=unicode, is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)
def _get_datacenter(self):
"""
Getter method for datacenter, mapped from YANG variable /brocade_vswitch_rpc/get_vmpolicy_macaddr/output/vmpolicy_macaddr/datacenter (string)
YANG Description: host datacenter
"""
return self.__datacenter
def _set_datacenter(self, v, load=False):
"""
Setter method for datacenter, mapped from YANG variable /brocade_vswitch_rpc/get_vmpolicy_macaddr/output/vmpolicy_macaddr/datacenter (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_datacenter is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_datacenter() directly.
YANG Description: host datacenter
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="datacenter", rest_name="datacenter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """datacenter must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="datacenter", rest_name="datacenter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)""",
})
self.__datacenter = t
if hasattr(self, '_set'):
self._set()
def _unset_datacenter(self):
self.__datacenter = YANGDynClass(base=unicode, is_leaf=True, yang_name="datacenter", rest_name="datacenter", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)
def _get_dvpg_nn(self):
"""
Getter method for dvpg_nn, mapped from YANG variable /brocade_vswitch_rpc/get_vmpolicy_macaddr/output/vmpolicy_macaddr/dvpg_nn (string)
YANG Description: distributed virtual port group
"""
return self.__dvpg_nn
def _set_dvpg_nn(self, v, load=False):
"""
Setter method for dvpg_nn, mapped from YANG variable /brocade_vswitch_rpc/get_vmpolicy_macaddr/output/vmpolicy_macaddr/dvpg_nn (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_dvpg_nn is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dvpg_nn() directly.
YANG Description: distributed virtual port group
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="dvpg-nn", rest_name="dvpg-nn", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """dvpg_nn must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="dvpg-nn", rest_name="dvpg-nn", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)""",
})
self.__dvpg_nn = t
if hasattr(self, '_set'):
self._set()
def _unset_dvpg_nn(self):
self.__dvpg_nn = YANGDynClass(base=unicode, is_leaf=True, yang_name="dvpg-nn", rest_name="dvpg-nn", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)
def _get_port_nn(self):
"""
Getter method for port_nn, mapped from YANG variable /brocade_vswitch_rpc/get_vmpolicy_macaddr/output/vmpolicy_macaddr/port_nn (string)
YANG Description: vm port ID
"""
return self.__port_nn
def _set_port_nn(self, v, load=False):
"""
Setter method for port_nn, mapped from YANG variable /brocade_vswitch_rpc/get_vmpolicy_macaddr/output/vmpolicy_macaddr/port_nn (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_port_nn is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_port_nn() directly.
YANG Description: vm port ID
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="port-nn", rest_name="port-nn", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """port_nn must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="port-nn", rest_name="port-nn", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)""",
})
self.__port_nn = t
if hasattr(self, '_set'):
self._set()
def _unset_port_nn(self):
self.__port_nn = YANGDynClass(base=unicode, is_leaf=True, yang_name="port-nn", rest_name="port-nn", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)
def _get_port_prof(self):
"""
Getter method for port_prof, mapped from YANG variable /brocade_vswitch_rpc/get_vmpolicy_macaddr/output/vmpolicy_macaddr/port_prof (string)
YANG Description: port profile
"""
return self.__port_prof
def _set_port_prof(self, v, load=False):
"""
Setter method for port_prof, mapped from YANG variable /brocade_vswitch_rpc/get_vmpolicy_macaddr/output/vmpolicy_macaddr/port_prof (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_port_prof is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_port_prof() directly.
YANG Description: port profile
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="port-prof", rest_name="port-prof", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """port_prof must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="port-prof", rest_name="port-prof", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)""",
})
self.__port_prof = t
if hasattr(self, '_set'):
self._set()
def _unset_port_prof(self):
self.__port_prof = YANGDynClass(base=unicode, is_leaf=True, yang_name="port-prof", rest_name="port-prof", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vswitch', defining_module='brocade-vswitch', yang_type='string', is_config=True)
mac = __builtin__.property(_get_mac, _set_mac)
name = __builtin__.property(_get_name, _set_name)
datacenter = __builtin__.property(_get_datacenter, _set_datacenter)
dvpg_nn = __builtin__.property(_get_dvpg_nn, _set_dvpg_nn)
port_nn = __builtin__.property(_get_port_nn, _set_port_nn)
port_prof = __builtin__.property(_get_port_prof, _set_port_prof)
_pyangbind_elements = {'mac': mac, 'name': name, 'datacenter': datacenter, 'dvpg_nn': dvpg_nn, 'port_nn': port_nn, 'port_prof': port_prof, }
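
# --- Minimal usage sketch (an illustrative assumption, not part of the
# generated bindings): leaves are set through plain attribute access and are
# validated against the YANG-derived types, e.g. the mac-address pattern.
#
#   entry = vmpolicy_macaddr()
#   entry.mac = u'00:11:22:33:44:55'   # accepted: matches the mac-address pattern
#   entry.name = u'example-vm'         # free-form string leaf
#   entry.mac = u'not-a-mac'           # raises ValueError (pattern mismatch)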
| 56.81677
| 435
| 0.731129
|
4997ac7f1d2879471a7d874956b56ff3f7dab727
| 4,793
|
py
|
Python
|
pypureclient/flasharray/FA_2_10/models/software_get_response.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 14
|
2018-12-07T18:30:27.000Z
|
2022-02-22T09:12:33.000Z
|
pypureclient/flasharray/FA_2_10/models/software_get_response.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 28
|
2019-09-17T21:03:52.000Z
|
2022-03-29T22:07:35.000Z
|
pypureclient/flasharray/FA_2_10/models/software_get_response.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 15
|
2020-06-11T15:50:08.000Z
|
2022-03-21T09:27:25.000Z
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.10
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_10 import models
class SoftwareGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'more_items_remaining': 'bool',
'total_item_count': 'int',
'continuation_token': 'str',
'items': 'list[Software]'
}
attribute_map = {
'more_items_remaining': 'more_items_remaining',
'total_item_count': 'total_item_count',
'continuation_token': 'continuation_token',
'items': 'items'
}
required_args = {
}
def __init__(
self,
more_items_remaining=None, # type: bool
total_item_count=None, # type: int
continuation_token=None, # type: str
items=None, # type: List[models.Software]
):
"""
Keyword args:
more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.
total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).
items (list[Software])
"""
if more_items_remaining is not None:
self.more_items_remaining = more_items_remaining
if total_item_count is not None:
self.total_item_count = total_item_count
if continuation_token is not None:
self.continuation_token = continuation_token
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `SoftwareGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SoftwareGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SoftwareGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
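
# --- Minimal usage sketch (illustrative only, not part of the generated client;
# real `items` would be Software models built by the client). Guarded so the
# module's import-time behaviour is unchanged.
if __name__ == '__main__':
    resp = SoftwareGetResponse(total_item_count=0, items=[])
    print(resp.to_dict())       # -> {'total_item_count': 0, 'items': []}
    try:
        resp.bogus = 1          # attributes outside attribute_map are rejected
    except KeyError as exc:
        print("rejected:", exc)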
| 36.869231
| 524
| 0.606301
|
b0a5a33e119afe0c9845c4095ac551c9edfbb8b8
| 5,013
|
py
|
Python
|
OptionProfitabilityHeatmap.py
|
JoJo10Smith/OptionProfitabilityHeatmap
|
ecb4c058875a49421e1966fcc86def568cb0ab34
|
[
"MIT"
] | null | null | null |
OptionProfitabilityHeatmap.py
|
JoJo10Smith/OptionProfitabilityHeatmap
|
ecb4c058875a49421e1966fcc86def568cb0ab34
|
[
"MIT"
] | null | null | null |
OptionProfitabilityHeatmap.py
|
JoJo10Smith/OptionProfitabilityHeatmap
|
ecb4c058875a49421e1966fcc86def568cb0ab34
|
[
"MIT"
] | null | null | null |
import scipy.stats
import plotly.express as px
import plotly.graph_objects as go
import requests
import pandas
import time
API_KEY = "***INSERT YOUR TD AMERITRADE API KEY***"
UNDERLYING = "QQQ"
YEAR = "2021"
MONTH = "09"
DAY = "20"
DAYS_TO_EXP = "3"
OPTION_START,OPTION_END = 365,380
UNDERLYING_START,UNDERLYING_END = 360,385
UNDERLYING_PRICE = 374.36
# Build the expiration-date query string (date plus days to expiration) shared by all option chain requests below
API_DATE_INPUT = "{}-{}-{}:{}".format(YEAR,MONTH,DAY,DAYS_TO_EXP)
def collect_option_data(symbol,strike,contractType="CALL"):
    # Query the TD Ameritrade option chain and return the ask price for the given strike
parameters = {"apikey":API_KEY,
"includeQuotes":True,
"strike":str(strike),
"symbol":symbol,
"contractType":contractType}
url = "https://api.tdameritrade.com/v1/marketdata/chains"
response = requests.get(url,params=parameters).json()
pointer = response['callExpDateMap'][API_DATE_INPUT][str(parameters['strike'])][0]
return pointer["ask"]
def create_option_strikes(starting_strike,ending_strike,step=1):
#create the desired list of strike prices for the option heatmap
list_of_strikes = []
for current_strike in range(starting_strike,ending_strike+step,step):
list_of_strikes.append(current_strike)
return list_of_strikes
def create_underlying_prices(starting_price,ending_price,step=1):
#create the desired list of prices for the heatmap
list_of_prices = []
for current_price in range(starting_price,ending_price+step,step):
list_of_prices.append(current_price)
return list_of_prices
PROFITABILITY = {}
OPTION_STRIKES = create_option_strikes(OPTION_START,OPTION_END)
UNDERLYING_PRICES = create_underlying_prices(UNDERLYING_START,UNDERLYING_END)
OPTION_PREMIUM = {}
for current_option_strike in OPTION_STRIKES:
option_price = collect_option_data(UNDERLYING,float(current_option_strike))
OPTION_PREMIUM[current_option_strike] = option_price
print ("Option Premium for ${} strike --> ${}".format(float(current_option_strike),option_price))
time.sleep(0.5)
# Read historical return statistics (mean and standard deviation per days-to-expiration) from the prepared CSV file
file_data = [line.split(",") for line in open("QQQ_return_data.csv")]
file_data = file_data[1:]
RETURN_DATA = {}
for current_line in file_data:
days_to_exp = str(current_line[0])
mean_return = float(current_line[1])
standard_dev = float(current_line[2])
RETURN_DATA[days_to_exp] = {"mean":mean_return,"standard_dev":standard_dev}
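# Expected layout of QQQ_return_data.csv, inferred from the parsing above
# (the header row is skipped; the numbers are purely illustrative):
#
#   days_to_exp,mean_return,std_dev
#   3,0.001,0.015
#   5,0.002,0.021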
def _required_return_(original_price,target_price):
#calculates the return you would need to reach your target
difference = target_price - original_price
return float(difference/original_price)
def calculate_probability(current_live_price,underlying_calc_price):
    # Calculate the probability of achieving the desired return, based on past performance
data = RETURN_DATA[DAYS_TO_EXP]
distribution = scipy.stats.norm(data["mean"],data["standard_dev"])
required_return = _required_return_(current_live_price,underlying_calc_price)
return 1 - distribution.cdf(required_return)
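# Worked example (illustrative numbers, not live data): with a 3-day mean return
# of 0.1% and a standard deviation of 1.5%, the chance of the underlying rising
# from 374.36 to 376 is roughly
#     required_return = (376 - 374.36) / 374.36 ≈ 0.0044
#     1 - scipy.stats.norm(0.001, 0.015).cdf(0.0044) ≈ 0.41
# so each heatmap cell's payoff below is weighted by how likely that underlying
# price is over the chosen horizon.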
data = {}
for current_option_strike in OPTION_STRIKES:
option_profits = []
for current_underlying_price in UNDERLYING_PRICES:
if current_option_strike >= current_underlying_price:
current_profit = -OPTION_PREMIUM[current_option_strike]
else:
current_profit = current_underlying_price - current_option_strike - OPTION_PREMIUM[current_option_strike]
profit_percent = 100 * float(current_profit/OPTION_PREMIUM[current_option_strike])
#profit_percent = max(min(500,profit_percent),-100)
profit_percent *= calculate_probability(UNDERLYING_PRICE,current_underlying_price)
option_profits.append(min(200,profit_percent))
data[current_option_strike] = option_profits
GRAPH_DATA = pandas.DataFrame.from_dict(data,orient = "index",columns = UNDERLYING_PRICES)
fig = px.imshow(GRAPH_DATA,color_continuous_scale="RdYlGn",title="Expected value of Option returns for {}".format(UNDERLYING))
fig.update_xaxes(title_text='Underlying Price')
fig.update_yaxes(title_text='Option Strike Price')
def add_profitabilty_lines(given_data,underlying_prices):
x,y=[],[]
for current_strike in given_data:
current_index = 0
for current_profit in given_data[current_strike]:
if current_profit > 0:
y.append(current_strike-0.5)
y.append(current_strike+0.5)
x.append(underlying_prices[current_index]-0.5)
x.append(underlying_prices[current_index]-0.5)
break
else:
current_index += 1
return [x,y]
line_data = add_profitabilty_lines(data,UNDERLYING_PRICES)
fig.add_trace(
go.Scatter(
x=line_data[0],
y=line_data[1],
mode="lines",
line=go.scatter.Line(color="gray"),
showlegend=False))
fig.show()
| 37.410448
| 127
| 0.726112
|
b743ade8361b07239c91e7a36e1d7c5862a62635
| 3,881
|
py
|
Python
|
src/pyfel/base/fieldbuffer.py
|
bellaz89/pyFEL
|
d96bc50096d32dac4ba957f0fc022bc377232680
|
[
"MIT"
] | 1
|
2021-02-24T04:31:36.000Z
|
2021-02-24T04:31:36.000Z
|
src/pyfel/base/fieldbuffer.py
|
bellaz89/pyFEL
|
d96bc50096d32dac4ba957f0fc022bc377232680
|
[
"MIT"
] | null | null | null |
src/pyfel/base/fieldbuffer.py
|
bellaz89/pyFEL
|
d96bc50096d32dac4ba957f0fc022bc377232680
|
[
"MIT"
] | null | null | null |
'''
The field buffer structure
'''
import numpy as np
import pyopencl as cl
from .clctx import cl_queue, cl_ctx, cl_ctype
class FieldBuffer(object):
'''
Holds a 3 dimensional grid with the field.
Can be implemented either with texture of
global device memory
'''
def __init__(self, shape=None, data=None, fitype="global"):
'''
Initialize the buffer.
At least shape or data must be defined.
shape must be a three element tuple. If only shape is defined,
A new zeroed buffer with shape dimensions is created
data is a numpy array of complex values. If only data is passed, it must have
a shape len equal to 3.
If both shape and data are passed, data gets a new shape
fitype is either "global" or "texture"
'''
host_data = None
self.fitype = fitype
self.order = "F" if fitype == "global" else "C"
if not shape and not isinstance(data, np.ndarray):
raise RuntimeError("At least shape or data must be defined")
elif shape and not isinstance(data, np.ndarray):
assert len(shape) == 3, "shape must be a three elements long tuple"
self.shape = shape
host_data = np.zeros(self.shape, dtype=cl_ctype, order=self.order)
elif not shape and isinstance(data, np.ndarray):
assert len(data.shape) == 3, "shape of data must be a three elements long tuple"
self.shape = data.shape
host_data = np.array(data, dtype=cl_ctype, order=self.order)
else:
assert len(shape) == 3, "shape must be a three elements long tuple"
self.shape = shape
host_data = np.array(data, dtype=cl_ctype, order=self.order).reshape(shape)
self.data, self.evs = self.get_image_from_shape(host_data, fitype)
@staticmethod
def get_image_from_shape(host_data, fitype):
'''
Upload data to the device
'''
data = None
evs = []
if fitype == "global":
mf = cl.mem_flags
data = cl.Buffer(cl_ctx,
mf.READ_WRITE | mf.COPY_HOST_PTR,
hostbuf=host_data)
else:
channel_type = (cl.channel_type.FLOAT
if cl_ctype == np.complex128
                            else cl.channel_type.UNSIGNED_INT16)
image_format = cl.ImageFormat(cl.channel_order.RGBA, channel_type)
mf = cl.mem_flags
data = cl.Image(cl_ctx, mf.READ_WRITE, image_format, host_data.shape)
evs.append(cl.enqueue_copy(cl_queue,
data,
host_data,
origin=(0,0,0),
region=host_data.shape))
return data, evs
def get(self):
'''
Get data from from the buffer
'''
self.wait()
host_data = np.empty(self.shape, dtype=cl_ctype, order=self.order)
if self.fitype == "global":
self.evs.append(cl.enqueue_copy(cl_queue,
host_data,
self.data))
else:
self.evs.append(cl.enqueue_copy(cl_queue,
host_data,
self.data,
origin=(0,0,0),
region=self.shape))
return host_data
def wait(self):
'''
Waits until all the events associated to the buffer are consumed
'''
for ev in self.evs:
ev.wait()
self.evs = []
| 35.935185
| 92
| 0.515331
|
52645b520d3c0e79bed98a241a362a1c3d89094a
| 72
|
py
|
Python
|
apps/socials/models/__init__.py
|
jorgesaw/oclock
|
2a78bd4d1ab40eaa65ea346cf8c37556fcbbeca5
|
[
"MIT"
] | null | null | null |
apps/socials/models/__init__.py
|
jorgesaw/oclock
|
2a78bd4d1ab40eaa65ea346cf8c37556fcbbeca5
|
[
"MIT"
] | null | null | null |
apps/socials/models/__init__.py
|
jorgesaw/oclock
|
2a78bd4d1ab40eaa65ea346cf8c37556fcbbeca5
|
[
"MIT"
] | null | null | null |
from .links import UserSocialNetwork
from .socials import SocialNetwork
| 24
| 36
| 0.861111
|
7d80bdc984016af11a59423568503b4ea04d0acb
| 4,319
|
py
|
Python
|
src/automations/migrations/0001_initial.py
|
sebastianmanger/django-automations
|
070e700d29ef68f1b27c7f016ee6a08c41be56fe
|
[
"MIT"
] | 20
|
2021-04-25T16:19:09.000Z
|
2022-02-17T13:55:57.000Z
|
src/automations/migrations/0001_initial.py
|
sebastianmanger/django-automations
|
070e700d29ef68f1b27c7f016ee6a08c41be56fe
|
[
"MIT"
] | 25
|
2021-11-21T14:39:59.000Z
|
2022-02-01T11:32:17.000Z
|
src/automations/migrations/0001_initial.py
|
sebastianmanger/django-automations
|
070e700d29ef68f1b27c7f016ee6a08c41be56fe
|
[
"MIT"
] | 4
|
2021-11-21T04:27:55.000Z
|
2022-03-04T01:37:06.000Z
|
# Generated by Django 3.1.8 on 2021-05-02 08:56
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("auth", "0012_alter_user_first_name_max_length"),
]
operations = [
migrations.CreateModel(
name="AutomationModel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"automation_class",
models.CharField(max_length=256, verbose_name="Process class"),
),
(
"finished",
models.BooleanField(default=False, verbose_name="Finished"),
),
("data", models.JSONField(default=dict, verbose_name="Data")),
(
"paused_until",
models.DateTimeField(null=True, verbose_name="Paused until"),
),
("created", models.DateTimeField(auto_now_add=True)),
("updated", models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name="AutomationTaskModel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"status",
models.CharField(blank=True, max_length=256, verbose_name="Status"),
),
("locked", models.IntegerField(default=0, verbose_name="Locked")),
(
"interaction_permissions",
models.JSONField(
default=list,
help_text="List of permissions of the form app_label.codename",
verbose_name="Required permissions",
),
),
("created", models.DateTimeField(auto_now_add=True)),
("finished", models.DateTimeField(null=True)),
(
"message",
models.CharField(
blank=True, max_length=128, verbose_name="Message"
),
),
(
"result",
models.JSONField(
blank=True, default=dict, null=True, verbose_name="Result"
),
),
(
"automation",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="automations.automationmodel",
),
),
(
"interaction_group",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.PROTECT,
to="auth.group",
verbose_name="Assigned group",
),
),
(
"interaction_user",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.PROTECT,
to=settings.AUTH_USER_MODEL,
verbose_name="Assigned user",
),
),
(
"previous",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="automations.automationtaskmodel",
verbose_name="Previous task",
),
),
],
),
]
| 35.113821
| 88
| 0.406807
|
6cbbc5dd050928dc9ba6ce349aa6b8576a697df9
| 428,170
|
py
|
Python
|
tests/python/unittest/test_operator.py
|
sa-mustafa/incubator-mxnet
|
03654eeea3f3ab30dc43fabb7229945970a358b2
|
[
"Apache-2.0"
] | 13
|
2016-04-01T03:19:44.000Z
|
2019-10-17T13:30:09.000Z
|
tests/python/unittest/test_operator.py
|
sa-mustafa/incubator-mxnet
|
03654eeea3f3ab30dc43fabb7229945970a358b2
|
[
"Apache-2.0"
] | 82
|
2016-03-29T02:40:02.000Z
|
2021-02-06T22:20:40.000Z
|
tests/python/unittest/test_operator.py
|
sa-mustafa/incubator-mxnet
|
03654eeea3f3ab30dc43fabb7229945970a358b2
|
[
"Apache-2.0"
] | 2
|
2017-04-21T15:51:25.000Z
|
2017-09-08T11:55:01.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
from __future__ import division
import numpy as np
import mxnet as mx
import copy
import math
import random
import itertools
from distutils.version import LooseVersion
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
from mxnet.operator import *
from mxnet.base import py_str, MXNetError, _as_list
from common import setup_module, with_seed, teardown, assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied, assertRaises
from common import run_in_spawned_process
from nose.tools import assert_raises, ok_
import unittest
import os
def check_rnn_consistency(cell1, cell2, T, N, I, H, grad_req, rtol=1e-2, atol=1e-4):
dshape = (N, T, I)
data = mx.sym.Variable('data')
Y1, _ = cell1.unroll(T, data, layout='NTC', merge_outputs=True)
mod1 = mx.mod.Module(Y1, label_names=None, context=default_context())
mod1.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req)
Y2, _ = cell2.unroll(T, data, layout='NTC', merge_outputs=True)
mod2 = mx.mod.Module(Y2, label_names=None, context=default_context())
mod2.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req)
mod1.init_params()
args, auxs = mod1.get_params()
args = cell1.unpack_weights(args)
args = cell2.pack_weights(args)
mod2.set_params(args, auxs)
x = mx.random.uniform(shape=dshape)
batch=mx.io.DataBatch(data=[x])
# check inference
mod1.forward(batch, is_train=False)
mod2.forward(batch, is_train=False)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol)
# check training
mod1.forward(batch, is_train=True)
mod2.forward(batch, is_train=True)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol)
dy = mx.random.uniform(shape=mod1.get_outputs()[0].shape)
mod1.backward(out_grads=[dy])
mod2.backward(out_grads=[dy])
if type(grad_req) is dict and grad_req['data'] == 'null' or grad_req == 'null':
assert(mod1.get_input_grads()[0] == None)
assert(mod2.get_input_grads()[0] == None)
else:
assert_allclose(mod1.get_input_grads()[0].asnumpy(), mod2.get_input_grads()[0].asnumpy(), rtol=rtol, atol=atol)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnn_with_new_param():
rnn_modes = ['rnn_relu', 'rnn_tanh', 'gru', 'lstm']
ngates_ = [1, 1, 3, 4]
num_layers, input_size, seq_len, batch_size, state_size = 3, 128, 5, 64, 8
for bidirectional in [False, True]:
directions = 2 if bidirectional else 1
for mode, ngates in zip(rnn_modes, ngates_):
first_layer_size = (input_size * state_size + state_size * state_size + state_size * 2) * ngates
rest_layer_size = (state_size * directions * state_size + state_size * state_size + state_size * 2) \
* ngates * (num_layers - 1)
param_size = (first_layer_size + rest_layer_size) * directions
sym = mx.sym.RNN(mode=mode, num_layers=num_layers, bidirectional=bidirectional,
state_outputs=False, state_size=state_size, name='rnn')
bind_dict = {
'rnn_data': mx.ndarray.random.uniform(low=-1, high=1, shape=(seq_len, batch_size, input_size)),
'rnn_parameters': mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size)),
'rnn_state': mx.ndarray.zeros(shape=(num_layers * directions, batch_size, state_size))
}
if mode == 'lstm':
bind_dict['rnn_state_cell'] = mx.ndarray.zeros(
shape=(num_layers * directions, batch_size, state_size))
ex = sym.bind(default_context(), bind_dict)
ex.forward(is_train=True)
ex01 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex02 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex01, ex02, rtol=1e-2, atol=1e-4)
bind_dict['rnn_parameters'] = mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size))
ex.copy_params_from(bind_dict)
ex.forward(is_train=True)
ex03 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex04 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex03, ex04, rtol=1e-2, atol=1e-4)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_sym():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='lstm', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.LSTMCell(H, prefix='l0_'))
stack.add(mx.rnn.LSTMCell(H, prefix='l1_'))
stack.add(mx.rnn.LSTMCell(H, prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_bidirectional():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='lstm',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.LSTMCell(H, prefix='l0_'),
mx.rnn.LSTMCell(H, prefix='r0_'),
output_prefix='bi_lstm_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.LSTMCell(H, prefix='l1_'),
mx.rnn.LSTMCell(H, prefix='r1_'),
output_prefix='bi_lstm_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
check_rnn_consistency(fused, stack, T, N, I, H, {'data': 'add', 'parameters': 'null'})
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru_sym():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='gru', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.GRUCell(H, prefix='l0_'))
stack.add(mx.rnn.GRUCell(H, prefix='l1_'))
stack.add(mx.rnn.GRUCell(H, prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru_bidirectional():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='gru',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(H, prefix='l0_'),
mx.rnn.GRUCell(H, prefix='r0_'),
output_prefix='bi_gru_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(H, prefix='l1_'),
mx.rnn.GRUCell(H, prefix='r1_'),
output_prefix='bi_gru_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnntanh_sym():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_tanh', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l0_'))
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l1_'))
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnntanh_bidirectional():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_tanh',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='tanh', prefix='l0_'),
mx.rnn.RNNCell(H, activation='tanh', prefix='r0_'),
output_prefix='bi_rnntanh_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='tanh', prefix='l1_'),
mx.rnn.RNNCell(H, activation='tanh', prefix='r1_'),
output_prefix='bi_rnntanh_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnnrelu_sym():
T, N, I, H = 5, 32, 200, 200
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_relu', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l0_'))
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l1_'))
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnnrelu_bidirectional():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_relu',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='relu', prefix='l0_'),
mx.rnn.RNNCell(H, activation='relu', prefix='r0_'),
output_prefix='bi_rnnrelu_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='relu', prefix='l1_'),
mx.rnn.RNNCell(H, activation='relu', prefix='r1_'),
output_prefix='bi_rnnrelu_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write', rtol=1e-2, atol=1e-2)
check_rnn_consistency(fused, stack, T, N, I, H, 'add', rtol=1e-2, atol=1e-2)
check_rnn_consistency(fused, stack, T, N, I, H, 'null', rtol=1e-2, atol=1e-2)
@with_seed()
def test_lstm_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
CX = mx.sym.Variable('state_cell')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_cell=CX,
state_size=H, num_layers=5, mode='lstm', p=0.5, state_outputs=True, name='LSTM')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_gru_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='gru', p=0.5, state_outputs=True, name='GRU')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_rnntanh_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_tanh', p=0.5, state_outputs=True, name='RNN_TANH')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_rnnrelu_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_relu', p=0.5, state_outputs=True, name='RNN_RELU')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
def test_RNN_float64():
if default_context().device_type == 'gpu':
return
sym = mx.sym.RNN(
mx.sym.Variable('in'),
mx.sym.Variable('par'),
mx.sym.Variable('s'),
state_size = (2),
num_layers = 1,
mode = 'rnn_tanh'
)
dtype = 'float64'
explicit_grad = {
'in': mx.nd.ones([2, 1, 2], dtype=dtype),
'par': mx.nd.ones([12], dtype=dtype),
's': mx.nd.ones([1, 1, 2], dtype=dtype)
}
args_grad = explicit_grad
grad_req = 'write'
ex = sym.bind(default_context(),
{
'in': mx.nd.ones([2, 1, 2], dtype=dtype),
'par': mx.nd.ones([12], dtype=dtype),
's': mx.nd.ones([1, 1, 2], dtype=dtype)
},
args_grad = args_grad,
grad_req = grad_req
)
ex.forward()
ex.outputs[0].wait_to_read()
def np_softmax(x, axis=-1, temperature=1.0):
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x/temperature)
x /= np.sum(x, axis=axis, keepdims=True)
return x
def check_elementwise_sum_with_shape(shape, n):
# forward
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.ElementWiseSum(*inputs, name='esum')
arr = [mx.nd.empty(shape) for i in range(n)]
arr_grad = [mx.nd.empty(shape) for i in range(n)]
for i in range(n):
arr[i][:] = np.random.uniform(-10, 10, shape)
exec1 = out.bind(default_context(),
args=arr,
args_grad=arr_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
out = sum(a.asnumpy() for a in arr)
assert_almost_equal(out, out1, rtol=1e-5, atol=1e-5)
out_grad = mx.nd.empty(shape)
out_grad[:] = np.random.uniform(-10, 10, shape)
# backward
exec1.backward([out_grad])
for a in arr_grad:
assert_almost_equal(a, out_grad, rtol=1e-5, atol=1e-5)
@with_seed()
def test_elementwise_sum():
nrepeat = 2
maxdim = 4
for repeat in range(nrepeat):
for dim in range(1, maxdim):
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
check_elementwise_sum_with_shape(shape, np.random.randint(1, 8))
def check_concat_with_shape(shapes, dimension, skip_second):
# if skip_second is True, second argument will not have gradient.
# it is to test #1130
n = len(shapes)
# forward
target_dim = 0
for shape in shapes:
target_dim += shape[dimension]
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.Concat(*inputs, name='conc',dim=dimension)
arr = [mx.nd.empty(shape) for shape in shapes]
for i in range(n):
arr[i][:] = shapes[i][dimension]
arr_np = [np.copy(narray.asnumpy()) for narray in arr]
arr_grad = [mx.nd.empty(shape) for shape in shapes]
dict_grad = {}
arg_names = out.list_arguments()
for name, g in zip(arg_names, arr_grad):
if not skip_second or name != 'arg1':
dict_grad[name] = g
args = out.list_arguments()
arg_shapes, out_shapes, aux_shapes = out.infer_shape(**dict(zip(args, shapes)))
out_grad = mx.nd.empty(out_shapes[0])
exec1 = out.bind(default_context(),
args=arr,
args_grad=dict_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
ret = np.concatenate([narray.asnumpy() for narray in arr], axis=dimension)
assert_almost_equal(out1, ret)
# backward
out1.copyto(out_grad)
out_grad[:] += 1
exec1.backward([out_grad])
for i, name in enumerate(arg_names):
if not skip_second or name != 'arg1':
grad = dict_grad[name]
np_grad = arr_np[i]
assert_almost_equal(grad, np_grad + 1)
@with_seed()
def test_concat():
for dimension in range(4):
n = 2
merge = [2, 3, 4, 5, 6]
a = 2
b = 3
c = 4
# test 2D
if dimension<2:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a))
elif dimension == 1:
shapes.append((a, merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 2, True)
check_concat_with_shape(shapes, dimension - 2, False)
#test 3D
if dimension<3:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a,b))
elif dimension ==1:
shapes.append((a,merge[i],b))
elif dimension ==2:
shapes.append((a,b,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 3, True)
check_concat_with_shape(shapes, dimension - 3, False)
# test 4D
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i],a,b,c))
elif dimension == 1:
shapes.append((a,merge[i],b,c))
elif dimension ==2:
shapes.append((a,b,merge[i],c))
elif dimension ==3:
shapes.append((a,b,c,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 4, True)
check_concat_with_shape(shapes, dimension - 4, False)
@with_seed()
def test_slice_channel():
def check_slice_channel(data_ndim, axis, num_outputs, squeeze_axis):
ins = []
if squeeze_axis:
shape = np.random.randint(2, 5, data_ndim).tolist()
shape[axis] = num_outputs
out_ele_shape = [ele for ele in shape]
del out_ele_shape[axis]
else:
shape = np.random.randint(1, 5, data_ndim).tolist()
shape[axis] *= num_outputs
out_ele_shape = [ele for ele in shape]
out_ele_shape[axis] //= num_outputs
data_npy = np.random.normal(size=shape)
out_grads_npy = [np.random.normal(size=out_ele_shape) for i in range(num_outputs)]
data = mx.sym.Variable('data')
sym = mx.sym.SliceChannel(data=data, num_outputs=num_outputs, axis=axis, squeeze_axis=squeeze_axis)
exe = sym.simple_bind(ctx=default_context(), data=data_npy.shape)
assert len(exe.outputs) == num_outputs
outputs = exe.forward(is_train=True, data=data_npy)
for i in range(num_outputs):
gt = data_npy.take(np.arange(i * shape[axis]/num_outputs,
(i+1) * shape[axis]/num_outputs).astype(np.int), axis=axis)
if squeeze_axis:
assert_almost_equal(outputs[i], gt.reshape(outputs[i].shape))
else:
assert_almost_equal(outputs[i], gt)
# test backward
exe.backward(out_grads=[mx.nd.array(ele, ctx=default_context()) for ele in out_grads_npy])
if squeeze_axis:
assert_almost_equal(exe.grad_arrays[0],
np.concatenate([np.expand_dims(ele, axis=axis) for ele in out_grads_npy],
axis=axis))
else:
assert_almost_equal(exe.grad_arrays[0],
np.concatenate(out_grads_npy, axis=axis))
check_slice_channel(data_ndim=2, axis=1, num_outputs=3, squeeze_axis=True)
check_slice_channel(data_ndim=4, axis=2, num_outputs=3, squeeze_axis=False)
check_slice_channel(data_ndim=3, axis=-1, num_outputs=2, squeeze_axis=False)
check_slice_channel(data_ndim=5, axis=-2, num_outputs=3, squeeze_axis=True)
@with_seed()
def test_regression():
''' test regression operator '''
def check_regression(symbol, forward, backward, shape, stype='default', densities=[0, 0.5, 1]):
# init executor
data = mx.symbol.Variable('data')
label = mx.symbol.Variable('label', stype=stype)
out = symbol(data, label)
grad_req = {'data': 'write', 'label': 'null'}
out_exec = out.simple_bind(default_context(), grad_req=grad_req,
data=shape, label=shape)
arg_map = dict(zip(out.list_arguments(), out_exec.arg_arrays))
grad_map = dict(zip(out.list_arguments(), out_exec.grad_arrays))
# init data
arr_data = mx.random.uniform(-1, 1, shape)
arg_map["data"][:] = arr_data
# init label based on density
arr_label = arg_map["label"]
atol = 1e-5
for density in densities:
arr_label[:] = rand_ndarray(shape, stype, density=density)
out_exec.forward(is_train=True)
out_exec.backward()
np_out = forward(arr_data.asnumpy())
out_grad = backward(np_out, arr_label.asnumpy().reshape(np_out.shape)) / shape[1]
assert_almost_equal(out_exec.outputs[0], np_out, atol=atol)
assert_almost_equal(grad_map["data"], out_grad, atol=atol)
shape = (50, 30)
check_regression(mx.symbol.LogisticRegressionOutput,
lambda x: 1.0 / (1.0 + np.exp(-x)),
lambda x, y : x - y,
shape)
check_regression(mx.symbol.LinearRegressionOutput,
lambda x: x,
lambda x, y : x - y,
shape)
check_regression(mx.symbol.MAERegressionOutput,
lambda x: x,
lambda x, y : np.where(x > y, np.ones(x.shape), -np.ones(x.shape)),
shape)
check_regression(mx.symbol.LogisticRegressionOutput,
lambda x: 1.0 / (1.0 + np.exp(-x)),
lambda x, y : x - y,
shape, stype='csr')
check_regression(mx.symbol.LinearRegressionOutput,
lambda x: x,
lambda x, y : x - y,
shape, stype='csr')
def check_softmax_grad(xpu):
x = mx.sym.Variable('x')
label = mx.sym.Variable('label')
x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
grad_x = mx.nd.zeros((1,4), ctx=xpu)
label_nd = mx.nd.array([1], ctx=xpu)
sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False)
ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x})
ex.forward(is_train=True)
softmax_out = ex.outputs[0].asnumpy()
expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
assert np.isclose(softmax_out, expected_softmax_out).all()
ex.backward(is_train=True)
grad_out = ex.grad_arrays[0].asnumpy()
k = int(label_nd[0].asscalar())
expected_grad_out = np.zeros((1,4))
expected_grad_out[0, k] = -1
assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
def check_smoothed_softmax_grad(xpu):
alpha = 0.2
x = mx.sym.Variable('x')
label = mx.sym.Variable('label')
x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
grad_x = mx.nd.zeros((1,4), ctx=xpu)
label_nd = mx.nd.array([1], ctx=xpu)
sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False, smooth_alpha=alpha)
ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x})
ex.forward(is_train=True)
softmax_out = ex.outputs[0].asnumpy()
expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
assert np.isclose(softmax_out, expected_softmax_out).all()
ex.backward(is_train=True)
grad_out = ex.grad_arrays[0].asnumpy()
k = int(label_nd[0].asscalar())
expected_grad_out = np.full((1,4), fill_value=-alpha/float(4-1))
expected_grad_out[0, k] = - (1 - alpha)
assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
def check_softmax_with_ignore_label(xpu):
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SoftmaxOutput(data=X, label=L, ignore_label=0, use_ignore=True)
shape = (20, 10)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
l_np = np.random.randint(0, shape[1]-1, (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
exec1.backward()
grad0 = grad.asnumpy()
for i in range(int(shape[0]/2)):
l_np[i] = 0
l[:] = l_np
exec1.forward(is_train=True)
exec1.backward()
grad1 = grad.asnumpy()
assert abs(np.sum(grad1[:int(shape[0]/2)])) < 1e-5
assert_almost_equal(grad0[int(shape[0]/2):], grad1[int(shape[0]/2):])
def check_softmax_with_shape(shape, xpu, preserve_shape=False):
# bind with label
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SoftmaxOutput(data=X, label=L, preserve_shape=preserve_shape)
x = mx.random.uniform(-1, 1, shape, ctx=xpu)
l = mx.random.uniform(-1, 1, shape, ctx=xpu)
l[:] = np_softmax(l.asnumpy())
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0].asnumpy()
# Non-zero atol required by test_softmax with seed 781663739
rtol = 1e-4
atol = 1e-6
assert_almost_equal(out, np_softmax(x.asnumpy()), rtol=rtol, atol=atol)
exec1.backward()
assert_almost_equal(grad, np_softmax(x.asnumpy()) - l.asnumpy(), rtol=rtol, atol=atol)
def test_python_op():
X = mx.symbol.Variable('X')
op = mx.operator.NumpyOp()
s = op.get_symbol(X, name='numpy_op')
x = mx.ndarray.ones((10))*10
dx = mx.ndarray.zeros((10))
dy = mx.ndarray.ones((10))
exec1 = s.bind(default_context(), args=[x], args_grad = {'X': dx})
exec1.forward(is_train=True)
assert_almost_equal(x, exec1.outputs[0])
exec1.backward(dy)
assert_almost_equal(dy, dx)
def test_swapaxes():
data = mx.symbol.Variable('data')
shape = (2, 3, 4)
data_tmp = np.ones(shape)
data_tmp[0] = 1
data_tmp[1] = 2
arr_data = mx.nd.array(data_tmp)
swap0 = mx.symbol.SwapAxis(data=data, dim1=0, dim2=2)
swap = mx.symbol.SwapAxis(data=swap0, dim1=1, dim2=2)
exe_c = swap.bind(default_context(), args=[arr_data])
exe_c.forward(is_train=True)
out = exe_c.outputs[0]
swap0_ = np.swapaxes(data_tmp, 0, 2)
swap_ = np.swapaxes(swap0_, 1, 2)
assert_almost_equal(out, swap_)
config = [((1, 1, 2), 0, 1),
((1, 1, 2), -1, -2),
((4, 5, 6, 7), 1, 1),
((4, 5, 6, 7), 2, 3),
((4, 5, 6, 7), -2, 2),
((4, 5, 6, 7), -2, -3)]
for shape, axis1, axis2 in config:
data_np = np.random.uniform(size=shape)
data_mx = mx.nd.array(data_np, dtype=data_np.dtype)
ret_np = np.swapaxes(data_np, axis1=axis1, axis2=axis2)
ret_mx = mx.symbol.SwapAxis(data, dim1=axis1, dim2=axis2)
exe_c = ret_mx.bind(default_context(), args=[data_mx])
exe_c.forward(is_train=True)
out = exe_c.outputs[0]
assert_almost_equal(out, ret_np)
@with_seed()
def test_scalarop():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)*5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = 2 / (4-((1+data+1)*2/5)-0.8-(data!=0))
npout_1 = (4-((1+data_tmp+1)*2/5)-0.8-(data_tmp!=0))
npout = 2/npout_1
check_symbolic_forward(test, [data_tmp], [npout])
npout_grad = 2.*2/5
npout_grad = 2*npout_grad /(npout_1 *npout_1 )
check_symbolic_backward(test, [data_tmp], [np.ones(shape)*2], [npout_grad])
@with_seed()
def test_scalar_pow():
data = mx.symbol.Variable('data')
shape = (1, 1)
data_tmp = np.ones(shape)
test = data ** 2
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [data_tmp ** 2])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)], [2 * data_tmp])
@with_seed()
def test_symbol_pow():
shape = (1, 1)
data = mx.symbol.Variable('data')
data_tmp = np.ones(shape)*2
exp = mx.symbol.Variable('exp')
exp_tmp = np.ones(shape)*3
test = data**exp
check_numeric_gradient(test, [data_tmp, exp_tmp])
check_symbolic_forward(test, [data_tmp, exp_tmp], [data_tmp**exp_tmp])
data_dir = data_tmp**(exp_tmp - 1) * exp_tmp
exp_dir = data_tmp**(exp_tmp) * np.log(data_tmp)
check_symbolic_backward(test, [data_tmp, exp_tmp], [np.ones(shape)], [data_dir, exp_dir])
@with_seed()
def test_fully_connected():
data = mx.sym.var("data")
fc_weight = mx.sym.var("weight")
fc_bias = mx.sym.var("bias")
fc = mx.sym.FullyConnected(data=data, weight=fc_weight, bias=fc_bias, num_hidden=10, no_bias=False, name='fc')
data = mx.nd.random.uniform(shape=(5, 5, 5, 13), dtype=np.float32)
fc_weight = mx.nd.random.uniform(shape=(10, 325), dtype=np.float32)
fc_bias = mx.nd.random.uniform(shape=(10), dtype=np.float32)
fc_bias2 = mx.nd.random.uniform(shape=(10, 1), dtype=np.float32)
data_np = data.asnumpy().reshape(5, 325)
fc_weight_np = np.transpose(fc_weight.asnumpy())
fc_bias_np = fc_bias.asnumpy()
res = np.dot(data_np, fc_weight_np) + fc_bias.asnumpy()
check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np}, {'fc_output': res})
check_numeric_gradient(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np},
numeric_eps=1e-2, rtol=1e-4, atol=1e-2)
# TODO: Fix Bug #15032 when bias has ndim > 1
#check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias2.asnumpy()}, {'fc_output': res})
@with_seed()
def test_pow_fn():
shape = (3, 4)
exp = mx.symbol.Variable("exp")
x = np.ones(shape)*3
for y in [mx.sym.pow(2, exp), mx.sym.power(2, exp)]:
check_numeric_gradient(y, [x], numeric_eps=1E-3)
check_symbolic_forward(y, [x], [2**x])
check_symbolic_backward(y, [x], [np.ones(shape)], [np.log(2) * 2**x])
@with_seed()
def test_relu():
def frelu(x):
return np.maximum(x, 0.0)
def frelu_grad(x):
return 1.0 * (x > 0.0)
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.relu(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
eps = 1e-4
# Avoid finite difference method inaccuracies due to discontinuous gradient at the origin.
# Here we replace small problematic inputs with 1.0. Repro issue with seed 97264195.
xa[abs(xa) < eps] = 1.0
ya = frelu(xa)
ga = frelu_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=eps)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga])
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
def test_leaky_relu():
def fleaky_relu(x, act_type, slope=0.25):
neg_indices = x < 0
out = x.copy()
if act_type == 'elu':
out[neg_indices] = slope * np.expm1(out[neg_indices])
elif act_type == 'leaky':
out[neg_indices] = slope * out[neg_indices]
return out
def fleaky_relu_grad(grad, x, y, act_type, slope=0.25):
neg_indices = x < 0
out = np.ones(x.shape)
if act_type == 'elu':
out[neg_indices] = y[neg_indices] + slope
elif act_type == 'leaky':
out[neg_indices] = slope
return out * grad
for ndim in range(1, 4):
shape = rand_shape_nd(ndim)
x = mx.symbol.Variable("x")
slp = 0.25
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
eps = 1e-4
rtol = 1e-2
atol = 1e-3
xa[abs(xa) < eps] = 1.0
for act_type in ['elu', 'leaky']:
y = mx.symbol.LeakyReLU(data=x, slope=slp, act_type=act_type)
ya = fleaky_relu(xa, slope=slp, act_type=act_type)
ga = fleaky_relu_grad(np.ones(shape), xa, ya, slope=slp, act_type=act_type)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
def test_prelu():
def fprelu(x, gamma):
pos_indices = x > 0
out = x.copy()
if len(x.shape) == 4:
out = out.transpose(2,3,0,1)
out = np.multiply(out, gamma)
out = out.transpose(2,3,0,1)
else:
out = np.multiply(out, gamma)
out[pos_indices] = x[pos_indices]
return out
def fprelu_grad(x, y, gamma):
pos_indices = x > 0
if len(x.shape) == 4:
grad_x = np.multiply(np.ones(x.shape).transpose(2,3,0,1), gamma)
grad_x = grad_x.transpose(2,3,0,1)
else:
grad_x = np.multiply(np.ones(x.shape), gamma)
grad_gam = np.zeros(gamma.shape)
copy_x = x.copy()
copy_x[pos_indices] = 0.0
grad_x[pos_indices] = 1.0
if len(gamma.shape) > 1 and len(x.shape) != 4:
grad_gam = copy_x
elif len(gamma.shape) > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(2,3))
elif gamma.shape[0] == 1:
grad_gam = np.sum(np.sum(copy_x))
elif gamma.shape[0] > 1 and len(x.shape) != 4:
grad_gam = np.sum(copy_x, axis=0)
elif gamma.shape[0] > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(0,2,3))
return (grad_x, grad_gam)
x = mx.symbol.Variable("x")
gamma = mx.symbol.Variable("gamma")
for shape in [(3,4), (3,4,4,5)]:
for dtype in [np.float16, np.float32, np.float64]:
for gam in [np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype)]:
gam_full = np.array([gam, gam, gam])
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
rtol = 1e-2
atol = 1e-3
eps = 1e-4
xa[abs(xa) < eps] = 1.0
y = mx.symbol.LeakyReLU(data=x, gamma=gamma, act_type='prelu')
ya = fprelu(xa, gam)
ya_full = fprelu(xa, gam_full)
g_xa, g_gam = fprelu_grad(xa, ya, gamma=gam)
g_xa_full, g_gam_full = fprelu_grad(xa, ya_full, gamma=gam_full)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa, gam], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_numeric_gradient(y, [xa, gam_full], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam], [np.ones(shape), np.ones(gam.shape)], [g_xa, g_gam], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam_full], [ya_full], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam_full], [np.ones(shape), np.ones(gam_full.shape)],
[g_xa_full, g_gam_full], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_selu():
alpha = 1.6732632423543772848170429916717
lamb = 1.0507009873554804934193349852946
def fselu(x):
neg_indices = x < 0
out = x.copy()
out[neg_indices] = alpha * np.expm1(out[neg_indices])
return out * lamb
def fselu_grad(grad, x, y):
neg_indices = x < 0
out = np.ones(x.shape).astype(x.dtype)
out[neg_indices] = y[neg_indices] + alpha
return out * lamb
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="selu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fselu(xa)
ga = fselu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_gelu():
CUBE_CONSTANT = 0.044715
ROOT_TWO_OVER_PI = 0.7978845608028654
def g(x):
return ROOT_TWO_OVER_PI * (x + CUBE_CONSTANT * np.power(x, 3))
def g_grad(x):
return ROOT_TWO_OVER_PI * (1.0 + 3.0 * CUBE_CONSTANT * np.power(x, 2))
def f(x):
return 1.0 + np.tanh(g(x))
def f_grad(x):
return (1.0 - np.tanh(g(x)) * np.tanh(g(x))) * g_grad(x)
def fgelu(x):
return 0.5 * x * f(x)
def fgelu_grad(grad, x, y):
return grad * (y / x + y * (1 - np.tanh(g(x))) * g_grad(x))
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="gelu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 2e-2, 1e-3) if dtype is np.float16 else (1e-4, 1e-3, 1e-5)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fgelu(xa)
ga = fgelu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_sigmoid():
def fsigmoid(a):
return np.divide(1.0, (1.0 + np.exp(-a)))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.sigmoid(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsigmoid(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya * (1 - ya)])
@with_seed()
def test_shape_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.shape_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.shape(xa)
yg = mx.nd.ones(ya)
exe = y.bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))
@with_seed()
def test_size_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.size_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.size(xa)
yg = mx.nd.ones(ya)
exe = y.bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))
@with_seed()
def test_hard_sigmoid():
def fhardsigmoid(a, alpha=0.2, beta=0.5):
return np.maximum(np.zeros(a.shape, dtype=a.dtype),
np.minimum(np.ones(a.shape, dtype=a.dtype), alpha*a+beta))
def fhardsigmoid_grad(a, out_grad, alpha=0.2, beta=0.5):
orig_out = fhardsigmoid(a, alpha, beta)
res = out_grad * alpha
res[orig_out <= 0.0] = 0.0
res[orig_out >= 1.0] = 0.0
return res
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.hard_sigmoid(x)
for dtype in [np.float16, np.float32, np.float64]:
if dtype is np.float16:
rtol = 1e-2
else:
rtol = 1e-3
atol = 1e-3
eps = 1e-3
xa = np.random.uniform(low=-3.0,high=3.0,size=shape).astype(dtype)
# function not differentiable at x=2.5 and -2.5
xa[abs(xa-2.5) < eps] -= 2 * eps
xa[abs(xa+2.5) < eps] += 2 * eps
ya = fhardsigmoid(xa)
grad_xa = fhardsigmoid_grad(xa, np.ones(shape))
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [grad_xa], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_softsign():
def fsoftsign(a):
return np.divide(a, (1.0 + np.abs(a)))
def fsoftsign_grad(a):
return np.divide(1.0, np.square((1.0 + np.abs(a))))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.softsign(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsoftsign(xa)
ya_grad = fsoftsign_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad])
@with_seed()
def test_binary_logic():
def _inner_test(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True):
x = mx.symbol.Variable("x")
y = mx.symbol.Variable("y")
z = logic_sym(x, y)
x_npy = np.random.randint(0, 4, size=x_shape).astype(np.float32)
y_npy = np.random.randint(0, 4, size=y_shape).astype(np.float32)
exe = z.simple_bind(ctx=default_context(), x=x_shape, y=y_shape)
mx_out = exe.forward(is_train=True, x=x_npy, y=y_npy)[0]
assert_almost_equal(mx_out, forward_gt(x_npy, y_npy))
exe.backward()
if test_scalar:
z_lscalar = logic_sym(1, y)
z_rscalar = logic_sym(x, 1)
exe_lscalar = z_lscalar.simple_bind(ctx=default_context(), y=y_shape)
exe_rscalar = z_rscalar.simple_bind(ctx=default_context(), x=x_shape)
mx_lscalar_out = exe_lscalar.forward(is_train=True, y=y_npy)[0]
mx_rscalar_out = exe_rscalar.forward(is_train=True, x=x_npy)[0]
assert_almost_equal(mx_lscalar_out, forward_gt(1, y_npy))
assert_almost_equal(mx_rscalar_out, forward_gt(x_npy, 1))
exe_lscalar.backward()
exe_rscalar.backward()
# Test the no-broadcasting binary logic ops + scalar logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: x == y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: x > y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: x >= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: x < y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: x <= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: x != y, x_shape=(10, 10), y_shape=(10, 10))
# Test the broadcasting binary logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: mx.sym.broadcast_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: mx.sym.broadcast_greater(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: mx.sym.broadcast_greater_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: mx.sym.broadcast_not_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
@with_seed()
def test_unary_logic():
def reference(a, dtype):
return np.logical_not(a).astype(dtype)
shape = (3, 4)
xa = np.random.randint(-2, 2, size=shape).astype(np.float32)
mx_xa = mx.nd.array(xa)
mx_out = mx.nd.logical_not(mx_xa)
assert_almost_equal(mx_out, reference(xa, dtype=xa.dtype))
x = mx.sym.Variable('x')
y = mx.sym.logical_not(data=x)
exe = y.simple_bind(ctx=default_context(), x=shape)
sym_out = exe.forward(is_train=True, x=mx_xa)[0]
assert_almost_equal(sym_out, reference(xa, dtype=xa.dtype))
@with_seed()
def test_embedding():
in_dim = 10
out_dim = 4
batch = 24
data = mx.sym.Variable("data")
embed = mx.sym.Embedding(data=data, input_dim=in_dim, output_dim=out_dim, name="embed")
exe_test = embed.simple_bind(default_context(), grad_req={'data': 'null', 'embed_weight': 'write'}, data=(batch,))
arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
np_data = np.random.randint(low=0, high=in_dim, size=batch)
np_weight = np.random.uniform(-0.01, 0.01, arg_map["embed_weight"].shape)
np_onehot = np.zeros((batch, in_dim))
np_onehot[np.arange(batch), np_data] = 1.0
# forward
arg_map["data"][:] = np_data
arg_map["embed_weight"][:] = np_weight
exe_test.forward(is_train=True)
# Non-zero atol required, as exposed by seed 781663739
rtol = 1e-5
atol = 1e-5
assert_almost_equal(exe_test.outputs[0], np.dot(np_onehot, np_weight), rtol=rtol, atol=atol)
# backward
np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
grad = mx.nd.zeros(np_grad.shape)
grad[:] = np_grad
exe_test.backward([grad])
assert_almost_equal(grad_map["embed_weight"], np.dot(np_onehot.T, np_grad), rtol=rtol, atol=atol)
# check ops handle duplicate input correctly.
@with_seed()
def test_binary_op_duplicate_input():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = 5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
out_grad = mx.nd.empty(shape)
out_grad[:] = 1
square = data * data
exe_square = square.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_square.forward(is_train=True)
assert_almost_equal(exe_square.outputs[0], data_tmp * data_tmp)
exe_square.backward(out_grad)
assert_almost_equal(arr_grad, 2.0 * data_tmp)
@with_seed()
def test_sign():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.sign(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.sign(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2;
npout_grad = out_grad.asnumpy()
npout_grad = 0;
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
@with_seed()
def test_round_ceil_floor():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5.543
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]= 2
test = mx.sym.round(data) + mx.sym.ceil(data) + mx.sym.floor(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.round(data_tmp) + np.ceil(data_tmp) + np.floor(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_trunc():
data_tmp = np.random.rand(3, 4) * 10 - 5
arr_data = mx.nd.array(data_tmp)
data = mx.symbol.Variable('data')
test = mx.sym.trunc(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
# 'trunc' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
# Repro issue with seed 1660190454
npout = np.trunc(np.float32(data_tmp))
assert_almost_equal(out, npout)
@with_seed()
def test_rsqrt_cos_sin():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.rsqrt(data) + mx.sym.cos(data) + mx.sym.sin(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = 1/ np.sqrt(data_tmp) + np.cos(data_tmp) + np.sin(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * -(1.0 / (2.0 * data_tmp * np.sqrt(data_tmp))) + npout_grad * -1 * np.sin(data_tmp) + npout_grad * np.cos(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
@with_seed()
def test_maximum_minimum():
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp2 = np.random.rand(3,4)
data_tmp1[:] = 2
data_tmp2[:] = 3
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,data2) + mx.sym.minimum(data1,data2)
exe_test = test.bind(default_context(), args=[arr_data1,arr_data2], args_grad=[arr_grad1,arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.maximum(data_tmp1,data_tmp2) + np.minimum(data_tmp1,data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > data_tmp2).astype('float')
mask2 = (data_tmp1 < data_tmp2).astype('float')
npout_grad1 = npout_grad * mask1 + npout_grad * mask2
npout_grad2 = (npout_grad - npout_grad * mask1) + (npout_grad - npout_grad * mask2)
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
@with_seed()
def test_maximum_minimum_scalar():
data1 = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp1[:] = 2
arr_data1 = mx.nd.array(data_tmp1)
arr_grad1 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,3) + mx.sym.maximum(9,data1) + mx.sym.minimum(5,data1) + mx.sym.minimum(data1,4)
exe_test = test.bind(default_context(), args=[arr_data1], args_grad=[arr_grad1])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.maximum(data_tmp1,3) + np.maximum(9,data_tmp1) + np.minimum(5,data_tmp1) + np.minimum(data_tmp1,4)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > 3).astype('float')
mask2 = (9 > data_tmp1).astype('float')
mask3 = (5 < data_tmp1).astype('float')
mask4 = (data_tmp1 < 4).astype('float')
npout_grad1 = npout_grad * mask1 + (npout_grad - npout_grad * mask2) + (npout_grad - npout_grad * mask3) + npout_grad * mask4
assert_almost_equal(arr_grad1, npout_grad1)
@with_seed()
def test_abs():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.abs(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = abs(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2;
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * np.sign(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def check_deconvolution_forward_backward(input_shape, num_filter, kernel, stride, pad):
"""configure A: input --> conv --> deconv --> output.
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output, and the same weights between conv
and deconv;
If the input value of forward() and backwrad() is the same, then
the output value of them should also the same;
"""
assert input_shape[1] == num_filter
data = mx.sym.Variable(name="data")
conv = mx.sym.Convolution(
data=data, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
deconv = mx.sym.Deconvolution(
data=conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
input_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
out_grad = input_data
args = {}
args["data"] = input_data
args['conv_weight'] = args['deconv_weight'] = mx.random.normal(0, 1,
(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
args_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = deconv.bind(default_context(), args=args, args_grad=args_grad)
exe.forward(is_train=True)
out = exe.outputs[0]
exe.backward(out_grad)
assert_almost_equal(out, args_grad[0], rtol=1E-3, atol=1e-3)
args_grad_addto_npy = [np.random.normal(size=s) for s in arg_shapes]
args_grad_addto = [mx.nd.array(ele) for ele in args_grad_addto_npy]
exe = deconv.bind(default_context(), args=args, args_grad=args_grad_addto, grad_req="add")
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out + args_grad_addto_npy[0], args_grad_addto[0].asnumpy(), rtol=1e-3, atol=1e-3)
def check_deconvolution_gradient(input_shape, num_filter, pad):
"""configure A: input --> conv --> output.
configure B: input --> deconv --> output
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output;
During backward(), if the input of A equals output of B, and the output
of A equals input of B, then the grad of weight should be the same;
"""
ndim = len(pad)
stride = (1,) * ndim
kernel = tuple(2 * np.array(pad) + 1)
data_conv = mx.sym.Variable(name="data_conv")
conv = mx.sym.Convolution(
data=data_conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
data_deconv = mx.sym.Variable(name="data_deconv")
deconv = mx.sym.Deconvolution(
data=data_deconv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
conv_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
conv_args = {}
conv_args["data_conv"] = conv_data
conv_args['conv_weight'] = \
mx.random.normal(0, 1,(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
conv_args_grad = [mx.nd.zeros(conv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
exe_conv = conv.bind(default_context(), args=conv_args, args_grad=conv_args_grad)
exe_conv.forward(is_train=True)
conv_out_grad = mx.random.normal(0, 2, exe_conv.outputs[0].shape, ctx=mx.cpu()).copyto(default_context())
exe_conv.backward(conv_out_grad)
deconv_data = conv_out_grad
deconv_args = {}
deconv_args['data_deconv'] = deconv_data
deconv_args['deconv_weight'] = conv_args['conv_weight']
deconv_args_grad = [mx.nd.zeros(deconv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad_npy = [np.random.normal(size=deconv_data.shape),
np.random.normal(size=(num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad = [mx.nd.array(deconv_addto_args_grad_npy[0]),
mx.nd.array(deconv_addto_args_grad_npy[1])]
exe_deconv = deconv.bind(default_context(), args=deconv_args, args_grad=deconv_args_grad)
exe_deconv.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1], deconv_args_grad[1], rtol=1e-3, atol=1e-2)
# Test AddTo
exe_deconv_addto = deconv.bind(default_context(), args=deconv_args,
args_grad=deconv_addto_args_grad,
grad_req="add")
exe_deconv_addto.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv_addto.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy() + deconv_addto_args_grad_npy[1],
deconv_addto_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
def check_deconvolution_target_shape(input_shape, kernel, stride, pad, adj, target_shape=None):
data = mx.sym.Variable(name="data")
if target_shape:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5,
target_shape = target_shape)
else:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5)
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
default_target_size = 8
if target_shape is None:
target_shape = (default_target_size,) * len(kernel)
assert out_shapes[0] == (input_shape[0], 5) + target_shape
@with_seed()
def test_deconvolution():
# 2D
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
target_shape = (8,8),
pad = (99,99), # will be ignored
adj = (101,101), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
pad = (1,1),
adj = (1,1),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5,5),
num_filter = 1,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28,28),
num_filter = 3,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403, 403),
num_filter = 3,
kernel = (7,7),
stride = (5,5),
pad = (2,2)
)
check_deconvolution_gradient(
input_shape = (1,3,5,5),
num_filter = 3,
pad = (1,1)
)
check_deconvolution_gradient(
input_shape = (5,3,100,100),
num_filter = 3,
pad = (3,3)
)
# 1D
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
target_shape = (8,),
pad = (99,), # will be ignored
adj = (101,), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
pad = (1,),
adj = (1,),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5),
num_filter = 1,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28),
num_filter = 3,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403),
num_filter = 3,
kernel = (7,),
stride = (5,),
pad = (2,)
)
check_deconvolution_gradient(
input_shape = (1,3,5),
num_filter = 3,
pad = (1,)
)
check_deconvolution_gradient(
input_shape = (5,3,100),
num_filter = 3,
pad = (3,)
)
@with_seed()
def test_deconvolution_forward_with_bias():
"""Check if deconvolution forward can work well with bias=True
"""
def check_deconvolution_forward_with_bias(shape=(1, 16, 5, 5), num_filter=32, num_group=1, kernel=(3, 3), pad=(1, 1)):
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
input_data = mx.random.uniform(-5, 5, shape, ctx=mx.cpu())
y = mx.sym.Deconvolution(data=x, weight=w, num_filter=num_filter, num_group=num_group, kernel=kernel, no_bias=False, pad=pad)
exe = y.simple_bind(ctx=mx.cpu(), x=shape, grad_req='null')
exe.arg_arrays[0][:] = np.random.normal(size=exe.arg_arrays[0].shape)
exe.arg_arrays[1][:] = np.random.normal(size=exe.arg_arrays[1].shape)
exe.forward(is_train=False)
o = exe.outputs[0]
t = o.asnumpy()
check_deconvolution_forward_with_bias((1, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((32, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((1, 16, 5, 5), 32, 1, (3, 3), (1, 1))
check_deconvolution_forward_with_bias((32, 16, 5, 5), 32, 1, (3, 3), (1, 1))
def check_nearest_upsampling_with_shape(shapes, scale, root_scale):
arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context()) for i, shape in zip(range(len(shapes)), shapes)}
arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}
up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='nearest', scale=root_scale)
exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
exe.backward(exe.outputs)
for k in range(len(shapes)):
name = 'arg_%d'%k
assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
def check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter):
def _init_bilinear(arr, f):
weight = np.zeros(np.prod(arr.shape), dtype='float32')
shape = arr.shape
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(np.prod(shape)):
x = i % shape[3]
y = (i // shape[3]) % shape[2]
weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
arr[:] = weight.reshape(shape)
return arr
up = mx.sym.UpSampling(mx.sym.Variable("data"),
mx.sym.Variable('weight'), sample_type='bilinear', scale=root_scale,
num_filter=num_filter, num_args=2)
arg_shapes, out_shapes, _ = up.infer_shape(data=data_shape)
arr = {'data': mx.random.uniform(-5, 5, data_shape, ctx=mx.cpu()).copyto(default_context()),
'weight': mx.nd.array(_init_bilinear(mx.ndarray.empty(arg_shapes[1]).asnumpy(), root_scale))}
arr_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(exe.outputs)
target_shape = (data_shape[2] * root_scale, data_shape[3] * root_scale)
assert out.shape == data_shape[:2] + target_shape
@with_seed()
def test_nearest_upsampling():
for root_scale in [1,2,3]:
for scale in [1,2,3]:
for num_shape in [1,2,3]:
for base in [1,2,3]:
shapes = [(1,3,base*root_scale*scale**(num_shape-1-i),base*root_scale*scale**(num_shape-1-i)) for i in range(num_shape)]
check_nearest_upsampling_with_shape(shapes, scale, root_scale)
@with_seed()
def test_bilinear_upsampling():
rootscale = [2,3]
scales = [1,2,3]
filters = [1,2,3]
bases = [1,2,3]
for params in itertools.product(rootscale, scales, filters, bases):
root_scale, scale, num_filter, base = params
# bilinear upsampling takes only 1 data and 1 weight
# multi input mode is not applicable
dimension = base*root_scale*scale
kernel = 2 * root_scale - root_scale % 2
data_shape = (1, num_filter, dimension, dimension)
weight_shape = (1, num_filter, kernel, kernel)
check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter)
@with_seed()
def test_batchnorm_training():
def check_batchnorm_training(stype):
for shape in [(2, 3), (2, 3, 2, 2), (2, 8, 2, 2)]:
data_tmp = np.random.normal(-0.1, 0.1, size=shape)
s = shape[1],
gamma = np.ones(s)
beta = np.ones(s)
gamma[1] = 3
beta[0] = 3
rolling_mean = np.random.uniform(size=s)
rolling_std = np.random.uniform(size=s)
data = mx.symbol.Variable('data', stype=stype)
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]
test = mx.symbol.BatchNorm_v1(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
# Test varying channel axis
dim = len(shape)
for chaxis in range(-dim, dim):
chaxis_true = chaxis
if chaxis < 0:
chaxis_true = dim + chaxis
shapex = shape
channel_count = shapex[chaxis_true]
data_tmp = np.random.normal(-0.1, 0.1, size=shapex)
gamma = np.ones(channel_count)
beta = np.ones(channel_count)
if channel_count > 1:
gamma[1] = 3
beta[0] = 3
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
xrolling_mean = np.random.uniform(size=channel_count)
xrolling_std = np.random.uniform(size=channel_count)
xmean_std = [mx.nd.array(xrolling_mean).tostype(stype),
mx.nd.array(xrolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
check_batchnorm_training('default')
@with_seed()
def test_batchnorm():
momentum = 0.9
epsilon = 1e-5
def _test_batchnorm_impl(op, shape, axis, cudnn_off, output_mean_var):
print(str((op, shape, axis, cudnn_off)))
kwargs = dict(output_mean_var=output_mean_var)
if op == mx.nd.contrib.SyncBatchNorm:
if axis != 1:
return
key = str(op) + str(shape) + str(axis)
kwargs.update(dict(key=key))
if cudnn_off:
return
else:
kwargs.update(dict(axis=axis, cudnn_off=cudnn_off))
nch = shape[axis]
bn_gamma = mx.nd.random.uniform(shape=(nch,))
bn_gamma.attach_grad()
bn_beta = mx.nd.random.uniform(shape=(nch,))
bn_beta.attach_grad()
bn_running_mean = mx.nd.zeros(nch)
bn_running_var = mx.nd.ones(nch)
running_mean = mx.nd.zeros(nch)
running_var = mx.nd.ones(nch)
num_iters = 10
expand_shape = [1] * len(shape)
expand_shape[axis] = shape[axis]
for _ in range(num_iters):
data = mx.nd.random.uniform(shape=shape)
data.attach_grad()
ograd = mx.nd.random.uniform(shape=shape)
with mx.autograd.record():
output = op(data, bn_gamma, bn_beta,
bn_running_mean, bn_running_var,
momentum=momentum, eps=epsilon,
fix_gamma=False, **kwargs)
if output_mean_var:
output, output_mean, output_std = output
output.backward(ograd)
mx.nd.waitall()
data_mean = data.mean(
axis=axis, exclude=True, keepdims=True)
data_var = (data - data_mean).square().mean(axis=axis,
exclude=True,
keepdims=True)
target_output = (data - data_mean) / \
(data_var + epsilon).sqrt() * \
bn_gamma.reshape(expand_shape) + \
bn_beta.reshape(expand_shape)
# squeeze data_mean and data_var
data_mean_flat = data_mean.squeeze()
data_var_flat = data_var.squeeze()
running_mean = running_mean * momentum + \
data_mean_flat * (1 - momentum)
running_var = running_var * momentum + \
data_var_flat * (1 - momentum)
W = bn_gamma.reshape(expand_shape)
dnx = ograd * W
xsm = data - data_mean
nd = 1.0 / mx.nd.sqrt(data_var + epsilon)
nx = xsm * nd
m = np.prod(shape) / shape[axis]
dvar = (dnx * xsm).sum(axis=axis, keepdims=True,
exclude=True) * (-0.5) * mx.nd.power(nd, 3)
dmean = -nd * dnx.sum(axis=axis, keepdims=True, exclude=True) - \
dvar * xsm.mean(axis=axis, keepdims=True,
exclude=True) * 2.0
dX = dnx * nd + dvar * xsm * (2.0 / m) + dmean * (1.0 / m)
dW = (ograd * nx).sum(axis=axis, exclude=True)
db = ograd.sum(axis=axis, exclude=True)
atol = 1e-2
rtol = 1e-2
if output_mean_var:
assert_almost_equal(output_mean.asnumpy(),
data_mean_flat.asnumpy(),
atol=atol, rtol=rtol)
if op != mx.nd.contrib.SyncBatchNorm:
assert_almost_equal(output_std.asnumpy(),
(1.0 / (data_var_flat +
epsilon).sqrt()).asnumpy(),
atol=atol, rtol=rtol)
else:
assert_almost_equal(output_std.asnumpy(),
data_var_flat.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(output.asnumpy(), target_output.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(bn_running_mean.asnumpy(
), running_mean.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(bn_running_var.asnumpy(
), running_var.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(data.grad.asnumpy(),
dX.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(
bn_gamma.grad.asnumpy(), dW.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(
bn_beta.grad.asnumpy(), db.asnumpy(), atol=atol, rtol=rtol)
for op in [mx.nd.BatchNorm, mx.nd.contrib.SyncBatchNorm]:
for shape in [(24, 2), (24, 3, 4), (24, 4, 4, 4), (24, 8, 4, 4), (24, 5, 6, 4, 4)]:
for axis in range(len(shape)):
for cudnn_off in [False, True]:
for output_mean_var in [False, True]:
_test_batchnorm_impl(op, shape, axis,
cudnn_off, output_mean_var)
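# test_groupnorm: compare mx.sym.GroupNorm forward (output, mean, std) and
# backward (data/gamma/beta gradients) against a NumPy reference for
# float16/float32/float64 inputs.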
@with_seed()
def test_groupnorm():
acc_types = {'float16': 'float32', 'float32': 'float64', 'float64': 'float64'}
def x_hat_helper(x, num_groups, eps):
dtype = x.dtype
dshape = x.shape
assert len(dshape) == 4
acc_type = acc_types[str(dtype)]
new_shape = (dshape[0], num_groups, int(dshape[1] / num_groups), dshape[2], dshape[3])
new_moments_shape = (dshape[0], num_groups, 1, 1, 1)
data = x.reshape(new_shape)
mean = np.mean(data, axis=(2, 3, 4), keepdims=False, dtype=acc_type).astype(dtype)
std = np.sqrt(np.var(data, axis=(2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype) + eps)
x_hat = (data - mean.reshape(new_moments_shape)) / std.reshape(new_moments_shape)
return x_hat, mean, std
def np_groupnorm(data, gamma, beta, num_groups, eps):
new_param_shape = (1, num_groups, 1, 1, 1)
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
out = x_hat * gamma.reshape(new_param_shape) + beta.reshape(new_param_shape)
return out.reshape(dshape), mean, std
def np_groupnorm_grad(ograd, data, gamma, beta, mean, std, num_groups, eps):
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
new_shape = x_hat.shape
dshape = data.shape
dtype = data.dtype
new_moments_shape = (new_shape[0], num_groups, 1, 1, 1)
new_param_shape = (1, num_groups, 1, 1, 1)
acc_type = acc_types[str(dtype)]
ograd = ograd.reshape(new_shape)
data = data.reshape(new_shape)
gamma = gamma.reshape(new_param_shape)
beta = beta.reshape(new_param_shape)
mean = mean.reshape(new_moments_shape)
std = std.reshape(new_moments_shape)
beta_grad = np.sum(ograd, axis=(0, 2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype)
gamma_grad = np.sum(x_hat * ograd, axis=(0, 2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype)
x_hat_grad = ograd * gamma
ograd_mult = x_hat_grad / std
red_out = np.mean(ograd_mult, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = ograd_mult - red_out
red_out = np.mean(ograd_mult * x_hat, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = data_grad - x_hat * red_out
return data_grad.reshape(dshape), gamma_grad, beta_grad
batch_size = random.randint(1, 8)
num_groups = random.randint(2, 3)
num_channels = random.randint(2, 3) * num_groups
height = random.randint(1, 5)
width = random.randint(1, 5)
dshape = (batch_size, num_channels, height, width)
param_shape = (num_groups,)
temp_shape = (batch_size, num_groups, int(num_channels / num_groups), height, width)
np_data = np.random.uniform(0.2, 1.0, dshape)
np_gamma = np.random.uniform(-1.0, 1.0, param_shape)
np_beta = np.random.uniform(-1.0, 1.0, param_shape)
data_sym = mx.sym.Variable("data")
gamma_sym = mx.sym.Variable("gamma")
beta_sym = mx.sym.Variable("beta")
for dtype in [np.float16, np.float32, np.float64]:
eps = 1e-2 if dtype == np.float16 else 1e-5
mx_data = mx.nd.array(np_data, dtype=dtype)
mx_gamma = mx.nd.array(np_gamma, dtype=dtype)
mx_beta = mx.nd.array(np_beta, dtype=dtype)
np_out, np_mean, np_std = np_groupnorm(np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
num_groups=num_groups,
eps=eps)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=True)
check_symbolic_forward(mx_sym, [mx_data, mx_gamma, mx_beta], [np_out, np_mean, np_std],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-3 if dtype == np.float16 else 1e-4, dtype=dtype)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=False)
np_ograd = np.random.uniform(-1.0, 1.0, dshape).astype(dtype)
np_data_grad, np_gamma_grad, np_beta_grad = np_groupnorm_grad(np_ograd,
np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
np_mean, np_std,
num_groups, eps)
check_symbolic_backward(mx_sym, [mx_data, mx_gamma, mx_beta], [mx.nd.array(np_ograd)],
[np_data_grad, np_gamma_grad, np_beta_grad],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-2 if dtype == np.float16 else 1e-4, dtype=dtype)
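# Grouped convolution must match running an independent convolution on each
# channel group (via SliceChannel) and concatenating the results.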
@with_seed()
def test_convolution_grouping():
for dim in [1, 2, 3]:
num_filter = 4
for num_group in [1, 2]:
kernel = (3,) * dim
shape = (1, 4) + (9,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group, kernel=kernel)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel)
for i in range(num_group)])
exe1 = y1.simple_bind(default_context(), x=shape)
exe2 = y2.simple_bind(default_context(), x=shape, w=(num_filter, shape[1]//num_group) + kernel, b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.float32(np.random.normal(size=arr1.shape))
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
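# Depthwise convolution (num_group == num_filter == input channels) is checked
# the same way, against per-group convolutions on channel slices; currently
# skipped as flaky (see the issue referenced in the decorator).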
@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/14052")
@with_seed()
def test_depthwise_convolution():
for dim in [1,2]:
for num_base in [1, 4, 16, 32, 64]:
for kernel_x in [3, 5]:
for stride_x in [1, 2]:
for pad_x in [0, 1]:
for in_size in [7, 32]:
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
num_group = num_base
shape = (2, num_base) + (in_size,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group,
kernel=kernel, stride=stride, pad=pad)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel,
stride=stride, pad=pad)
for i in range(num_group)])
dev = default_context()
exe1 = y1.simple_bind(dev, x=shape)
exe2 = y2.simple_bind(dev, x=shape, w=(num_filter, shape[1]//num_group)+kernel,
b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.random.normal(size=arr1.shape)
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
assert_allclose(arr1, arr2, rtol=1e-3, atol=1e-3)
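# Gradients of Convolution requested independently per argument (x/w/b) must
# match those computed with a single shared grad_req; a 'null' request must
# leave its gradient buffer at zero.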
@with_seed()
def test_convolution_independent_gradients():
# NOTE(zixuanweeei): Flaky test tracked by https://github.com/apache/incubator-mxnet/issues/15603.
# GPU context will be enabled after figuring out the possible issue tracked at
# https://github.com/apache/incubator-mxnet/issues/15638.
ctx = mx.cpu()
atol = 1.0e-3
rtol = 1.0e-3
reqs = ["null", "write", "add"]
var_names = ["x", "w", "b"]
dims = [1, 2]
num_bases = [1, 8]
kernel_xs = [3, 5]
stride_xs = [1, 2]
pad_xs = [0, 1]
in_sizes = [7, 32]
no_biases = [True, False]
for dim, num_base, kernel_x, stride_x, pad_x , in_size, no_bias in \
itertools.product(dims, num_bases, kernel_xs, stride_xs, pad_xs, in_sizes, no_biases):
# Prepare params shape
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
x_shape = (2, num_base) + (in_size,) * dim
w_shape = (num_filter, num_base) + kernel
# Symbols definition
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b') if not no_bias else None
conv = mx.sym.Convolution(x, w, b, num_filter=num_filter,
kernel=kernel, stride=stride, pad=pad, no_bias=no_bias)
for req_kind in reqs:
# Binding args for conv with possible dependent gradients
base_args = {
'x': mx.nd.random.normal(shape=x_shape, ctx=ctx),
'w': mx.nd.random.normal(shape=w_shape, ctx=ctx),
'b': mx.nd.random.normal(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
args1 = copy.deepcopy(base_args)
grad1 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req1 = [req_kind] * 3
grad_req1 = dict(zip(var_names, grad_req1))
exe1 = conv.bind(ctx, args1, args_grad=grad1, grad_req=grad_req1)
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
for x_req, w_req, b_req in itertools.product(reqs, repeat=3):
# Binding args for conv with independent gradients
args2 = copy.deepcopy(base_args) # Deepcopy the same params of `exe1`
grad2 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req2 = {"x": x_req, "w": w_req, "b": b_req}
exe2 = conv.bind(ctx, args2, args_grad=grad2, grad_req=grad_req2)
exe2.forward(is_train=True)
np.testing.assert_allclose(exe1.outputs[0].asnumpy(),
exe2.outputs[0].asnumpy(), rtol=rtol, atol=atol)
exe2.backward(exe2.outputs[0])
for var_name in var_names:
if var_name == "b" and no_bias:
continue
if grad_req2[var_name] == "null":
exe2_var_grad = grad2[var_name].asnumpy()
np.testing.assert_allclose(exe2_var_grad,
np.zeros_like(exe2_var_grad), rtol=rtol, atol=atol)
if grad_req2[var_name] != grad_req1[var_name]:
continue
np.testing.assert_allclose(args1[var_name].asnumpy(),
args2[var_name].asnumpy(), rtol=rtol, atol=atol)
np.testing.assert_allclose(grad1[var_name].asnumpy(),
grad2[var_name].asnumpy(), rtol=rtol, atol=atol)
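# Shape/data generators for the element-wise and broadcasting binary-operator
# checks below.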
def gen_broadcast_data(idx):
# Manually set test cases
binary_op_data_shape = np.array(
[[[2, 5, 1, 30, 7], [1, 5, 448, 30, 1]],
[[10, 49, 1, 77, 17], [10, 1, 2, 1, 17]],
[[13, 2, 65, 2, 1], [13, 1, 65, 1, 225]],
[[9, 434, 4, 2, 37], [9, 1, 4, 1, 37]],
[[2, 52, 1, 4, 1], [1, 52, 60, 1, 37]],
[[1, 23, 7, 122, 50], [2, 1, 7, 1, 50]],
[[1, 17, 1, 5, 1], [22, 1, 2, 1, 28]],
[[29, 1, 2, 1, 8], [29, 22, 1, 130, 1]],
[[2, 36, 1, 427, 3], [1, 36, 11, 427, 1]],
[[1, 2, 1, 100, 7], [1, 2, 448, 100, 1]],
[[1, 2, 495, 77, 7], [1, 2, 1, 1, 7]],
[[1, 43, 65, 2, 1], [1, 43, 65, 1, 225]],
[[1, 92, 434, 2, 2], [1, 92, 1, 2, 2]],
[[1, 92, 1, 4, 1], [1, 92, 134, 1, 17]],
[[1, 53, 2, 122, 143], [1, 1, 2, 1, 143]],
[[1, 179, 1, 87, 17], [1, 179, 1, 1, 17]],
[[1, 1, 17, 5, 1], [1, 22, 1, 1, 28]],
[[1, 2, 1, 1, 8], [1, 2, 52, 430, 1]],
[[1, 163, 1, 22, 3], [1, 163, 116, 22, 1]],
[[1, 1, 44, 30, 7], [1, 1, 44, 30, 1]],
[[1, 1, 1, 1, 28], [1, 127, 1, 5, 28]],
[[1, 2, 394, 38, 1], [1, 2, 394, 38, 16]],
[[1, 10, 49, 77, 17], [1, 1, 1, 1, 17]],
[[1, 431, 6, 2, 225], [1, 1, 6, 2, 225]],
[[1, 15, 1, 28, 1], [1, 15, 1, 28, 463]],
[[1, 129, 2, 48, 96], [1, 129, 2, 1, 1]],
[[1, 1, 403, 17, 2], [1, 44, 403, 17, 2]],
[[1, 1, 65, 2, 22], [1, 1, 65, 1, 1]],
[[1, 24, 103, 17, 18], [1, 24, 1, 1, 1]],
[[1, 1, 1, 1, 2], [1, 24, 194, 50, 1]],
[[1, 1, 107, 84, 9], [1, 1, 1, 1, 1]]])
if idx < binary_op_data_shape.shape[0]:
l_shape = binary_op_data_shape[idx][0]
r_shape = binary_op_data_shape[idx][1]
else:
        # Generate random data with ndim between 1 and 5 and all shape dims between 1 and 5
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
l_same_dim = np.random.randint(0, 5)
r_same_dim = np.random.randint(0, 5)
l_axis_flags = np.random.randint(0, 2, size=ndim)
r_axis_flags = np.random.randint(0, 2, size=ndim)
if l_same_dim == 4:
l_axis_flags = np.ones(ndim)
if r_same_dim == 4:
r_axis_flags = np.ones(ndim)
l_shape = shape.copy()
r_shape = shape.copy()
l_shape[np.where(l_axis_flags == 0)] = 1
r_shape[np.where(r_axis_flags == 0)] = 1
return [np.random.random(l_shape), np.random.random(r_shape)]
def gen_broadcast_data_int(idx):
    d = gen_broadcast_data(idx)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def gen_binary_data(dummy):
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
#print("gen shape {}".format(shape))
return [np.random.random(shape), np.random.random(shape)]
def gen_binary_data_int(dummy):
    d = gen_binary_data(dummy)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def check_binary_op_forward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5, mx_nd_func=None):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])})
y.forward(is_train=True)
y = y.outputs[0].asnumpy()
x = baseline(d[0], d[1]).astype(y.dtype)
#np.set_printoptions(precision=20)
a = d[0]
b = d[1]
#print("a: {} {}".format(a.dtype, a))
#print("a: {} {}".format(b.dtype, b))
#print("x: {} {}".format(x.dtype, x))
#print("y: {} {}".format(y.dtype, y))
if mx_nd_func is not None:
d0 = mx.nd.array(d[0], dtype=d[0].dtype)
d1 = mx.nd.array(d[1], dtype=d[1].dtype)
assert_almost_equal(y, mx_nd_func(d0, d1).asnumpy(), rtol=rtol, atol=atol)
idx = np.abs(x-y) > atol+rtol*np.abs(x)
if idx.any():
import binascii
np.set_printoptions(precision=20)
logging.error('found precision problem:')
d[0] = np.broadcast_to(d[0], x.shape)
d[1] = np.broadcast_to(d[1], x.shape)
logging.error('input a: {}'.format(d[0][idx]))
logging.error('input b: {}'.format(d[1][idx]))
logging.error("output x: {} {}".format(x.dtype, x))
logging.error("output y: {} {}".format(y.dtype, y))
def ftohex(xs):
import struct
return list(map(lambda x: binascii.hexlify(struct.pack('d', x)), xs.flatten()))
logging.error('output x in baseline(a, b): {}'.format(x[idx]))
logging.error('output y in symbol(a, b): {}'.format(y[idx]))
logging.error('output x in baseline(a,b) hex: {}'.format(ftohex(x[idx])))
logging.error('output y in symbol(a,b) hex: {}'.format(ftohex(y[idx])))
logging.error('input a hex: {}'.format(ftohex(d[0][idx])))
            logging.error('input b hex: {}'.format(ftohex(d[1][idx])))
logging.error('diff: {}'.format(np.abs(x-y)[idx] - atol-rtol*np.abs(x)[idx]))
assert_allclose(y, x, rtol=rtol, atol=atol)
def check_binary_op_backward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
out = np.random.random((d[0] + d[1]).shape)
def reduce_op(shape, x):
if shape == x.shape:
return x
keepdims_shape = list(x.shape)
for i in range(len(shape)):
if x.shape[i] != shape[i]:
keepdims_shape[i] = 1
x = np.sum(x, axis=i).reshape(keepdims_shape)
return x
baseline_grad1, baseline_grad2 = baseline(out, d[0], d[1])
x_1 = reduce_op(d[0].shape, baseline_grad1)
x_2 = reduce_op(d[1].shape, baseline_grad2)
y_1 = mx.nd.empty(d[0].shape)
y_2 = mx.nd.empty(d[1].shape)
y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])},
args_grad=[y_1, y_2])
y.forward(is_train=True)
y.backward([mx.nd.array(out)])
assert_allclose(y_1.asnumpy(), x_1, rtol=rtol, atol=atol)
assert_allclose(y_2.asnumpy(), x_2, rtol=rtol, atol=atol)
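# Element-wise binary operators: forward against the NumPy expression and
# backward against the analytic gradients, on same-shape operands.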
@with_seed()
def test_binary_op():
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = a + b
check_binary_op_forward(c, lambda a, b: a + b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_binary_data)
def test_bminus(a, b):
c = a - b
check_binary_op_forward(c, lambda a, b: a - b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_binary_data)
def test_bmul(a, b):
c = a * b
check_binary_op_forward(c, lambda a, b: a * b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_binary_data)
def test_bdiv(a, b):
c = a / b
check_binary_op_forward(c, lambda a, b: a / b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_binary_data)
def test_bmod(a, b):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
#c = a % b
c = mx.sym.cast(a, dtype='float64') % mx.sym.cast(b, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data, rtol=0, atol=0)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.cast(a, dtype='int32') % mx.sym.cast(b, dtype='int32')
check_binary_op_forward(c, lambda a, b: a % b, gen_binary_data_int)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data_int)
def test_bpow(a, b):
c = a ** b
check_binary_op_forward(c, lambda a, b: a ** b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_binary_data)
def test_bneq(a, b):
c = a != b
# '!=' is sensitive to the precision of the comparison. Force numpy to match mxnet's float32.
# Issue exposed with seed 1644387363
check_binary_op_forward(c, lambda a, b: (np.float32(a) != np.float32(b)).astype(a.dtype), gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bneq(a, b)
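# Broadcasting binary operators: same checks as above, but on operand shapes
# that require broadcasting, plus a comparison with the corresponding mx.nd functions.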
@with_seed()
def test_broadcast_binary_op():
def check_bmaxmin_gradient(test_sym, x, y, delta, rtol, atol):
"""This function ensures that checking the numerical gradient of
broadcast_max/min is not crossing the boundary y=x where there
is no gradient definition at those sigularities."""
x_max = np.max(x)
y = x_max + 2 * delta + np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
x_min = np.min(x)
y = x_min - 2 * delta - np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = mx.sym.broadcast_plus(a, b)
check_binary_op_forward(c, lambda a, b: a + b, gen_broadcast_data, mx_nd_func=mx.nd.add)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_broadcast_data)
def test_bminus(a, b):
c = mx.sym.broadcast_minus(a, b)
check_binary_op_forward(c, lambda a, b: a - b, gen_broadcast_data, mx_nd_func=mx.nd.subtract)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_broadcast_data)
def test_bmul(a, b):
c = mx.sym.broadcast_mul(a, b)
check_binary_op_forward(c, lambda a, b: a * b, gen_broadcast_data, mx_nd_func=mx.nd.multiply)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_broadcast_data)
def test_bdiv(a, b):
c = mx.sym.broadcast_div(a, b)
check_binary_op_forward(c, lambda a, b: a / b, gen_broadcast_data, mx_nd_func=mx.nd.divide)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_broadcast_data)
def test_bmod(a_, b_):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
a = mx.sym.cast(a_, dtype='float64')
b = mx.sym.cast(b_, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
c = mx.sym.broadcast_mod(a, b)
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data, atol=1, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.broadcast_mod(mx.sym.cast(a, dtype='int32'), mx.sym.cast(b, dtype='int32'))
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data_int, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bpow(a, b):
c = mx.sym.broadcast_power(a, b)
check_binary_op_forward(c, lambda a, b: a ** b, gen_broadcast_data, mx_nd_func=mx.nd.power)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_broadcast_data)
def test_bequal(a, b):
c = mx.sym.broadcast_equal(a, b)
check_binary_op_forward(c, lambda a, b: (a == b).astype(a.dtype), gen_broadcast_data_int,
mx_nd_func=mx.nd.equal)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bmax(a, b):
c = mx.sym.broadcast_maximum(a, b)
check_binary_op_forward(c, lambda x, y: np.maximum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.maximum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bmin(a, b):
c = mx.sym.broadcast_minimum(a, b)
check_binary_op_forward(c, lambda x, y: np.minimum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.minimum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_band(a, b):
c = mx.sym.broadcast_logical_and(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_and(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_and)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bor(a, b):
c = mx.sym.broadcast_logical_or(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_or(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_or)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bxor(a, b):
c = mx.sym.broadcast_logical_xor(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_xor(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_xor)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bequal(a, b)
test_bmax(a, b)
test_bmin(a, b)
test_band(a, b)
test_bor(a, b)
test_bxor(a, b)
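# Dilated-convolution impulse-response check: convolving a unit impulse with an
# all-ones kernel must spread exactly prod(kernel_shape) mass into the output
# and the input gradient; a random-kernel run then sanity-checks the kernel gradient.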
@with_seed()
def test_run_convolution_dilated_impulse_response(dil=(1,1), kernel_shape=(3,3), verbose=False):
dim = len(dil)
assert(len(kernel_shape) == dim)
# Input for spike response
data_size = 33
data_shape = (1, 1) + (data_size,) * dim
center = (0,0) + (data_size // 2,) * dim
spike_imgs = np.zeros(shape=data_shape, dtype=np.float32)
spike_imgs[center] = 1.0
spike_img = mx.nd.array(spike_imgs)
spike_img2 = mx.nd.array(spike_imgs)
kernel_weights = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel_weights2 = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel = mx.symbol.Variable('kernel')
in_img = mx.symbol.Variable('input')
net = mx.symbol.Convolution(in_img, num_filter=1,kernel=kernel_shape, dilate=dil, no_bias="true", name='test_convolution')
net.list_arguments()
be = net.bind(default_context(), args={ 'input' : spike_img, 'test_convolution_weight' : kernel_weights},
args_grad={'input' : spike_img2, 'test_convolution_weight' : kernel_weights2 } )
be.forward(True)
out_o = be.outputs[0].asnumpy()
ndo = be.outputs[0]
out_grads = np.zeros(shape=be.outputs[0].shape, dtype=np.float32)
out_grads[center] = 1.0
out_grad = mx.nd.array(out_grads)
be.backward([out_grad])
vgrad = be.grad_arrays[0].asnumpy()
out = out_o.reshape(out_o.shape[2:])
nz_loc = np.nonzero(out)
assert_allclose(np.sum(out),np.prod(kernel_shape),atol=1e-5)
assert_allclose(np.sum(vgrad),np.prod(kernel_shape),atol=1e-5)
# Now check whether the input gradient was computed correctly
input_grad = mx.nd.array(vgrad)
be = net.bind(default_context(), args={ 'input' : input_grad, 'test_convolution_weight' : kernel_weights})
be.forward(True)
out_o = be.outputs[0].asnumpy()
assert_allclose(out_o[center],np.prod(kernel_shape),atol=1e-5)
rnd_kernel_s = np.random.uniform(low=0.0, high=1.0, size=tuple([1,1]+list(kernel_shape))).astype(np.float32)
impulse_error = mx.nd.array(out_o/np.sum(out_o)) # This should be 1.0 at [0,0,16,16]
rnd_kernel = mx.nd.array(rnd_kernel_s)
rnd_kernel2 = mx.nd.array(rnd_kernel_s)
white_in = mx.nd.ones(shape=data_shape)
white_in2 = mx.nd.ones(shape=data_shape)
be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : rnd_kernel},
args_grad={'input' : white_in2, 'test_convolution_weight' : rnd_kernel2 } )
be.forward(True)
be.backward([impulse_error])
out_orig = be.outputs[0].asnumpy()
kernel_gradient = be.grad_arrays[1].asnumpy()
dkernel = mx.nd.array(rnd_kernel_s + kernel_gradient)
be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : dkernel})
be.forward(True)
out = be.outputs[0].asnumpy()
# Now do a simple check of the kernel gradient
assert(out[center] - np.sum(kernel_gradient) - out_orig[center] < 0.001)
@with_seed()
def test_convolution_dilated_impulse_response():
# 1D
for dil in [ (1,), (2,), (3,) ]:
for ks in [ (1,), (2,), (3,), (4,)]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 2D
for dil in [ (1,1), (2,2), (3,3) ]:
for ks in [ (3,3), (4,4), (2,3), (3,2), (1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 3D
for dil in [ (1,1,1), (2,2,2), (3,3,3) ]:
for ks in [ (3,3,3), (4,4,4), (2,3,4), (3,2,4), (1,1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
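# Reshape: verify shape inference, forward and backward for the shape/reverse
# API (including 0/-1/-2/-3/-4 codes), the legacy target_shape API and Flatten.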
@with_seed()
def test_reshape():
def test_reshape_new(src_shape, shape_args, reverse, dst_shape):
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, shape=shape_args, reverse=reverse)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=src_shape)
assert output_shape[0] == dst_shape, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
dat_npy = np.random.rand(*src_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net.simple_bind(default_context(), data=src_shape)
exe.arg_dict['data'][:] = dat_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - dat_npy.reshape(dst_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['data'].asnumpy() - grad_npy.reshape(src_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
for i in range(len(src_shape)):
holdout_src_shape = list(src_shape)
holdout_src_shape[i] = 0
holdout_src_shape = tuple(holdout_src_shape)
net = mx.sym.Variable('data')
net = mx.sym.elemwise_add(net.reshape(shape_args, reverse=reverse), mx.sym.ones(shape=dst_shape))
input_shape, output_shape, __ = net.infer_shape(data=holdout_src_shape)
assert output_shape[0] == dst_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
assert input_shape[0] == src_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
# Test new api (Using shape)
test_cases = [
[(2, 3, 5, 5), (0, -1), False, (2, 75)],
[(2, 3, 5, 5), (0, 0, -1), False, (2, 3, 25)],
[(5, 3, 4, 5), (0, -1, 0), False, (5, 15, 4)],
[(2, 3, 5, 4), (-1, 0, 0), False, (8, 3, 5)],
[(2, 3, 5, 5), (0, 0, 0, 0), False, (2, 3, 5, 5)],
[(2, 4, 5, 3), (-1, 2, 2, 1), False, (30, 2, 2, 1)],
[(2, 3, 5, 6), (-2,), False, (2, 3, 5, 6)],
[(2, 3, 5, 6), (6, 1, -2), False, (6, 1, 5, 6)],
[(2, 3, 5, 6), (-3, -3), False, (6, 30)],
[(2, 3, 5, 6), (-3, -1), False, (6, 30)],
[(64,), (-4, 16, 4), False, (16, 4)],
[(64,), (-4, 16, -1), False, (16, 4)],
[(64, 1, 2, 3), (-4, 16, -1, -2), False, (16, 4, 1, 2, 3)],
[(2, 3, 5, 5), (0, -1), True, (5, 30)],
[(2, 3, 5, 5), (0, 0, -1), True, (3, 5, 10)],
[(5, 3, 4, 5), (0, -1, 0), True, (3, 20, 5)],
[(2, 3, 5, 4), (-1, 0, 0), True, (6, 5, 4)],
[(2, 3, 4, 5), (3, -1, 0), True, (3, 8, 5)],
[(2, 3, 5, 5), (5, 3, 0, -1), True, (5, 3, 5, 2)],
[(2, 3, 5, 5), (0, 0, 0, 0), True, (2, 3, 5, 5)],
[(2, 3, 5, 6), (-2,), True, (2, 3, 5, 6)],
[(2, 3, 5, 6), (-2, 1, 30), True, (2, 3, 1, 30)],
[(2, 3, 5, 6), (-3, -3), True, (6, 30)],
[(64,), (16, 4, -4), True, (16, 4)],
[(64,), (16, -1, -4), True, (16, 4)],
[(1, 2, 3, 64), (-2, -1, 16, -4), True, (1, 2, 3, 4, 16)]]
for test_case in test_cases:
test_reshape_new(*test_case)
# Test old api
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, target_shape=(2, 0))
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=(2, 3, 5, 5))
assert(output_shape[0] == (2, 75))
# Test for Flatten
data = mx.sym.Variable("data")
net = mx.sym.Flatten(data)
exe = net.simple_bind(ctx=default_context(), data=(5, 4, 3, 7))
data_npy = np.random.normal(size=(5, 4, 3, 7))
out_grad_npy = np.random.normal(size=(5, 4 * 3 * 7))
outputs = exe.forward(is_train=True, data=data_npy)[0].asnumpy()
assert_allclose(outputs, data_npy.reshape((5, 4 * 3 * 7)))
exe.backward(out_grads=[mx.nd.array(out_grad_npy, ctx=default_context())])
assert_allclose(exe.grad_arrays[0].asnumpy(), out_grad_npy.reshape((5, 4, 3, 7)))
@with_seed()
def test_reshape_like():
def test_reshape_like_new(lhs_shape, rhs_shape, lbeg, lend, rbeg, rend, dst_shape):
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs, lhs_begin=lbeg, lhs_end=lend, rhs_begin=rbeg, rhs_end=rend)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=lhs_shape, rhs=rhs_shape)
assert output_shape[0] == dst_shape, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
lhs_npy = np.random.rand(*lhs_shape)
rhs_npy = np.random.rand(*rhs_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net.simple_bind(default_context(), lhs=lhs_shape, rhs=rhs_shape)
exe.arg_dict['lhs'][:] = lhs_npy
exe.arg_dict['rhs'][:] = rhs_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - lhs_npy.reshape(dst_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['lhs'].asnumpy() - grad_npy.reshape(lhs_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
# Test new api (Using shape)
test_cases = [
[(30,), (15,2,4), 0, None, 0, 2, (15,2)],
[(30,), (15,2,4), None, 1, None, 2, (15,2)],
[(30,7), (15,2,4), 0, 1, 0, 2, (15,2,7)],
[(3,5), (1,15,4), 0, 2, 1, 2, (15,)],
[(3,5), (1,15,4), 0, None, 1, -1, (15,)],
[(30,12), (4,2,2,3), -1, None, 1, None, (30,2,2,3)],
[(1,1,7,3,1,1), (81,1,1,21), 1, -1, 1, None, (1,1,1,21,1)]
]
for test_case in test_cases:
test_reshape_like_new(*test_case)
# Test old api
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=(40, 30), rhs=(30,20,2))
assert(output_shape[0] == (30,20,2))
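# Reduction operators (sum/mean/prod/nansum/nanprod/max/min/norm): compare
# forward results and gradients with np_reduce over random axes/keepdims/exclude settings.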
@with_seed()
def test_reduce():
sample_num = 500
def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym, nan_prob=0,
test_exclude=True, test_none_axis=False):
for i in range(sample_num):
            # Generate random data with ndim between 1 and 5 and all shape dims between 1 and 5
# Insert a NaN with probability equal to nan_prob
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
axis_num = np.random.randint(0, ndim, size=1)
axis_flags = np.random.randint(0, 2, size=ndim)
if test_exclude:
exclude = np.random.randint(0, 2)
else:
exclude = False
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
axes.append(axis)
if 0 == len(axes):
axes = None
elif 1 == len(axes):
axes = axes[0]
else:
axes = tuple(axes)
keepdims = np.random.randint(0, 2)
a = mx.symbol.Variable('a')
if axes is None:
if test_none_axis:
b = mx_reduce_sym(a, keepdims=keepdims, axis=axes)
else:
b = mx_reduce_sym(a, keepdims=keepdims)
elif exclude and isinstance(axes, tuple) and len(axes) < ndim:
naxes = [i for i in range(ndim) if i not in axes]
b = mx_reduce_sym(a, axis=naxes, keepdims=keepdims, exclude=True)
else:
b = mx_reduce_sym(a, axis=axes, keepdims=keepdims)
dat_npy = np.random.rand(*shape)
# Test with both negative and positive values (randomly). Avoid having both in the same
# test, which can be problematic for error checking due to near-zero values.
if np.random.rand() > 0.5:
dat_npy = -dat_npy
if nan_prob > 0:
dat_npy[np.random.rand(*shape) < nan_prob] = np.nan
sum_groundtruth = np.array(numpy_reduce_func(dat_npy, axis=axes, keepdims=keepdims))
if sum_groundtruth.shape == ():
sum_groundtruth = np.array([sum_groundtruth])
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.array(np.random.rand(*sum_groundtruth.shape))
keepdim_shape = np_reduce(dat_npy, axes, 1, np.sum).shape
grad_groundtruth = numpy_reduce_grad_func(outgrad=outgrad_npy, data=dat_npy,
outdata=sum_groundtruth,
axis=axes, keepdims=keepdims,
keepdim_shape=keepdim_shape)
net = b.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
equal_forward = almost_equal_ignore_nan(net.outputs[0].asnumpy(), sum_groundtruth, 1E-4, 1E-4)
assert equal_forward
net.backward(out_grads=mx.nd.array(outgrad_npy))
bc_grad_groundtruth = np.broadcast_to(grad_groundtruth, grad_nd.shape)
equal_backward = almost_equal_ignore_nan(grad_nd.asnumpy(), bc_grad_groundtruth, 1E-4, 1E-4)
assert equal_backward
test_none_axis = [True, False]
for test_none in test_none_axis:
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.sum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape),
mx.symbol.sum, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.mean),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape)/(data.size/outdata.size),
mx.symbol.mean, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.prod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data),
mx.symbol.prod, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nansum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape)),
mx.symbol.nansum, 0.3, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nanprod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape) *
(outdata.reshape(keepdim_shape) / data)),
mx.symbol.nanprod, 0.3, test_none_axis=test_none)
# grad of max and min are sensitive to the precision of the calculation.
# Force numpy to match mxnet's float32.
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.max),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.max)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.min),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.min)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.linalg.norm),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (data / outdata.reshape(keepdim_shape)),
mx.symbol.norm, test_exclude=False, test_none_axis=test_none)
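# broadcast_axis / broadcast_to / broadcast_like: forward is a plain broadcast,
# backward must reduce the output gradient back over the broadcast axes.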
@with_seed()
def test_broadcast():
sample_num = 200
for i in range(sample_num):
        # Generate random data with ndim between 1 and 5 and all shape dims between 1 and 5
ndim = np.random.randint(1, 6)
target_shape = np.random.randint(1, 6, size=(ndim,))
axis = tuple(set(np.random.randint(0, ndim, np.random.randint(1, ndim + 1))))
shape = target_shape.copy()
size = tuple([shape[ele] for ele in axis])
for ele in axis:
shape[ele] = 1
target_shape_with_zero = list(target_shape)
for idx in range(len(target_shape_with_zero)):
if idx not in axis:
target_shape_with_zero[idx] = 0
break
a = mx.symbol.Variable('a')
sym_bcast_axis = mx.symbol.broadcast_axis(a, axis=axis, size=size)
sym_bcast_to = mx.symbol.broadcast_to(a, shape=tuple(target_shape))
sym_bcast_to_with_zero = mx.symbol.broadcast_to(a, shape=tuple(target_shape_with_zero))
sym_bcast_like = mx.symbol.broadcast_like(a, sym_bcast_to)
def test_broadcasting_ele(sym_bcast):
dat_npy = np.random.rand(*shape)
groundtruth = dat_npy
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.random.rand(*target_shape)
grad_groundtruth = np_reduce(outgrad_npy, axis=axis, keepdims=True,
numpy_reduce_func=np.sum)
net = sym_bcast.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
assert (net.outputs[0].shape == target_shape).all()
assert_almost_equal(net.outputs[0], groundtruth, rtol=1e-4)
net.backward(out_grads=mx.nd.array(outgrad_npy))
assert_almost_equal(grad_nd, grad_groundtruth, rtol=1e-4)
test_broadcasting_ele(sym_bcast_axis)
test_broadcasting_ele(sym_bcast_to)
test_broadcasting_ele(sym_bcast_to_with_zero)
test_broadcasting_ele(sym_bcast_like)
@with_seed()
def test_transpose():
for ndim in range(1, 7):
for t in range(5):
dims = list(np.random.randint(1, 10, size=ndim))
axes = list(range(ndim))
random.shuffle(axes)
axes = tuple(axes)
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
@with_seed()
def test_pseudo2dtranspose():
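    # Helpers: draw two distinct sorted indices and build an axes permutation
    # that moves one contiguous block of axes to the end.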
def getTwoInts(mn, mx):
n1 = np.random.randint(mn, mx)
n2 = np.random.randint(mn, mx-1)
n2 = n2 if n2 < n1 else n2+1
return tuple(np.sort([n1, n2]))
def getTranspAxes(ndim):
axes = list(range(ndim))
n1, n2 = getTwoInts(0,ndim)
return tuple(axes[:n1]+axes[n2:]+axes[n1:n2])
for ndim in range(2, 7):
for dt in ['int8', 'half', 'int32', 'int64']:
for _ in range(5):
dims = list(np.random.randint(5, 20, size=ndim))
axes = getTranspAxes(ndim)
x = mx.nd.array(np.random.normal(size=dims), dtype=dt)
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
@with_seed()
def test_big_transpose():
n = [1]
d = list(np.random.randint(132, 160, size=1))
hw = list(np.random.randint(256, 320, size=2))
c = [10]
dims = n + d + hw + c
axes = (0,4,1,2,3)
x_np = np.random.normal(size=dims).astype('uint8')
x = mx.nd.array(x_np, dtype='uint8')
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x_np, axes=axes), y.asnumpy().astype('uint8'))
axes = (0,2,3,4,1)
z = mx.nd.transpose(y, axes=axes)
assert_allclose(x_np, z.asnumpy().astype('uint8'))
@with_seed()
def test_larger_transpose():
x = mx.nd.random.normal(shape=(50,51))
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
@with_seed()
def test_expand_dims():
for ndim in range(1, 6):
for axis in range(-ndim + 1, ndim):
x = np.random.normal(size=list(np.random.randint(1, 10, size=ndim)))
y = mx.nd.array(x)
x1 = np.expand_dims(x, axis=axis)
y1 = mx.nd.expand_dims(y, axis=axis)
assert_allclose(x1, y1.asnumpy())
assert_allclose(x1.shape, y1.shape)
@with_seed()
def test_crop():
for ndim in range(1, 6):
for t in range(5):
dims = []
begin = []
end = []
idx = []
for i in range(ndim):
d = random.randint(1, 5)
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if b == 0 and random.randint(0, 1):
b = None
elif b != 0 and random.randint(0, 1):
b -= d
if e == d and random.randint(0, 1):
e = None
elif e != d and random.randint(0, 1):
e -= d
dims.append(d)
begin.append(b)
end.append(e)
idx.append(slice(b, e))
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.crop(x, begin=tuple(begin), end=tuple(end))
assert_allclose(x.asnumpy()[idx], y.asnumpy())
vx = mx.sym.Variable('x')
vy = mx.sym.crop(vx, begin=tuple(begin), end=tuple(end))
check_numeric_gradient(vy, [x.asnumpy()])
@with_seed()
def test_slice_axis():
for ndim in range(1, 6):
shape = np.random.randint(1, 11, size=(ndim,))
for t in range(ndim):
d = shape[t]
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if np.random.rand() > 0.6:
e = None
else:
if e < d and np.random.rand() > 0.5:
e = e - d
if np.random.rand() > 0.5:
b = b - d
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
idx[t] = slice(b, e)
X = mx.symbol.Variable('X')
x = mx.nd.array(np.random.normal(size=shape))
Y = mx.symbol.slice_axis(data=X, axis=t, begin=b, end=e)
xgrad = mx.nd.empty(x.shape)
exec1 = Y.bind(default_context(), args = [x], args_grad = {'X': xgrad})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
x_grad_npy = np.random.normal(size=x.shape)
xgrad = mx.nd.array(x_grad_npy)
exec2 = Y.bind(default_context(), args=[x], args_grad={'X': xgrad}, grad_req="add")
exec2.forward(is_train=True)
exec2.backward([exec2.outputs[0]])
xx = np.zeros(shape=x.shape, dtype=np.float32)
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5)
@with_seed()
def test_slice_like():
for ndim in range(1, 6):
from_shape = np.random.randint(1, 11, size=(ndim,))
shape = [s + np.random.randint(0, 3) for s in from_shape]
for t in range(ndim):
if t > 0:
axes = np.random.randint(0, ndim, size=t).tolist()
else:
axes = []
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
if i in axes or not axes:
idx[i] = slice(0, from_shape[i])
if axes:
pos = np.random.randint(0, t)
if axes[pos] > 0:
axes[pos] -= ndim # negative index
X = mx.symbol.Variable('X')
X_1 = mx.symbol.Variable('X1')
x = mx.nd.array(np.random.normal(size=shape))
x1 = mx.nd.array(np.random.normal(size=from_shape))
Y = mx.symbol.slice_like(data=X, shape_like=X_1, axes=axes)
xgrad = mx.nd.empty(x.shape)
xgrad1 = mx.nd.empty(x1.shape)
exec1 = Y.bind(default_context(), args = [x, x1],
args_grad = {'X': xgrad, 'X1': xgrad1})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
assert_allclose(xgrad1.asnumpy(), mx.nd.zeros_like(xgrad1).asnumpy())
@with_seed()
def test_slice_like_different_types():
x = [[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.]]
y = [[ 0., 0., 0.],
[ 0., 0., 0.]]
x = mx.nd.array(x)
y = mx.nd.array(y).astype('int32')
z = mx.nd.slice_like(x, y)
assert_allclose(z.asnumpy(), [[1,2,3],[5,6,7]])
@with_seed()
def test_reshape_like_different_types():
x = mx.nd.zeros((2, 3))
y = mx.nd.array([[1, 2], [3, 4], [5, 6]])
y = mx.nd.array(y).astype('int32')
z = mx.nd.reshape_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0],[0,0]])
@with_seed()
def test_flip():
for ndim in range(1, 6):
for t in range(5):
dims = [random.randint(1,10) for i in range(ndim)]
axis = random.randint(0, ndim-1)
idx = [slice(None, None, -1) if i == axis else slice(None, None) for i in range(ndim)]
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.flip(x, axis=axis)
assert_allclose(x.asnumpy()[idx], y.asnumpy())
@with_seed()
def test_stn():
import sys
np.set_printoptions(threshold=sys.maxsize)
num_filter = 2 # conv of loc net
kernel = (3, 3) # conv of loc net
num_hidden = 6 # fc of loc net
for n in [1, 2, 3, 4]:
for c in [1, 2, 3, 4]:
            for h in [5, 9, 13, 17]:  # for convenience of testing, the third and fourth input dims should be 4*x + 1
for w in [5, 9, 13, 17]:
data_shape = (n, c, h, w)
target_shape = (int((data_shape[2]+1)/2), int((data_shape[3]+1)/2))
data = mx.sym.Variable(name="data")
loc = mx.sym.Convolution(data=data, kernel=kernel, pad=(1, 1), num_filter=num_filter, name="loc_conv")
loc = mx.sym.Flatten(data=loc)
loc = mx.sym.FullyConnected(data=loc, num_hidden=num_hidden, name="loc_fc")
stn = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=target_shape,
transform_type="affine", sampler_type="bilinear")
arg_names = stn.list_arguments()
arg_shapes, out_shapes, _ = stn.infer_shape(data=data_shape)
# check shape
assert out_shapes[0] == (data_shape[0], data_shape[1], target_shape[0], target_shape[1])
dev = default_context()
#dev = mx.gpu(0)
args = {}
args['data'] = mx.random.normal(0, 1, data_shape, ctx=mx.cpu()).copyto(dev)
args['loc_conv_weight'] = mx.nd.zeros((num_filter, data_shape[1], kernel[0], kernel[1]), ctx=dev)
args['loc_conv_bias'] = mx.nd.zeros((num_filter,), ctx=dev)
args['loc_fc_weight'] = mx.nd.zeros((6, num_filter*data_shape[2]*data_shape[3]), ctx=dev)
args['loc_fc_bias'] = mx.nd.array([0.5, 0, 0, 0, 0.5, 0], ctx=dev)
grad_grad = [mx.nd.zeros(shape, ctx=dev) for shape in arg_shapes]
exe = stn.bind(dev, args=args, args_grad=grad_grad)
exe.forward(is_train=True)
out = exe.outputs[0]
# check forward
assert_almost_equal(out, args['data'].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
out_grad = mx.nd.ones(out.shape, ctx=dev)
exe.backward([out_grad])
# check backward
assert_almost_equal(out_grad, grad_grad[0].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
def test_stn_valid_sampling():
target_shape = (
28,
28,
)
src_shape = (
42,
42,
)
data = mx.sym.Variable(name="data")
loc = mx.sym.Variable(name="loc")
data_array = np.zeros((
1,
1,
) + src_shape)
# Have an ever so slight rotation.
loc_array = np.array(
[[9.03887e-05, 1.00015, 0.00174931, 1.0003, 0.000311901,
-0.000919065]])
stn = mx.sym.SpatialTransformer(
data=data,
loc=loc,
target_shape=target_shape,
transform_type="affine",
sampler_type="bilinear")
grad_req = {k: 'write' for k in stn.list_arguments()}
grads = {
'data': mx.nd.array(np.zeros_like(data_array)),
'loc': mx.nd.array(np.zeros_like(loc_array))
}
executor = stn.bind(
ctx=default_context(),
args={'data': mx.nd.array(data_array),
'loc': mx.nd.array(loc_array)},
grad_req=grad_req,
args_grad=grads)
executor.forward(is_train=True)
executor.backward(mx.nd.ones((
1,
1,
) + target_shape))
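# dot: forward/backward against np.dot for 2-D (and, on GPU, 1-D) operands,
# plus numeric-gradient checks of all transpose_a/transpose_b combinations.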
@with_seed()
def test_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
ndims = [2]
if ctx.device_type == 'gpu':
dtypes += ['float16']
ndims += [1]
# Test normal dot.
for ndim in ndims:
for data_type in dtypes:
tol = 1e-2 if data_type == 'float16' else 1e-3
for m in range(1, 5):
for k in range(1, 5):
if ndim == 1 and k != 1:
pass
for n in range(1, 5):
a_shape = (m, k) if ndim == 2 else (m,)
b_shape = (k, n) if ndim == 2 else (n,)
a_npy = np.random.normal(0, 1, (m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((m, k), dtype=data_type)
bgrad_npy = np.empty((k, n), dtype=data_type)
c_npy[:, :] = np.dot(a_npy[:, :], b_npy[:, :])
bgrad_npy[:, :] = np.dot(a_npy[:, :].T, ograd_npy[:, :])
agrad_npy[:, :] = np.dot(ograd_npy[:, :], b_npy[:, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.dot(a, b)
exe = c.simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape)
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0], c_npy, rtol=tol, atol=tol)
exe.backward(out_grads=[mx.nd.array(ograd_npy, mx.cpu()).astype(data_type)])
assert_almost_equal(exe.grad_dict['a'], agrad_npy, rtol=tol, atol=tol)
assert_almost_equal(exe.grad_dict['b'], bgrad_npy, rtol=tol, atol=tol)
# Test dot with transpose flag using gradient checker.
def dot_sym(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y)
def dot_sym_xT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True)
def dot_sym_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_b=True)
def dot_sym_xT_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True, transpose_b=True)
for data_type in dtypes:
for ashape, bshape in [((3, 4), (4, 5)), ((2, 3, 4), (4, 5, 6))]:
m1_npy = np.random.uniform(-1, 1, ashape)
m1_npy = m1_npy.astype(data_type)
m2_npy = np.random.uniform(-1, 1, bshape)
m2_npy = m2_npy.astype(data_type)
check_numeric_gradient(dot_sym(data_type), [m1_npy, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT(data_type), [m1_npy.T, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_yT(data_type), [m1_npy, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT_yT(data_type), [m1_npy.T, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
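# batch_dot: per-sample np.dot reference over the batch dimension, checking
# forward, backward, and gradient accumulation with grad_req='add'.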
@with_seed()
def test_batch_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
if ctx.device_type == 'gpu':
dtypes += ['float16']
for data_type in dtypes:
for batch_size in range(1, 5):
for m in range(1, 5):
for k in range(1, 5):
for n in range(1, 5):
transpose_a = (np.random.rand() > 0.5)
transpose_b = (np.random.rand() > 0.5)
a_npy = np.random.normal(0, 1, (batch_size, m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (batch_size, k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((batch_size, m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (batch_size, m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((batch_size, m, k), dtype=data_type)
bgrad_npy = np.empty((batch_size, k, n), dtype=data_type)
a_init_grad_npy = np.random.normal(size=(batch_size, m, k))
a_init_grad_npy = a_init_grad_npy.astype(data_type)
b_init_grad_npy = np.random.normal(size=(batch_size, k, n))
b_init_grad_npy = b_init_grad_npy.astype(data_type)
for i in range(batch_size):
c_npy[i, :, :] = np.dot(a_npy[i, :, :], b_npy[i, :, :])
bgrad_npy[i, :, :] = np.dot(a_npy[i, :, :].T, ograd_npy[i, :, :])
agrad_npy[i, :, :] = np.dot(ograd_npy[i, :, :], b_npy[i, :, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.batch_dot(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
if transpose_a:
a_npy = np.transpose(a_npy, axes=(0, 2, 1))
agrad_npy = np.transpose(agrad_npy, axes=(0, 2, 1))
a_init_grad_npy = np.transpose(a_init_grad_npy, axes=(0, 2, 1))
if transpose_b:
b_npy = np.transpose(b_npy, axes=(0, 2, 1))
bgrad_npy = np.transpose(bgrad_npy, axes=(0, 2, 1))
b_init_grad_npy = np.transpose(b_init_grad_npy, axes=(0, 2, 1))
exe = c.simple_bind(ctx=ctx,
a=a_npy.shape, b=b_npy.shape, grad_req='write')
exe_add = c.simple_bind(ctx=ctx,
a=a_npy.shape, b=b_npy.shape, grad_req='add')
exe_add.grad_dict['a'][:] = a_init_grad_npy
exe_add.grad_dict['b'][:] = b_init_grad_npy
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0], c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
assert_almost_equal(exe.grad_dict['a'], agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe.grad_dict['b'], bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe_add.forward(is_train=True, a=a_npy, b=b_npy)
exe_add.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
assert_almost_equal(exe_add.grad_dict['a'],
agrad_npy + a_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe_add.grad_dict['b'],
bgrad_npy + b_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
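# Correlation operator: symbol builder plus NumPy reference implementations of
# the forward and backward passes used by the unit test below.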
def get_correlation(data1,data2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply):
img1 = mx.sym.Variable('img1')
img2 = mx.sym.Variable('img2')
return mx.sym.Correlation(data1=img1,data2=img2,kernel_size =kernel_size,max_displacement = max_displacement,
stride1 = stride1,stride2 = stride2,pad_size= pad_size,is_multiply = is_multiply)
def correlation_forward(data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp2 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp1[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]] = data1[:,:,:,:]
tmp2[:, :, pad_size:pad_size + data2.shape[2], pad_size:pad_size + data2.shape[3]] = data2[:,:,:,:]
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
out[nbatch, top_channel, i, j] += tmp1[nbatch, channel,y1 + h, x1 + w] * tmp2[nbatch, channel, y2 + h,x2 + w]
else:
out[nbatch, top_channel, i, j] += abs(tmp1[nbatch, channel, y1 + h, x1 + w] - tmp2[nbatch, channel, y2 + h, x2 + w])
out /= float(kernel_size**2*data1.shape[1])
return out,tmp1,tmp2
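# NumPy reference for the Correlation backward pass: the incoming gradient for each
# displacement channel is scattered back onto the padded copies of data1 and data2
# (product rule in the multiplicative mode, sign of the difference otherwise), scaled
# by the same normalisation factor, and the padding is stripped before returning.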
def correlation_backward(out_grad,tmp1,tmp2,data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1_grad = np.zeros(tmp1.shape)
tmp2_grad = np.zeros(tmp2.shape)
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*tmp2[nbatch, channel, y2 + h,x2 + w]
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*tmp1[nbatch, channel, y1 + h,x1 + w]
else:
sgn = 1 if (tmp1[nbatch, channel, y1 + h,x1 + w]>=tmp2[nbatch, channel, y2 + h,x2 + w]) else -1
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*sgn
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*(-sgn)
tmp1_grad = tmp1_grad / float(kernel_size**2*data1.shape[1])
tmp2_grad = tmp2_grad / float(kernel_size**2*data1.shape[1])
return tmp1_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],tmp2_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],
def unittest_correlation(data_shape,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply,dtype):
img1 = np.random.random(data_shape)
img1 = img1.astype(dtype)
img2 = np.random.random(data_shape)
img2 = img2.astype(dtype)
net1 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply)
net2 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply )
exe1 = net1.simple_bind(default_context(),img1=img1.shape,img2=img1.shape)
exe1.arg_dict['img1'][:] = img1
exe1.arg_dict['img2'][:] = img2
#cpu forward
exe1.forward(is_train=True)
# python forward
forward_result,tmp1,tmp2 = correlation_forward(img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# forward error
assert_almost_equal(exe1.outputs[0], forward_result, rtol=1e-4, atol=1e-4)
# out_grad
a = np.ones(forward_result.shape)
out_grad1 = mx.nd.array(a,default_context())
# cpu backward
exe1.backward(out_grads=out_grad1)
# python backward
grad1,grad2 = correlation_backward(a,tmp1,tmp2,img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# backward error
assert_almost_equal(exe1.grad_dict['img1'], grad1, rtol=1e-3, atol=1e-4)
assert_almost_equal(exe1.grad_dict['img2'], grad2, rtol=1e-3, atol=1e-4)
@with_seed()
def test_correlation():
def test_infer_type(dtype):
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
corr = mx.sym.Correlation(data1=a, data2=b)
arg_type1, out_type1, _ = corr.infer_type(a=dtype)
if arg_type1[0] != np.dtype(dtype) and arg_type1[1] != np.dtype(dtype) and out_type1[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from a is not as expected, "
                                            "Expected :%s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type1[0], arg_type1[1], out_type1[0]),
                                    names=['a', 'b'])
raise AssertionError(msg)
arg_type2, out_type2, _ = corr.infer_type(b=dtype)
if arg_type2[0] != np.dtype(dtype) and arg_type2[1] != np.dtype(dtype) and out_type2[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from b is not as expected, "
                                            "Expected :%s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type2[0], arg_type2[1], out_type2[0]),
                                    names=['a', 'b'])
raise AssertionError(msg)
for dtype in ['float16', 'float32']:
test_infer_type(dtype)
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 1,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 10,stride1 = 1,stride2 = 2,pad_size = 10,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,6,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,11,11), kernel_size = 5,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
@with_seed()
def test_support_vector_machine_l1_svm():
xpu = default_context()
shape = (20, 10)
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SVMOutput(data=X, label=L, use_linear=True)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
l_np = np.random.randint(0, shape[1], (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
assert_almost_equal(x_np, exec1.outputs[0])
exec1.backward()
l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1]))
l_mask = np.array(l_mask, dtype=np.float32)*2 -1
grad_np = (-1) * l_mask * np.greater(1 - l_mask * x_np, 0)
assert_almost_equal(grad_np, grad)
@with_seed()
def test_support_vector_machine_l2_svm():
xpu = default_context()
shape = (20, 10)
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SVMOutput(data=X, label=L)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
x_np = x_np.astype(np.float32)
l_np = np.random.randint(0, shape[1], (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
assert_almost_equal(x_np, exec1.outputs[0])
exec1.backward()
l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1]))
l_mask = np.array(l_mask, dtype=np.float32)*2 -1
grad_np = (-2)*l_mask*np.maximum(1-l_mask*x_np,0)
grad_np = grad_np.astype(np.float32)
assert_almost_equal(grad_np, grad)
# Seed set because the test is not robust enough to operate on random data
@with_seed(1234)
def test_roipooling():
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.ROIPooling(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1)
x1 = np.random.rand(4, 3, 12, 8).astype('float32')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2], [1, 3.1, 1.1, 5.2, 10.2], [0, 3, 3, 3, 3]], dtype='float32')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'write', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'add', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1E-4)
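# check_pad_with_shape compares mx.sym.Pad against np.pad: the flat pad_width tuple is
# grouped into (before, after) pairs per axis and the same mode string ('constant',
# 'edge' or 'reflect') is passed to NumPy, followed by a numeric gradient check.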
def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"):
# bind with label
X = mx.symbol.Variable('X', dtype=dtype)
Y = mx.symbol.Pad(data=X, mode=mode, pad_width=pad_width)
x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu(), dtype=dtype).copyto(xpu)
# numpy result
pad_grouped = list(zip(*[iter(list(pad_width))] * 2))
np_out = np.pad(x.asnumpy(), pad_grouped, mode)
# mxnet result
grad = mx.nd.empty(shape, ctx = xpu, dtype=dtype)
exec1 = Y.bind(xpu, args = [x], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0]
# compare numpy + mxnet
assert_almost_equal(out, np_out)
# grad check
check_numeric_gradient(Y, [x.asnumpy()], numeric_eps=1e-2, rtol=1e-2)
@with_seed()
def test_pad():
ctx = default_context()
shape1 = (2, 3, 3, 5)
pad1 = (0, 0, 0, 0, 1, 2, 3, 4)
shape2 = (2, 3, 3, 5, 4)
pad2 = (0, 0, 0, 0, 1, 2, 3, 4, 3, 1)
# note: this op doesn't support ints yet. Add tests when supported
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
check_pad_with_shape(shape1, ctx, pad1, 'constant', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'edge', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'constant', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'edge', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'reflect', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'reflect', dtype)
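# NumPy reference for InstanceNorm: mean and variance are taken per (sample, channel)
# over the spatial dimensions, then out = weight * (x - mean) / sqrt(var + eps) + bias.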
def np_instance_norm(data, weight, bias, eps):
spatial_dims = data.shape[2::]
num_spatial_vals = np.prod(np.array(spatial_dims))
scale = 1/float(num_spatial_vals)
sum_axis = tuple(range(2, data.ndim))
mean = scale * np.sum(data, axis = sum_axis)
mean = np.reshape(np.repeat(mean, num_spatial_vals), data.shape)
var = scale * np.sum((data - mean)**2, axis = sum_axis)
var = np.reshape(np.repeat(var, num_spatial_vals), data.shape)
weightBatch = np.tile(weight, (data.shape[0], 1))
weightBatch = np.reshape(np.repeat(weightBatch, num_spatial_vals), data.shape)
biasBatch = np.tile(bias, (data.shape[0], 1))
biasBatch = np.reshape(np.repeat(biasBatch, num_spatial_vals), data.shape)
return weightBatch * (data - mean)/np.sqrt(var + eps) + biasBatch
def check_instance_norm_with_shape(shape, xpu):
# bind with label
eps = 0.001
X = mx.symbol.Variable('X')
G = mx.symbol.Variable('G')
B = mx.symbol.Variable('B')
Y = mx.symbol.InstanceNorm(data=X, beta=B, gamma=G, eps=eps)
x = mx.random.normal(0, 1, shape, ctx=mx.cpu()).copyto(xpu)
gamma = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
beta = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
np_out = np_instance_norm(x.asnumpy(), gamma.asnumpy(), beta.asnumpy(), eps)
exec1 = Y.bind(xpu, args = {'X':x, 'G':gamma, 'B':beta})
exec1.forward(is_train=False)
out = exec1.outputs[0]
assert_almost_equal(out, np_out, rtol=1e-4, atol=1e-4)
check_numeric_gradient(Y, {'X':x.asnumpy(), 'G':gamma.asnumpy(), 'B':beta.asnumpy()},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
@with_seed()
def test_instance_normalization():
check_instance_norm_with_shape((1, 1, 1), default_context())
check_instance_norm_with_shape((2, 1, 2), default_context())
check_instance_norm_with_shape((2,4,5,6), default_context())
check_instance_norm_with_shape((3,3,2,3,2,1,1), default_context())
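# L2Normalization reference: 'channel' normalises across channels at each spatial
# position, 'spatial' normalises each channel map over its spatial elements, and
# 'instance' normalises each sample over all of its elements; eps is added to the
# norm for numerical stability.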
def check_l2_normalization(in_shape, mode, dtype, norm_eps=1e-10):
ctx = default_context()
data = mx.symbol.Variable('data')
out = mx.symbol.L2Normalization(data=data, mode=mode, eps=norm_eps)
in_data = np.random.uniform(-1, 1, in_shape).astype(dtype)
# calculate numpy results
if mode == 'channel':
assert in_data.ndim > 2
np_norm = np.linalg.norm(in_data, axis=1) + norm_eps
np_norm = np.repeat(1. / np.expand_dims(np_norm, axis=1), in_data.shape[1], axis=1)
np_out = np.multiply(in_data, np_norm)
elif mode == 'spatial':
assert in_data.ndim > 2
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], s[1], -1)), axis=2) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // (s[0] * s[1]), axis=2)
np_out = np.multiply(in_data, np_norm.reshape(s))
elif mode == 'instance':
assert in_data.ndim > 1
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], -1)), axis=1) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // s[0], axis=1)
np_out = np.multiply(in_data, np_norm.reshape(s))
else:
raise RuntimeError('Unknown l2 normalization mode')
exe = out.simple_bind(ctx=ctx, data=in_data.shape)
output = exe.forward(is_train=True, data=in_data)
# compare numpy + mxnet
    assert_almost_equal(exe.outputs[0], np_out, rtol=1e-2 if dtype == 'float16' else 1e-5, atol=1e-5)
# check gradient
check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=5e-3)
@with_seed()
def test_l2_normalization():
for dtype in ['float16', 'float32', 'float64']:
for mode in ['channel', 'spatial', 'instance']:
nbatch = random.randint(1, 4)
nchannel = random.randint(3, 5)
height = random.randint(4, 6)
check_l2_normalization((nbatch, nchannel, height), mode, dtype)
width = random.randint(5, 7)
check_l2_normalization((nbatch, nchannel, height, width), mode, dtype)
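# check_layer_normalization compares LayerNorm against a NumPy reference that takes the
# statistics along a single axis and computes gamma * (x - mean) / sqrt(var + eps) + beta;
# it optionally checks numeric gradients and compares analytical NumPy gradients for both
# 'write' and 'add' gradient requests.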
def check_layer_normalization(in_shape, axis, eps, dtype=np.float32,
forward_check_eps=1E-3, backward_check_eps=1E-3,
npy_grad_check=True, finite_grad_check=True):
def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5):
if axis < 0:
axis += data.ndim
broadcast_shape = [1 for _ in range(data.ndim)]
broadcast_shape[axis] = data.shape[axis]
mean = data.mean(axis=axis, keepdims=True).astype(dtype)
var = data.var(axis=axis, keepdims=True).astype(dtype)
std = np.sqrt(var + dtype(eps)).astype(dtype)
out = np.reshape(gamma, broadcast_shape) * (data - mean) / std + \
np.reshape(beta, broadcast_shape)
return out
def npy_layer_norm_grad(data, gamma, out_grad, axis, eps):
if axis < 0:
axis += data.ndim
exclude_axis = tuple([ele for ele in range(data.ndim) if ele != axis])
data_mean = data.mean(axis=axis, keepdims=True)
data_var = data.var(axis=axis, keepdims=True)
data_std = np.sqrt(data_var + eps)
centered_data = (data - data_mean) / data_std
gamma_grad = (centered_data * out_grad).sum(axis=exclude_axis, keepdims=True)
beta_grad = out_grad.sum(axis=exclude_axis, keepdims=True)
w = out_grad * gamma.reshape([1 if i != axis else data.shape[axis] for i in range(data.ndim)])\
/ data_std
data_grad = w - w.mean(axis=axis, keepdims=True)\
- centered_data * (w * centered_data).mean(axis=axis, keepdims=True)
gamma_grad = gamma_grad.reshape((-1,))
beta_grad = beta_grad.reshape((-1,))
return data_grad, gamma_grad, beta_grad
ctx = default_context()
data = np.random.normal(0, 1, in_shape).astype(dtype)
gamma = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
beta = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
data_s = mx.symbol.Variable('data')
gamma_s = mx.symbol.Variable('gamma')
beta_s = mx.symbol.Variable('beta')
out_s = mx.symbol.LayerNorm(data=data_s, gamma=gamma_s, beta=beta_s, axis=axis, eps=eps)
exe = out_s.simple_bind(ctx, data=in_shape)
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
out_nd = exe.forward()[0]
out = npy_layer_norm(data, gamma, beta, axis, eps)
assert_almost_equal(out, out_nd, forward_check_eps, forward_check_eps)
if finite_grad_check:
for req in ['write', 'add']:
check_numeric_gradient(out_s, {'data': data, 'gamma': gamma, 'beta': beta},
grad_nodes={'data': req, 'gamma': req, 'beta': req},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
if npy_grad_check:
# Test for grad_req = write
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
exe = out_s.simple_bind(ctx, data=in_shape, grad_req='write')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad =\
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(), gt_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(), gt_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(), gt_beta_grad, backward_check_eps, backward_check_eps)
# Test for grad_req = add
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_data_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_gamma_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
init_beta_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
exe = out_s.simple_bind(ctx, data=in_shape, grad_req='add')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.grad_dict['data'][:] = init_data_grad
exe.grad_dict['gamma'][:] = init_gamma_grad
exe.grad_dict['beta'][:] = init_beta_grad
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad = \
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(),
gt_data_grad + init_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(),
gt_gamma_grad + init_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(),
gt_beta_grad + init_beta_grad, backward_check_eps, backward_check_eps)
@with_seed()
def test_norm():
try:
import scipy
assert LooseVersion(scipy.__version__) >= LooseVersion('0.1')
from scipy.linalg import norm as sp_norm
except (AssertionError, ImportError):
print("Could not import scipy.linalg.norm or scipy is too old. "
"Falling back to numpy.linalg.norm which is not numerically stable.")
from numpy.linalg import norm as sp_norm
def l1norm(input_data, axis=0, keepdims=True):
return np.sum(abs(input_data), axis=axis, keepdims=keepdims)
def l2norm(input_data, axis=0, keepdims=True):
return sp_norm(input_data, axis=axis, keepdims=keepdims)
ctx = default_context()
data = mx.symbol.Variable('data')
in_data_dim = random_sample([2,3,4], 1)[0]
in_shape = rand_shape_nd(in_data_dim, dim=5)
epsilon = 1e-3
acc_type = {np.float16: np.float32, np.float32: np.float32, np.float64: np.float64,
np.int32: np.int32, np.int64: np.int64}
dtype_to_str = {np.float16: 'float16', np.float32: 'float32', np.float64: 'float64',
np.int32: 'int32', np.int64: 'int64'}
is_windows = sys.platform.startswith('win')
for enforce_safe_acc in ["1", "0"]:
if is_windows:
if enforce_safe_acc == "0":
break
enforce_safe_acc = "0" if "MXNET_SAFE_ACCUMULATION" not in os.environ else os.environ["MXNET_SAFE_ACCUMULATION"]
else:
os.environ["MXNET_SAFE_ACCUMULATION"] = enforce_safe_acc
for order in [1, 2]:
for dtype in [np.float16, np.float32, np.float64]:
for i in range(in_data_dim):
for out_dtype in ['float32', 'float64']:
backward_dtype = np.float32 if out_dtype == 'float32' else np.float64
accumulation_type = acc_type[dtype]
if enforce_safe_acc == "0":
backward_dtype = dtype
out_dtype = dtype_to_str[dtype]
accumulation_type = dtype
skip_backward = 'int' in out_dtype
in_data = np.random.uniform(-1, 1, in_shape).astype(accumulation_type)
in_data[abs(in_data) < epsilon] = 2 * epsilon
norm_sym = mx.symbol.norm(data=data, ord=order, axis=i, out_dtype=out_dtype, keepdims=True)
                        npy_out = l1norm(in_data, i) if order == 1 else l2norm(in_data, i)
                        npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data.astype(dtype)], [npy_out.astype(out_dtype)],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=1e-4 if dtype == np.float16 else 1e-5, ctx=ctx, dtype=dtype)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data.astype(dtype)],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward], rtol=1e-3, atol=1e-5, ctx=ctx,
dtype=backward_dtype)
# Disable numeric gradient https://github.com/apache/incubator-mxnet/issues/11509
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
if i < in_data_dim-1:
norm_sym = mx.symbol.norm(data=data, ord=order, axis=(i, i+1), keepdims=True)
                            npy_out = l1norm(in_data, (i, i+1)) if order == 1 else l2norm(in_data, (i, i+1))
                            npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data], [npy_out.astype(dtype)],
rtol=1e-2 if dtype is np.float16 else 1e-3,
atol=1e-4 if dtype is np.float16 else 1e-5, ctx=ctx)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward.astype(out_dtype)],
rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype)
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
@with_seed()
def test_layer_norm():
for enforce_safe_acc in ["1", "0"]:
os.environ["MXNET_SAFE_ACCUMULATION"] = enforce_safe_acc
for dtype, forward_check_eps, backward_check_eps in zip([np.float16, np.float32, np.float64],
[1E-2, 1E-3, 1E-4],
[1E-2, 1E-3, 1E-4]):
if dtype != np.float16:
in_shape_l, finite_grad_check_l = [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False]
else:
in_shape_l, finite_grad_check_l = [(10, 6, 5), (10, 10)], [True, True] # large input + fp16 does not pass the forward check
for in_shape, finite_grad_check in zip(in_shape_l, finite_grad_check_l):
for axis in range(-len(in_shape), len(in_shape)):
for eps in [1E-2, 1E-3]:
if dtype == np.float16:
npy_grad_check = False
else:
npy_grad_check = True
check_layer_normalization(in_shape, axis, eps, dtype=dtype,
forward_check_eps=forward_check_eps,
backward_check_eps=backward_check_eps,
npy_grad_check=npy_grad_check,
finite_grad_check=finite_grad_check)
# Numpy Implementation of Sequence Ops
def sequence_last_numpy(array, lengths, axis):
# create new array of dims [batch, seqlen, ...]
array2 = np.moveaxis(array, axis, 1)
dims = array2.shape
if lengths is None:
return array2[:, -1]
lengths = list(lengths)
return np.array([array2[i, int(lengths[i]) - 1] for i in range(dims[0])])
def sequence_mask_numpy(array, lengths, axis, value):
if lengths is None:
return array
arrayMask = array.copy()
# conform to [batch, seqlen, ...]
arrayMask = np.moveaxis(arrayMask, axis, 1)
shape = arrayMask.shape
lengths = list(lengths)
for i in range(shape[0]):
arrayMask[i, int(lengths[i]):] = value
return np.moveaxis(arrayMask, 1, axis)
def sequence_reverse_numpy(array, lengths, axis):
rarray = array.copy()
# conform to [batch, seqlen, ...]
rarray = np.moveaxis(rarray, axis, 1)
shape = rarray.shape
if lengths is None:
lengths = [shape[1]] * shape[0]
lengths = list(lengths)
for i in range(shape[0]):
j = int(lengths[i])
rarray[i,:j] = rarray[i,:j][::-1]
return np.moveaxis(rarray, 1, axis)
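# check_sequence_func runs SequenceLast/SequenceMask/SequenceReverse against the NumPy
# references above for several shapes, with and without an explicit sequence_length input,
# and checks numeric gradients for 'write', 'add' and 'null' gradient requests.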
def check_sequence_func(ftype, mask_value=0, axis=0):
# bind with label
xpu = default_context()
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L') # lengths
shapes = [(3, 4), (1, 1), (3, 4, 3, 1, 1)]
for seqlenQ in [True, False]:
for ary_dtype in [np.float32]:
for idx_dtype in [np.int32, np.float32]:
for s in shapes:
x = mx.random.uniform(-1, 1, s, ctx=mx.cpu()).astype(ary_dtype).copyto(xpu)
batch = s[1] if (axis == 0) else s[0]
seqlen = s[axis]
l_np = np.random.randint(1, seqlen + 1, batch)
l = mx.nd.array(l_np, ctx=mx.cpu(), dtype=idx_dtype).copyto(xpu)
if not seqlenQ:
l_np = None
args = {'data':X, 'use_sequence_length':seqlenQ, "axis":axis}
if seqlenQ:
args['sequence_length'] = L
if ftype == "last":
Y = mx.symbol.SequenceLast(**args)
np_out = sequence_last_numpy(x.asnumpy(), l_np, axis)
elif ftype == "mask":
args['value'] = mask_value
Y = mx.symbol.SequenceMask(**args)
np_out = sequence_mask_numpy(x.asnumpy(), l_np, axis, mask_value)
elif ftype == "reverse":
Y = mx.symbol.SequenceReverse(**args)
np_out = sequence_reverse_numpy(x.asnumpy(), l_np, axis)
fargs = [x, l] if seqlenQ else [x]
gargs = [x.asnumpy(), l_np] if seqlenQ else [x.asnumpy()]
check_symbolic_forward(Y, fargs, [np_out], dtype="asnumpy")
check_numeric_gradient(Y, gargs, grad_nodes={'X':'write'},
numeric_eps=1e-2, rtol=1e-2)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'add'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'null'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
@with_seed()
@unittest.skip("Flaky test: https://github.com/apache/incubator-mxnet/issues/11395")
def test_sequence_last():
check_sequence_func("last", axis=0)
check_sequence_func("last", axis=1)
@with_seed()
def test_sequence_mask():
check_sequence_func("mask", axis = 0, mask_value=-2.3)
check_sequence_func("mask", axis = 1, mask_value=0.3)
def check_sequence_reverse(xpu):
# sample data
arr = np.array(
[[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr1 = np.array(
[[[ 13., 14., 15.],
[ 16., 17., 18.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]]])
arr2 = np.array(
[[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr3 = np.array(
[[[ 7., 8., 9.],
[ 16., 17., 18.]],
[[ 1., 2., 3.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 4., 5., 6.]]])
# test for matrix case
seq_len_1 = [1, 2, 2]
arr_4 = np.array([[7., 8., 9.], [16., 17., 5.4]], dtype=np.float32)
arr_5 = np.array([[7., 17., 5.4], [16., 8., 9.]], dtype=np.float32)
def test_wrapper(arr, xpu, sequence_length=None, use_sequence_length=False):
# MxNet symbol creation
seq = mx.sym.Variable('seq')
if sequence_length and use_sequence_length:
seq_len = mx.sym.Variable('seq_len')
else:
# ensure that both are disabled, not just one
seq_len=None
use_sequence_length=False
rev = mx.sym.SequenceReverse(data=seq, sequence_length=seq_len, use_sequence_length=use_sequence_length)
# MxNet symbol execution
if sequence_length:
bound = rev.bind(xpu, {'seq': mx.nd.array(arr), 'seq_len': mx.nd.array(sequence_length)})
else:
bound = rev.bind(xpu, {'seq': mx.nd.array(arr)})
fwd = bound.forward()
return fwd[0].asnumpy()
# test cases
assert_array_equal(test_wrapper(arr, xpu, use_sequence_length=False), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[3, 3], use_sequence_length=True), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 2], use_sequence_length=True), arr2)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 3], use_sequence_length=True), arr3)
assert_array_equal(test_wrapper(arr_4, xpu, sequence_length=seq_len_1, use_sequence_length=True), arr_5)
@with_seed()
def test_sequence_reverse():
check_sequence_func("reverse", axis=0)
check_sequence_reverse(mx.cpu())
def mathematical_core_binary(name,
forward_mxnet_call,
forward_numpy_call,
backward_numpy_call1,
backward_numpy_call2,
data1_init=2.,
data2_init=3.,
grad_init=2.):
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3, 4)
data_tmp2 = np.random.rand(3, 4)
data_tmp1[:] = data1_init
data_tmp2[:] = data2_init
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = forward_mxnet_call(data1, data2)
exe_test = test.bind(default_context(), args=[arr_data1, arr_data2], args_grad=[arr_grad1, arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp1, data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = grad_init
npout_grad1 = npout_grad * backward_numpy_call1(data_tmp1, data_tmp2)
npout_grad2 = npout_grad * backward_numpy_call2(data_tmp1, data_tmp2)
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
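# mathematical_core checks a unary operator: the MXNet symbol's forward output is compared
# with the NumPy reference, and the backward gradient is compared with
# grad_init * backward_numpy_call(x) evaluated at the constant input data_init.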
def mathematical_core(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
test = forward_mxnet_call(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
npout_grad = out_grad.asnumpy()
temp = backward_numpy_call(data_tmp)
npout_grad = npout_grad * temp
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
@with_seed()
def test_special_functions_using_scipy():
try:
from scipy import special as scipy_special
    except ImportError:
print("Could not import scipy. Skipping unit tests for special functions")
return
# gamma
mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)
# gammaln
mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x), 0.5, 0.5)
# erf
mathematical_core("erf", lambda x: mx.sym.erf(x), lambda x: scipy_special.erf(x),
lambda x: 2.0 / math.sqrt(math.pi) * np.exp(-(x ** 2)), 0.5, 0.5)
# erfinv
mathematical_core("erfinv", lambda x: mx.sym.erfinv(x), lambda x: scipy_special.erfinv(x),
lambda x: 0.5 * math.sqrt(math.pi) * np.exp(scipy_special.erfinv(x) ** 2), 0.5, 0.5)
def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
test = forward_mxnet_call(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_mathematical():
# rsqrt
mathematical_core("rsqrt",
lambda x: mx.sym.rsqrt(x),
lambda x: 1 / np.sqrt(x),
lambda x: -(1.0 / (2.0 * x * np.sqrt(x))))
# tan
mathematical_core("tan", lambda x: mx.sym.tan(x), lambda x: np.tan(x), lambda x: np.tan(x) ** 2 + 1)
# arcsin
mathematical_core("arcsin", lambda x: mx.sym.arcsin(x), lambda x: np.arcsin(x),
lambda x: 1. / (1. - x ** 2) ** (1. / 2.), 0.5, 0.5)
# arccos
mathematical_core("arccos", lambda x: mx.sym.arccos(x), lambda x: np.arccos(x),
lambda x: -1. / (1. - x ** 2.) ** (1. / 2.), 0.5, 0.5)
# arctan
mathematical_core("arctan", lambda x: mx.sym.arctan(x), lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.), 0.5, 0.5)
# hypot
mathematical_core_binary("hypot",
lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
0.5, 0.5, 0.5)
# hypot scalar
mathematical_core("hypot scalar",
lambda x: mx.sym.hypot(x, 3),
lambda x: np.hypot(x, 3),
lambda x: x / np.hypot(x, 3),
0.5, 0.5)
# degrees
mathematical_core("degrees",
lambda x: mx.sym.degrees(x),
lambda x: np.degrees(x),
lambda x: 180./np.pi,
0.5, 0.5)
# radians
mathematical_core("radians",
lambda x: mx.sym.radians(x),
lambda x: np.radians(x),
lambda x: np.pi / 180.,
0.6, 1)
# sinh
mathematical_core("sinh", lambda x: mx.sym.sinh(x), lambda x: np.sinh(x), lambda x: np.cosh(x))
# cosh
mathematical_core("cosh", lambda x: mx.sym.cosh(x), lambda x: np.cosh(x), lambda x: np.sinh(x), 5, 5)
# tanh
mathematical_core("tanh", lambda x: mx.sym.tanh(x), lambda x: np.tanh(x), lambda x: 1. - np.tanh(x) ** 2, 0.5, 1)
# arcsinh
mathematical_core("arcsinh", lambda x: mx.sym.arcsinh(x), lambda x: np.arcsinh(x),
lambda x: 1./(x**2 + 1.)**(1./2.))
# arccosh
mathematical_core("arccosh", lambda x: mx.sym.arccosh(x), lambda x: np.arccosh(x),
lambda x: 1./(x**2 - 1.)**(1./2.))
# arctanh
mathematical_core("arctanh", lambda x: mx.sym.arctanh(x), lambda x: np.arctanh(x),
lambda x: -1./(x**2 - 1.), 0.5)
# log1p
mathematical_core("log1p", lambda x: mx.sym.log1p(x), lambda x: np.log1p(x),
lambda x: 1. / (1.0 + x), 0.5, 0.5)
# expm1
mathematical_core("expm1", lambda x: mx.sym.expm1(x), lambda x: np.expm1(x),
lambda x: np.exp(x), 0.5, 0.5)
# log10
mathematical_core("log10", lambda x: mx.sym.log10(x), lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)))
# log2
mathematical_core("log2", lambda x: mx.sym.log2(x), lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)))
# rint
rounding("rint", lambda x: mx.sym.rint(x), lambda x: np.rint(x))
# fix
rounding("fix", lambda x: mx.sym.fix(x), lambda x: np.fix(x))
@with_seed()
def test_special_functions_using_scipy():
try:
from scipy import special as scipy_special
    except ImportError:
print("Could not import scipy. Skipping unit tests for special functions")
return
# gamma
mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)
# gammaln
mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x), 0.5, 0.5)
@with_seed()
def test_clip():
data = mx.symbol.Variable('data')
shape = (30, 30)
data_tmp = np.random.uniform(-1, 1, shape).astype('float32')
test = mx.sym.clip(data, a_max=0.6, a_min=-0.6)
check_symbolic_forward(test, [data_tmp], [np.clip(data_tmp, -0.6, 0.6)])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)],
[np.where(data_tmp <= 0.6, [1], [0]) * np.where(data_tmp >= -0.6, [1], [0])])
# Test monitor on symbol using clip
def simple_callback(name, arr):
pass
exe = test.simple_bind(ctx=mx.current_context(), data=shape)
exe.set_monitor_callback(simple_callback, monitor_all=True)
exe.forward(is_train=True)
exe.backward(out_grads=mx.nd.ones(shape))
mx.nd.waitall()
@with_seed()
def test_init():
def test_basic_val_init(sym_func, np_func, shape, dtype):
x = sym_func(shape=shape, dtype=dtype)
exe = x.bind(default_context(), args=[], args_grad=[])
exe.forward(is_train=True)
assert_almost_equal(exe.outputs[0], np_func(shape=shape, dtype=dtype))
assert exe.outputs[0].asnumpy().dtype == dtype
def test_arange():
# General Random Tests
dtype_list = [np.float32, np.float64, np.int32, np.uint8]
config_list = [(10,),
(0, 10),
(5, 100, 4),
(50, -50, -2),
(-100, 100, 1),
(1.3, 456.6, 1.3)]
for dtype in dtype_list:
for config in config_list:
repeats = random.choice([1, 3])
np_out = np.repeat(np.arange(*config, dtype=dtype), repeats)
nd_out = mx.nd.arange(*config, repeat=repeats, dtype=dtype)
assert_almost_equal(np_out, nd_out)
def test_arange_inferstop():
s = mx.sym.arange(start=0, stop=None, infer_range=True)
s = mx.sym.elemwise_add(s, mx.sym.zeros(shape=[5]))
exe = s.bind(ctx=mx.cpu(), args={})
exe.forward()
assert_almost_equal(exe.outputs[0], np.array([0,1,2,3,4]))
def test_arange_like():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
axis_list = [0, -1]
for sh in shape_list:
for axis in axis_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0, axis=axis)
np_out = np.arange(start=0, stop=sh[axis])
assert_almost_equal(nd_out.asnumpy(), np_out)
def test_arange_like_without_axis():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
for sh in shape_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0)
np_out = np.arange(start=0, stop=val.size)
assert_almost_equal(nd_out.asnumpy(), np_out.reshape(sh))
test_basic_val_init(mx.sym.zeros, np.zeros, (3, 4), np.float32)
test_basic_val_init(mx.sym.ones, np.ones, 3, np.int32)
test_basic_val_init(mx.sym.ones, np.ones, (2, 2, 3), np.float16)
test_arange()
test_arange_inferstop()
test_arange_like()
test_arange_like_without_axis()
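# test_order validates sort/argsort/topk/argmax/argmin against gt_topk, a NumPy reference
# that returns the top-k values, their indices, or a 0/1 mask along the requested axis;
# a large 100 x 300096 matrix additionally exercises topk with ret_typ="indices".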
@with_seed()
def test_order():
ctx = default_context()
def gt_topk(dat, axis, ret_typ, k, is_ascend):
if ret_typ == "indices":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
elif ret_typ == "value":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(np.sort(dat, axis=axis), axis=axis, indices=indices, mode='wrap')
else:
assert dat.shape == (5, 5, 5, 5)
assert axis is None or axis == 1
ret = np.zeros(dat.shape)
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
gt_argsort = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
if axis is None:
ret.ravel()[gt_argsort] = 1
else:
for i in range(5):
for j in range(5):
for k in range(5):
ret[i, gt_argsort[i, :, j, k], j, k] = 1
return ret
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(np.float32)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
def get_large_matrix():
data = np.array([np.arange(300096).astype(np.float32)])
data = np.repeat(data, 100, axis=0)
np.apply_along_axis(np.random.shuffle, 1, data)
return data
large_matrix_npy = get_large_matrix()
for axis in [1, 3, None]:
for is_ascend in [True, False]:
b = mx.sym.sort(a, axis=axis, is_ascend=is_ascend)
if axis is None:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=a_npy.size, is_ascend=is_ascend)
else:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=5, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5,
is_ascend=is_ascend)])
b = mx.sym.argsort(a, axis=1, is_ascend=False)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=5,
is_ascend=False)])
b = mx.sym.argmax(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=False)])
b = mx.sym.argmin(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=True)])
for dtype in [np.float16, np.float32, np.float64]:
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(dtype)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
for axis in [1, 3, None]:
K = [1, 3, 5, 7] if axis is None else [1, 3, 5]
for k in K:
for is_ascend in [True, False]:
b = mx.sym.topk(a, axis=axis, is_ascend=is_ascend, ret_typ="value", k=k)
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=k, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5, is_ascend=is_ascend)])
b = mx.sym.topk(a, axis=3, is_ascend=is_ascend, ret_typ="indices", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 3))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=3, ret_typ="indices", k=3,
is_ascend=False)])
b = mx.sym.topk(a, axis=1, is_ascend=True, ret_typ="mask", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="mask", k=3,
is_ascend=True)])
@with_seed()
def test_blockgrad():
a = mx.sym.Variable('a')
b = mx.sym.BlockGrad(a)
exe = b.simple_bind(ctx=default_context(), a=(10, 10))
a_npy = np.random.rand(10, 10)
exe.forward(is_train=True, a=a_npy)
assert_almost_equal(exe.outputs[0], a_npy)
exe.backward() # No error if BlockGrad works
@with_seed()
def test_take():
def grad_helper(grad_in, axis, idx):
if axis == 0:
if axis == len(grad_in.shape) - 1:
grad_in[idx] += 1.0
else:
grad_in[idx, :] += 1.0
elif axis == 1:
if axis == len(grad_in.shape) - 1:
grad_in[:, idx] += 1.0
else:
grad_in[:, idx, :] += 1.0
elif axis == 2:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, idx] += 1.0
else:
grad_in[:, :, idx, :] += 1.0
elif axis == 3:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, :, idx] += 1.0
else:
grad_in[:, :, :, idx, :] += 1.0
elif axis == 4:
grad_in[:, :, :, :, idx] += 1.0
else:
raise ValueError("axis %d is not supported..." % axis)
def check_output_n_grad(data_shape, idx_shape, axis, mode, out_of_range=True):
data = mx.sym.Variable('a')
idx = mx.sym.Variable('indices')
idx = mx.sym.BlockGrad(idx)
result = mx.sym.take(a=data, indices=idx, axis=axis, mode=mode)
exe = result.simple_bind(default_context(), a=data_shape,
indices=idx_shape, axis=axis, mode=mode)
data_real = np.random.normal(size=data_shape).astype('float32')
if out_of_range:
idx_real = np.random.randint(low=-data_shape[axis], high=data_shape[axis], size=idx_shape)
if mode == 'raise':
idx_real[idx_real == 0] = 1
idx_real *= data_shape[axis]
else:
idx_real = np.random.randint(low=0, high=data_shape[axis], size=idx_shape)
if axis < 0:
axis += len(data_shape)
grad_out = np.ones((data_shape[0:axis] if axis > 0 else ()) + idx_shape + (data_shape[axis+1:] if axis < len(data_shape) - 1 else ()), dtype='float32')
grad_in = np.zeros(data_shape, dtype='float32')
exe.arg_dict['a'][:] = mx.nd.array(data_real)
exe.arg_dict['indices'][:] = mx.nd.array(idx_real)
exe.forward(is_train=True)
if out_of_range and mode == 'raise':
try:
mx_out = exe.outputs[0].asnumpy()
except MXNetError as e:
return
else:
# Did not raise exception
assert False, "did not raise %s" % MXNetError.__name__
assert_almost_equal(exe.outputs[0], np.take(data_real, idx_real, axis=axis, mode=mode))
for i in np.nditer(idx_real):
if mode == 'clip':
i = np.clip(i, 0, data_shape[axis])
grad_helper(grad_in, axis, i)
exe.backward([mx.nd.array(grad_out)])
assert_almost_equal(exe.grad_dict['a'], grad_in)
def check_autograd_req():
row_len = 2
col_len = 8
shape = (row_len, col_len)
sc = mx.nd.random.uniform(-1.0, 1.0, shape=shape, dtype="float32")
sc.attach_grad()
i = mx.nd.array([0], dtype="int64")
j = mx.nd.array([0], dtype="int64")
with mx.autograd.record(train_mode=True):
xs = []
for _ in range(row_len):
x_i = []
for _ in range(col_len):
x_ij = sc.take(i).squeeze(axis=0).take(j).squeeze(axis=0)
x_i.append(x_ij)
j = j + 1
i = i + 1
j = j - col_len # reset j
xs.append(mx.nd.stack(*x_i))
x = mx.nd.stack(*xs)
x = x.sum()
x.backward()
assert_almost_equal(np.ones(sc.grad.shape), sc.grad)
for mode in ['clip', 'wrap', 'raise']:
for data_ndim in range(1, 5):
for idx_ndim in range(1, 4):
for axis in range(-data_ndim, data_ndim):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=1, high=5), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=1, high=5), )
if mode == 'raise':
check_output_n_grad(data_shape, idx_shape, axis, 'raise', False)
check_output_n_grad(data_shape, idx_shape, axis, mode)
check_autograd_req()
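# test_grid_generator checks the affine mode against an identity transform (the generated
# grid, rescaled from [-1, 1] to pixel coordinates, should reproduce a meshgrid) and the
# warp mode against a constant flow of ones, including analytical gradients and
# grad_req='add' accumulation.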
@with_seed()
def test_grid_generator():
# transform_type = affine
test_case = [(20,21),(4,3),(6,12),(15,17)]
for target_shape in test_case:
affine_matrix = mx.sym.Variable('affine')
grid = mx.sym.GridGenerator(data=affine_matrix,transform_type='affine', target_shape=target_shape)
exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='write')
# check forward
exe.arg_dict['affine'][:] = np.array([[1.0,0,0,0,1.0,0]])
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0]), np.arange(target_shape[1]))
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
tmp = np.zeros((3,target_shape[0]*target_shape[1]))
tmp[0] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) % target_shape[1]) * (2.0 / (target_shape[1]-1))
tmp[1] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) // target_shape[1]) * (2.0 / (target_shape[0]-1))
tmp[2] = 1
grad_est = np.dot(out_grad[0].reshape(2,target_shape[0]*target_shape[1]),tmp.T).reshape(1,6)
assert_almost_equal(exe.grad_dict['affine'], grad_est, rtol=1e-3, atol=1e-5)
# check addto
exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='add')
grid_grad_npy = np.random.normal(size=exe.grad_dict['affine'].shape)
exe.grad_dict['affine'][:] = grid_grad_npy
exe.arg_dict['affine'][:] = np.array([[1.0, 0, 0, 0, 1.0, 0]])
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['affine'], grad_est + grid_grad_npy, rtol=1e-2, atol=1e-5)
# transform_type = warp
test_case = [(12,21),(4,3),(6,12)]
for target_shape in test_case:
flow = mx.sym.Variable('flow')
grid = mx.sym.GridGenerator(data=flow,transform_type='warp', target_shape=target_shape)
exe = grid.simple_bind(ctx=default_context(), flow=(1,2)+target_shape, grad_req='write')
# check forward
exe.arg_dict['flow'][:] = np.ones((1,2)+target_shape)
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0])+1, np.arange(target_shape[1])+1)
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
grad_est = np.zeros((1,2)+target_shape)
grad_est[0,0] = out_grad[0,0] / ((target_shape[1]-1.0) / 2.0)
grad_est[0,1] = out_grad[0,1] / ((target_shape[0]-1.0) / 2.0)
assert_almost_equal(exe.grad_dict['flow'], grad_est, rtol=1e-3)
# check addto
exe_add = grid.simple_bind(ctx=default_context(), flow=(1, 2) + target_shape, grad_req='add')
flow_grad_npy = np.random.normal(size=exe_add.grad_dict['flow'].shape)
exe_add.arg_dict['flow'][:] = np.ones((1, 2) + target_shape)
exe_add.grad_dict['flow'][:] = flow_grad_npy
exe_add.forward(is_train=True)
exe_add.backward(mx.nd.array(out_grad))
assert_almost_equal(exe_add.grad_dict['flow'], grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5)
@with_seed()
def test_index2d():
for _ in range(30):
n = np.random.randint(1, 100)
m = np.random.randint(1, 500)
data = mx.random.uniform(-1, 1, shape=(n, m), ctx=default_context())
x = mx.nd.array(np.random.randint(0, m, size=n), ctx=default_context(), dtype='int32')
r = mx.nd.batch_take(data, x)
assert_almost_equal(r, data.asnumpy()[np.arange(n), x.asnumpy()])
@with_seed()
def test_cast():
for srctype in [np.int32, np.float32, np.float16]:
for dsttype in [np.float32, np.int32, np.float16]:
x = mx.sym.Variable('x', dtype=srctype)
y = mx.sym.Cast(x, dtype=dsttype)
exe = y.simple_bind(ctx=default_context(), x=(10, 10))
assert exe.arg_arrays[0].dtype == srctype
assert exe.outputs[0].dtype == dsttype
X = np.random.uniform(-10, 10, size=(10, 10))
exe.arg_arrays[0][:] = X
exe.forward(is_train=True)
exe.backward(mx.nd.array(X, dtype=dsttype, ctx=default_context()))
assert_almost_equal(exe.outputs[0], X.astype(srctype).astype(dsttype), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_arrays[0], X.astype(dsttype).astype(srctype), rtol=1e-3, atol=1e-5)
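# get_cast_op_data yields float32 values around every representable float16 mantissa
# (and the midpoints between them) across the full float32 exponent range, plus NaN,
# to exercise round-to-nearest-even behaviour of float32->float16 casts.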
def get_cast_op_data():
FP16_FRACTION_BITS = 10
FP32_FRACTION_BITS = 23
FP32_EXP_MIN = -126
FP32_EXP_MAX = 127
# generate test cases in the vicinity of representable float16 mantissas
# and mid-way between them, but over the full range of float32 exponents.
for sign_bit in [0, 1]:
for exponent in range(FP32_EXP_MIN - FP32_FRACTION_BITS - 1, FP32_EXP_MAX + 2):
denominator = 2**(FP16_FRACTION_BITS + 1)
for numerator in range(0, denominator):
fraction = numerator / float(denominator)
for y in [-1.0, 0.0, 1.0]:
small_delta = y / 2**FP32_FRACTION_BITS
val = (-1.0)**sign_bit * 2.0**exponent * (1.0 + fraction + small_delta)
yield val
# Add np.nan as a final data value to process
yield np.nan
# Test requires all platforms to round float32->float16 with same round-to-nearest-even policy.
@with_seed()
def test_cast_float32_to_float16():
input_np = np.array(list(get_cast_op_data())).astype(np.float32)
# The intermediate cast to np.float64 below gets around a numpy rounding bug that is fixed
# as of numpy 1.17 by PR https://github.com/numpy/numpy/pull/12722
expected_output = input_np.astype(np.float64).astype(np.float16)
def check_cast(op, input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float32)
sym = op(x, dtype=np.float16)
ctx = default_context()
exe = sym.bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float32, ctx=ctx)})
assert exe.arg_arrays[0].dtype == np.float32
assert exe.outputs[0].dtype == np.float16
exe.forward(is_train=True)
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
check_cast(mx.sym.Cast, input_np, expected_output)
if default_context().device_type == 'gpu':
check_cast(mx.sym.amp_cast, input_np, expected_output)
@with_seed()
def test_amp_multicast():
if default_context().device_type == 'cpu':
return
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res.bind(ctx, {'x': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx),
'y': mx.nd.random.uniform(shape=(3, 3), dtype=np.float32, ctx=ctx),
'z': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
out1, out2, out3 = exe.outputs
assert out1.asnumpy().dtype == np.float32
assert out2.asnumpy().dtype == np.float32
assert out3.asnumpy().dtype == np.float32
def check_amp_multicast(input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res.bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float16, ctx=ctx),
'y': mx.nd.array(input_np, dtype=np.float32, ctx=ctx),
'z': mx.nd.array(input_np, dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
input_np = np.array(list(get_cast_op_data()), dtype=np.float16)
expected_output = input_np.astype(np.float32)
check_amp_multicast(input_np, expected_output)
@with_seed()
def test_all_finite():
data = mx.sym.Variable("data", dtype=np.float32)
data2 = mx.sym.Variable("data2", dtype=np.float32)
finite_arr = mx.nd.array([[0, 0]])
inf_arr = mx.nd.array([[np.inf, np.inf]])
z = mx.sym.all_finite(data)
ctx = default_context()
exe = z.bind(ctx, {'data': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
exe = z.bind(ctx, {'data': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z.bind(ctx, {'data': finite_arr, 'data2': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z.bind(ctx, {'data': finite_arr, 'data2': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
@with_seed()
def test_repeat():
def test_repeat_forward():
ndim_max = 6 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
repeats = 3
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(0, ndim):
shape += (np.random.randint(1, size_max+1), )
a = np.random.random_sample(size=shape)
aa = np.repeat(a, repeats)
b = mx.nd.array(a, ctx=default_context())
bb = mx.nd.repeat(b, repeats)
assert_almost_equal(aa, bb)
for axis in range(0, ndim):
aa = np.repeat(a, repeats, axis)
bb = mx.nd.repeat(b, repeats, axis)
assert_almost_equal(aa, bb)
def test_repeat_backward(axis):
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=axis)
exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * repeats)
if axis == 0:
npout_grad = npout_grad.reshape(n1 * repeats, n2)
elif axis == 1:
npout_grad = npout_grad.reshape(n1, n2 * repeats)
else:
raise RuntimeError("Invalid axis value")
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
if axis == 0:
for i in range(shape[0]):
for j in range(shape[1]):
k = i * repeats
expected_grad[i][j] = sum(npout_grad[k:k + repeats, j])
elif axis == 1:
for j in range(shape[1]):
for i in range(shape[0]):
k = j * repeats
expected_grad[i][j] = sum(npout_grad[i, k:k + repeats])
else:
raise RuntimeError("Invalid axis value")
assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
def test_repeat_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=0)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-3, rtol=1e-2)
test_repeat_forward()
test_repeat_backward(axis=0)
test_repeat_backward(axis=1)
test_repeat_numeric_gradient()
@with_seed()
def test_reverse():
data = mx.symbol.Variable('data')
shape = (5, 5, 5)
data_tmp = np.random.uniform(-1, 1, shape)
test = mx.sym.reverse(data, axis=[1, 2])
grad = np.random.uniform(-1, 1, shape)
check_numeric_gradient(test, [data_tmp], numeric_eps=2E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp[:, ::-1, ::-1]])
check_symbolic_backward(test, [data_tmp], [grad], [grad[:, ::-1, ::-1]])
@with_seed()
def test_tile():
def test_normal_case():
ndim_min = 1
ndim_max = 5 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
length_max = 3 # max length of reps
rep_max = 10 # max number of tiling in each dim
for ndim in range(ndim_min, ndim_max+1):
shape = []
for i in range(1, ndim+1):
shape.append(np.random.randint(1, size_max+1))
shape = tuple(shape)
a = np.random.randint(0, 100, shape)
b = mx.nd.array(a, dtype=a.dtype)
reps_len = np.random.randint(1, length_max+1)
reps_tuple = ()
for i in range(1, reps_len):
reps_tuple += (np.random.randint(1, rep_max), )
reps_array = np.asarray(reps_tuple)
a_tiled = np.tile(a, reps_array)
b_tiled = mx.nd.tile(b, reps_tuple).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_tensor():
shape = (2, 3, 0, 4)
with mx.np_shape():
a = np.array([], dtype=np.int32).reshape(shape)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
reps = (2, 4, 6)
a_tiled = np.tile(a, reps)
b_tiled = mx.nd.tile(b, reps).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_reps():
a = np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
a_tiled = np.tile(a, ())
b_tiled = mx.nd.tile(b, ()).asnumpy()
assert same(a_tiled, b_tiled)
def test_tile_backward():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * reps1 * reps2).reshape(n1 * reps1, n2 * reps2)
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
expected_grad[i][j] += sum(sum(npout_grad[i:(n1 * reps1):reps1, j:(n2 * reps2):reps2]))
assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
def test_tile_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-2, rtol=1e-2)
def test_invalid_reps():
data = mx.nd.arange(16).reshape((4, 4))
assert_exception(mx.nd.tile, MXNetError, data, (1, 2, -3))
assert_exception(mx.nd.tile, MXNetError, data, (1, 0, 3))
test_normal_case()
with mx.np_shape():
test_empty_tensor()
test_empty_reps()
test_tile_backward()
test_tile_numeric_gradient()
test_invalid_reps()
@with_seed()
def test_one_hot():
def test_normal_case(index_type=np.int32):
ndim_max = 6
dim_size_max = 20
depth = int(dim_size_max / 2)
on_value = 1
off_value = 0
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(1, ndim+1):
shape += (np.random.randint(1, dim_size_max+1), )
indices = np.random.randint(-dim_size_max, dim_size_max+1,
size=np.prod(shape)).reshape(shape)
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=index_type),
depth=depth, dtype=np.int32)
expected_array = np.zeros((np.prod(shape), depth), dtype=np.int32)
expected_array[:] = off_value
indices_1d = indices.flatten()
row = 0
for idx in indices_1d:
if 0 <= idx < depth:
expected_array[row, idx] = on_value
row += 1
expected_array = expected_array.reshape(shape + (depth, ))
one_hot_array = mx_one_hot_array.asnumpy()
assert same(expected_array, one_hot_array)
def test_empty_indices():
shape = (2, 0, 9, 3)
with mx.np_shape():
indices = np.array([]).reshape(shape)
depth = 10
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32
).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth,))
assert same(expected_array, mx_one_hot_array)
def test_zero_depth():
shape = (2, 4, 9, 3)
indices = np.ones(shape)
depth = 0
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
assert same(expected_array, mx_one_hot_array)
test_normal_case(index_type=np.int32)
test_normal_case(index_type=np.float64)
test_normal_case(index_type=np.float32)
test_normal_case(index_type=np.float16)
with mx.np_shape():
test_empty_indices()
test_zero_depth()
@with_seed()
def test_where():
def get_forward_expected_output(condition, x, y):
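        # NumPy reference for mx.sym.where: condition either has the same shape
        # as x/y (elementwise select) or is a vector of length x.shape[0] that
        # selects whole rows.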
original_shape = x.shape
out = np.zeros(original_shape)
if condition.shape == x.shape:
for index, c in np.ndenumerate(condition):
if c != 0:
out[index] = x[index]
else:
out[index] = y[index]
elif condition.shape == (x.shape[0], ):
s = x.shape
m = s[0]
n = int(np.prod(s)/s[0])
x2d = x.reshape((m, n))
y2d = y.reshape((m, n))
out = out.reshape((m, n))
for i in range(0, m):
if condition[i] != 0:
for j in range(0, n):
out[i, j] = x2d[i, j]
else:
for j in range(0, n):
out[i, j] = y2d[i, j]
else:
raise RuntimeError("Invalid condition shape for where op")
out = out.reshape(original_shape)
return out
def get_forward_inputs_same_shape(shape):
condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape)
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_forward_inputs_condition_vector(shape):
condition_np = np.random.randint(0, 2, shape[0])
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_backward_input(shape):
return np.random.randint(20, 30, np.prod(shape)).reshape(shape)
def get_backward_expected_outputs(grad_in, condition):
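        # Reference gradients: grad_in is routed to x where condition != 0 and
        # to y otherwise; the condition itself gets a zero gradient.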
shape = grad_in.shape
grad_cond = np.zeros(condition.shape)
grad_x = np.empty(shape)
grad_y = np.empty(shape)
for index, c in np.ndenumerate(condition):
if 0 != c:
grad_x[index] = grad_in[index]
grad_y[index] = 0
else:
grad_x[index] = 0
grad_y[index] = grad_in[index]
return grad_cond, grad_x, grad_y
def test_where_helper(shape, same_shape):
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
out_expected = get_forward_expected_output(condition_np, x_np, y_np)
grad_in_np = get_backward_input(shape)
grad_expected_cond, grad_expected_x, grad_expected_y\
= get_backward_expected_outputs(grad_in_np, condition_np)
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
grad_in_mx = mx.nd.array(grad_in_np, dtype=np.int32)
where_sym = mx.sym.where(condition, x, y)
# test req='write'
where_exe_write = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='write')
# test forward req='write'
outputs = where_exe_write.forward(is_train=True, condition=condition_np,
x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='write'
where_exe_write.backward(grad_in_mx)
assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x)
assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y)
assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond)
# test req='add'
x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape)
y_grad_init = np.random.randint(40, 50, np.prod(shape)).reshape(shape)
where_exe_add = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='add')
where_exe_add.grad_dict['x'][:] = x_grad_init
where_exe_add.grad_dict['y'][:] = y_grad_init
# test forward req='add'
outputs = where_exe_add.forward(is_train=True, condition=condition_np, x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='add'
where_exe_add.backward(grad_in_mx)
x_ograd = where_exe_add.grad_dict['x'].asnumpy()
y_ograd = where_exe_add.grad_dict['y'].asnumpy()
assert same(x_ograd, grad_expected_x+x_grad_init)
assert same(y_ograd, grad_expected_y+y_grad_init)
def test_where_numeric_gradient(shape, same_shape):
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y'])
def test_invalid_shape():
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
assert_exception(lambda: where_sym.eval(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
assert_exception(lambda: mx.nd.where(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
def test_1d_cond():
cond = mx.nd.array([1, 0, 1])
x = mx.nd.array([[2, 3], [4, 5], [6, 7]])
y = mx.nd.array([[7, 8], [9, 10], [10, 11]])
expect_out = np.array([[2, 3], [9, 10], [6, 7]])
out = mx.nd.where(cond, x, y).asnumpy()
        assert (expect_out == out).all()
test_where_helper((5, 9), True)
test_where_helper((5, 9), False)
test_where_helper((5, 7, 9), True)
test_where_helper((5, 7, 9), False)
test_where_helper((10, 8, 15, 3), True)
test_where_helper((10, 8, 15, 3), False)
test_where_numeric_gradient((5, 9), True)
test_where_numeric_gradient((5, 9), False)
test_where_numeric_gradient((5, 7, 9), True)
test_where_numeric_gradient((5, 7, 9), False)
test_invalid_shape()
test_1d_cond()
@with_seed()
def test_softmin():
for ndim in range(1, 5):
for dtype in [np.float16, np.float32, np.float64]:
rtol, atol = (1e-2, 5e-3) if dtype is np.float16 else (1e-3, 1e-3)
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape).astype(dtype)
data = data / 10 if dtype is np.float16 else data
sym = mx.sym.softmin(axis=axis)
expected_fwd = np_softmax(-data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], atol=atol, dtype=dtype)
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=rtol, atol=atol, grad_req=req, dtype=dtype)
if dtype is not np.float16:
check_numeric_gradient(sym, [data], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_new_softmax():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.softmax(axis=axis)
expected_fwd = np_softmax(data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd])
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=1e-2, atol=1e-3, grad_req=req)
check_numeric_gradient(sym, [data], rtol=1e-2, atol=1e-3)
@with_seed()
def test_softmax_with_temperature():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
data = np.random.uniform(-2, 2, size=shape)
for temp in range(1, 11):
sym = mx.sym.softmax(axis=0, temperature=temp)
expected_fwd = np_softmax(data, axis=0, temperature=temp)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], rtol=0.05, atol=1e-3)
check_symbolic_backward(sym, [data], [np.ones(shape)], [expected_bwd], rtol=0.05, atol=1e-3)
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
@with_seed()
def test_log_softmax():
for ndim in range(1, 5):
for _ in range(5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(0, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.log_softmax(axis=axis-ndim)
check_symbolic_forward(sym, [data], [np.log(np_softmax(data, axis=axis)+1e-20)])
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
def test_softmax_with_large_inputs():
def softmax_forward(input_data, true_output):
data = mx.sym.Variable('data')
out1 = data.softmax(axis=1)
exec1 = out1.bind(default_context(), args={'data': input_data})
exec1.forward()[0].wait_to_read()
ndarr = exec1.outputs[0][0][0][0]
assert_almost_equal(ndarr, true_output, rtol=1e-5, atol=1e-5)
softmax_forward(mx.nd.array([[[[-1e30,-1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[1e30,1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[-3.4e38,-3.4e38]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[3.4e38,3.4e38]]]]), np.array([1.0,1.0]))
@with_seed()
def test_softmax_dtype():
def check_dtypes_almost_equal(op_name,
atol, rtol,
grad_atol, grad_rtol,
idtype, ref_dtype, odtype=None):
op = getattr(mx.nd, op_name)
input_data = mx.random.uniform(shape=(100, 500))
dtype_input = input_data.astype(idtype)
ref_input = input_data.astype(ref_dtype)
dtype_input.attach_grad()
ref_input.attach_grad()
with mx.autograd.record():
dtype_softmax = op(dtype_input, axis=-1, dtype=odtype)
ref_softmax = op(ref_input, axis=-1, dtype=odtype)
assert_almost_equal(dtype_softmax, ref_softmax, rtol=rtol, atol=atol)
dtype_softmax.backward()
ref_softmax.backward()
assert_almost_equal(dtype_input.grad, ref_input.grad, rtol=grad_rtol, atol=grad_atol)
import sys
is_windows = sys.platform.startswith('win')
enforce_safe_acc = os.environ.get("MXNET_SAFE_ACCUMULATION", "0")
if not is_windows or enforce_safe_acc == "1":
os.environ["MXNET_SAFE_ACCUMULATION"] = "1"
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64', 'float64')
@with_seed()
def test_softmax_with_length():
def np_softmax_with_length(data, length):
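        # NumPy reference: softmax is applied along axis 1 only over the first
        # length[i, j] entries; the remaining positions stay zero.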
res = np.zeros(data.shape)
for i in range(length.shape[0]):
for j in range(length.shape[1]):
leng = int(length[i, j])
res[i, 0:leng, j] = np_softmax(data[i, 0:leng, j])
return res
ndim = 3
shape = rand_shape_nd(ndim, dim=10)
len_shape = list(shape)
del len_shape[1]
len_shape = tuple(len_shape)
for dtype in [np.float16, np.float32, np.float64]:
mx_data = rand_ndarray(shape, dtype=dtype)
np_data = mx_data.asnumpy()
np_length = np.random.randint(1, shape[1] + 1, len_shape)
mx_length = mx.nd.array(np_length, dtype=np.int32)
np_out = np_softmax_with_length(np_data, np_length)
data = mx.sym.Variable("data")
length = mx.sym.Variable("length")
mx_sym = mx.sym.softmax(data=data, length=length, use_length=True, axis=1)
location = {"data": mx_data, "length": mx_length}
rtol = 1e-2 if dtype == np.float16 else 1e-3
atol = 1e-4 if dtype == np.float16 else 1e-5
check_symbolic_forward(mx_sym, location, [np_out], rtol=rtol, atol=atol, dtype="asnumpy")
check_symbolic_backward(mx_sym, location, [np.ones(shape, dtype=dtype)],
[np.zeros(shape), np.zeros(len_shape, dtype=np.int32)],
rtol=1e-2, atol=2e-3 if dtype == np.float16 else 1e-3, dtype="asnumpy")
@with_seed()
def test_pick():
def test_pick_helper(index_type=np.int32):
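        # Build the expected result with NumPy fancy indexing along `axis`;
        # 'wrap' mode draws out-of-range indices and wraps them modulo the axis
        # length, while the 'clip' case only draws in-range indices here.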
for mode in ['clip', 'wrap']:
ndim = np.random.randint(1, 5)
bshape = np.random.randint(1, 10, size=ndim)
axis = np.random.randint(0, ndim)
sshape = bshape.copy()
sshape[axis] = 1
data = np.random.uniform(-1, 1, size=bshape)
if mode == 'wrap':
index = np.random.randint(-2*bshape[axis], 2*bshape[axis], size=sshape)
else:
index = np.random.randint(0, bshape[axis], size=sshape)
exp = []
for i in range(ndim):
if i == axis:
if mode == 'wrap':
exp.append(index % bshape[axis])
else:
exp.append(index)
else:
ishape = [1 for _ in range(ndim)]
ishape[i] = bshape[i]
exp.append(np.arange(bshape[i]).reshape(ishape))
expected = data[exp]
data = mx.nd.array(data, dtype='float32')
index = mx.nd.array(index, dtype=index_type)
out = mx.nd.pick(data, index, axis=axis, keepdims=True, mode=mode)
assert_almost_equal(out.asnumpy(), expected)
data_holder = data
index_holder = index
data = mx.sym.Variable('data')
index = mx.sym.Variable('index')
sym = mx.sym.pick(data, index, axis=axis, keepdims=True, mode=mode)
check_numeric_gradient(sym, [data_holder, index_holder], grad_nodes=['data'])
test_pick_helper(np.int32)
test_pick_helper(np.float32)
def check_ctc_loss(acts, labels, loss_truth, contrib=False):
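    # Helper: build a (contrib or standard) ctc_loss symbol, check that training
    # and inference forward passes agree, compare against loss_truth when given,
    # and numerically check the gradient w.r.t. the activations.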
in_var = mx.sym.Variable('input')
labels_var = mx.sym.Variable('labels')
if contrib:
ctc = mx.sym.contrib.ctc_loss(in_var, labels_var)
else:
ctc = mx.sym.ctc_loss(in_var, labels_var)
acts_nd = mx.nd.array(acts, ctx=default_context())
labels_nd = mx.nd.array(labels, ctx=default_context())
exe = ctc.bind(ctx=default_context(), args=[acts_nd, labels_nd])
    # forward in training mode
    exe.forward(is_train=True)
    out_train = exe.outputs[0].copy()
    # forward in inference mode (no grad calc)
    exe.forward(is_train=False)
    out_test = exe.outputs[0]
    # make sure losses calculated in both modes are the same
    assert_almost_equal(out_train, out_test)
    # test against ground truth, if available
    if loss_truth is not None:
        assert_almost_equal(out_train, loss_truth)
# test grad
check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
@with_seed()
def test_ctc_loss():
    # Test 1: identical batch entries must give identical losses; also check against Torch WarpCTC
acts = np.array([
[[1.2, 3.4, 1.2, -0.1, -2.34], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[0.1, 0.2, 0.3, 0.22, 0.123], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14, -13, -12, -11]]],
dtype=np.float32)
labels = np.array([[2, 3, 0], [2, 3, 0]])
true_loss = np.array([4.04789, 4.04789], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts, labels, true_loss, contrib=contrib)
# Test 2:
acts2 = np.array([
[[-5, -4, -3, -2, -1], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[-10, -9, -8, -7, -6], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14.2, -13.5, -12.2, -11.22]]], dtype=np.float32)
labels2 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.float32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts2, labels2, true_loss, contrib=contrib)
    # Test 3: check using an integer type for the labels
labels3 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.int32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts2, labels3, true_loss, contrib=contrib)
@with_seed()
def test_ctc_loss_with_large_classes():
ctx = default_context()
num_classes = 6000
seq_len = 8
batch_size = 2
data = np.empty((num_classes, 0))
    for i in range(seq_len * batch_size):
row = np.roll(np.arange(num_classes, dtype=np.float32), i).reshape(num_classes, 1)
data = np.append(data, row/13, axis=1)
data = data.reshape(seq_len, batch_size, num_classes)
label = np.array([
[100, 200, 300, 400, 500, 0, 0, 0],
[1000, 2000, 3000, 4000, 0, 5000, 0, 0]], dtype=np.int32)
nd_data = mx.nd.array(data)
nd_label = mx.nd.array(label)
loss = mx.nd.ctc_loss(data=nd_data, label=nd_label)
expected_loss = np.array([688.02826, 145.34462])
assert_almost_equal(loss, expected_loss)
@with_seed()
def test_ctc_loss_grad():
def check_ctc_loss_grad(blank_label, contrib=False): # from tf
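        # Reference activations, losses and gradients below are taken from
        # TensorFlow's CTC test data; labels are padded and explicit data/label
        # lengths are used, for both blank_label conventions.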
vocab_size = 5
max_label_len = 5
        padding_mask = -1 + (blank_label == 'first')
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
inputs = [
np.vstack(
[input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
inputs = np.log(np.asarray(inputs, dtype=np.float32))
grad_truth = np.array([
np.vstack(
[gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
if blank_label == 'first':
inputs = np.roll(inputs, 1, axis=2)
grad_truth = np.roll(grad_truth, 1, axis=2)
labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
for x in [targets_0, targets_1]])+(blank_label == 'first'))
seq_lens = np.array([5, 5], dtype=np.int32)
label_lens = np.array([5, 4], dtype=np.int32)
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
with default_context():
data = mx.nd.array(inputs)
label = mx.nd.array(labels)
data.attach_grad()
with mx.autograd.record():
if contrib:
l = mx.contrib.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
else:
l = mx.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
l.backward()
assert_almost_equal(l, loss_truth, atol=1e-5, rtol=1e-5)
assert_almost_equal(data.grad, grad_truth, atol=1e-5, rtol=1e-5)
for contrib in [False, True]:
for label in ['first', 'last']:
check_ctc_loss_grad(label, contrib=contrib)
@with_seed()
def test_quantization_op():
min0 = mx.nd.array([0.0])
max0 = mx.nd.array([1.0])
a = mx.nd.array([[0.1392, 0.5928], [0.6027, 0.8579]])
qa, min1, max1 = mx.nd.contrib.quantize(a, min0, max0, out_type='int8')
a_ = mx.nd.contrib.dequantize(qa, min1, max1, out_type='float32')
qa_real = mx.nd.array([[18, 75], [77, 109]])
a_real = mx.nd.array([[0.14173228, 0.5905512], [0.6062992, 0.8582677]])
print(a_.asnumpy())
print(a_real.asnumpy())
assert same(qa.asnumpy(), qa_real.asnumpy())
assert_almost_equal(a_.asnumpy(), a_real.asnumpy(), rtol=1e-2)
@with_seed()
def test_index_copy():
x = mx.nd.zeros((5,3))
t = mx.nd.array([[1,2,3],[4,5,6],[7,8,9]])
index = mx.nd.array([0,4,2], dtype=np.int64)
tensor = mx.nd.array([[1,2,3],[0,0,0],[7,8,9],[0,0,0],[4,5,6]])
x_grad = mx.nd.array([[0,0,0],[1,1,1],[0,0,0],[1,1,1],[0,0,0]])
t_grad = mx.nd.array([[1,1,1],[1,1,1],[1,1,1]])
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
x.attach_grad()
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(x.grad.asnumpy(), x_grad.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
@with_seed()
def test_boolean_mask():
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 1, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.array([[4, 5, 6]])
expected_grad = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
# test 0-size output
mx.set_np_shape(True)
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 0, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.zeros((0, 3))
expected_grad = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
mx.set_np_shape(False)
# test gradient
shape = (100, 30)
a = mx.nd.random.randint(0, 100, shape=shape)
a.attach_grad()
bi = mx.nd.random.randint(0, 100, shape=shape[0:1]) > 50
ci = mx.nd.random.randint(0, 100, shape=shape[0:1]) < 50
mx_grad = mx.nd.zeros_like(a)
mx.autograd.mark_variables([a], [mx_grad], grad_reqs='add')
T = 3
for _ in range(T):
with mx.autograd.record():
b = mx.nd.contrib.boolean_mask(a, bi)
c = mx.nd.contrib.boolean_mask(a, ci)
su = b.sum() + c.sum()
su.backward()
grad = (bi + ci).asnumpy().reshape((-1,) + (1,) * (len(shape)-1))
grad = np.tile(grad, (1,) + shape[1:])
# T times
grad *= T
assert_allclose(a.grad.asnumpy(), grad)
a_np = a.asnumpy()
assert same(b.asnumpy(), a_np[bi.asnumpy().astype('bool')])
assert same(c.asnumpy(), a_np[ci.asnumpy().astype('bool')])
@with_seed()
def test_div_sqrt_dim():
data_tmp = np.random.normal(0, 1, (5, 10, 8))
data = mx.symbol.Variable('data')
test = mx.sym.contrib.div_sqrt_dim(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=1E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp / np.sqrt(data_tmp.shape[-1])])
@with_seed()
def test_reciprocal_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid possible division by 0 errors and finite difference method inaccuracies.
# Factor of 6 below set empirically, depends on eps.
# Issue exposed by seed 879579887.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 6*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.reciprocal(data)
check_numeric_gradient(test, [data_tmp], numeric_eps = eps)
check_symbolic_forward(test, [data_tmp], [np.reciprocal(data_tmp)])
@with_seed()
def test_cbrt_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid finite difference method inaccuracies due to infinite gradient at the origin.
# Factor of 4 below set empirically, depends on eps.
# Issue exposed by seed 553872106.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 4*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.cbrt(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=eps)
check_symbolic_forward(test, [data_tmp], [np.cbrt(data_tmp)])
@with_seed()
def test_rcbrt_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid possible division by 0 errors and finite difference method inaccuracies.
# Factor of 4 below set empirically, depends on eps.
# Issue exposed by seed 788174893.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 4*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.rcbrt(data)
check_numeric_gradient(test, [data_tmp], numeric_eps = eps)
check_symbolic_forward(test, [data_tmp], [1/np.cbrt(data_tmp)])
@with_seed()
def test_custom_op():
class Sqr(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
if in_data[0].stype == 'default':
aux[0][:] = 1
self.assign(out_data[0], req[0], in_data[0]*in_data[0])
else:
inp = in_data[0]
csr_m = inp.data * inp.data
out = mx.nd.sparse.csr_matrix((csr_m, inp.indices, inp.indptr), shape=inp.shape)
self.assign(out_data[0], req[0], out)
if (in_data[0].stype == 'csr'):
assert(isinstance(out_data[0], mx.nd.sparse.CSRNDArray))
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 2 * mx.nd.sparse.elemwise_mul(in_data[0], out_grad[0]))
if in_data[0].stype == 'default':
assert (aux[0].asnumpy() == 1).all()
@mx.operator.register("sqr")
class SqrProp(mx.operator.CustomOpProp):
def __init__(self):
super(SqrProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['data']
def list_outputs(self):
return ['output']
def list_auxiliary_states(self):
return ['aux']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], [in_shape[0]]
def infer_type(self, in_type):
return in_type, [in_type[0]], [in_type[0]]
def infer_storage_type(self, in_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default']
return ['csr'], ['csr'], ['csr']
def infer_storage_type_backward(self, ograd_stype, in_stype,
out_stype, igrad_stype, aux_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default'], ['default'], ['default']
return ['default'], ['csr'], ['csr'], ['csr'], ['csr']
def create_operator(self, ctx, shapes, dtypes):
return Sqr()
data = mx.symbol.Variable('data')
aux = mx.symbol.Variable('aux')
op = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.cast(data, dtype='float64')
op = mx.symbol.cast(op, dtype='float32')
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.Variable('data', stype='csr')
aux = mx.symbol.Variable('aux')
op2 = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = x.tostype('csr')
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op2, [x], [aux], grad_stype_dict={"data": "csr"})
x2 = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
x2 = x2.tostype('csr')
aux2 = mx.nd.zeros_like(x2)
x2.attach_grad()
with mx.autograd.record():
output = mx.nd.Custom(x2, aux2, name='sqr', op_type='sqr')
output.backward()
expected_output = mx.nd.sparse.square(x2)
expected_grad = 2 * x2
rtol = 1e-4
atol = 1e-6
assert_almost_equal(output, expected_output, rtol=rtol, atol=atol)
assert_almost_equal(x2.grad, expected_grad, rtol=rtol, atol=atol)
# test for backward compatibility, i.e. the correctness of default implementation of
# infer storage in custom operator
class Mult(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult")
class MultProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return Mult()
lhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
rhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
lhs.attach_grad()
rhs.attach_grad()
with mx.autograd.record():
y = mx.nd.Custom(lhs, rhs, name='mult', op_type='mult')
y.backward()
assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
class MultNoGrad(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult_no_grad")
class MultNoGradProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultNoGradProp, self).__init__(need_top_grad=False)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return MultNoGrad()
def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
return ograd_stype, in_stype, out_stype, igrad_stype, aux_stype
with mx.autograd.record():
y2 = mx.nd.Custom(lhs, rhs, name="mult_no_grad", op_type="mult_no_grad")
y2.backward()
assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
class NoInputOp(mx.operator.CustomOp):
def __init__(self, length, depth):
super(NoInputOp, self).__init__()
self.output = np.ones(shape=(length, depth), dtype=np.float32)
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], self.output)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register("no_input_op")
class NoInputOpProp(mx.operator.CustomOpProp):
def __init__(self, length, depth):
super(NoInputOpProp, self).__init__()
self.length = int(length)
self.depth = int(depth)
def list_arguments(self):
return []
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return [], [(self.length, self.depth)], []
def infer_type(self, in_type):
return [], [np.float32], []
def create_operator(self, ctx, shapes, dtypes):
return NoInputOp(length=self.length, depth=self.depth)
with mx.autograd.record():
x = mx.nd.Custom(length=10, depth=10, op_type="no_input_op")
assert_almost_equal(x, np.ones(shape=(10, 10), dtype=np.float32))
@unittest.skip("Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/17467")
@with_seed()
def test_custom_op_fork():
# test custom operator fork
# see https://github.com/apache/incubator-mxnet/issues/14396
class AdditionOP(mx.operator.CustomOp):
def __init__(self):
super(AdditionOP, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
out_data[0][:] = in_data[0] + in_data[1]
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
in_grad[0][:] = out_grad[0]
in_grad[1][:] = out_grad[0]
@mx.operator.register("AdditionOP")
class AdditionOPProp(mx.operator.CustomOpProp):
def __init__(self):
super(AdditionOPProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]]
def create_operator(self, ctx, shapes, dtypes):
return AdditionOP()
if not sys.platform.startswith('win'): # no fork in windows
def custom_add():
a = mx.nd.array([1, 2, 3])
b = mx.nd.array([4, 5, 6])
c = mx.nd.Custom(a, b, op_type='AdditionOP')
assert_almost_equal((a + b).asnumpy(), c.asnumpy())
custom_add()
from multiprocessing import Process
p = Process(target=custom_add)
p.daemon = True
p.start()
p.join(5)
assert not p.is_alive() and p.exitcode == 0
def _build_dot_custom(fun_forward, name):
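    # Register a minimal custom op named `name` whose forward delegates to
    # fun_forward and whose backward is a no-op; used below to test exception
    # propagation from custom operators.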
class Dot(mx.operator.CustomOp):
def __init__(self):
super(Dot, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
fun_forward(in_data, out_data)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register(name)
class DotProp(mx.operator.CustomOpProp):
def __init__(self):
super(DotProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [(in_shape[0][0], in_shape[1][1])]
def create_operator(self, ctx, shapes, dtypes):
return Dot()
@with_seed()
def test_custom_op_exc():
# test except handling
# see https://github.com/apache/incubator-mxnet/pull/14693
# 1. error in python code
def custom_exc1():
def f(in_data, out_data):
assert False
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot1')
a = mx.nd.zeros((4, 1))
b = mx.nd.zeros((1, 4))
c = mx.nd.Custom(a, b, op_type='Dot1')
c.wait_to_read()
assert_raises(MXNetError, custom_exc1)
# 2. error in pushing operator to engine
def custom_exc2():
def f(in_data, out_data):
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot2')
a = mx.nd.zeros((4, 2))
b = mx.nd.zeros((1, 4))
# trigger error by invalid input shapes of operands
c = mx.nd.Custom(a, b, op_type='Dot2')
c.wait_to_read()
assert_raises(MXNetError, custom_exc2)
# 3. error in real execution
if default_context().device_type == 'cpu':
def custom_exc3():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# input to Cholesky factorization should be
# symmetric positive-definite, error will be
# triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
out_data[0].wait_to_read()
_build_dot_custom(f, 'Dot3')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot3')
c.wait_to_read()
assert_raises(MXNetError, custom_exc3)
def custom_exc4():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# input to Cholesky factorization should be
# symmetric positive-definite, error will be
# triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
_build_dot_custom(f, 'Dot4')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot4')
c.wait_to_read()
assert_raises(MXNetError, custom_exc4)
@with_seed()
def test_psroipooling():
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([168, 224], [168, 224]):
for grad_nodes in [['im_data']]:
spatial_scale = 0.0625
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1))
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1))
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
op = mx.sym.contrib.PSROIPooling(data=im_data_var, rois=rois_data_var, spatial_scale=spatial_scale,
group_size=num_group, pooled_size=num_group,
output_dim=num_classes, name='test_op')
rtol, atol = 1e-2, 1e-3
check_numeric_gradient(op, [im_data, rois_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes)
@with_seed()
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed()
def test_deformable_convolution():
for num_batch in [1, 2]:
for num_channel_data, num_deformable_group in itertools.product([4, 8], [1, 2]):
for input_height, input_width in itertools.product([5, 6], [5, 6]):
for dilate in [(1, 1), (2, 2)]:
for grad_nodes in [['im_data'], ['offset_data'], ['weight']]:
output_height = input_height
output_width = input_width
im_data = np.random.rand(num_batch, num_channel_data, input_height, input_width)
offset_data = \
np.random.rand(num_batch, num_deformable_group * 3 * 3 * 2, output_height, output_width)\
* 0.8 + 0.1
weight = np.random.normal(0, 0.001, (num_channel_data, num_channel_data, 3, 3))
bias = np.zeros(num_channel_data)
im_data_var = mx.symbol.Variable(name="im_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
weight_var = mx.symbol.Variable(name="weight")
bias_var = mx.symbol.Variable(name="bias")
op = mx.sym.contrib.DeformableConvolution(name='test_op', data=im_data_var,
offset=offset_data_var,
weight=weight_var, bias=bias_var,
num_filter=num_channel_data, pad=dilate,
kernel=(3, 3), stride=(1, 1), dilate=dilate,
num_deformable_group=num_deformable_group)
if grad_nodes[0] == 'offset_data':
# wider tolerance needed for coordinate differential
rtol, atol = 1.0, 1e-2
else:
rtol, atol = 0.05, 1e-3
                        # Currently only the GPU implementation is available
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, offset_data, weight, bias], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _validate_sample_location(input_rois, input_offset, spatial_scale, pooled_w, pooled_h, sample_per_part, part_size, output_dim, num_classes, trans_std, feat_h, feat_w):
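    # Re-draw the transform offsets whenever a bilinear sampling location lands
    # (within 1e-3) on an integer grid coordinate, where the gradient of
    # deformable PSROIPooling w.r.t. the offsets is not differentiable.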
num_rois = input_rois.shape[0]
output_offset = input_offset.copy()
# simulate deformable psroipooling forward function
for roi_idx in range(num_rois):
sub_rois = input_rois[roi_idx, :].astype(np.float32)
img_idx, x0, y0, x1, y1 = int(sub_rois[0]), sub_rois[1], sub_rois[2], sub_rois[3], sub_rois[4]
roi_start_w = round(x0) * spatial_scale - 0.5
roi_start_h = round(y0) * spatial_scale - 0.5
roi_end_w = round(x1 + 1) * spatial_scale - 0.5
roi_end_h = round(y1 + 1) * spatial_scale - 0.5
roi_w, roi_h = roi_end_w - roi_start_w, roi_end_h - roi_start_h
bin_size_w, bin_size_h = roi_w / pooled_w, roi_h / pooled_h
sub_bin_size_w, sub_bin_size_h = bin_size_w / sample_per_part, bin_size_h / sample_per_part
for c_top in range(output_dim):
channel_each_cls = output_dim / num_classes
class_id = int(c_top / channel_each_cls)
for ph in range(pooled_h):
for pw in range(pooled_w):
part_h = int(math.floor(float(ph) / pooled_h * part_size))
part_w = int(math.floor(float(pw) / pooled_w * part_size))
trans_x = input_offset[roi_idx, class_id * 2, part_h, part_w] * trans_std
trans_y = input_offset[roi_idx, class_id * 2 + 1, part_h, part_w] * trans_std
bin_h_start, bin_w_start = ph * bin_size_h + roi_start_h, pw * bin_size_w + roi_start_w
need_check = True
while need_check:
pass_check = True
for ih in range(sample_per_part):
for iw in range(sample_per_part):
h = bin_h_start + trans_y * roi_h + ih * sub_bin_size_h
w = bin_w_start + trans_x * roi_w + iw * sub_bin_size_w
if w < -0.5 or w > feat_w - 0.5 or h < -0.5 or h > feat_h - 0.5:
continue
w = min(max(w, 0.1), feat_w - 1.1)
h = min(max(h, 0.1), feat_h - 1.1)
                                # if the following condition holds, the sampling location is not differentiable,
                                # therefore we need to re-do the sampling process
if h - math.floor(h) < 1e-3 or math.ceil(h) - h < 1e-3 or w - math.floor(w) < 1e-3 or math.ceil(w) - w < 1e-3:
trans_x, trans_y = random.random() * trans_std, random.random() * trans_std
pass_check = False
break
if not pass_check:
break
if pass_check:
output_offset[roi_idx, class_id * 2 + 1, part_h, part_w] = trans_y / trans_std
output_offset[roi_idx, class_id * 2, part_h, part_w] = trans_x / trans_std
need_check = False
return output_offset
@unittest.skip("Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/11713")
@with_seed()
def test_deformable_psroipooling():
sample_per_part = 4
trans_std = 0.1
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([160, 224], [160, 224]):
for grad_nodes in [['im_data'], ['offset_data']]:
spatial_scale = 0.0625
stride = int(1 / spatial_scale)
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1 - 2 * stride)) + stride
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1 - 2 * stride)) + stride
offset_data = np.random.rand(num_rois, 2*num_classes, num_group, num_group)
# at certain points, the bilinear interpolation function may be non-differentiable
# to avoid this, we check whether the input locates on the valid points
offset_data = _validate_sample_location(rois_data, offset_data, spatial_scale, num_group, num_group,
sample_per_part, num_group, num_classes, num_classes, trans_std, feat_height, feat_width)
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
op = mx.sym.contrib.DeformablePSROIPooling(data=im_data_var, rois=rois_data_var,
trans=offset_data_var, spatial_scale=spatial_scale,
sample_per_part=4, group_size=num_group,
pooled_size=num_group, output_dim=num_classes,
trans_std=0.1, no_trans=False, name='test_op')
rtol, atol = 1e-2, 1e-3
                    # Currently only the GPU implementation is available
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, rois_data, offset_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _gemm_test_helper(dtype, grad_check, rtol_fw = 1e-7, atol_fw = 1e-9):
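    # Check linalg.gemm and linalg.gemm2 forward results (and, when grad_check
    # is set, numeric gradients) for all transpose combinations, batched inputs
    # and non-default `axis` values.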
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
data3 = mx.symbol.Variable('data3')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
shape1 = (2, 3)
shape2 = (3, 2)
shape3 = (3, 3)
shape4 = (2, 2)
data_in1 = np.random.uniform(1, 10, shape1).astype(dtype)
data_in2 = np.random.uniform(1, 10, shape2).astype(dtype)
data_in3 = np.random.uniform(1, 10, shape3).astype(dtype)
data_in4 = np.random.uniform(1, 10, shape4).astype(dtype)
# Check all transpositions of gemm operator.
data_in1_t = np.transpose(data_in1)
data_in2_t = np.transpose(data_in2)
res_gemm = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [data_in1, data_in2, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in4])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True, transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in3])
res_gemm = 4. * np.dot(data_in1_t, data_in1) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in3])
res_gemm = 4. * np.dot(data_in1, data_in1_t) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in4])
# Check batch of gemm.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
c = rep_3x(data_in4, 2, 2)
r = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
r = rep_3x(r, 2, 2)
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [a, b, c], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b, c])
# Check for different axis that describes matrix rows.
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
c2 = np.copy(np.swapaxes(c, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = 0)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
c2 = np.copy(np.swapaxes(c, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = -3)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
# Check gemm2 operator same way as gemm.
res_gemm = 4. * np.dot(data_in1, data_in2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in1)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
res_gemm = 4. * np.dot(data_in1, data_in1_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
# Check batch of gemm2.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
r = rep_3x(4. * np.dot(data_in1, data_in2), 2, 2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [a, b], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b])
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = 0)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = -3)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
# Test gemm separately from other la-operators.
@with_seed()
def test_gemm():
_gemm_test_helper(np.float64, True)
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "0"
_gemm_test_helper(np.float32, False, rtol_fw = 1e-5, atol_fw = 1e-7)
if default_context().device_type == 'gpu':
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "1"
_gemm_test_helper(np.float32, False, rtol_fw = 2e-5, atol_fw = 2e-7)
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "0"
# Helper functions for test_laop
def _make_symm_symbol(a, ndims):
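    # Symbolically symmetrize a as 0.5 * (a + a^T), transposing the last two
    # axes, so the gradient checks stay within the symmetric-matrix domain.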
assert ndims >= 2
tr_shape = list(range(ndims))
tr_shape[-1] = ndims-2
tr_shape[-2] = ndims-1
tr_shape = tuple(tr_shape)
return 0.5 * (a + mx.sym.transpose(a, axes=tr_shape))
def _make_triangle_symm(a, ndims, m, lower, dtype=np.float32):
assert ndims >= 2
# The last two dimensions must both be m
# Create mask for lower triangle and diagonal
index = mx.sym.arange(start=0, stop=m, step=1, dtype=np.int32)
lt_mask = mx.sym.one_hot(index, depth=m, dtype=dtype)
for j in range(1, m):
part1 = mx.sym.zeros(shape=(j, m), dtype=dtype)
index = mx.sym.arange(start=0, stop=m-j, step=1, dtype=np.int32)
part2 = mx.sym.one_hot(index, depth=m, dtype=dtype)
lt_mask = lt_mask + mx.sym.concat(*[part1, part2], dim=0)
if not lower:
lt_mask = mx.sym.reshape(lt_mask, shape=(m, m))
lt_mask = mx.sym.transpose(lt_mask, axes=(1, 0))
shp = tuple([1]*(ndims-2) + [m, m])
lt_mask = mx.sym.reshape(lt_mask, shape=shp)
return mx.sym.broadcast_mul(a, lt_mask)
# @ankkhedia: Getting rid of fixed seed as flakiness could not be reproduced
# tracked at https://github.com/apache/incubator-mxnet/issues/11718
@with_seed()
def test_laop():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 2e-6
rtol_bw = 1e-5
atol_bw = 1e-5
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
def check_fw_grad(sym, location, expected):
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
if grad_check == 1:
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
matrix = np.array([[9., 3., -6., 12.],
[3., 26., -7., -11.],
[-6., -7., 9., 7.],
[12., -11., 7., 65.]])
trian = np.array([[3., 0., 0., 0.],
[1., 5., 0., 0.],
[-2., -1., 2., 0.],
[4., -3., 6., 2.]])
pow = np.array([[2., 1., 1., 1.],
[1., 4., 1., 1.],
[1., 1., 8., 1.],
[1., 1., 1., 16.]])
inv = np.array([[8.95/3., 0.05/3., 2.65, -2.5/3.],
[0.05/3., 0.05, 0.05, 0.],
[2.65, 0.05, 2.5, -0.75],
[-2.5/3., 0., -0.75, 0.25]])
ident = np.eye(4)
shape = (4, 4, 1, 1)
ones = mx.nd.ones(shape).asnumpy()
for lower in [True, False]:
upper = not lower
# Tests with trivial 1x1 matrices.
data_in = np.random.uniform(1, 10, shape)
# test potrf
# Note: Have to symmetrize input, for gradient test to work
res_potrf = np.sqrt(data_in)
test_potrf = mx.sym.linalg.potrf(data1, lower=lower)
check_fw_grad(test_potrf, [data_in], [res_potrf])
# test potri
res_potri = np.divide(ones, data_in * data_in)
test_potri = mx.sym.linalg.potri(data1, lower=lower)
check_fw_grad(test_potri, [data_in], [res_potri])
# test trsm
trian_in = data_in * 7.
test_trsm = mx.sym.linalg.trsm(data1, data2, alpha=7., lower=lower)
check_fw_grad(test_trsm, [trian_in, data_in], [ones])
# test trmm
trian_in = np.divide(ones, trian_in)
test_trmm = mx.sym.linalg.trmm(data1, data2, alpha=7., transpose=True,
rightside=True, lower=lower)
check_fw_grad(test_trmm, [trian_in, data_in], [ones])
# test sumlogdiag
res_sumlogdiag = np.reshape(np.log(data_in), (4, 4))
test_sumlogdiag = mx.sym.linalg.sumlogdiag(data1)
check_fw_grad(test_sumlogdiag, [data_in], [res_sumlogdiag])
# more elaborate example of Cholesky factorization
low_trian = trian
if upper:
trian = np.transpose(trian)
# test potrf
test_potrf = mx.sym.linalg.potrf(_make_symm_symbol(data1, ndims=4), lower=lower)
a = rep_3x(matrix, 4, 4)
r = rep_3x(trian, 4, 4)
check_fw_grad(test_potrf, [a], [r])
#test potri
data1_ltri = _make_triangle_symm(
data1, ndims=4, m=4, lower=lower, dtype=dtype)
test_potri = mx.sym.linalg.potri(data1_ltri, lower=lower)
a = rep_3x(trian, 4, 4)
r = rep_3x(inv, 4, 4)
check_fw_grad(test_potri, [a], [r])
# test trsm
test_trsm = mx.sym.linalg.trsm(data1_ltri, data2, alpha=7., transpose=upper, lower=lower)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.transpose(low_trian), 4, 4)
check_fw_grad(test_trsm, [a, b], [r])
test_trsm2 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-2., rightside=True, transpose=lower, lower=lower)
r = rep_3x(-2. * low_trian, 4, 4)
check_fw_grad(test_trsm2, [a, b], [r])
test_trsm3 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=0.5, transpose=lower, lower=lower)
b = rep_3x(np.transpose(low_trian), 4, 4)
r = rep_3x(0.5 * ident, 4, 4)
check_fw_grad(test_trsm3, [a, b], [r])
test_trsm4 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-0.5, rightside=True, transpose=upper, lower=lower)
b = rep_3x(low_trian, 4, 4)
r = rep_3x(-0.5 * ident, 4, 4)
check_fw_grad(test_trsm4, [a, b], [r])
# test trmm
test_trmm = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=7., transpose=True, rightside=True, lower=lower)
a = [a, rep_3x(matrix, 4, 4)]
r = rep_3x(7. * np.dot(matrix, trian.T), 4, 4)
check_fw_grad(test_trmm, a, [r])
test_trmm2 = mx.sym.linalg.trmm(data1_ltri, data2, alpha=-2., lower=lower)
r = rep_3x(-2. * np.dot(trian, matrix), 4, 4)
check_fw_grad(test_trmm2, a, [r])
test_trmm3 = mx.sym.linalg.trmm(data1_ltri, data2, rightside=True, lower=lower)
r = rep_3x(np.dot(matrix, trian), 4, 4)
check_fw_grad(test_trmm3, a, [r])
test_trmm4 = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=1.2, transpose=True, lower=lower)
r = rep_3x(1.2 * np.dot(trian.T, matrix), 4, 4)
check_fw_grad(test_trmm4, a, [r])
# test sumlogdiag
r = np.reshape(np.tile(10. * np.log(np.array([2.])), 3), (3,))
check_fw_grad(test_sumlogdiag, [rep_3x(pow, 4, 4)], [r])
# Tests for operators linalg.syrk, linalg.gelqf
def _gelqf_combined_symbol(a):
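    # Combined symbol for gelqf (A = L * Q): outputs Q * Q^T, which should be
    # the identity, and L * Q, which should reconstruct A.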
q, l = mx.sym.linalg.gelqf(a)
q_qt = mx.sym.linalg.syrk(q, transpose=False, alpha=1., name='Q_times_Qt')
l_q = mx.sym.linalg.trmm(l, q, alpha=1., name='L_times_Q')
return mx.sym.Group([q_qt, l_q])
# NOTE: If we leave the unused output dangling, things break if dtype=np.float64. Namely, the
# backward gradient for the unused output is of dtype np.float32 then.
# ==> Very annoying!
def _gelqf_first_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(l), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(q, bogus_scal)
def _gelqf_second_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(q), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(l, bogus_scal)
def _syevd_combined_symbol(a):
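    # Combined symbol for syevd: outputs U * U^T (expected to be the identity)
    # and U^T * diag(lam) * U (expected to reconstruct the symmetric input).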
u, lam = mx.sym.linalg.syevd(a)
u_ut = mx.sym.linalg.syrk(u, transpose=False, alpha=1., name='U_times_Ut')
lam_u = mx.sym.broadcast_mul(mx.sym.reshape(lam, shape=(-2, 1)), u)
ut_lam_u = mx.sym.linalg.gemm2(u, lam_u, alpha=1., transpose_a=True,
transpose_b=False, name='Ut_L_U')
return mx.sym.Group([u_ut, ut_lam_u])
@with_seed()
def test_laop_2():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
# Tests for linalg.syrk
mnalpha_lst = [(2, 3, 1.), (5, 3, -2.), (1, 6, 5.), (3, 3, 0.5), (4, 1, 10.), (1, 1, 1.)]
for m, n, alpha in mnalpha_lst:
#print('syrk: m={}, n={}, alpha={}'.format(m, n, alpha))
data_in1 = np.random.uniform(1, 10, (m, n))
res_syrk1 = alpha * np.dot(data_in1, data_in1.T)
test_syrk1 = mx.sym.linalg.syrk(data1, transpose=False, alpha=alpha)
check_fw(test_syrk1, [data_in1], [res_syrk1])
if grad_check == 1:
check_grad(test_syrk1, [data_in1])
res_syrk2 = alpha * np.dot(data_in1.T, data_in1)
test_syrk2 = mx.sym.linalg.syrk(data1, transpose=True, alpha=alpha)
check_fw(test_syrk2, [data_in1], [res_syrk2])
if grad_check == 1:
check_grad(test_syrk2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
r1_batch = rep_3x(res_syrk1, m, m)
check_fw(test_syrk1, [a_batch], [r1_batch])
if grad_check == 1:
check_grad(test_syrk1, [a_batch])
r2_batch = rep_3x(res_syrk2, n, n)
check_fw(test_syrk2, [a_batch], [r2_batch])
if grad_check == 1:
check_grad(test_syrk2, [a_batch])
# Tests for linalg.gelqf
    # Currently disabled on GPU as they need CUDA 8
    # and MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
test_gelqf2 = _gelqf_combined_symbol(data1) # Outputs (dot(Q, Q.T), dot(L, Q))
test_gelqf_q = _gelqf_first_output(data1) # Output Q (L is not dangling)
test_gelqf_l = _gelqf_second_output(data1) # Output L (Q is not dangling)
mn_lst = [(4, 4), (1, 1), (5, 20), (1, 10), (15, 50)]
for m, n in mn_lst:
#print('gelqf: m={}, n={}'.format(m, n))
data_in1 = np.random.normal(0., 10., (m, n))
res_eye = np.eye(m)
res_a = data_in1
check_fw(test_gelqf2, [data_in1], [res_eye, res_a])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [data_in1])
# A => L
check_grad(test_gelqf_l, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
reye_batch = rep_3x(res_eye, m, m)
ra_batch = a_batch
check_fw(test_gelqf2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [a_batch])
# A => L
check_grad(test_gelqf_l, [a_batch])
# Tests for operator linalg.syevd
def _syevd_first_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(lam), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(u, bogus_scal)
def _syevd_second_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(u), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(lam, bogus_scal)
def _syevd_forward(a):
lam, ut = np.linalg.eig(a)
ind = np.argsort(lam)
lam = lam[ind]
u = ut[:, ind].T
for i in range(0, a.shape[0]):
_syevd_forw_eigvec_sign(u[i])
return u, lam
def _syevd_forw_eigvec_sign(v):
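    # Flip the eigenvector if its largest-magnitude component is negative, giving the
    # reference output a deterministic sign.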
ind = np.argmax(np.abs(v))
if v[ind] < 0.:
v[:] = -v
def _syevd_backward(grad_u, grad_l, u, l):
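    # Reference backward pass for syevd (A = U^T diag(l) U, rows of u are eigenvectors):
    # off-diagonal entries are (temp[i, j] - temp[j, i]) / (2 * (l[i] - l[j])) with
    # temp = grad_u u^T, the diagonal is grad_l, and the result is rotated back via u^T (.) u.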
n = l.size
assert grad_l.size == n
assert grad_u.shape == (n, n)
assert u.shape == (n, n)
temp = np.dot(grad_u, u.T)
temp2 = np.diag(grad_l)
for i in range(1, n):
for j in range(0, i):
denom = 2. * (l[i] - l[j])
elem = (temp[i, j] - temp[j, i])/denom
temp2[i, j] = elem
temp2[j, i] = elem
temp3 = np.dot(u.T, temp2)
return np.dot(temp3, u)
# Seed set because the test is not robust enough to operate on random data
@with_seed(1896893923)
def test_laop_3():
    # Currently disabled on GPU as syevd needs CUDA 8
    # and MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
dtype = np.float64
rtol_fw = 1e-6
atol_fw = 1e-6
num_eps = 1e-4
rtol_bw = 1e-2
atol_bw = 1e-2
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol_fw, atol=atol_fw, dtype=dtype)
# Tests for linalg.syevd
test_syevd2 = _syevd_combined_symbol(data1) # Outputs (U U^T, U^T (diag L) U)
data1_s2 = _make_symm_symbol(data1, ndims=2)
test_syevd_u_2 = _syevd_first_output(data1_s2)
test_syevd_l_2 = _syevd_second_output(data1_s2)
data1_s4 = _make_symm_symbol(data1, ndims=4)
test_syevd_u_4 = _syevd_first_output(data1_s4)
test_syevd_l_4 = _syevd_second_output(data1_s4)
n_lst = [4, 1, 2, 10, 14]
for n in n_lst:
#print('\n** syevd: n={}'.format(n))
data_in1 = np.random.normal(0., 10., (n, n))
data_in1 = 0.5 * (data_in1 + data_in1.T)
res_eye = np.eye(n)
res_a = data_in1
check_fw(test_syevd2, [data_in1], [res_eye, res_a])
# Check backward
grad_u = np.random.normal(0., 2., (n, n))
grad_l = np.random.normal(0., 2., (n,))
bw_u, bw_l = _syevd_forward(data_in1)
grad_a = _syevd_backward(grad_u, grad_l, bw_u, bw_l)
check_bw(mx.sym.linalg.syevd(data1), [data_in1], [grad_u, grad_l], [grad_a])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_2, [data_in1])
# A => L
check_grad(test_syevd_l_2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, n, n)
reye_batch = rep_3x(res_eye, n, n)
ra_batch = a_batch
check_fw(test_syevd2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_4, [a_batch])
# A => L
check_grad(test_syevd_l_4, [a_batch])
# @piyushghai - Removing the fixed seed for this test.
# Issue for flakiness is tracked at - https://github.com/apache/incubator-mxnet/issues/11721
@with_seed()
def test_laop_4():
    # Currently disabled on GPU as syevd needs CUDA 8
    # and MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
rtol_fw = 1e-6
atol_fw = 1e-6
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected, dtype :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
a_np = np.array([[1., 2.], [2., 4.]])
u_np = np.array([[0.89442718, -0.44721359], [0.44721359, 0.89442718]])
l_np = np.array([0., 5.])
test_syevd = mx.sym.linalg.syevd(data1)
# float64
#print('float64')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float64)
# float32
#print('float32')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float32)
def test_laop_5():
# tests for diagonal and triangular matrix extraction and generation
data = mx.symbol.Variable('data')
# test complete range of small matrices to cover corner cases
for n in range(1, 5):
# test batched and non-batched processing
for b in range(3):
shape = (n, n) if b == 0 else (b, n, n)
data_in = np.random.uniform(1, 10, shape)
# test all legal offsets of the diagonal
for offs in range(1-n, n):
# test extraction of diagonal
test_diag = mx.sym.linalg.extractdiag(data, offset=offs)
res_diag = np.diagonal(data_in, offset=offs) if b==0 else np.diagonal(data_in, axis1=1, axis2=2, offset=offs)
check_symbolic_forward(test_diag, [data_in], [res_diag])
check_numeric_gradient(test_diag, [data_in])
# test generation of diagonal matrix
test_diag2 = mx.sym.linalg.makediag(data, offset=offs)
res_diag2 = None
if b == 0:
res_diag2 = np.diagflat(res_diag, k=offs)
else:
for i in range(b):
res = np.reshape(np.diagflat(res_diag[i], k=offs), (1, n, n))
res_diag2 = res if res_diag2 is None else np.concatenate((res_diag2, res), axis=0)
check_symbolic_forward(test_diag2, [res_diag], [res_diag2])
check_numeric_gradient(test_diag2, [res_diag])
# check both settings for parameter "lower" in case of zero offset
lower_vals = [True] if offs != 0 else [True, False]
for lower in lower_vals:
                    # test triangle extraction via a full round trip, since the intermediate extracted
                    # triangle has a different ordering than NumPy's
test_trian = mx.sym.linalg.extracttrian(data, offset=offs, lower=lower)
test_trian = mx.sym.linalg.maketrian(test_trian, offset=offs, lower=lower)
extracts_lower = (offs < 0) or ((offs == 0) and lower)
res_trian = None
if b == 0:
res_trian = np.tril(data_in, offs) if extracts_lower else np.triu(data_in, offs)
else:
for i in range(b):
res = np.tril(data_in[i], offs) if extracts_lower else np.triu(data_in[i], offs)
res = np.reshape(res, (1, n, n))
res_trian = res if res_trian is None else np.concatenate((res_trian, res), axis=0)
check_symbolic_forward(test_trian, [data_in], [res_trian])
check_numeric_gradient(test_trian, [data_in])
# Tests for linalg.inverse
@with_seed()
@unittest.skip("Test crashes https://github.com/apache/incubator-mxnet/issues/15975")
def test_laop_6():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-4
atol_bw = 1e-6
data = mx.symbol.Variable('data')
check_fw = lambda sym, location, expected:\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
## det(I + dot(v, v.T)) = 1 + dot(v.T, v) >= 1, so it's always invertible;
## det is away from zero, so the value of logdet is stable
v = np.random.random(4)
a = np.eye(4) + np.outer(v, v)
a = np.tile(a, (3, 1, 1))
permute_mat = np.eye(4)[[1, 0, 2, 3]]
# test matrix inverse
r = np.eye(4)
r = np.tile(r, (3, 1, 1))
test_inverse = mx.sym.linalg.inverse(data)
test_eye = mx.sym.linalg.gemm2(data, test_inverse)
check_fw(test_eye, [a], [r])
check_grad(test_inverse, [a])
# test matrix determinant
# det
r = np.linalg.det(a)
test_det = mx.sym.linalg.det(data)
check_fw(test_det, [a], [r])
check_grad(test_det, [a])
# test slogdet
r1 = np.array([1., 1., 1.])
r2 = np.log(np.abs(np.linalg.det(a)))
test_sign, test_logabsdet = mx.sym.linalg.slogdet(data)
check_fw(test_sign, [a], [r1])
check_fw(test_sign, [np.dot(a, permute_mat)], [-r1])
check_fw(test_logabsdet, [a], [r2])
check_grad(test_logabsdet, [a])
@with_seed()
def test_stack():
for _ in range(100):
ndim = random.randint(1, 5)
axis = random.randint(0, ndim)
if random.randint(0, 1):
axis = axis - ndim - 1
nin = random.randint(1, 3)
dshape = [random.randint(1, 5) for _ in range(ndim)]
inputs = [np.random.uniform(size=dshape) for _ in range(nin)]
output = np.stack(inputs, axis=axis)
sym_ins = [mx.sym.var('x%d'%i) for i in range(nin)]
out = mx.sym.stack(*sym_ins, axis=axis)
check_symbolic_forward(out, inputs, [output])
check_numeric_gradient(out, inputs)
## TODO: test fails intermittently when cuDNN is on; cuDNN is temporarily disabled until this gets fixed.
## tracked at https://github.com/apache/incubator-mxnet/issues/14288
@with_seed()
def test_dropout():
def zero_count(array, ratio):
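        # Count dropped elements; NaNs can only appear for ratio == 1 (everything dropped)
        # and are counted as zeros as well.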
zeros = 0
for i in array:
if i == 0:
zeros += 1
elif math.isnan(i):
assert ratio == 1 # Only valid for ratio = 1
zeros += 1
return zeros
def check_correctness(executor, input, ratio):
input = input.ravel()
output = executor.outputs[0].asnumpy().ravel()
input_sum = np.sum(input)
output_sum = np.sum(output)
        # Make sure the input contains no zeros (test data setup check)
assert zero_count(input, ratio) == 0
# count number of zeroes in output
output_zeroes = zero_count(output, ratio)
        # The relative difference between output and input sums should stay within ratio/2
error = abs(output_sum - input_sum) / input_sum
if ratio == 1.0:
assert output_zeroes == len(input)
elif ratio > 0.2:
assert output_zeroes > 0
assert error < (ratio/2)
elif ratio == 0:
assert output_zeroes == 0
def check_dropout_ratio(ratio, shape, cudnn_off=True):
# test dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, cudnn_off=cudnn_off)
exe = y.simple_bind(ctx=default_context(), data=shape)
if ratio == 1:
max_value = float('nan')
else:
max_value = 1 if ratio == 0 else 1/ratio
if ratio == 1:
min_value = float('nan')
else:
min_value = 1 if ratio == 0 else 0
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
if not math.isnan(max_value):
assert exe.outputs[0].asnumpy().max() > 0
else:
assert math.isnan(exe.outputs[0].asnumpy().max())
if not math.isnan(min_value):
assert exe.outputs[0].asnumpy().min() == min_value
else:
assert math.isnan(exe.outputs[0].asnumpy().min())
check_correctness(exe, exe.arg_arrays[0].asnumpy(), ratio)
if ratio == 0.5:
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert (exe.outputs[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
exe.backward([mx.nd.ones(shape)], is_train=False)
assert (exe.grad_arrays[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
# test permanent dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, mode='always', cudnn_off=cudnn_off)
exe = y.simple_bind(ctx=default_context(), data=shape)
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)], is_train=False)
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
def get_slice(x, axis, idx):
ix = ()
for i in range(x.ndim):
if i == axis:
ix += (idx,)
else:
ix += (slice(None, None, None),)
return x[ix]
def check_dropout_axes(ratio, shape, axes, cudnn_off=True):
compactshape = list(shape)
for axis in axes:
compactshape[axis] = 1
compactx = mx.random.uniform(shape=tuple(compactshape))
broadcastx = compactx.broadcast_to(shape)
dropouty = mx.nd.Dropout(broadcastx, p=ratio, axes=axes, cudnn_off=cudnn_off)
for axis in axes:
target = get_slice(dropouty, axis, 0).asnumpy()
for i in range(1, shape[axis]):
assert(get_slice(dropouty, axis, i).asnumpy() == target).all()
def check_passthrough(ratio, shape, cudnn_off=True):
# test inference_mode forward and then backward
a = mx.random.uniform(shape=shape)
a.attach_grad()
with mx.autograd.record(train_mode=False):
b = mx.nd.Dropout(a, ratio, cudnn_off=cudnn_off) # dropout acts as identity
b.backward()
assert_almost_equal(a.grad.asnumpy(), mx.nd.ones_like(b).asnumpy())
shape = (100, 100)
check_dropout_ratio(0.5, shape)
check_dropout_ratio(0.0, shape)
check_dropout_ratio(1.0, shape)
check_dropout_ratio(0.75, shape)
check_dropout_ratio(0.25, shape)
# check_dropout_ratio(0.5, shape, cudnn_off=False)
# check_dropout_ratio(0.0, shape, cudnn_off=False)
# check_dropout_ratio(1.0, shape, cudnn_off=False)
# check_dropout_ratio(0.75, shape, cudnn_off=False)
# check_dropout_ratio(0.25, shape, cudnn_off=False)
check_passthrough(0.5, shape)
check_passthrough(0.0, shape)
check_passthrough(1.0, shape)
# check_passthrough(0.5, shape, cudnn_off=False)
# check_passthrough(0.0, shape, cudnn_off=False)
# check_passthrough(1.0, shape, cudnn_off=False)
nshape = (10, 10, 10, 10)
with mx.autograd.train_mode():
check_dropout_axes(0.25, nshape, axes = (0,))
check_dropout_axes(0.25, nshape, axes = (1,))
check_dropout_axes(0.25, nshape, axes = (2,))
check_dropout_axes(0.25, nshape, axes = (3,))
check_dropout_axes(0.25, nshape, axes = (0, 1))
check_dropout_axes(0.25, nshape, axes = (0, 2))
check_dropout_axes(0.25, nshape, axes = (0, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2))
check_dropout_axes(0.25, nshape, axes = (1, 3))
check_dropout_axes(0.25, nshape, axes = (2, 3))
check_dropout_axes(0.25, nshape, axes = (0, 1, 2))
check_dropout_axes(0.25, nshape, axes = (0, 2, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2, 3))
# check_dropout_axes(0.25, nshape, axes = (0,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (2,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (3,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 1), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 2), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1, 2), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (2, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 1, 2), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 2, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1, 2, 3), cudnn_off=False)
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/11290")
@with_seed()
def test_scatter_gather_nd():
def check(data, idx):
data.attach_grad()
with mx.autograd.record():
y = mx.nd.gather_nd(data, idx)
y.backward(y)
npidx = tuple(i.asnumpy() for i in idx)
assert (data.asnumpy()[npidx] == y.asnumpy()).all()
npdata = np.zeros_like(data.asnumpy())
npdata[npidx] = y.asnumpy()
assert (npdata == data.grad.asnumpy()).all()
assert (mx.nd._internal._backward_gather_nd(y, idx, shape=data.shape).asnumpy() == data.grad.asnumpy()).all()
for dtype in ['int32', 'int64', 'float16', 'float32', 'float64']:
data = mx.nd.arange(360, dtype=dtype).reshape((3,4,5,6))
idx = mx.nd.array([[1,1,2], [3, 3, 0], [3,2,1]], dtype='int32')
check(data, idx)
idx = mx.nd.array([[1,1,2], [3,3,0], [3,2,1], [5,2,4]], dtype='int32')
check(data, idx)
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [0, 1, 0]], dtype='int32')
assert (mx.nd.scatter_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [2, 3]]).all()
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [1, 1, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [0, 5]]).all()
data_npy = np.random.randint(0, 10, (100,))
data = mx.nd.array(data_npy, dtype=dtype)
idx = mx.nd.zeros(shape=(1, 100), dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data_npy.sum())
if dtype == 'int64':
data = mx.nd.array([2123162361283621, -31231236374787,
-112372937128970, -1378278798172378], dtype=dtype)
idx = mx.nd.array([[0, 0, 0, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data.asnumpy().sum())
@with_seed()
def test_gather_nd_check_bound():
def _test_gather_nd_exception(data, indices):
output = mx.nd.gather_nd(data, indices).asnumpy()
# check if indices is out of bound
data = mx.nd.array([[0, 1, 2], [3, 4, 5]])
indices1 = mx.nd.array([[0, 1, 0], [0, 1, 3]])
indices2 = mx.nd.array([[0, 1, 0], [0, 1, -5]])
assertRaises(IndexError, _test_gather_nd_exception, data, indices1)
# IndexError: index 3 is out of bounds for axis 1 with size 3
assertRaises(IndexError, _test_gather_nd_exception, data, indices2)
# IndexError: index -5 is out of bounds for axis 1 with size 3
# check if the negative indices are wrapped correctly
indices1 = mx.nd.array([[0, 1, -1], [0, 1, -2]])
indices2 = mx.nd.array([[0, 1, 1], [0, 1, 1]])
data1 = mx.nd.gather_nd(data, indices1)
data2 = mx.nd.gather_nd(data, indices2)
assert_almost_equal(data1, data2, rtol=1e-5, atol=1e-5)
def compare_forw_backw_unary_op(
name, forward_mxnet_call, forward_numpy_call,
backward_numpy_call, shape, input_low, input_high, rtol, atol,
dtype=np.float32):
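    # Compare an MXNet unary operator against NumPy references for both forward and backward.
    # The op is wrapped in broadcast_add(op(x), zeros_like(x)), which leaves the values
    # unchanged but lets the composite symbol carry op_name.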
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
data = mx.symbol.Variable(op_name + '_data', dtype=dtype)
# Comparison: Forward expression
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
res_np = forward_numpy_call(data_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_fw(op_ex, [data_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data_grad = backward_numpy_call(data_np) * res_grad
check_bw(op_ex, [data_np], [res_grad], [data_grad])
def finite_diff_unary_op(
name, forward_mxnet_call, shape, input_low, input_high, rtol, atol,
num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
data = mx.symbol.Variable('data', dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_grad(op_ex, [data_np])
def np_smooth_l1(x, sigma):
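    # Piecewise definition: 0.5 * (sigma * x)^2 for |x| < 1/sigma^2, |x| - 0.5/sigma^2 otherwise.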
issq = 1. / sigma / sigma
absx = np.abs(x)
temp = x * sigma
return np.where(absx < issq, 0.5 * (temp ** 2), absx - 0.5 * issq)
def np_smooth_l1_grad(x, sigma):
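    # Derivative of np_smooth_l1: sigma^2 * x inside the quadratic region, sign(x) outside.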
ssq = sigma * sigma
return np.where(np.abs(x) < 1. / ssq, x * ssq, np.sign(x))
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
# Seed set because the test is not robust enough to operate on random data
@with_seed(192837465)
def test_unary_math_operators():
have_scipy = True
try:
from scipy import special as scipy_special
    except ImportError:
print("Could not import scipy. Skipping unit tests for special functions")
have_scipy = False
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
rtol_less_l = [1e-6, 1e-5, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
atol_less_l = [1e-6, 1e-5, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'arccos' : [lambda x: mx.sym.arccos(x),
lambda x: np.arccos(x),
lambda x: -1. / np.sqrt(1. - x ** 2.),
-0.95, 0.95],
'arccosh': [lambda x: mx.sym.arccosh(x),
lambda x: np.arccosh(x),
lambda x: 1. / np.sqrt(x ** 2 - 1.),
1.05, 10.0],
'arcsin': [lambda x: mx.sym.arcsin(x),
lambda x: np.arcsin(x),
lambda x: 1. / np.sqrt(1. - x ** 2),
-0.95, 0.95],
'arcsinh': [lambda x: mx.sym.arcsinh(x),
lambda x: np.arcsinh(x),
lambda x: 1. / np.sqrt(x**2 + 1.),
-5.0, 5.0],
'arctan': [lambda x: mx.sym.arctan(x),
lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.),
-5.0, 5.0],
'arctanh': [lambda x: mx.sym.arctanh(x),
lambda x: np.arctanh(x),
lambda x: 1. / (1. - x ** 2),
-0.95, 0.95],
'cbrt': [lambda x: mx.sym.cbrt(x),
lambda x: np.cbrt(x),
lambda x: 1. / (3. * np.cbrt(x) ** 2),
-10.0, 10.0],
'cos': [lambda x: mx.sym.cos(x),
lambda x: np.cos(x),
lambda x: -np.sin(x),
-5.0, 5.0],
'cosh': [lambda x: mx.sym.cosh(x),
lambda x: np.cosh(x),
lambda x: np.sinh(x),
-2.0, 2.0],
'exp': [lambda x: mx.sym.exp(x),
lambda x: np.exp(x),
lambda x: np.exp(x),
-4.0, 4.0],
'expm1': [lambda x: mx.sym.expm1(x),
lambda x: np.expm1(x),
lambda x: np.exp(x),
-0.1, 0.1],
'log': [lambda x: mx.sym.log(x),
lambda x: np.log(x),
lambda x: 1. / x,
0.01, 100.0],
'log10': [lambda x: mx.sym.log10(x),
lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)),
0.01, 100.0],
'log2': [lambda x: mx.sym.log2(x),
lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)),
0.01, 100.0],
'log1p': [lambda x: mx.sym.log1p(x),
lambda x: np.log1p(x),
lambda x: 1. / (1. + x),
-0.1, 0.1],
'rcbrt': [lambda x: mx.sym.rcbrt(x),
lambda x: 1. / np.cbrt(x),
lambda x: -1. / (3. * x * np.cbrt(x)),
0.01, 100.0],
'reciprocal': [lambda x: mx.sym.reciprocal(x),
lambda x: 1. / x,
lambda x: -1. / (x ** 2),
0.01, 100.0],
'relu': [lambda x: mx.sym.relu(x),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'rsqrt': [lambda x: mx.sym.rsqrt(x),
lambda x: 1. / np.sqrt(x),
lambda x: -0.5 / (x * np.sqrt(x)),
0.01, 100.0],
'sigmoid': [lambda x: mx.sym.sigmoid(x),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.softsign(x),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
'sin': [lambda x: mx.sym.sin(x),
lambda x: np.sin(x),
lambda x: np.cos(x),
-5.0, 5.0],
'sinh': [lambda x: mx.sym.sinh(x),
lambda x: np.sinh(x),
lambda x: np.cosh(x),
-2.0, 2.0],
'sqrt': [lambda x: mx.sym.sqrt(x),
lambda x: np.sqrt(x),
lambda x: 0.5 / np.sqrt(x),
0.01, 100.0],
'tan': [lambda x: mx.sym.tan(x),
lambda x: np.tan(x),
lambda x: np.tan(x) ** 2 + 1.,
-1.5, 1.5],
'tanh': [lambda x: mx.sym.tanh(x),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'smooth_l1_sig1': [lambda x: mx.sym.smooth_l1(x, scalar=1.),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig_default': [lambda x: mx.sym.smooth_l1(x),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.),
lambda x: np_smooth_l1(x, 2.),
lambda x: np_smooth_l1_grad(x, 2.),
-1.0, 1.0]
}
if have_scipy:
unary_ops['gamma'] = [lambda x: mx.sym.gamma(x),
lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x),
0.01, 5.0]
unary_ops['gammaln'] = [lambda x: mx.sym.gammaln(x),
lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x),
0.01, 20.0]
# Loop over operators
for name, op in unary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
if name == 'gammaln' or name == 'gamma':
rtol = rtol_less_l[ind]
atol = atol_less_l[ind]
else:
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
def compare_forw_backw_binary_op(
name, forward_mxnet_call, forward_numpy_call,
backward1_numpy_call, backward2_numpy_call, shape, input1_low,
input1_high, input2_low, input2_high, rtol, atol, dtype=np.float32):
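    # Binary-operator analogue of compare_forw_backw_unary_op: compare the forward output and
    # both input gradients against the NumPy references.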
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
data1 = mx.symbol.Variable(op_name + '_data1', dtype=dtype)
data2 = mx.symbol.Variable(op_name + '_data2', dtype=dtype)
# Comparison: Forward expression
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
res_np = forward_numpy_call(data1_np, data2_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_fw(op_ex, [data1_np, data2_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data1_grad = backward1_numpy_call(data1_np, data2_np) * res_grad
data2_grad = backward2_numpy_call(data1_np, data2_np) * res_grad
check_bw(op_ex, [data1_np, data2_np], [res_grad], [data1_grad, data2_grad])
def finite_diff_binary_op(
name, forward_mxnet_call, shape, input1_low, input1_high, input2_low,
input2_high, rtol, atol, num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
data1 = mx.symbol.Variable('data1', dtype=dtype)
data2 = mx.symbol.Variable('data2', dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_grad(op_ex, [data1_np, data2_np])
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
@with_seed()
def test_binary_math_operators():
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
binary_ops = {
'hypot' : [lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
-5.0, 5.0, -5.0, 5.0],
'pow': [lambda x, y: mx.sym.pow(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0],
'power': [lambda x, y: mx.sym.power(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0]
}
# Loop over operators
for name, op in binary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
compare_forw_backw_binary_op(
name, op[0], op[1], op[2], op[3], shape, op[4], op[5], op[6],
op[7], rtol_l[ind], atol_l[ind], dtype)
# Finite difference testing
finite_diff_binary_op(
name, op[0], shape, op[4], op[5], op[6], op[7], rtol_fd, atol_fd,
num_eps)
@with_seed()
def test_softmax():
check_softmax_with_shape((3, 4), default_context(), preserve_shape=False)
check_softmax_with_shape((3, 4), default_context(), preserve_shape=True)
check_softmax_with_shape((3, 4, 2), default_context(), preserve_shape=True)
check_softmax_grad(default_context())
check_smoothed_softmax_grad(default_context())
@with_seed()
def test_softmax_output_normalization():
def _softmaxoutput_normalization(multi_output, use_ignore, normalization):
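        # Reference check: the forward output must equal softmax(data) and the input gradient
        # must be (softmax - one_hot(label)), masked by ignore_label when requested and scaled
        # according to grad_scale and the chosen normalization, mirroring the steps below.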
grad_scale = np.random.random()
batch_size = 8
num_labels = 6
H, W = 3, 3
ignore_label = np.random.randint(0, num_labels) if use_ignore else -1
if multi_output:
data_shape = (batch_size, num_labels, H, W)
label_shape = (batch_size, H, W)
else:
data_shape = (batch_size, num_labels)
label_shape = (batch_size, )
data = mx.nd.random.uniform(-1, 1, shape=data_shape)
label = mx.nd.random.randint(
0, num_labels, shape=label_shape).astype('float32')
data.attach_grad()
kwargs = dict(grad_scale=grad_scale,
normalization=normalization, multi_output=multi_output)
if use_ignore:
kwargs.update(use_ignore=True, ignore_label=ignore_label)
with mx.autograd.record():
out = mx.nd.SoftmaxOutput(data=data, label=label, **kwargs)
out.backward(mx.nd.ones_like(data))
exp_data = mx.nd.exp(data)
softmax_data = exp_data / exp_data.sum(1, keepdims=True)
argmax_data = mx.nd.argmax(data, axis=1)
assert_almost_equal(out.asnumpy(), softmax_data.asnumpy())
one_hot_label = mx.nd.one_hot(label, num_labels)
if multi_output:
one_hot_label = one_hot_label.transpose((0, 3, 1, 2))
data_grad = softmax_data - one_hot_label
if use_ignore:
if multi_output:
data_grad *= (label !=
ignore_label).reshape((batch_size, 1, H, W))
else:
data_grad *= (label != ignore_label).reshape((batch_size, 1))
valid_cnt = 1
if normalization == 'batch':
valid_cnt = batch_size
elif normalization == 'valid':
valid_cnt = mx.nd.maximum(1, (label != ignore_label).sum())
scale = grad_scale / valid_cnt
if multi_output:
if normalization != 'valid':
scale /= H * W
data_grad *= scale
assert_almost_equal(data.grad.asnumpy(), data_grad.asnumpy())
for multi_output in [False, True]:
for use_ignore in [False, True]:
for normalization in ['null', 'batch', 'valid']:
_softmaxoutput_normalization(
multi_output, use_ignore, normalization)
@with_seed()
def test_slice():
def test_slice_forward_backward(a, index):
a_np = a.asnumpy()
begin = []
end = []
step = []
for slice_i in index:
begin.append(slice_i.start)
end.append(slice_i.stop)
step.append(slice_i.step)
b = mx.nd.slice(a, begin=begin, end=end, step=step)
b_np = a_np[index]
assert same(b.asnumpy(), b_np)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=begin, end=end, step=step)
expected_in_grad = np.zeros_like(a_np)
expected_in_grad[index] = b_np
check_symbolic_backward(slice_sym, [a_np], [b_np], [expected_in_grad])
shape = (16, 14, 17, 20)
arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
index_list = [(slice(None),), (slice(None), slice(None)), (slice(1, 10),), (slice(1, 10), slice(3, 9)),
(slice(1, 10), slice(2, 5), slice(3, 6), slice(7, 10)),
(slice(1, 10, 2), slice(2, 9, 3), slice(3, 6, 5), slice(7, 10, 2)),
(slice(None, None, -1), slice(None, None, -1), slice(None, None, -1)),
(slice(10, 0, -2), slice(5, 2, -1), slice(7, None, 3), slice(None, 12, 4))]
for index in index_list:
test_slice_forward_backward(arr, index)
# check numeric gradient
in_data = np.arange(36).reshape(2, 2, 3, 3)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=[0, None], end=[1, None], step=[2, -1])
check_numeric_gradient(slice_sym, [in_data])
def test_slice_partial_infer():
def check_slice_partial_infer(data, begin, end, step, expected_out_shape):
out = mx.sym.slice(data, begin=begin, end=end, step=step)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
def check_slice_axis_partial_infer(data, axis, begin, end, expected_out_shape):
out = mx.sym.slice_axis(data, axis=axis, begin=begin, end=end)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
var1 = mx.sym.var(name="data", shape=(0, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (0, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (0, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (0, 3))
var1 = mx.sym.var(name="data", shape=(10, 0))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, 0))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, 0))
with mx.np_shape():
var1 = mx.sym.var(name="data", shape=(-1, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (-1, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (-1, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (-1, 3))
var1 = mx.sym.var(name='data', shape=(10, -1))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, -1))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, -1))
@with_seed()
def test_float16_min_max():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/9007"""
a = mx.nd.array([np.finfo('float16').min, np.finfo('float16').max], dtype='float16')
assert a.dtype == np.float16
assert np.finfo('float16').min == mx.nd.min(a).asscalar()
assert np.finfo('float16').max == mx.nd.max(a).asscalar()
@with_seed()
@mx.use_np_shape
def test_zero_size_min_max():
def min():
a = mx.nd.zeros(shape=(5, 0))
a.min()
def max():
a = mx.nd.zeros(shape=(5, 0))
a.max()
assert_raises(MXNetError, min)
assert_raises(MXNetError, max)
@with_seed()
def test_squeeze_op():
def check_squeeze_op(shape, axis=None):
data = mx.nd.random.uniform(low=-10.0, high=10.0, shape=shape)
if axis is None:
out = mx.nd.squeeze(data).asnumpy()
out_expected = np.squeeze(data.asnumpy())
else:
out = mx.nd.squeeze(data, axis=axis).asnumpy()
out_expected = np.squeeze(data.asnumpy(), axis=axis)
        if out.shape == (1,):  # as an exception, an all-ones shape such as (1, 1, 1, 1) is squeezed to (1,) rather than ()
out_expected = np.squeeze(data.asnumpy(), axis=tuple([i for i in range(1, len(shape))]))
assert same(out, out_expected)
# check forward
check_squeeze_op((1, 5, 1, 3, 1), 0)
check_squeeze_op((1, 5, 1, 3, 1), 2)
check_squeeze_op((1, 5, 1, 3, 1), 4)
check_squeeze_op((1, 5, 1, 3, 1), (0, 4))
check_squeeze_op((1, 5, 1, 3, 1), (0, 2, 4))
check_squeeze_op((1, 5, 1, 3, 1))
check_squeeze_op((1, 1, 1, 1))
# check gradient
data = mx.symbol.Variable('data')
shape = (1, 2, 1, 3, 1)
data_tmp = np.ones(shape)
test = mx.sym.squeeze(data)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=2)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=(2, 4))
check_numeric_gradient(test, [data_tmp])
@with_seed()
def test_adaptive_avg_pool_op():
def py_adaptive_avg_pool(x, height, width):
# 2D per frame adaptive avg pool
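        # Each output cell (oh, ow) averages the input window
        # [floor(oh*H/OH), ceil((oh+1)*H/OH)) x [floor(ow*W/OW), ceil((ow+1)*W/OW)).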
def adaptive_avg_pool_frame(x, y):
isizeH, isizeW = x.shape
osizeH, osizeW = y.shape
for oh in range(osizeH):
istartH = int(np.floor(1.0 * (oh * isizeH) / osizeH))
iendH = int(np.ceil(1.0 * (oh + 1) * isizeH / osizeH))
kH = iendH - istartH
for ow in range(osizeW):
istartW = int(np.floor(1.0 * (ow * isizeW) / osizeW))
iendW = int(np.ceil(1.0 * (ow + 1) * isizeW / osizeW))
kW = iendW - istartW
xsum = 0
for ih in range(kH):
for iw in range(kW):
xsum += x[istartH+ih][istartW+iw]
y[oh][ow] = xsum / kH / kW
B,C,_,_ = x.shape
y = np.empty([B,C,height, width], dtype=x.dtype)
for b in range(B):
for c in range(C):
adaptive_avg_pool_frame(x[b][c], y[b][c])
return y
def check_adaptive_avg_pool_op(shape, output_height, output_width=None):
x = mx.nd.random.uniform(shape=shape)
if output_width is None:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=output_height)
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_height)
else:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(output_height, output_width))
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_width)
assert_almost_equal(y.asnumpy(), npy)
shape = (2, 2, 10, 10)
for i in range(1, 11):
check_adaptive_avg_pool_op(shape, i)
for j in range(1, 11):
check_adaptive_avg_pool_op(shape, i, j)
@with_seed()
def test_bilinear_resize_op():
def py_bilinear_resize(x, outputHeight, outputWidth):
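        # Reference bilinear interpolation: source coordinate h1r = h2 * (H_in - 1) / (H_out - 1),
        # and each output pixel blends the four neighbouring inputs with weights
        # (1 - h1lambda) * (1 - w1lambda), etc.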
batch, channel, inputHeight, inputWidth = x.shape
if outputHeight == inputHeight and outputWidth == inputWidth:
return x
y = np.empty([batch, channel, outputHeight, outputWidth])
rheight = 1.0 * (inputHeight - 1) / (outputHeight - 1) if outputHeight > 1 else 0.0
rwidth = 1.0 * (inputWidth - 1) / (outputWidth - 1) if outputWidth > 1 else 0.0
for h2 in range(outputHeight):
h1r = 1.0 * h2 * rheight
h1 = int(np.floor(h1r))
h1lambda = h1r - h1
h1p = 1 if h1 < (inputHeight - 1) else 0
for w2 in range(outputWidth):
w1r = 1.0 * w2 * rwidth
w1 = int(np.floor(w1r))
w1lambda = w1r - w1
w1p = 1 if w1 < (inputWidth - 1) else 0
for b in range(batch):
for c in range(channel):
y[b][c][h2][w2] = (1-h1lambda)*((1-w1lambda)*x[b][c][h1][w1] + \
w1lambda*x[b][c][h1][w1+w1p]) + \
h1lambda*((1-w1lambda)*x[b][c][h1+h1p][w1] + \
w1lambda*x[b][c][h1+h1p][w1+w1p])
return y
def py_bilinear_resize_backward(x, incoming_grads, mode='size'):
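        # Scatter the incoming gradient back onto the four source pixels using the same
        # bilinear weights as the forward pass; 'like' mode also returns a zero gradient
        # for the reference input.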
data1 = np.zeros_like(x)
data2 = incoming_grads
batchsize = data1.shape[0]
channels = data1.shape[1]
height1 = data1.shape[2]
width1 = data1.shape[3]
height2 = data2.shape[2]
width2 = data2.shape[3]
rheight = float(height1 - 1) / (height2 - 1) if (height2 > 1) else 0
rwidth = float(width1 - 1) / (width2 - 1) if (width2 > 1) else 0
# special case: just copy
if height1 == height2 and width1 == width2:
data1 += data2
return [data1]
for h2 in range(0, height2):
for w2 in range(0, width2):
h1r = rheight * h2
h1 = int(h1r)
h1p = 1 if (h1 < height1 - 1) else 0
h1lambda = h1r - h1
h0lambda = 1 - h1lambda
#
w1r = rwidth * w2
w1 = int(w1r)
w1p = 1 if (w1 < width1 - 1) else 0
w1lambda = w1r - w1
w0lambda = 1 - w1lambda
#
for n in range(0, batchsize):
for c in range(0, channels):
d2val = data2[n][c][h2][w2]
data1[n][c][h1][w1] += h0lambda * w0lambda * d2val
data1[n][c][h1][w1 + w1p] += h0lambda * w1lambda * d2val
data1[n][c][h1 + h1p][w1] += h1lambda * w0lambda * d2val
data1[n][c][h1 + h1p][w1 + w1p] += h1lambda * w1lambda * d2val
if mode == 'like':
return data1, np.zeros_like(incoming_grads)
return [data1]
def check_bilinear_resize_op(shape, height, width):
x = mx.nd.random.uniform(shape=shape)
y = mx.nd.contrib.BilinearResize2D(x, height=height, width=width)
assert_almost_equal(y, py_bilinear_resize(x.asnumpy(), height, width))
x_scale = width / shape[-1]
y_scale = height / shape[-2]
y = mx.nd.contrib.BilinearResize2D(x, scale_height=y_scale, scale_width=x_scale)
assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
def check_bilinear_resize_align_corners_op():
img_shape = [1, 1, 3, 2]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
expected_data = {}
# align_corners = False
expected_data[0] = [
64.000, 56.000, 40.000, 32.000, 56.000, 52.000, 44.000, 40.000, 40.000, 44.000, 52.000, 56.000,
36.500, 45.625, 63.875, 73.000, 45.500, 56.875, 79.625, 91.000, 50.000, 62.500, 87.500, 100.000
]
# align_corners = True
expected_data[1] = [
64.000, 53.333, 42.667, 32.000, 51.200, 49.067, 46.933, 44.800, 38.400, 44.800, 51.200, 57.600,
35.600, 47.467, 59.333, 71.200, 42.800, 57.067, 71.333, 85.600, 50.000, 66.667, 83.333, 100.000
]
x = np.array(data, dtype=np.float32).reshape(img_shape)
x_nd = mx.nd.array(x)
y0 = np.array(expected_data[0]).reshape((1, 1, target_height, target_width))
y0_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=False)
assert_almost_equal(y0, y0_nd.asnumpy(), atol=1e-3)
y1 = np.array(expected_data[1]).reshape((1, 1, target_height, target_width))
y1_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=True)
assert_almost_equal(y1, y1_nd.asnumpy(), atol=1e-3)
def check_bilinear_resize_modes_op(shape, scale_height=None, scale_width=None, shape_1=None, mode=None):
x = mx.nd.random.uniform(shape=shape)
original_h = shape[2]
original_w = shape[3]
if mode == 'odd_scale':
assert scale_height is not None and scale_width is not None
new_h = int(original_h * scale_height) if (original_h % 2) == 0 else \
int((original_h - 1) * scale_height) + 1
new_w = int(original_w * scale_width) if (original_w % 2) == 0 \
else int((original_w - 1) * scale_width) + 1
y = mx.nd.contrib.BilinearResize2D(x, scale_height=scale_height,
scale_width=scale_width,
mode='odd_scale')
elif mode == 'to_even_down':
new_h = original_h if (original_h % 2) == 0 else original_h - 1
new_w = original_w if (original_w % 2) == 0 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_down')
elif mode == 'to_even_up':
new_h = original_h if (original_h % 2) == 0 else original_h + 1
new_w = original_w if (original_w % 2) == 0 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_up')
elif mode == 'to_odd_down':
new_h = original_h if (original_h % 2) == 1 else original_h - 1
new_w = original_w if (original_w % 2) == 1 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_down')
elif mode == 'to_odd_up':
new_h = original_h if (original_h % 2) == 1 else original_h + 1
new_w = original_w if (original_w % 2) == 1 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_up')
elif mode == 'like':
x_1 = mx.nd.random.uniform(shape=shape_1)
new_h = x_1.shape[2]
new_w = x_1.shape[3]
y = mx.nd.contrib.BilinearResize2D(x, x_1, mode='like')
new_shape_desired = np.array([shape[0], shape[1], new_h, new_w], dtype='int')
new_shape_got = np.array(y.shape, dtype='int')
data_sym = mx.sym.var('data')
data_np = x.asnumpy()
expected = py_bilinear_resize(data_np, new_h, new_w)
out_grads = np.ones([shape[0], shape[1], new_h, new_w])
expected_backward = py_bilinear_resize_backward(data_np, out_grads, mode)
assert_array_equal(new_shape_desired, new_shape_got, "Desired and got shapes are not equal. {} vs {}".format(
str(new_shape_desired.tolist()), str(new_shape_got.tolist())))
assert_almost_equal(y.asnumpy(), expected, 1e-3, 0)
if mode != 'like':
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, None, scale_height=scale_height, scale_width=scale_width, mode=mode)
check_symbolic_forward(resize_sym, [data_np], [expected], rtol=1e-3, atol=1e-5)
check_symbolic_backward(resize_sym, [data_np], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
check_numeric_gradient(resize_sym, [data_np], rtol=1e-2, atol=1e-4)
else:
data_sym_like = mx.sym.var('data_like')
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, data_sym_like, mode=mode)
            data_np_like = x_1.asnumpy()
            check_symbolic_forward(resize_sym, [data_np, data_np_like], [expected], rtol=1e-3, atol=1e-5)
            check_symbolic_backward(resize_sym, [data_np, data_np_like], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
            check_numeric_gradient(resize_sym, [data_np, data_np_like], rtol=1e-2, atol=1e-4)
shape = (2, 2, 10, 10)
check_bilinear_resize_op(shape, 5, 5)
check_bilinear_resize_op(shape, 10, 10)
check_bilinear_resize_op(shape, 15, 15)
check_bilinear_resize_op(shape, 3, 7)
check_bilinear_resize_op(shape, 13, 17)
shape = (2, 2, 20, 20)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape = (2, 2, 21, 21)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape_0 = (2, 2, 21, 21)
shape_1 = (2, 2, 10, 10)
check_bilinear_resize_modes_op(shape_0, shape_1=shape_1, mode='like')
check_bilinear_resize_modes_op(shape_1, shape_1=shape_0, mode='like')
check_bilinear_resize_align_corners_op()
def test_multi_proposal_op():
    # parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
threshold = 0.7
rpn_min_size = 16
batch_size = 20
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = np.float32)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = np.float32)
im_info = mx.nd.empty((batch_size, 3), dtype = np.float32)
cls_prob = mx.nd.array(np.random.random(cls_prob.shape))
bbox_pred = mx.nd.array(np.random.random(bbox_pred.shape))
for i in range(batch_size):
im_size = np.random.randint(100, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(70, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
def get_sub(arr, i):
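        # Slice out sample i while keeping a leading batch dimension of 1, so the per-image
        # Proposal op can be compared against the batched MultiProposal output.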
new_shape = list(arr.shape)
new_shape[0] = 1
res = arr[i].reshape(new_shape)
return res
def check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
single_proposal = []
single_score = []
for i in range(batch_size):
rois, score = mx.nd.contrib.Proposal(
cls_prob = get_sub(cls_prob, i),
bbox_pred = get_sub(bbox_pred, i),
im_info = get_sub(im_info, i),
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal.append(rois)
single_score.append(score)
multi_proposal, multi_score = mx.nd.contrib.MultiProposal(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal = mx.nd.stack(*single_proposal).reshape(multi_proposal.shape)
single_score = mx.nd.stack(*single_score).reshape(multi_score.shape)
single_proposal_np = single_proposal.asnumpy()
multi_proposal_np = multi_proposal.asnumpy()
single_score_np = single_score.asnumpy()
multi_score_np = multi_score.asnumpy()
# check rois x1,y1,x2,y2
assert np.allclose(single_proposal_np[:, 1:], multi_proposal_np[:, 1:])
# check rois batch_idx
for i in range(batch_size):
start = i * rpn_post_nms_top_n
end = start + rpn_post_nms_top_n
assert (multi_proposal_np[start:end, 0] == i).all()
# check score
assert np.allclose(single_score_np, multi_score_np)
def check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
im_info_sym = mx.sym.Variable('im_info')
cls_prob_sym = mx.sym.Variable('cls_prob')
bbox_pred_sym = mx.sym.Variable('bbox_pred')
sym = mx.sym.contrib.MultiProposal(
cls_prob = cls_prob_sym,
bbox_pred = bbox_pred_sym,
im_info = im_info_sym,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = False)
location = [cls_prob.asnumpy(), bbox_pred.asnumpy(), im_info.asnumpy()]
expected = [np.zeros_like(e) for e in location]
out_grads = [np.ones((rpn_post_nms_top_n, 5))]
check_symbolic_backward(sym, location, out_grads, expected)
check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
check_forward(rpn_pre_nms_top_n, 1500)
check_forward(1000, 500)
check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
@with_seed()
def test_quadratic_function():
def f(x, a, b, c):
return a * x**2 + b * x + c
a = np.random.random_sample()
b = np.random.random_sample()
c = np.random.random_sample()
data = mx.symbol.Variable('data')
quad_sym = mx.sym.contrib.quadratic(data=data, a=a, b=b, c=c)
for dtype in [np.float16, np.float32, np.float64]:
tol = 1e-2 if dtype is np.float16 else 1e-5
for ndim in range(1, 6):
shape = rand_shape_nd(ndim, 5)
data_np = np.random.randn(*shape).astype(dtype)
expected = f(data_np, a, b, c)
backward_expected = 2 * a * data_np + b
# check imperative forward
output = mx.nd.contrib.quadratic(mx.nd.array(data_np), a=a, b=b, c=c)
assert_almost_equal(output, expected, rtol=tol, atol=tol)
# check forward
check_symbolic_forward(quad_sym, [data_np], [expected], rtol=tol, atol=tol)
# check backward
check_symbolic_backward(quad_sym, [data_np], [np.ones(expected.shape)],
[backward_expected], rtol=tol, atol=tol)
# check backward using finite difference
check_numeric_gradient(quad_sym, [data_np], atol=0.001)
def allclose_function(contexts):
def getRandom(base, percent = 1.):
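        # Perturb `base` by up to +/- `percent` percent so the tolerances fed to the operator
        # vary slightly between runs.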
return base * (1 + percent * (2 * np.random.random_sample() - 1.) / 100)
title = 'exp'
for ctx in contexts:
title += ' cpu' if ctx == mx.cpu() else ' gpu'
title += ' nElem shape'
num_ctx = len(contexts)
result = [False, False]
for dtype in [np.float16, np.float32, np.float64]:
rtol = getRandom(1e-2 if dtype is np.float16 else 1e-5)
atol = getRandom(1e-4 if dtype is np.float16 else 1e-7)
print('\nnumpy.{}: atol = {} rtol = {}'.format(dtype.__name__, atol, rtol))
print(title)
for ndim in range(1, 10):
shape = rand_shape_nd(ndim, 8)
a_np = np.random.randn(*shape).astype(dtype)
b_np = (a_np + np.random.randn(*shape).astype(dtype) / 10000000).astype(dtype)
expected = np.allclose(a_np, b_np, rtol, atol)
for n, ctx in enumerate(contexts):
a_ctx = mx.nd.array(a_np, dtype = dtype, ctx=ctx)
b_ctx = mx.nd.array(b_np, dtype = dtype, ctx=ctx)
output = mx.nd.contrib.allclose(a_ctx, b_ctx, rtol=rtol, atol=atol)
result[n] = output.asnumpy() == 1
if expected != result[n]:
                    # Report the elements that this context considers "not close", together with the
                    # corresponding elements of the comparison (CPU/GPU/Python) result that considered them "close"
v_ctx = 'CPU' if ctx == mx.cpu() else 'GPU'
if expected:
v_cmp = 'Python'
a_b = a_ctx.asnumpy()
b_b = b_ctx.asnumpy()
a_g = np.asarray(a_np)
b_g = np.asarray(b_np)
else:
v_cmp = v_ctx
v_ctx = 'Python'
a_b = np.asarray(a_np)
b_b = np.asarray(b_np)
a_g = a_ctx.asnumpy()
b_g = b_ctx.asnumpy()
print('\n *** Violations found on %s, but not on %s side ***' % (v_ctx, v_cmp))
frmt = " a[{0:d}]: b[{0:d}]:" \
" abs(a[{0:d}]-b[{0:d}]) - atol + rtol*abs(b[{0:d}]):"
                    # Find the indices of all violating elements and their corresponding values
bad_indexes = np.abs(a_b - b_b) >= atol + rtol * abs(b_b)
a_values = [a_b[bad_indexes], a_g[bad_indexes]]
b_values = [b_b[bad_indexes], b_g[bad_indexes]]
idx = np.asarray(np.where(bad_indexes == True))
idx = idx.reshape(1, idx.size)
idx_flat = np.asarray(np.where(bad_indexes.flatten() == True)).flatten()
for i in range(len(a_values[0])):
flat_idx = idx_flat[i]
print('{}: index = {} flat_index = {}'.format('%4d'%i, idx[i], flat_idx))
print(frmt.format(flat_idx))
for j in range(2):
diff = np.abs(a_values[j][i]-b_values[j][i]) - atol + rtol*abs(b_values[j][i])
print('{}: {} {} {}'.format('%6s'%v_ctx, a_values[j][i], b_values[j][i], diff))
if num_ctx == 1:
print(' {0:d} {1:d} {2:10d} {3:}'.format(expected, result[0], np.prod(shape), shape))
else:
print(' {0:d} {1:d} {2:d} {3:10d} {4:}'.format(expected, result[0], result[1], np.prod(shape), shape))
if expected != result[0] or num_ctx > 1 and expected != result[1]:
assert False
@with_seed()
def test_allclose_function():
allclose_function([default_context()])
@with_seed()
def test_histogram():
def f(x, bins=10, range=None):
return np.histogram(x, bins, range=range)
for ndim in range(1, 6):
shape = rand_shape_nd(ndim)
x = rand_ndarray(shape, stype='default', dtype=np.float64)
mx_bins = mx.nd.array([-1.0, 0.5, 2.0, 4.5, 50.0], dtype=np.float64)
np_bins = mx_bins.asnumpy()
bin_cnt = random.randint(2, 10)
bin_range = (-2.5, 2.5)
mx_histo1, mx_bins1 = mx.nd.histogram(x, bins=bin_cnt, range=bin_range)
np_histo1, np_bins1 = f(x.asnumpy(), bins=bin_cnt, range=bin_range)
assert_almost_equal(mx_bins1, np_bins1)
assert_almost_equal(mx_histo1, np_histo1, rtol=1e-3, atol=1e-5)
mx_histo2, mx_bins2 = mx.nd.histogram(x, bins=mx_bins)
np_histo2, np_bins2 = f(x.asnumpy(), bins=np_bins)
assert_almost_equal(mx_histo2, np_histo2, rtol=1e-3, atol=1e-5)
assert_almost_equal(mx_bins2, np_bins2, rtol=1e-3, atol=1e-5)
data = mx.sym.Variable("data")
bins = mx.sym.Variable("bins")
histo1 = mx.sym.histogram(a=data, bins=bin_cnt, range=bin_range)
histo2 = mx.sym.histogram(a=data, bins=bins)
executor1 = histo1.bind(ctx=default_context(), args={"data" : x})
executor1.forward(is_train=False)
assert_almost_equal(np_histo1, executor1.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo1", "FORWARD_histo1"), equal_nan=False)
executor2 = histo2.bind(ctx=default_context(), args={"data" : x, "bins" : mx_bins})
executor2.forward(is_train=False)
assert_almost_equal(np_histo2, executor2.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo2", "FORWARD_histo2"), equal_nan=False)
def test_op_output_names_monitor():
def check_name(op_sym, expected_names):
output_names = []
def get_output_names_callback(name, arr):
output_names.append(py_str(name))
op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
op_exe.set_monitor_callback(get_output_names_callback, monitor_all=False)
try:
op_exe.forward()
mx.nd.waitall()
except mx.base.MXNetError:
# skip errors since test is to check output names
pass
for output_name, expected_name in zip(output_names, expected_names):
assert output_name == expected_name
is_windows = sys.platform.startswith('win')
    if is_windows:
        # Windows doesn't support setting environment variables on the fly, so skip this for now
pass
else:
        # Disable the subgraph backend in case it would replace the symbol
os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
check_name(conv_sym, ['conv_output'])
deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
check_name(deconv_sym, ['deconv_output'])
fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
check_name(fc_sym, ['fc_output'])
lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
check_name(lrn_sym, ['lrn_output', 'lrn_tmp_norm'])
act_sym = mx.sym.Activation(data, act_type='relu', name='act')
check_name(act_sym, ['act_output'])
cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
check_name(cc_sym, ['concat_output'])
sm_sym = mx.sym.softmax(data, name='softmax')
check_name(sm_sym, ['softmax_output'])
sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
check_name(sa_sym, ['softmax_output'])
us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
name='upsampling')
check_name(us_sym, ['upsampling_output'])
us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
name='pooling')
check_name(us_sym, ['pooling_output'])
del os.environ['MXNET_SUBGRAPH_BACKEND']
def test_op_all_names_monitor():
def check_name(op_sym, expected_names):
output_names = []
def get_output_names_callback(name, arr):
output_names.append(py_str(name))
op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
op_exe.set_monitor_callback(get_output_names_callback, monitor_all=True)
try:
op_exe.forward()
mx.nd.waitall()
except mx.base.MXNetError:
# skip errors since the test only checks the emitted names
pass
for output_name, expected_name in zip(output_names, expected_names):
assert output_name == expected_name
is_windows = sys.platform.startswith('win')
if (is_windows):
# Windows doesn't support setting environment variables on the fly, so skip this for now
pass
else:
# Disable subgraph in case subgraph will replace symbol
os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
check_name(conv_sym, ['data', 'conv_data', 'conv_weight', 'conv_weight', 'conv_bias', 'conv_bias', 'conv_output'])
deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
check_name(deconv_sym, ['data', 'deconv_data', 'deconv_weight', 'deconv_weight', 'deconv_output'])
fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
check_name(fc_sym, ['data', 'fc_data', 'fc_weight', 'fc_weight', 'fc_bias', 'fc_bias', 'fc_output'])
lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
check_name(lrn_sym, ['data', 'lrn_data', 'lrn_output', 'lrn_tmp_norm'])
act_sym = mx.sym.Activation(data, act_type='relu', name='act')
check_name(act_sym, ['data', 'act_input0', 'act_output'])
cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
check_name(cc_sym, ['data', 'concat_arg0', 'data', 'concat_arg1', 'concat_output'])
sm_sym = mx.sym.softmax(data, name='softmax')
check_name(sm_sym, ['data', 'softmax_data', 'softmax_output'])
length = mx.sym.Variable("length", shape=(10, 10, 10))
sm_sym = mx.sym.softmax(data, length, axis=1, use_length=True, name='softmax')
check_name(sm_sym, ['data', 'softmax_data', 'length', 'softmax_length', 'softmax_output'])
sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
check_name(sa_sym, ['data', 'softmax_input0', 'softmax_output'])
us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
name='upsampling')
check_name(us_sym, ['data', 'upsampling_arg0', 'upsampling_output'])
us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
name='pooling')
check_name(us_sym, ['data', 'pooling_data', 'pooling_output'])
del os.environ['MXNET_SUBGRAPH_BACKEND']
@with_seed()
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/13915")
def test_activation():
shapes = [(9,), (9, 10), (9, 10, 10), (1, 9, 10, 10)]
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'relu': [lambda x: mx.sym.Activation(x, act_type='relu'),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'sigmoid': [lambda x: mx.sym.Activation(x, act_type='sigmoid'),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'tanh': [lambda x: mx.sym.Activation(x, act_type='tanh'),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'softrelu': [lambda x: mx.sym.Activation(x, act_type='softrelu'),
lambda x: np.log(1. + np.exp(x)),
lambda x: 1. - 1 / (1 + np.exp(x)),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.Activation(x, act_type='softsign'),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
}
# Loop over operators
for name, op in unary_ops.items():
# Loop over shapes
for shape in shapes:
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
@with_seed()
def test_ravel():
# Be aware that check_symbolic_forward uses float internally for the
# arrays, which limits the representable flat index range. Taking
# dim==4 and a data range of [0, ..., 100] can already cause precision
# issues and break this test.
for dim in [1, 2, 3, 4]:
data = np.random.randint(50, size=(dim, 500))
shape = tuple(np.add(np.amax(data, axis=1), [1]))
a = mx.sym.Variable('a')
ravel_npy = np.ravel_multi_index(data, shape)
b = mx.sym.ravel_multi_index(a, shape=shape)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
# Test with leading dimension set to -1.
shape2 = (-1,) + shape[1:]
b = mx.sym.ravel_multi_index(a, shape=shape2)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape2)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
def test_context_num_gpus():
try:
# Note: the test runs on both GPU and CPU hosts, so we cannot assert
# a specific GPU count here.
assert mx.context.num_gpus() >= 0
except mx.MXNetError as e:
# Note: on a CPU-only host, CUDA is sometimes unable to determine the number
# of GPUs
if str(e).find("CUDA") == -1:
raise e
@with_seed()
def test_op_roi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
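# Bilinear interpolation on a single 2-D feature map; returns the sampled value
# and the (y, x, weight) contributions needed for the backward pass.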
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0), []
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
grad = [(y_low, x_low, w1), (y_low, x_high, w2),
(y_high, x_low, w3), (y_high, x_high, w4)
]
return val, grad
def roialign_forward_backward(data, rois, pooled_size, spatial_scale, sampling_ratio,
position_sensitive, dy):
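# NumPy reference for ROIAlign forward/backward; the rois gradient stays zero,
# matching the operator's behavior.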
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 5,\
ValueError(
'The length of the axis 1 of rois should be 5 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
C_out = C // PH // PW if position_sensitive else C
out = np.zeros((R, C_out, PH, PW), dtype=T)
dx = np.zeros_like(data)
drois = np.zeros_like(rois)
for r in range(R):
batch_ind = int(rois[r, 0])
sw, sh, ew, eh = rois[r, 1:5] * T(spatial_scale)
roi_w = T(max(ew - sw, 1.0))
roi_h = T(max(eh - sh, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
for c in range(C_out):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
c_in = c * PH * PW + ph * PW + pw if position_sensitive else c
for iy in range(roi_bin_grid_h):
y = sh + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
x = sw + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
v, g = bilinear_interpolate(
bdata[c_in], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
# compute grad
for qy, qx, qw in g:
assert_same_dtype(qw.dtype, T)
dx[batch_ind, c_in, qy, qx] += dy[r,
c, ph, pw] * qw / count
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out, [dx, drois]
def test_roi_align_value(sampling_ratio=0, position_sensitive=False):
ctx = default_context()
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
C = C * pooled_size[0] * pooled_size[1] if position_sensitive else C
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy - wh / 2, center_xy + wh / 2, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
data.attach_grad()
rois.attach_grad()
with mx.autograd.record():
output = mx.nd.contrib.ROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sample_ratio=sampling_ratio,
position_sensitive=position_sensitive)
C_out = C // pooled_size[0] // pooled_size[1] if position_sensitive else C
dy = mx.nd.random.uniform(-1, 1, (R, C_out) +
pooled_size, ctx=ctx, dtype=dtype)
output.backward(dy)
real_output, [dx, drois] = roialign_forward_backward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio,
position_sensitive, dy.asnumpy())
assert_almost_equal(output, real_output, atol=1e-3)
assert_almost_equal(data.grad, dx, atol=1e-3)
assert_almost_equal(rois.grad, drois, atol=1e-3)
# modified from test_roipooling()
def test_roi_align_autograd(sampling_ratio=0):
ctx = default_context()
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.contrib.ROIAlign(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1,
sample_ratio=sampling_ratio)
x1 = np.random.rand(4, 1, 12, 12).astype('float64')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2],
[1, 3.1, 1.1, 5.2, 10.2]], dtype='float64')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'write', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'add', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
test_roi_align_value()
test_roi_align_value(sampling_ratio=2)
test_roi_align_value(position_sensitive=True)
test_roi_align_autograd()
@with_seed()
def test_op_rroi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
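# Forward-only bilinear interpolation (no gradient bookkeeping) used by the
# RROIAlign reference below.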
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0)
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
return val
def rroialign_forward(data, rois, pooled_size, spatial_scale, sampling_ratio):
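# NumPy reference for rotated ROIAlign: each roi is
# (batch_idx, center_x, center_y, w, h, angle_degrees) and the sampling points
# are rotated by the roi angle before bilinear interpolation.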
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 6,\
ValueError(
'The length of the axis 1 of rois should be 6 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
out = np.zeros((R, C, PH, PW), dtype=T)
for r in range(R):
batch_ind = int(rois[r, 0])
roi_center_w, roi_center_h, roi_w, roi_h = rois[r, 1:5] * T(spatial_scale)
roi_theta = T(rois[r,5] * np.pi / 180.0)
roi_w = T(max(roi_w, 1.0))
roi_h = T(max(roi_h, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
roi_start_h = T(-roi_h / 2.0)
roi_start_w = T(-roi_w / 2.0)
for c in range(C):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
for iy in range(roi_bin_grid_h):
yy = roi_start_h + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
xx = roi_start_w + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
x = xx * np.cos(roi_theta, dtype=T) + yy * np.sin(roi_theta, dtype=T) + roi_center_w
y = yy * np.cos(roi_theta, dtype=T) - xx * np.sin(roi_theta, dtype=T) + roi_center_h
v = bilinear_interpolate(
bdata[c], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out
def test_rroi_align_value(sampling_ratio=-1):
ctx = default_context()
if ctx.device_type == 'gpu':
print('skipped testing rroi align for gpu since it is not supported yet')
return
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
theta = mx.nd.random.uniform(0, 180, (R,1), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy, wh, theta, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
output = mx.nd.contrib.RROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sampling_ratio=sampling_ratio)
real_output = rroialign_forward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio)
assert_almost_equal(output.asnumpy(), real_output, atol=1e-3)
test_rroi_align_value()
test_rroi_align_value(sampling_ratio=2)
@with_seed()
def test_diag():
# Test 2d input
h = np.random.randint(2,9)
w = np.random.randint(2,9)
a_np = np.random.random((h, w)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
for k in [0, 1, -1, np.random.randint(-min(h,w) + 1, min(h,w))]:
assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
# invalid k
k = max(h,w) + 1
assertRaises(MXNetError, mx.nd.diag, a, k=k)
# Test 2d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# test 1d input
d = np.random.randint(2,9)
a_np = np.random.random((d))
a = mx.nd.array(a_np)
# k is random
k = np.random.randint(-d,d)
assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
# Test 1d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
# Test 1d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
# Test 1d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d input
x1 = np.random.randint(3,9)
x2 = np.random.randint(3,9)
x3 = np.random.randint(3,9)
x4 = np.random.randint(3,9)
a_np = np.random.random((x1, x2, x3, x4)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
# k = 0, axis1=0, axis2=1
r = mx.nd.diag(data=a, k=0, axis1=0, axis2=1)
assert_almost_equal(r, np.diagonal(a_np, offset=0, axis1=0, axis2=1))
# k = 1, axis1=1, axis2=0
r = mx.nd.diag(data=a, k=1, axis1=1, axis2=0)
assert_almost_equal(r, np.diagonal(a_np, offset=1, axis1=1, axis2=0))
# k = -1, axis1=1, axis2=3
r = mx.nd.diag(data=a, k=-1, axis1=1, axis2=3)
assert_almost_equal(r, np.diagonal(a_np, offset=-1, axis1=1, axis2=3))
# k = 2, axis1=-2, axis2=0
r = mx.nd.diag(data=a, k=2, axis1=-2, axis2=0)
assert_almost_equal(r, np.diagonal(a_np, offset=2, axis1=-2, axis2=0))
# Test 4d backward, k=0, axis1=3, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=0, axis1=3, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=1, axis1=1, axis2=2
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1, axis1=1, axis2=2)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-1, axis1=2, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1, axis1=2, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-2, axis1=1, axis2=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-2, axis1=1, axis2=-1)
check_numeric_gradient(diag_sym, [a_np])
@with_seed()
def test_depthtospace():
def f(x, blocksize):
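# NumPy reference for depth_to_space: redistributes blocksize**2 channel groups
# into the spatial dimensions.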
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
n = random.randint(1, 5)
c = block * block * rand_mul1
h = random.randint(1, 5)
w = random.randint(1, 5)
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.depth_to_space(data, block)
assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c // (block ** 2), h * block, w * block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.depth_to_space(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_depth_dim():
invalid_shape_inp = (n, block - 1, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_space_dim():
invalid_shape_inp = (n, block ** 2, 0, block + 1)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n , c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
test_invalid_depth_dim()
test_invalid_space_dim()
test_invalid_block_size()
@with_seed()
def test_spacetodepth():
def f(x, blocksize):
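# NumPy reference for space_to_depth: the inverse of depth_to_space, folding
# blocksize x blocksize spatial patches into channels.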
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize])
tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
y = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
rand_mul2 = random.randint(1, 4)
n = random.randint(1, 5)
c = random.randint(1, 5)
h = block * rand_mul1
w = block * rand_mul2
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.space_to_depth(data, block)
assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c * (block ** 2), h // block, w // block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.space_to_depth(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_space_dim():
invalid_shape_inp = (n , c, block - 1, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n, c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_depth_dim():
invalid_shape_inp = (n, 0, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
test_invalid_space_dim()
test_invalid_block_size()
test_invalid_depth_dim()
@with_seed()
def test_softmax_cross_entropy():
def f_sm_ce(data, label):
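# Reference cross-entropy; data is assumed to already hold softmax
# probabilities and label is one-hot.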
return np.sum(-np.log(data) * label)
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
sym = mx.sym.softmax_cross_entropy(data=data, label=label)
num_labels = random.randint(100, 200)
batch_size = random.randint(100, 200)
np_data = rand_ndarray((batch_size, num_labels), stype='default').asnumpy()
np_sm = np_softmax(np_data)
np_label = np.random.randint(0, num_labels, (batch_size, ))
np_one_hot_label = np.zeros((batch_size, num_labels))
np_one_hot_label[np.arange(batch_size), np_label] = 1.
check_symbolic_forward(sym, {'data' : np_data, 'label' : np_label}, [np.array([f_sm_ce(np_sm, np_one_hot_label)])], rtol=1e-3, atol=1e-5)
@with_seed()
def test_split_v2():
dim = random.randint(2, 6)
shape = rand_shape_nd(dim)
axis = random.randint(-dim, dim-1)
axis_size = shape[axis]
samples = random.randint(0, axis_size - 1)
indices = sorted(random.sample([i for i in range(1, axis_size)], samples))
indices = tuple(indices)
mx_data = rand_ndarray(shape)
np_data = mx_data.asnumpy()
np_out = np.split(np_data, indices_or_sections=indices, axis=axis)
data = mx.sym.Variable("data")
sym = mx.sym.split_v2(data, indices_or_sections=indices, axis=axis)
check_symbolic_forward(sym, {"data": mx_data}, np_out, rtol=1e-3, atol=1e-5)
out_grad = [np.ones(arr.shape) for arr in np_out]
check_symbolic_backward(sym, {"data": mx_data}, out_grad, [np.concatenate(out_grad, axis=axis)])
@with_seed()
def test_moments():
dim = random.randint(2, 5)
shape = rand_shape_nd(dim, dim=5)
axes = [i for i in range(dim)]
test_dims = random.sample(axes, random.randint(1, dim))
test_axes = tuple(sorted(test_dims))
np_a = np.random.uniform(-1.0, 1.0, shape)
a = mx.nd.array(np_a)
for keepdims in [True, False]:
eps = 1e-3
np_a[abs(np_a) < eps] = 2 * eps
np_mean = np.mean(np_a, axis=test_axes, keepdims=keepdims)
np_var = np.var(np_a, axis=test_axes, keepdims=keepdims)
mx_mean, mx_var = mx.nd.moments(a, keepdims=keepdims, axes=test_axes)
N = np_a.size / np_mean.size
mx_sym = mx.sym.Variable("data")
mx_moments = mx.sym.moments(mx_sym, axes=test_axes, keepdims=keepdims)
mx_test_sym = mx.sym.elemwise_add(mx_moments[0], mx_moments[1])
if len(np_mean.shape) == 0:
np_mean = np_mean.reshape(mx_mean.shape)
np_var = np_var.reshape(mx_var.shape)
assert np_mean.shape == mx_mean.shape
assert np_var.shape == mx_var.shape
check_symbolic_forward(mx_test_sym, [np_a], [np_mean + np_var], rtol=1e-3, atol=1e-5)
check_numeric_gradient(mx_test_sym, [np_a], numeric_eps=eps, rtol=1e-2, atol=2e-4)
@with_seed()
def test_invalid_kernel_size():
invalid_kernel_size = 28
assert_exception(
mx.nd.Correlation,
MXNetError,
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=invalid_kernel_size)
@with_seed()
def test_valid_kernel_size():
valid_kernel_size = 9
mx.nd.Correlation(
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=valid_kernel_size)
@with_seed()
def test_valid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
output_data=mx.nd.Pooling(
input_data,
kernel=kernel,
stride=stride,
pad=(0,0,0),
pool_type='max',
name='pooling',
pooling_convention="same")
assert(math.ceil(input_data.shape[2]/stride) == output_data.shape[2])
@with_seed()
def test_invalid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
pad = 2
assert_exception(
mx.nd.Pooling,
MXNetError,
input_data,
stride=stride,
kernel=kernel,
pad=pad,
pool_type='max',
name='pooling',
pooling_convention="same")
@with_seed()
def test_image_normalize():
# Part 1 - Test 3D input with 3D mean/std
shape_3d = (3, 28, 28)
mean = (0, 1, 2)
std = (3, 2, 1)
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][0] = data_expected_3d[:][:][0] / 3.0
data_expected_3d[:][:][1] = (data_expected_3d[:][:][1] - 1.0) / 2.0
data_expected_3d[:][:][2] = data_expected_3d[:][:][2] - 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][0] = 1 / 3.0
grad_expected_3d[:][:][1] = 1 / 2.0
grad_expected_3d[:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 2 - Test 4D input with 3D mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[0][:][:][0] = data_expected_4d[0][:][:][0] / 3.0
data_expected_4d[0][:][:][1] = (data_expected_4d[0][:][:][1] - 1.0) / 2.0
data_expected_4d[0][:][:][2] = data_expected_4d[0][:][:][2] - 2.0
data_expected_4d[1][:][:][0] = data_expected_4d[1][:][:][0] / 3.0
data_expected_4d[1][:][:][1] = (data_expected_4d[1][:][:][1] - 1.0) / 2.0
data_expected_4d[1][:][:][2] = data_expected_4d[1][:][:][2] - 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[0][:][:][0] = 1 / 3.0
grad_expected_4d[0][:][:][1] = 1 / 2.0
grad_expected_4d[0][:][:][2] = 1 / 1.0
grad_expected_4d[1][:][:][0] = 1 / 3.0
grad_expected_4d[1][:][:][1] = 1 / 2.0
grad_expected_4d[1][:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
# Part 3 - Test 3D input with scalar mean/std
shape_3d = (3, 28, 28)
mean = 1.0
std = 2.0
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][:] = (data_expected_3d[:][:][:] - 1.0) / 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 4 - Test 4D input with scalar mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[:][:][:][:] = (data_expected_4d[:][:][:][:] - 1.0) / 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[:][:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
@with_seed()
def test_index_array():
def test_index_array_default():
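# contrib.index_array maps every element to its N-dimensional index;
# np.mgrid + stack builds the same tensor as the expected output.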
for shape in [(10,), (7, 5, 29), (5, 7, 11, 13, 17, 19)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_dim():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(())
expected = np.zeros((0,))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones((0, 0, 0))
expected = np.zeros((0, 0, 0, 3))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
def test_index_array_select_axes():
shape = (5, 7, 11, 13, 17, 19)
for axes in [(3,), (4, 1), (5, 1, 3), (-1,), (-5, -1, -3)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=axes)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)[..., axes]
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_select_axes_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=(2, 1))
input_array = np.ones((0, 0, 0, 0))
expected = np.zeros((0, 0, 2))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
test_index_array_default()
test_index_array_default_zero_dim()
test_index_array_default_zero_size()
test_index_array_select_axes()
test_index_array_select_axes_zero_size()
@with_seed()
def test_scalar_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=())
assertRaises(MXNetError, mx.nd.ones, shape=())
with mx.np_shape():
data_mx = mx.nd.ones(shape=())
data_np = np.ones((), dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
@with_seed()
def test_zero_size_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=(0, 1, 3, 0))
assertRaises(MXNetError, mx.nd.ones, shape=(0, 1, 3, 0))
with mx.np_shape():
data_mx = mx.nd.ones(shape=(0, 1, 0, 4))
data_np = np.ones(shape=data_mx.shape, dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
@with_seed()
def test_concat_with_zero_size_tensor():
with mx.np_shape():
data1 = mx.nd.ones((0, 8, 12))
data2 = mx.nd.ones((3, 8, 12))
data3 = mx.nd.ones((0, 8, 12))
ret = mx.nd.Concat(data1, data2, data3, dim=0)
assert ret.shape == (3, 8, 12)
data1 = mx.nd.ones((0, 3, 10))
data2 = mx.nd.ones((0, 4, 10))
data3 = mx.nd.ones((0, 5, 10))
ret = mx.nd.Concat(data1, data2, data3, dim=1)
assert ret.shape == (0, 12, 10)
@with_seed()
def test_np_shape_decorator():
@mx.use_np_shape
def check_scalar_one():
"""Generate scalar one tensor"""
return mx.nd.ones(shape=())
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
for active in [True, False]:
with mx.np_shape(active=active):
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
@mx.use_np_shape
def check_concat(shape1, shape2, axis):
data1 = mx.nd.ones(shape1)
data2 = mx.nd.ones(shape2)
ret = mx.nd.Concat(data1, data2, dim=axis)
expected_ret = np.concatenate((data1.asnumpy(), data2.asnumpy()), axis=axis)
assert ret.shape == expected_ret.shape
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
for active in [True, False]:
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
@with_seed()
def test_add_n():
data_shape = (2, 2)
input_num = 5
data = [mx.nd.random.uniform(shape=data_shape) for i in range(input_num)]
rslt = mx.nd.zeros(shape=data_shape)
for i in range(input_num):
rslt += data[i]
add_n_rslt = mx.nd.add_n(*data, out=data[0])
assert_almost_equal(rslt.asnumpy(), add_n_rslt.asnumpy(), atol=1e-5)
def test_get_all_registered_operators():
ops = get_all_registered_operators()
ok_(isinstance(ops, list))
ok_(len(ops) > 0)
ok_('Activation' in ops)
def test_get_operator_arguments():
operator_arguments = get_operator_arguments('Activation')
ok_(isinstance(operator_arguments, OperatorArguments))
ok_(operator_arguments.names == ['data', 'act_type'])
ok_(operator_arguments.types
== ['NDArray-or-Symbol', "{'relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'}, required"])
ok_(operator_arguments.narg == 2)
def test_transpose_infer_shape_back():
o1 = mx.sym.ones(shape=[2,3])
o2 = mx.sym.ones(shape=[-1,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b.bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
def test_transpose_infer_shape_mixed():
o1 = mx.sym.ones(shape=[2,-1])
o2 = mx.sym.ones(shape=[3,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b.bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
@with_seed()
def test_sample_normal_default_shape():
# Test case from https://github.com/apache/incubator-mxnet/issues/16135
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]))
assert s.shape == (1,)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=())
assert s.shape == (1,)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=1)
assert s.shape == (1, 1)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=(1,))
assert s.shape == (1, 1)
def test_large_tensor_disabled_err_msg():
LARGE_X = 4300000000
MEDIUM_X = 1000000000
SMALL_Y = 1
shape = (2, LARGE_X)
def check_nd_array():
x = np.arange(0, LARGE_X)
assertRaises(MXNetError, mx.nd.array, x)
def check_nd_ones():
assertRaises(MXNetError, mx.nd.ones, shape)
def check_nd_zeros():
assertRaises(MXNetError, mx.nd.zeros, shape)
def check_nd_full():
val = 1
assertRaises(Exception, mx.nd.full, shape, val)
def check_nd_arange():
start = 0
stop = LARGE_X
assertRaises(Exception, mx.nd.arange, start, stop)
def check_nd_random():
shape = (2, LARGE_X)
def check_random_exp():
lam = 4
assertRaises(MXNetError, mx.nd.random_exponential, lam, shape)
def check_random_gamma():
alpha = 9
beta = 0.5
assertRaises(MXNetError, mx.nd.random_gamma, alpha, beta, shape)
def check_random_normal():
loc = 0
scale = 1
assertRaises(MXNetError, mx.nd.random_normal, loc, scale, shape)
def check_random_poisson():
lam = 4
assertRaises(MXNetError, mx.nd.random_poisson, lam, shape)
def check_random_randint():
low = 0
high = 1000000
assertRaises(MXNetError, mx.nd.random_randint, low, high, shape)
def check_random_uniform():
low = 0
high = 1
assertRaises(MXNetError, mx.nd.random_uniform, low, high, shape)
def check_multihead_attention_selfatt(dtype):
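# Builds the fused interleaved_matmul self-attention graph and an equivalent
# graph from FullyConnected/batch_dot primitives, then checks that outputs and
# gradients agree.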
def convert_weight(F, q_weight, k_weight, v_weight, num_heads):
q_weight = F.reshape(q_weight, shape=(num_heads, -1, 0), reverse=True)
k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
all_weights = F.concat(q_weight, k_weight, v_weight, dim=-2)
all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
return all_weights
def convert_bias(F, q_bias, k_bias, v_bias, num_heads):
q_bias = F.reshape(q_bias, shape=(num_heads, -1))
k_bias = F.reshape(k_bias, shape=(num_heads, -1))
v_bias = F.reshape(v_bias, shape=(num_heads, -1))
all_bias = F.stack(q_bias, k_bias, v_bias, axis=1)
all_bias = F.reshape(all_bias, shape=(-1,))
return all_bias
batch_size = 2
qkv_length = 7 # length of a sequence
qkv_dim = 9 # dimension of encoding
num_heads = 3 # number of attention heads
head_dim = 5 # head size
out_dim = 13 * num_heads
qkv_units = num_heads * head_dim
arg_params = {
'qkv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
}
qkv = mx.sym.Variable('qkv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
qkv_weight = convert_weight(mx.sym, q_weight, k_weight, v_weight, num_heads)
qkv_bias = convert_bias(mx.sym, q_bias, k_bias, v_bias, num_heads)
qkv = mx.sym.transpose(qkv, axes=(1, 0, 2))
qkv_proj = mx.sym.FullyConnected(qkv, weight=qkv_weight, bias=qkv_bias, flatten=False,
num_hidden=qkv_units * 3, no_bias=False)
att_score = mx.sym.contrib.interleaved_matmul_selfatt_qk(
qkv_proj, heads=num_heads)
att_score = att_score + sonde
weighted_value = mx.sym.contrib.interleaved_matmul_selfatt_valatt(
qkv_proj, att_score, heads=num_heads)
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.transpose(output, axes=(1, 0, 2))
output = mx.sym.Group([output, att_score])
executor = output.simple_bind(ctx=default_context(),
qkv=(batch_size, qkv_length, qkv_dim),
q_weight=(qkv_units, qkv_dim),
q_bias=(qkv_units,),
k_weight=(qkv_units, qkv_dim),
k_bias=(qkv_units,),
v_weight=(qkv_units, qkv_dim),
v_bias=(qkv_units,),
type_dict={'qkv': dtype,
'q_weight': dtype,
'k_weight': dtype,
'v_weight': dtype,
'q_bias': dtype,
'k_bias': dtype,
'v_bias': dtype,
'sonde': dtype},
grad_req='write', force_rebind=True)
output_shape = executor.outputs[0].shape
output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_opti = executor.outputs[0].asnumpy()
att_score_opti = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype),
mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
qkv = mx.sym.Variable('qkv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
q = mx.sym.FullyConnected(qkv, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
k = mx.sym.FullyConnected(qkv, weight=k_weight, bias=k_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
v = mx.sym.FullyConnected(qkv, weight=v_weight, bias=v_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
q = mx.sym.contrib.div_sqrt_dim(q)
att_score = mx.sym.batch_dot(q, k, transpose_b=True)
att_score = att_score + sonde
v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
weighted_value = mx.sym.batch_dot(att_score, v)
weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
reverse=True)
weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.Group([output, att_score])
executor = output.simple_bind(ctx=default_context(),
qkv=(batch_size, qkv_length, qkv_dim),
type_dict={'qkv': dtype},
grad_req='write', force_rebind=True)
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_orig = executor.outputs[0].asnumpy()
att_score_orig = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype),
mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
for k in grads_opti.keys():
assert(grads_orig[k].dtype == grads_opti[k].dtype)
assert(grads_orig[k].shape == grads_opti[k].shape)
assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@with_seed()
@assert_raises_cuda_not_satisfied(min_version='9.1')
def test_multihead_attention_selfatt():
dtypes = ['float32']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for dtype in dtypes:
check_multihead_attention_selfatt(dtype=dtype)
def check_multihead_attention_encdec(dtype):
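# Encoder-decoder variant of the check above: q comes from one input and k/v
# from another, again comparing the fused path against primitive ops.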
def convert_weight(F, k_weight, v_weight, num_heads):
k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
all_weights = F.concat(k_weight, v_weight, dim=-2)
all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
return all_weights
def convert_bias(F, k_bias, v_bias, num_heads):
k_bias = F.reshape(k_bias, shape=(num_heads, -1))
v_bias = F.reshape(v_bias, shape=(num_heads, -1))
all_bias = F.stack(k_bias, v_bias, axis=1)
all_bias = F.reshape(all_bias, shape=(-1,))
return all_bias
batch_size = 2
qkv_length = 7 # length of a sequence
qkv_dim = 9 # dimension of encoding
num_heads = 3 # number of attention heads
head_dim = 5 # head size
out_dim = 13 * num_heads
qkv_units = num_heads * head_dim
arg_params = {
'q': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'kv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
}
q = mx.sym.Variable('q')
kv = mx.sym.Variable('kv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
kv_weight = convert_weight(mx.sym, k_weight, v_weight, num_heads)
kv_bias = convert_bias(mx.sym, k_bias, v_bias, num_heads)
kv = mx.sym.transpose(kv, axes=(1, 0, 2))
kv_proj = mx.sym.FullyConnected(kv, weight=kv_weight, bias=kv_bias, flatten=False,
num_hidden=qkv_units * 2, no_bias=False)
q = mx.sym.transpose(q, axes=(1, 0, 2))
q_proj = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
att_score = mx.sym.contrib.interleaved_matmul_encdec_qk(
q_proj, kv_proj, heads=num_heads)
att_score = att_score + sonde
weighted_value = mx.sym.contrib.interleaved_matmul_encdec_valatt(
kv_proj, att_score, heads=num_heads)
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.transpose(output, axes=(1, 0, 2))
output = mx.sym.Group([output, att_score])
executor = output.simple_bind(ctx=default_context(),
q=(batch_size, qkv_length, qkv_dim),
kv=(batch_size, qkv_length, qkv_dim),
q_weight=(qkv_units, qkv_dim),
q_bias=(qkv_units,),
k_weight=(qkv_units, qkv_dim),
k_bias=(qkv_units,),
v_weight=(qkv_units, qkv_dim),
v_bias=(qkv_units,),
out_weight=(out_dim, qkv_units),
out_bias=(out_dim,),
type_dict={'q': dtype,
'kv': dtype,
'q_weight': dtype,
'q_bias': dtype,
'k_weight': dtype,
'k_bias': dtype,
'v_weight': dtype,
'v_bias': dtype,
'out_weight': dtype,
'out_bias': dtype,
},
grad_req='write', force_rebind=True)
output_shape = executor.outputs[0].shape
output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_opti = executor.outputs[0].asnumpy()
att_score_opti = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
q = mx.sym.Variable('q')
kv = mx.sym.Variable('kv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
q = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
k = mx.sym.FullyConnected(kv, weight=k_weight, bias=k_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
v = mx.sym.FullyConnected(kv, weight=v_weight, bias=v_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
q = mx.sym.contrib.div_sqrt_dim(q)
att_score = mx.sym.batch_dot(q, k, transpose_b=True)
att_score = att_score + sonde
v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
weighted_value = mx.sym.batch_dot(att_score, v)
weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
reverse=True)
weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.Group([output, att_score])
executor = output.simple_bind(ctx=default_context(),
q=(batch_size, qkv_length, qkv_dim),
kv=(batch_size, qkv_length, qkv_dim),
type_dict={'q': dtype,
'kv': dtype},
grad_req='write', force_rebind=True)
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_orig = executor.outputs[0].asnumpy()
att_score_orig = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
for k in grads_opti.keys():
assert(grads_orig[k].dtype == grads_opti[k].dtype)
assert(grads_orig[k].shape == grads_opti[k].shape)
assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@with_seed()
@assert_raises_cuda_not_satisfied(min_version='9.1')
def test_multihead_attention_encdec():
dtypes = ['float32']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for dtype in dtypes:
check_multihead_attention_encdec(dtype=dtype)
@with_seed()
def test_im2col_col2im():
def compute_output_size(spatial, kernel, stride=1, dilate=1, pad=0):
pad_size = spatial + 2 * pad
dilated_kernel = dilate * (kernel - 1) + 1
return (pad_size - dilated_kernel) // stride + 1
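# e.g. spatial=30, kernel=3, stride=2, dilate=2, pad=1 -> (32 - 5) // 2 + 1 = 14 output positions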
def build_kwargs(kernel, stride=1, dilate=1, pad=0):
return {'kernel': (kernel, kernel),
'stride': (stride, stride),
'dilate': (dilate, dilate),
'pad': (pad, pad)}
# use im2col to compute convolution
def test_conv_compute(input_shape, num_filter, kernel, stride=1, dilate=1, pad=0):
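# Convolution as GEMM: unfold patches with im2col, multiply by the flattened
# filters, reshape, and compare with mx.nd.Convolution.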
batch_size = input_shape[0]
channel = input_shape[1]
kwargs = build_kwargs(kernel, stride, dilate, pad)
data = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(data, **kwargs)
w = mx.nd.uniform(shape=(num_filter, channel, kernel, kernel))
c1 = mx.nd.dot(col.transpose((0, 2, 1)), w.reshape(num_filter, -1).T).transpose((0, 2, 1))
hos = compute_output_size(input_shape[2], kernel, stride, dilate, pad)
wos = compute_output_size(input_shape[3], kernel, stride, dilate, pad)
c1 = c1.reshape((batch_size, num_filter, hos, wos))
c2 = mx.nd.Convolution(data, num_filter=num_filter, weight=w, no_bias=True, **kwargs)
assert_almost_equal(c1.asnumpy(), c2.asnumpy(), rtol=1e-5, atol=1e-5)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2,
dilate = 2
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
# use composite of im2col and col2im to reconstruct image
def test_reconstruct(input_shape, kernel, stride=1, dilate=1, pad=0):
batch_size = input_shape[0]
channel = input_shape[1]
kwargs = build_kwargs(kernel, stride, dilate, pad)
data = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(data, **kwargs)
im1 = mx.nd.col2im(col, input_shape[2:], **kwargs)
im2 = mx.nd.col2im(mx.nd.ones_like(col), input_shape[2:], **kwargs) * data
assert_almost_equal(im1.asnumpy(), im2.asnumpy(), rtol=1e-5, atol=1e-5)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
# test gradient
# the grad of im2col is col2im, and vice versa
def test_grad(input_shape, kernel, stride=1, dilate=1, pad=0):
# im2col
data = mx.sym.Variable('data')
kwargs = build_kwargs(kernel, stride, dilate, pad)
sym = mx.sym.im2col(data, **kwargs)
im = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(im, **kwargs)
col_shape = col.shape
expected = mx.nd.col2im(col, input_shape[2:], **kwargs)
check_symbolic_backward(sym, [im.asnumpy()], [col.asnumpy()], [expected.asnumpy()])
# col2im
data = mx.sym.Variable('data')
sym = mx.sym.col2im(data, input_shape[2:], **kwargs)
col = mx.nd.uniform(shape=col_shape)
im = mx.nd.col2im(col, input_shape[2:], **kwargs)
expected = mx.nd.im2col(im, **kwargs)
check_symbolic_backward(sym, [col.asnumpy()], [im.asnumpy()], [expected.asnumpy()])
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
if __name__ == '__main__':
import nose
nose.runmodule()
| 43.328274
| 173
| 0.574496
|
f3bfb67cd58479c42892546cfa623674c3218500
| 3,227
|
py
|
Python
|
alert_service_sdk/model/flowable_service/bpmn_exclusive_gateway_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | 5
|
2019-07-31T04:11:05.000Z
|
2021-01-07T03:23:20.000Z
|
alert_service_sdk/model/flowable_service/bpmn_exclusive_gateway_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
alert_service_sdk/model/flowable_service/bpmn_exclusive_gateway_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: bpmn_exclusive_gateway.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from alert_service_sdk.model.flowable_service import bpmn_links_pb2 as alert__service__sdk_dot_model_dot_flowable__service_dot_bpmn__links__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='bpmn_exclusive_gateway.proto',
package='flowable_service',
syntax='proto3',
serialized_options=_b('ZJgo.easyops.local/contracts/protorepo-models/easyops/model/flowable_service'),
serialized_pb=_b('\n\x1c\x62pmn_exclusive_gateway.proto\x12\x10\x66lowable_service\x1a\x39\x61lert_service_sdk/model/flowable_service/bpmn_links.proto\"N\n\x14\x42PMNExclusiveGateway\x12\n\n\x02id\x18\x01 \x01(\t\x12*\n\x05links\x18\x02 \x01(\x0b\x32\x1b.flowable_service.BPMNLinksBLZJgo.easyops.local/contracts/protorepo-models/easyops/model/flowable_serviceb\x06proto3')
,
dependencies=[alert__service__sdk_dot_model_dot_flowable__service_dot_bpmn__links__pb2.DESCRIPTOR,])
_BPMNEXCLUSIVEGATEWAY = _descriptor.Descriptor(
name='BPMNExclusiveGateway',
full_name='flowable_service.BPMNExclusiveGateway',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='flowable_service.BPMNExclusiveGateway.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='links', full_name='flowable_service.BPMNExclusiveGateway.links', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=109,
serialized_end=187,
)
_BPMNEXCLUSIVEGATEWAY.fields_by_name['links'].message_type = alert__service__sdk_dot_model_dot_flowable__service_dot_bpmn__links__pb2._BPMNLINKS
DESCRIPTOR.message_types_by_name['BPMNExclusiveGateway'] = _BPMNEXCLUSIVEGATEWAY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
BPMNExclusiveGateway = _reflection.GeneratedProtocolMessageType('BPMNExclusiveGateway', (_message.Message,), {
'DESCRIPTOR' : _BPMNEXCLUSIVEGATEWAY,
'__module__' : 'bpmn_exclusive_gateway_pb2'
# @@protoc_insertion_point(class_scope:flowable_service.BPMNExclusiveGateway)
})
_sym_db.RegisterMessage(BPMNExclusiveGateway)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
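# A minimal, hypothetical usage sketch (not part of the generated output; the
# field value below is illustrative only):
#
#   from alert_service_sdk.model.flowable_service import bpmn_exclusive_gateway_pb2
#
#   gateway = bpmn_exclusive_gateway_pb2.BPMNExclusiveGateway(id="gateway-1")
#   payload = gateway.SerializeToString()   # serialize to the proto3 wire format
#   parsed = bpmn_exclusive_gateway_pb2.BPMNExclusiveGateway()
#   parsed.ParseFromString(payload)         # round-trip back into a message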
| 39.353659
| 374
| 0.796095
|
522259ef3a3d6f1b790edda2e92fb140f6f1a6a4
| 14,131
|
py
|
Python
|
venv/Lib/site-packages/comtypes/server/register.py
|
cvangheem/Twitchbot
|
48bb065951e88e4d2e9ef8d0c1a3afb0150a5eb5
|
[
"MIT"
] | 64
|
2020-07-22T06:24:18.000Z
|
2022-03-27T10:48:15.000Z
|
venv/Lib/site-packages/comtypes/server/register.py
|
AsterLaoWhy/Navi
|
819e4cc9e70721d65da5979e0c7a6fead9eb9d6e
|
[
"MIT"
] | 9
|
2021-08-14T01:27:36.000Z
|
2021-08-24T18:10:18.000Z
|
venv/Lib/site-packages/comtypes/server/register.py
|
AsterLaoWhy/Navi
|
819e4cc9e70721d65da5979e0c7a6fead9eb9d6e
|
[
"MIT"
] | 17
|
2020-09-14T02:46:41.000Z
|
2022-03-01T09:52:33.000Z
|
"""comtypes.server.register - register and unregister a COM object.
Exports the UseCommandLine function. UseCommandLine is called with
the COM object classes that a module exposes. It parses the Windows
command line and takes the appropriate actions.
These command line options are supported:
/regserver - register the classes with COM.
/unregserver - unregister the classes with COM.
/nodebug - remove all logging configuration from the registry.
/l <name>=<level> - configure the logging level for the standard Python logging module,
this option may be used several times.
/f <formatter> - specify the formatter string.
Note: Registering and unregistering the objects also removes the logging
entries. Configuring the logging does not change any other registry
entries, so it is possible to freeze a COM object with py2exe, register
it, configure logging afterwards to debug it, and delete the logging
configuration when done.
Sample usage:
Register the COM object:
python mycomobj.py /regserver
Configure logging info:
python mycomobj.py /l comtypes=INFO /l comtypes.server=DEBUG /f %(message)s
Now, debug the object, and when done delete logging info:
python mycomobj.py /nodebug
"""
import sys, os
import winreg
import logging
import comtypes
from comtypes.typeinfo import LoadTypeLibEx, UnRegisterTypeLib, REGKIND_REGISTER
from comtypes.hresult import *
from comtypes.server import w_getopt
import comtypes.server.inprocserver
from ctypes import windll, c_ulong, c_wchar_p, WinError, sizeof, create_string_buffer
_debug = logging.getLogger(__name__).debug
def get_winerror(exception):
try:
return exception.winerror
except AttributeError:
return exception.errno
# SHDeleteKey will remove a registry key together with all of its subkeys.
def _non_zero(retval, func, args):
if retval:
raise WinError(retval)
SHDeleteKey = windll.shlwapi.SHDeleteKeyW
SHDeleteKey.errcheck = _non_zero
SHDeleteKey.argtypes = c_ulong, c_wchar_p
try:
Set = set
except NameError:
from sets import Set #as set
_KEYS = {winreg.HKEY_CLASSES_ROOT: "HKCR",
winreg.HKEY_LOCAL_MACHINE: "HKLM",
winreg.HKEY_CURRENT_USER: "HKCU"}
def _explain(hkey):
return _KEYS.get(hkey, hkey)
class Registrar(object):
"""COM class registration.
The COM class can override what this does by implementing
_register and/or _unregister class methods. These methods will be
called with the calling instance of Registrar, and so can call the
    Registrar's _register and _unregister methods, which do the actual
work.
"""
def nodebug(self, cls):
"""Delete logging entries from the registry."""
clsid = cls._reg_clsid_
try:
            _debug('DeleteKey(%s\\CLSID\\%s\\Logging)' % \
(_explain(winreg.HKEY_CLASSES_ROOT), clsid))
hkey = winreg.OpenKey(winreg.HKEY_CLASSES_ROOT, r"CLSID\%s" % clsid)
winreg.DeleteKey(hkey, "Logging")
except WindowsError as detail:
if get_winerror(detail) != 2:
raise
def debug(self, cls, levels, format):
"""Write entries in the registry to setup logging for this clsid."""
# handlers
# format
clsid = cls._reg_clsid_
        _debug('CreateKey(%s\\CLSID\\%s\\Logging)' % \
(_explain(winreg.HKEY_CLASSES_ROOT), clsid))
hkey = winreg.CreateKey(winreg.HKEY_CLASSES_ROOT, r"CLSID\%s\Logging" % clsid)
for item in levels:
name, value = item.split("=")
v = getattr(logging, value)
assert isinstance(v, int)
_debug('SetValueEx(levels, %s)' % levels)
winreg.SetValueEx(hkey, "levels", None, winreg.REG_MULTI_SZ, levels)
if format:
_debug('SetValueEx(format, %s)' % format)
winreg.SetValueEx(hkey, "format", None, winreg.REG_SZ, format)
else:
_debug('DeleteValue(format)')
try:
winreg.DeleteValue(hkey, "format")
except WindowsError as detail:
if get_winerror(detail) != 2:
raise
def register(self, cls, executable=None):
"""Register the COM server class."""
# First, we unregister the object with force=True, to force removal
# of all registry entries, even if we would not write them.
# Second, we create new entries.
# It seems ATL does the same.
mth = getattr(cls, "_register", None)
if mth is not None:
mth(self)
else:
self._unregister(cls, force=True)
self._register(cls, executable)
def _register(self, cls, executable=None):
table = self._registry_entries(cls)
table.sort()
_debug("Registering %s", cls)
for hkey, subkey, valuename, value in table:
_debug ('[%s\\%s]', _explain(hkey), subkey)
_debug('%s="%s"', valuename or "@", value)
k = winreg.CreateKey(hkey, subkey)
winreg.SetValueEx(k, valuename, None, winreg.REG_SZ, str(value))
tlib = getattr(cls, "_reg_typelib_", None)
if tlib is not None:
if hasattr(sys, "frozendllhandle"):
dll = self._get_serverdll()
_debug("LoadTypeLibEx(%s, REGKIND_REGISTER)", dll)
LoadTypeLibEx(dll, REGKIND_REGISTER)
else:
if executable:
path = executable
elif hasattr(sys, "frozen"):
path = sys.executable
else:
path = cls._typelib_path_
_debug("LoadTypeLibEx(%s, REGKIND_REGISTER)", path)
LoadTypeLibEx(path, REGKIND_REGISTER)
_debug("Done")
def unregister(self, cls, force=False):
"""Unregister the COM server class."""
mth = getattr(cls, "_unregister", None)
if mth is not None:
mth(self)
else:
self._unregister(cls, force=force)
def _unregister(self, cls, force=False):
# If force==False, we only remove those entries that we
# actually would have written. It seems ATL does the same.
table = [t[:2] for t in self._registry_entries(cls)]
# only unique entries
table = list(set(table))
table.sort()
table.reverse()
_debug("Unregister %s", cls)
for hkey, subkey in table:
try:
if force:
_debug("SHDeleteKey %s\\%s", _explain(hkey), subkey)
SHDeleteKey(hkey, subkey)
else:
_debug("DeleteKey %s\\%s", _explain(hkey), subkey)
winreg.DeleteKey(hkey, subkey)
except WindowsError as detail:
if get_winerror(detail) != 2:
raise
tlib = getattr(cls, "_reg_typelib_", None)
if tlib is not None:
try:
_debug("UnRegisterTypeLib(%s, %s, %s)", *tlib)
UnRegisterTypeLib(*tlib)
except WindowsError as detail:
if not get_winerror(detail) in (TYPE_E_REGISTRYACCESS, TYPE_E_CANTLOADLIBRARY):
raise
_debug("Done")
def _get_serverdll(self):
"""Return the pathname of the dll hosting the COM object."""
handle = getattr(sys, "frozendllhandle", None)
if handle is not None:
buf = create_string_buffer(260)
windll.kernel32.GetModuleFileNameA(handle, buf, sizeof(buf))
return buf[:]
import _ctypes
return _ctypes.__file__
def _get_full_classname(self, cls):
"""Return <modulename>.<classname> for 'cls'."""
modname = cls.__module__
if modname == "__main__":
modname = os.path.splitext(os.path.basename(sys.argv[0]))[0]
return "%s.%s" % (modname, cls.__name__)
def _get_pythonpath(self, cls):
"""Return the filesystem path of the module containing 'cls'."""
modname = cls.__module__
dirname = os.path.dirname(sys.modules[modname].__file__)
return os.path.abspath(dirname)
def _registry_entries(self, cls):
"""Return a sequence of tuples containing registry entries.
The tuples must be (key, subkey, name, value).
Required entries:
=================
_reg_clsid_ - a string or GUID instance
_reg_clsctx_ - server type(s) to register
Optional entries:
=================
_reg_desc_ - a string
_reg_progid_ - a string naming the progid, typically 'MyServer.MyObject.1'
        _reg_novers_progid_ - version independent progid, typically 'MyServer.MyObject'
        _reg_typelib_ - a tuple (libid, majorversion, minorversion) specifying a typelib.
_reg_threading_ - a string specifying the threading model
Note that the first part of the progid string is typically the
IDL library name of the type library containing the coclass.
"""
HKCR = winreg.HKEY_CLASSES_ROOT
# table format: rootkey, subkey, valuename, value
table = []
append = lambda *args: table.append(args)
# basic entry - names the comobject
reg_clsid = str(cls._reg_clsid_) # that's the only required attribute for registration
reg_desc = getattr(cls, "_reg_desc_", "")
if not reg_desc:
# Simple minded algorithm to construct a description from
# the progid:
reg_desc = getattr(cls, "_reg_novers_progid_", "") or \
getattr(cls, "_reg_progid_", "")
if reg_desc:
reg_desc = reg_desc.replace(".", " ")
append(HKCR, "CLSID\\%s" % reg_clsid, "", reg_desc)
reg_progid = getattr(cls, "_reg_progid_", None)
if reg_progid:
# for ProgIDFromCLSID:
append(HKCR, "CLSID\\%s\\ProgID" % reg_clsid, "", reg_progid) # 1
# for CLSIDFromProgID
if reg_desc:
append(HKCR, reg_progid, "", reg_desc) # 2
append(HKCR, "%s\\CLSID" % reg_progid, "", reg_clsid) # 3
reg_novers_progid = getattr(cls, "_reg_novers_progid_", None)
if reg_novers_progid:
append(HKCR, "CLSID\\%s\\VersionIndependentProgID" % reg_clsid, # 1a
"", reg_novers_progid)
if reg_desc:
append(HKCR, reg_novers_progid, "", reg_desc) # 2a
append(HKCR, "%s\\CurVer" % reg_novers_progid, "", reg_progid) #
append(HKCR, "%s\\CLSID" % reg_novers_progid, "", reg_clsid) # 3a
clsctx = getattr(cls, "_reg_clsctx_", 0)
if clsctx & comtypes.CLSCTX_LOCAL_SERVER \
and not hasattr(sys, "frozendllhandle"):
exe = sys.executable
if " " in exe:
exe = '"%s"' % exe
if not hasattr(sys, "frozen"):
if not __debug__:
exe = "%s -O" % exe
script = os.path.abspath(sys.modules[cls.__module__].__file__)
if " " in script:
script = '"%s"' % script
append(HKCR, "CLSID\\%s\\LocalServer32" % reg_clsid, "", "%s %s" % (exe, script))
else:
append(HKCR, "CLSID\\%s\\LocalServer32" % reg_clsid, "", "%s" % exe)
# Register InprocServer32 only when run from script or from
# py2exe dll server, not from py2exe exe server.
if clsctx & comtypes.CLSCTX_INPROC_SERVER \
and getattr(sys, "frozen", None) in (None, "dll"):
append(HKCR, "CLSID\\%s\\InprocServer32" % reg_clsid,
"", self._get_serverdll())
# only for non-frozen inproc servers the PythonPath/PythonClass is needed.
if not hasattr(sys, "frozendllhandle") \
or not comtypes.server.inprocserver._clsid_to_class:
append(HKCR, "CLSID\\%s\\InprocServer32" % reg_clsid,
"PythonClass", self._get_full_classname(cls))
append(HKCR, "CLSID\\%s\\InprocServer32" % reg_clsid,
"PythonPath", self._get_pythonpath(cls))
reg_threading = getattr(cls, "_reg_threading_", None)
if reg_threading is not None:
append(HKCR, "CLSID\\%s\\InprocServer32" % reg_clsid,
"ThreadingModel", reg_threading)
reg_tlib = getattr(cls, "_reg_typelib_", None)
if reg_tlib is not None:
append(HKCR, "CLSID\\%s\\Typelib" % reg_clsid, "", reg_tlib[0])
return table
################################################################
def register(cls):
Registrar().register(cls)
def unregister(cls):
Registrar().unregister(cls)
def UseCommandLine(*classes):
usage = """Usage: %s [-regserver] [-unregserver] [-nodebug] [-f logformat] [-l loggername=level]""" % sys.argv[0]
opts, args = w_getopt.w_getopt(sys.argv[1:],
"regserver unregserver embedding l: f: nodebug")
if not opts:
sys.stderr.write(usage + "\n")
return 0 # nothing for us to do
levels = []
format = None
nodebug = False
runit = False
for option, value in opts:
if option == "regserver":
for cls in classes:
register(cls)
elif option == "unregserver":
for cls in classes:
unregister(cls)
elif option == "embedding":
runit = True
elif option == "f":
format = value
elif option == "l":
levels.append(value)
elif option == "nodebug":
nodebug = True
if levels or format is not None:
for cls in classes:
Registrar().debug(cls, levels, format)
if nodebug:
for cls in classes:
Registrar().nodebug(cls)
if runit:
import comtypes.server.localserver
comtypes.server.localserver.run(classes)
return 1 # we have done something
if __name__ == "__main__":
UseCommandLine()
| 37.582447
| 117
| 0.594792
|
ff87a5ed8957bf4b99be832a67dedb25dda29729
| 7,872
|
py
|
Python
|
rl-ros-agents/rl_ros_agents/env_wappers/arena2dEnv.py
|
Sirupli/arena2D
|
2214754fe8e9358fa8065be5187d73104949dc4f
|
[
"MIT"
] | null | null | null |
rl-ros-agents/rl_ros_agents/env_wappers/arena2dEnv.py
|
Sirupli/arena2D
|
2214754fe8e9358fa8065be5187d73104949dc4f
|
[
"MIT"
] | 2
|
2020-09-28T17:29:09.000Z
|
2020-10-26T14:48:57.000Z
|
rl-ros-agents/rl_ros_agents/env_wappers/arena2dEnv.py
|
Sirupli/arena2D
|
2214754fe8e9358fa8065be5187d73104949dc4f
|
[
"MIT"
] | null | null | null |
from std_msgs.msg import String
import gym
import rospy
from geometry_msgs.msg import Twist
from arena2d_msgs.msg import RosAgentReq, Arena2dResp
from stable_baselines.common.vec_env import SubprocVecEnv
from stable_baselines.bench import Monitor
import numpy as np
import threading
from typing import Union, List, Tuple
import time
from gym import spaces
import os
# namespace of arena settings
NS_SETTING = "/arena_sim/settings/"
def get_arena_envs(use_monitor=True, log_dir=None):
    # num_envs should be set in the file settings.st in the arena2d-sim folder
num_envs = rospy.get_param(NS_SETTING + "num_envs")
if log_dir is None:
logs_file_names = [None] * num_envs
else:
logs_file_names = [os.path.join(log_dir, f"arena_env_{i}") for i in range(num_envs)]
if use_monitor:
return SubprocVecEnv([lambda i=i: Monitor(Arena2dEnvWrapper(i), logs_file_names[i]) for i in range(num_envs)])
return SubprocVecEnv([lambda i=i: Arena2dEnvWrapper(i) for i in range(num_envs)])
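# A minimal, hypothetical training sketch (assumes the arena2d simulator is already
# running and that stable_baselines' PPO2 is the agent of choice; the log directory
# is illustrative):
#
#   from stable_baselines import PPO2
#
#   envs = get_arena_envs(use_monitor=True, log_dir="./logs")
#   model = PPO2("MlpPolicy", envs, verbose=1)
#   model.learn(total_timesteps=100000)
#   envs.close()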
class Arena2dEnvWrapper(gym.Env):
def __init__(self, idx_env, is_action_space_discrete=False):
super().__init__()
self._idx_env = idx_env
self._is_action_space_discrete = is_action_space_discrete
self._action_discrete_list = ["forward", "left", "right", "strong_left", "strong_right", "backward", "stop"]
self._action_discrete_map = {
"forward": [0.2, 0],
"left": [0.15, 0.75],
"right": [0.15, -0.75],
"strong_left": [0, 1.5],
"strong_right": [0, -1.5],
"backward": [-0.1, 0],
"stop": [0, 0]
}
rospy.init_node("arena_ros_agent_env_{:02d}".format(idx_env), anonymous=True, log_level=rospy.INFO)
self._setSubPub()
        # used to let the main thread know that a response has arrived (it is received in another thread)
self.response_con = threading.Condition()
self.resp_received = False
# the following variables will be set on invoking the _arena2dRespCallback
self.obs = None
self.reward = None
self.done = None
self.info = None
        self._set_action_observation_space()
    def _set_action_observation_space(self):
action_space_lower_limit = rospy.get_param(NS_SETTING + "action_space_lower_limit")
action_space_upper_limit = rospy.get_param(NS_SETTING + "action_space_upper_limit")
num_beam = rospy.get_param(NS_SETTING + "observation_space_num_beam")
        observation_space_upper_limit = rospy.get_param(NS_SETTING + "observation_space_upper_limit")
if not self._is_action_space_discrete:
self.action_space = spaces.Box(low=np.array(action_space_lower_limit),
high=np.array(action_space_upper_limit) * 3, dtype=np.float)
else:
self.action_space = spaces.Discrete(len(self._action_discrete_list))
        self.observation_space = spaces.Box(low=0, high=observation_space_upper_limit,
shape=(1, num_beam+2), dtype=np.float)
def step(self, action):
rospy.logdebug("step in")
self._pubRosAgentReq(action, env_reset=False)
        # get the observations from the arena simulator
with self.response_con:
while not self.resp_received:
self.response_con.wait(0.5)
if not self.resp_received:
rospy.logerr(
f"Environement wrapper [{self._idx_env}] didn't get the feedback within 0.5s from arena simulator after sending action")
break
self.resp_received = False
rospy.logdebug("step out")
return self.obs, self.reward, self.done, self.info
def reset(self):
self._pubRosAgentReq(env_reset=True)
with self.response_con:
while not self.resp_received:
self.response_con.wait(0.5)
if not self.resp_received:
rospy.logerr(
f"Environement wrapper [{self._idx_env}] didn't get the feedback within 0.5s from arena simulator after sending reset command")
break
self.resp_received = False
return self.obs
def close(self):
rospy.loginfo(f"env[{self._idx_env}] closed")
self._pubRosAgentReq(env_close=True)
def _setSubPub(self):
namespace = "arena2d/env_{:d}/".format(self._idx_env)
# publisher
self._ros_agent_pub = rospy.Publisher(namespace + "request", RosAgentReq, queue_size=1, tcp_nodelay=True)
rospy.loginfo("env[{:d}] wrapper waiting for arena-2d simulator to connect!".format(self._idx_env))
times = 0
# subscriber
        # According to testing, enabling tcp_nodelay can double the performance
self._arena2d_sub = rospy.Subscriber(namespace + "response", Arena2dResp,
self._arena2dRespCallback, tcp_nodelay=True)
        # Give rospy enough time to establish the connection; without this,
        # messages published at the beginning could be lost.
while self._ros_agent_pub.get_num_connections() == 0 or self._arena2d_sub.get_num_connections() == 0:
time.sleep(0.1)
times += 1
rospy.loginfo("env[{:d}] connected with arena-2d simulator, took {:3.1f}s.".format(self._idx_env, .1 * times))
# time.sleep(1)
def _pubRosAgentReq(self, action: Union[List, Tuple, RosAgentReq] = None, env_reset: bool = False, env_close: bool = False):
req_msg = RosAgentReq()
if env_close:
req_msg.arena2d_sim_close = True
# reset environment
elif env_reset:
req_msg.env_reset = True
else:
req_msg.env_reset = False
if not self._is_action_space_discrete:
assert isinstance(action, (list, tuple, np.ndarray)) and len(
                action) == 2, "Type of action must be one of (list, tuple, numpy.ndarray) and its length must be 2; the current type of action is '{}'".format(type(action))
req_msg.action.linear = action[0]
req_msg.action.angular = action[1]
else:
action_name = self._action_discrete_list[action]
req_msg.action.linear = self._action_discrete_map[action_name][0]
req_msg.action.angular = self._action_discrete_map[action_name][1]
self._ros_agent_pub.publish(req_msg)
rospy.logdebug("send action")
def _arena2dRespCallback(self, resp: Arena2dResp):
rospy.logdebug("received response")
with self.response_con:
obs = resp.observation.ranges
goal_distance_angle = resp.goal_pos
            # In the current settings the observation contains not only the laser scan but also the relative distance and angle to the goal position.
self.obs = np.array(obs + goal_distance_angle).reshape([1, -1])
# print("obs:"+obs.__str__()+" gda: "+goal_distance_angle.__str__())
self.reward = resp.reward
self.done = resp.done
self.info = dict(mean_reward=resp.mean_reward, mean_success=resp.mean_success)
self.resp_received = True
self.response_con.notify()
if __name__ == "__main__":
    # Comment out rospy.init_node in Arena2dEnvWrapper.__init__ for this test.
rospy.init_node("test")
def test_step(idx_env):
env = Arena2dEnvWrapper(idx_env)
action = [1, 0]
_, reward, _, _ = env.step(action)
# env.reset()
print("env: {:d} reward {}".format(idx_env, reward))
for i in range(4):
t = threading.Thread(target=test_step, args=(i,))
t.start()
| 44.474576
| 176
| 0.638338
|
a1ca8d56de4b84c0c6fa99292f1d45827a2b3576
| 2,301
|
py
|
Python
|
docs/conf.py
|
proyecto-cema/cema-django
|
46835121bc93bf2e8a0b82c9fb375c1ec1c81933
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
proyecto-cema/cema-django
|
46835121bc93bf2e8a0b82c9fb375c1ec1c81933
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
proyecto-cema/cema-django
|
46835121bc93bf2e8a0b82c9fb375c1ec1c81933
|
[
"MIT"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
import django
if os.getenv("READTHEDOCS", default=False) == "True":
sys.path.insert(0, os.path.abspath(".."))
os.environ["DJANGO_READ_DOT_ENV_FILE"] = "True"
os.environ["USE_DOCKER"] = "no"
else:
sys.path.insert(0, os.path.abspath("/app"))
os.environ["DATABASE_URL"] = "sqlite:///readthedocs.db"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
django.setup()
# -- Project information -----------------------------------------------------
project = "Como Esta Mi Animal"
copyright = """2021, Francisco de Maussion"""
author = " Francisco de Maussion"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
| 36.52381
| 79
| 0.66797
|
e41d483d5d98634804cbe3f4ffb8922ef7bcf0d9
| 5,050
|
py
|
Python
|
yolo.py
|
datng-dev/Vehicle-Counting
|
f5438767237629d92da7203144f93f380bd38a25
|
[
"MIT"
] | 1
|
2022-01-31T12:48:51.000Z
|
2022-01-31T12:48:51.000Z
|
yolo.py
|
datng-dev/Vehicle-Counting
|
f5438767237629d92da7203144f93f380bd38a25
|
[
"MIT"
] | null | null | null |
yolo.py
|
datng-dev/Vehicle-Counting
|
f5438767237629d92da7203144f93f380bd38a25
|
[
"MIT"
] | 1
|
2022-01-28T21:28:23.000Z
|
2022-01-28T21:28:23.000Z
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Run a YOLO_v3 style detection model on test images.
"""
import colorsys
import os
import random
from timeit import time
from timeit import default_timer as timer ### to calculate FPS
import numpy as np
from keras import backend as K
from keras.models import load_model
from PIL import Image, ImageFont, ImageDraw
from yolo3.model import yolo_eval
from yolo3.utils import letterbox_image
class YOLO(object):
def __init__(self):
self.model_path = 'model_data/yolo.h5'
self.anchors_path = 'model_data/yolo_anchors.txt'
self.classes_path = 'model_data/coco_classes.txt'
self.score = 0.5
self.iou = 0.5
self.class_names = self._get_class()
self.anchors = self._get_anchors()
self.sess = K.get_session()
self.model_image_size = (416, 416) # fixed size or (None, None)
self.is_fixed_size = self.model_image_size != (None, None)
self.boxes, self.scores, self.classes = self.generate()
def _get_class(self):
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def _get_anchors(self):
anchors_path = os.path.expanduser(self.anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
anchors = np.array(anchors).reshape(-1, 2)
return anchors
def generate(self):
model_path = os.path.expanduser(self.model_path)
assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
self.yolo_model = load_model(model_path, compile=False)
print('{} model, anchors, and classes loaded.'.format(model_path))
# Generate colors for drawing bounding boxes.
hsv_tuples = [(x / len(self.class_names), 1., 1.)
for x in range(len(self.class_names))]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
self.colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
self.colors))
random.seed(10101) # Fixed seed for consistent colors across runs.
random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.
random.seed(None) # Reset seed to default.
# Generate output tensor targets for filtered bounding boxes.
self.input_image_shape = K.placeholder(shape=(2, ))
boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
len(self.class_names), self.input_image_shape,
score_threshold=self.score, iou_threshold=self.iou)
return boxes, scores, classes
def detect_image(self, image):
if self.is_fixed_size:
assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'
assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'
boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
else:
new_image_size = (image.width - (image.width % 32),
image.height - (image.height % 32))
boxed_image = letterbox_image(image, new_image_size)
image_data = np.array(boxed_image, dtype='float32')
#print(image_data.shape)
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
out_boxes, out_scores, out_classes = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict={
self.yolo_model.input: image_data,
self.input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
return_boxs = []
return_class_name =[]
for i, c in reversed(list(enumerate(out_classes))):
predicted_class = self.class_names[c]
list3 = ["motorbike","car", "truck","bus"]
for pp in list3:
if predicted_class != pp :
continue
box = out_boxes[i]
if pp == 'motorbike':
c= int(1)
elif pp == 'car':
c= int(2)
elif pp == 'truck':
c= int(3)
else:
c= int(4)
score = out_scores[i]
x = int(box[1])
y = int(box[0])
w = int(box[3]-box[1])
h = int(box[2]-box[0])
if x < 0 :
w = w + x
x = 0
if y < 0 :
h = h + y
y = 0
return_boxs.append([x,y,w,h,c, score])
# return_class_name.append([predicted_class])
return return_boxs
def close_session(self):
self.sess.close()
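# A minimal, hypothetical usage sketch; "test.jpg" is an illustrative image path,
# and the model, anchor and class files must exist under model_data/ as configured
# in __init__ above.
if __name__ == "__main__":
    test_image = Image.open("test.jpg")  # hypothetical test image
    yolo = YOLO()
    # each detection is [x, y, w, h, class_id, score]
    for detection in yolo.detect_image(test_image):
        print(detection)
    yolo.close_session()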
| 37.686567
| 88
| 0.571089
|