| code (string, lengths 2-1.05M) | repo_name (string, lengths 5-104) | path (string, lengths 4-251) | language (1 class) | license (15 values) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for LatestBlessedModelStrategy."""
import tensorflow as tf
from tfx import types
from tfx.components.model_validator import constants as model_validator
from tfx.dsl.input_resolution.strategies import latest_blessed_model_strategy
from tfx.orchestration import metadata
from tfx.types import standard_artifacts
from tfx.utils import test_case_utils
from ml_metadata.proto import metadata_store_pb2
class LatestBlessedModelStrategyTest(test_case_utils.TfxTest):
def setUp(self):
super().setUp()
self._connection_config = metadata_store_pb2.ConnectionConfig()
self._connection_config.sqlite.SetInParent()
self._metadata = self.enter_context(
metadata.Metadata(connection_config=self._connection_config))
self._store = self._metadata.store
def _set_model_blessing_bit(self, artifact: types.Artifact, model_id: int,
is_blessed: int):
artifact.mlmd_artifact.custom_properties[
model_validator.ARTIFACT_PROPERTY_BLESSED_KEY].int_value = is_blessed
artifact.mlmd_artifact.custom_properties[
model_validator
.ARTIFACT_PROPERTY_CURRENT_MODEL_ID_KEY].int_value = model_id
def testStrategy(self):
# Model with id 1, will be blessed.
model_one = standard_artifacts.Model()
model_one.uri = 'model_one'
model_one.id = 1
# Model with id 2, will be blessed.
model_two = standard_artifacts.Model()
model_two.uri = 'model_two'
model_two.id = 2
# Model with id 3, will not be blessed.
model_three = standard_artifacts.Model()
model_three.uri = 'model_three'
model_three.id = 3
model_blessing_one = standard_artifacts.ModelBlessing()
self._set_model_blessing_bit(model_blessing_one, model_one.id, 1)
model_blessing_two = standard_artifacts.ModelBlessing()
self._set_model_blessing_bit(model_blessing_two, model_two.id, 1)
strategy = latest_blessed_model_strategy.LatestBlessedModelStrategy()
result = strategy.resolve_artifacts(
self._store, {
'model': [model_one, model_two, model_three],
'model_blessing': [model_blessing_one, model_blessing_two]
})
self.assertIsNotNone(result)
self.assertEqual([a.uri for a in result['model']], ['model_two'])
if __name__ == '__main__':
tf.test.main()
| tensorflow/tfx | tfx/dsl/input_resolution/strategies/latest_blessed_model_strategy_test.py | Python | apache-2.0 | 2,902 |
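The final assertion above holds because the strategy pairs each Model with the ModelBlessing whose current-model-id property points back at it, then keeps only the newest blessed model. A framework-free sketch of that selection logic (plain dicts and a hypothetical `latest_blessed` helper stand in for MLMD artifacts; this is not TFX's actual implementation):

```python
# Illustrative sketch only: plain dicts stand in for MLMD artifacts.
def latest_blessed(models, blessings):
    """Return the newest model whose id appears in a blessing marked blessed=1."""
    blessed_ids = {b["current_model_id"] for b in blessings if b["blessed"] == 1}
    candidates = [m for m in models if m["id"] in blessed_ids]
    return max(candidates, key=lambda m: m["id"]) if candidates else None

models = [{"id": 1, "uri": "model_one"},
          {"id": 2, "uri": "model_two"},
          {"id": 3, "uri": "model_three"}]
blessings = [{"current_model_id": 1, "blessed": 1},
             {"current_model_id": 2, "blessed": 1}]
assert latest_blessed(models, blessings)["uri"] == "model_two"
```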
"""One-time script for extracting all the cat and dog images from CIFAR-10."""
import cPickle
import numpy as np
from PIL import Image
TRAIN_FILES = ['cifar-10-batches-py/data_batch_%d' % i for i in range(1,6)]
TEST_FILE = 'test_batch'
CAT_INPUT_LABEL = 3
DOG_INPUT_LABEL = 5
CAT_OUTPUT_LABEL = 1
DOG_OUTPUT_LABEL = 0
def unpickle(path):
    with open(path, 'rb') as fo:
        batch = cPickle.load(fo)
    return batch
data = []
# Count number of cats/dogs
num_cats = 0
num_dogs = 0
for data_file in TRAIN_FILES:
d = unpickle(data_file)
data.append(d)
for label in d['labels']:
if label == CAT_INPUT_LABEL:
num_cats += 1
if label == DOG_INPUT_LABEL:
num_dogs += 1
# Copy the cats/dogs into new array
images = np.empty((num_cats + num_dogs, 32, 32, 3), dtype=np.uint8)
labels = np.empty((num_cats + num_dogs), dtype=np.uint8)
index = 0
for data_batch in data:
for batch_index, label in enumerate(data_batch['labels']):
if label == CAT_INPUT_LABEL or label == DOG_INPUT_LABEL:
# Data is stored in B x 3072 format, convert to B' x 32 x 32 x 3
images[index, :, :, :] = np.transpose(
np.reshape(data_batch['data'][batch_index, :],
newshape=(3, 32, 32)),
axes=(1, 2, 0))
if label == CAT_INPUT_LABEL:
labels[index] = CAT_OUTPUT_LABEL
else:
labels[index] = DOG_OUTPUT_LABEL
index += 1
np.save('catdog_data.npy', {'images': images, 'labels': labels})
# Make sure images look correct
img = Image.fromarray(images[10, :, :, :])
img.show()
| random-forests/tensorflow-workshop | archive/extras/cat_dog_estimator/extract_cats_dogs.py | Python | apache-2.0 | 1,545 |
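Because `np.save` above is handed a plain dict, NumPy stores it as a zero-dimensional object array, so recent NumPy versions need `allow_pickle=True` plus `.item()` to recover the dict. A short consumer sketch (file name taken from the script above):

```python
import numpy as np

# Load the dict written by the extraction script above.
data = np.load('catdog_data.npy', allow_pickle=True).item()
images, labels = data['images'], data['labels']
assert images.shape[1:] == (32, 32, 3)   # HWC uint8 images
assert set(labels.tolist()) <= {0, 1}    # 1 = cat, 0 = dog per the script's constants
```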
from copy import deepcopy
from datetime import datetime
from datetime import timezone
from email.utils import mktime_tz
from email.utils import parsedate_tz
from io import BytesIO
from itertools import chain
from mimetypes import guess_type
from typing import Callable
from typing import Iterable
from typing import List
from typing import Optional
from typing import Tuple
from bs4 import BeautifulSoup
from PIL import Image
from pyzmail import PyzMessage
from pyzmail.parse import MailPart
from requests import Response
from requests import get as http_get
from opwen_email_server.config import MAX_HEIGHT_IMAGES
from opwen_email_server.config import MAX_WIDTH_IMAGES
from opwen_email_server.utils.log import LogMixin
from opwen_email_server.utils.serialization import to_base64
def _parse_body(message: PyzMessage, default_charset: str = 'ascii') -> str:
body_parts = (message.html_part, message.text_part)
for part in body_parts:
if part is None:
continue
payload = part.get_payload()
if payload is None:
continue
charset = part.charset or default_charset
return payload.decode(charset, errors='replace')
return ''
def _parse_attachments(mailparts: Iterable[MailPart]) -> Iterable[dict]:
attachment_parts = (part for part in mailparts if not part.is_body)
for part in attachment_parts:
filename = part.sanitized_filename
payload = part.get_payload()
attachment_id = part.content_id
if filename and payload:
attachment = {'filename': filename, 'content': payload}
if attachment_id:
attachment['cid'] = attachment_id
yield attachment
def _parse_addresses(message: PyzMessage, address_type: str) -> List[str]:
return sorted(email for _, email in message.get_addresses(address_type) if email)
def _parse_address(message: PyzMessage, address_type: str) -> Optional[str]:
return next(iter(_parse_addresses(message, address_type)), None)
def _parse_sent_at(message: PyzMessage) -> Optional[str]:
rfc_822 = message.get_decoded_header('date')
if not rfc_822:
return None
date_tz = parsedate_tz(rfc_822)
if not date_tz:
return None
timestamp = mktime_tz(date_tz)
# noinspection PyUnresolvedReferences
date_utc = datetime.fromtimestamp(timestamp, timezone.utc)
return date_utc.strftime('%Y-%m-%d %H:%M')
def parse_mime_email(mime_email: str) -> dict:
message = PyzMessage.factory(mime_email)
return {
'sent_at': _parse_sent_at(message),
'to': _parse_addresses(message, 'to'),
'cc': _parse_addresses(message, 'cc'),
'bcc': _parse_addresses(message, 'bcc'),
'from': _parse_address(message, 'from'),
'subject': message.get_subject(),
'body': _parse_body(message),
'attachments': list(_parse_attachments(message.mailparts)),
}
def format_attachments(email: dict) -> dict:
attachments = email.get('attachments', [])
if not attachments:
return email
formatted_attachments = deepcopy(attachments)
is_any_attachment_changed = False
for i, attachment in enumerate(attachments):
filename = attachment.get('filename', '')
content = attachment.get('content', b'')
formatted_content = _format_attachment(filename, content)
if content != formatted_content:
formatted_attachments[i]['content'] = formatted_content
is_any_attachment_changed = True
if not is_any_attachment_changed:
return email
new_email = dict(email)
new_email['attachments'] = formatted_attachments
return new_email
def _format_attachment(filename: str, content: bytes) -> bytes:
attachment_type = guess_type(filename)[0]
if not attachment_type:
return content
if 'image' in attachment_type.lower():
content = _change_image_size(content)
return content
def get_recipients(email: dict) -> Iterable[str]:
return chain(email.get('to') or [], email.get('cc') or [], email.get('bcc') or [])
def get_domains(email: dict) -> Iterable[str]:
return frozenset(get_domain(address) for address in get_recipients(email))
def get_domain(address: str) -> str:
return address.split('@')[-1]
def ensure_has_sent_at(email: dict):
if not email.get('sent_at'):
email['sent_at'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M')
def _get_image_type(response: Response, url: str) -> Optional[str]:
content_type = response.headers.get('Content-Type')
if not content_type:
content_type = guess_type(url)[0]
return content_type
def _is_already_small(size: Tuple[int, int]) -> bool:
width, height = size
return width <= MAX_WIDTH_IMAGES and height <= MAX_HEIGHT_IMAGES
def _change_image_size(image_content_bytes: bytes) -> bytes:
image_bytes = BytesIO(image_content_bytes)
image_bytes.seek(0)
image = Image.open(image_bytes)
if _is_already_small(image.size):
return image_content_bytes
new_size = (MAX_WIDTH_IMAGES, MAX_HEIGHT_IMAGES)
    image.thumbnail(new_size, Image.LANCZOS)
new_image = BytesIO()
image.save(new_image, image.format)
new_image.seek(0)
new_image_bytes = new_image.read()
return new_image_bytes
def _fetch_image_to_base64(image_url: str) -> Optional[str]:
response = http_get(image_url)
if not response.ok:
return None
image_type = _get_image_type(response, image_url)
if not image_type:
return None
if not response.content:
return None
small_image_bytes = _change_image_size(response.content)
small_image_base64 = to_base64(small_image_bytes)
return f'data:{image_type};base64,{small_image_base64}'
def _is_valid_url(url: Optional[str]) -> bool:
if not url:
return False
has_http_prefix = url.startswith('http://')
has_https_prefix = url.startswith('https://')
return has_http_prefix or has_https_prefix
def format_inline_images(email: dict, on_error: Callable) -> dict:
email_body = email.get('body', '')
if not email_body:
return email
soup = BeautifulSoup(email_body, 'html.parser')
image_tags = soup.find_all('img')
if not image_tags:
return email
for image_tag in image_tags:
image_url = image_tag.get('src')
if not _is_valid_url(image_url):
continue
try:
encoded_image = _fetch_image_to_base64(image_url)
except Exception as ex:
on_error('Unable to inline image %s: %s', image_url, ex)
else:
if encoded_image:
image_tag['src'] = encoded_image
new_email = dict(email)
new_email['body'] = str(soup)
return new_email
class MimeEmailParser(LogMixin):
def __call__(self, mime_email: str) -> dict:
email = parse_mime_email(mime_email)
email = format_attachments(email)
email = format_inline_images(email, self.log_warning)
return email
| ascoderu/opwen-cloudserver | opwen_email_server/utils/email_parser.py | Python | apache-2.0 | 7,047 |
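A quick way to exercise the module above is to hand `parse_mime_email` a minimal RFC 822 message; the expected keys follow the dict built by `parse_mime_email`. This assumes `opwen_email_server` and its `pyzmail` dependency are importable:

```python
# Usage sketch, assuming the package above is installed.
from opwen_email_server.utils.email_parser import parse_mime_email

mime = (
    "From: alice@example.com\r\n"
    "To: bob@example.org\r\n"
    "Subject: hello\r\n"
    "Date: Mon, 01 Jan 2018 10:00:00 +0000\r\n"
    "Content-Type: text/plain; charset=utf-8\r\n"
    "\r\n"
    "Hi Bob!\r\n"
)
email = parse_mime_email(mime)
assert email['from'] == 'alice@example.com'
assert email['to'] == ['bob@example.org']
assert email['subject'] == 'hello'
assert email['sent_at'] == '2018-01-01 10:00'
```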
# -*- coding: utf-8 -*-
# Copyright (C) 2013 Rackspace Hosting Inc. All Rights Reserved.
# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from taskflow import exceptions
from taskflow.utils import misc
from taskflow.utils import reflection
LOG = logging.getLogger(__name__)
def _save_as_to_mapping(save_as):
"""Convert save_as to mapping name => index.
Result should follow storage convention for mappings.
"""
# TODO(harlowja): we should probably document this behavior & convention
    # outside of code so that it's more easily understandable, since what an
# atom returns is pretty crucial for other later operations.
if save_as is None:
return {}
if isinstance(save_as, six.string_types):
# NOTE(harlowja): this means that your atom will only return one item
        # instead of a dictionary-like object or an indexable object (like a
# list or tuple).
return {save_as: None}
elif isinstance(save_as, (tuple, list)):
        # NOTE(harlowja): this means that your atom will return an indexable
# object, like a list or tuple and the results can be mapped by index
# to that tuple/list that is returned for others to use.
return dict((key, num) for num, key in enumerate(save_as))
elif isinstance(save_as, set):
# NOTE(harlowja): in the case where a set is given we will not be
# able to determine the numeric ordering in a reliable way (since it is
        # an unordered set) so the only way for us to easily map the result of
# the atom will be via the key itself.
return dict((key, key) for key in save_as)
raise TypeError('Task provides parameter '
'should be str, set or tuple/list, not %r' % save_as)
def _build_rebind_dict(args, rebind_args):
"""Build a argument remapping/rebinding dictionary.
This dictionary allows an atom to declare that it will take a needed
requirement bound to a given name with another name instead (mapping the
new name onto the required name).
"""
if rebind_args is None:
return {}
elif isinstance(rebind_args, (list, tuple)):
rebind = dict(zip(args, rebind_args))
if len(args) < len(rebind_args):
rebind.update((a, a) for a in rebind_args[len(args):])
return rebind
elif isinstance(rebind_args, dict):
return rebind_args
else:
raise TypeError('Invalid rebind value: %s' % rebind_args)
def _build_arg_mapping(task_name, reqs, rebind_args, function, do_infer):
"""Given a function, its requirements and a rebind mapping this helper
function will build the correct argument mapping for the given function as
well as verify that the final argument mapping does not have missing or
extra arguments (where applicable).
"""
task_args = reflection.get_callable_args(function, required_only=True)
result = {}
if reqs:
result.update((a, a) for a in reqs)
if do_infer:
result.update((a, a) for a in task_args)
result.update(_build_rebind_dict(task_args, rebind_args))
if not reflection.accepts_kwargs(function):
all_args = reflection.get_callable_args(function, required_only=False)
extra_args = set(result) - set(all_args)
if extra_args:
extra_args_str = ', '.join(sorted(extra_args))
raise ValueError('Extra arguments given to task %s: %s'
% (task_name, extra_args_str))
# NOTE(imelnikov): don't use set to preserve order in error message
missing_args = [arg for arg in task_args if arg not in result]
if missing_args:
        raise ValueError('Missing arguments for task %s: %s'
                         % (task_name, ', '.join(missing_args)))
return result
class Atom(object):
"""An abstract flow atom that causes a flow to progress (in some manner).
An atom is a named object that operates with input flow data to perform
    some action that furthers the overall flow's progress. It usually also
produces some of its own named output as a result of this process.
"""
def __init__(self, name=None, provides=None):
self._name = name
# An *immutable* output 'resource' name dict this atom
# produces that other atoms may depend on this atom providing.
#
        # Format is {arg_name: output_index}.
self.save_as = _save_as_to_mapping(provides)
        # This identifies the version of the atom to be run, which
# can be useful in resuming older versions of atoms. Standard
# major, minor version semantics apply.
self.version = (1, 0)
def _build_arg_mapping(self, executor, requires=None, rebind=None,
auto_extract=True):
self.rebind = _build_arg_mapping(self.name, requires, rebind,
executor, auto_extract)
out_of_order = self.provides.intersection(self.requires)
if out_of_order:
raise exceptions.InvariantViolation(
"Atom %(item)s provides %(oo)s that are required "
"by this atom"
% dict(item=self.name, oo=sorted(out_of_order)))
@property
def name(self):
return self._name
def __str__(self):
return "%s==%s" % (self.name, misc.get_version_string(self))
@property
def provides(self):
"""Any outputs this atom produces."""
return set(self.save_as)
@property
def requires(self):
"""Any inputs this atom requires to execute."""
return set(self.rebind.values())
| citrix-openstack-build/taskflow | taskflow/atom.py | Python | apache-2.0 | 6,234 |
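The save_as and rebind conventions described in the comments above are easiest to see with concrete values; the snippet below simply replays the same dict/zip expressions on plain data (purely illustrative, no taskflow import):

```python
# _save_as_to_mapping: a tuple/list of names maps each name to its result index.
save_as = ('first', 'second')
assert dict((key, num) for num, key in enumerate(save_as)) == {'first': 0, 'second': 1}

# _build_rebind_dict: positional args pair up with rebind names, and any surplus
# rebind names map to themselves.
args, rebind_args = ['x', 'y'], ['a', 'b', 'c']
rebind = dict(zip(args, rebind_args))
rebind.update((a, a) for a in rebind_args[len(args):])
assert rebind == {'x': 'a', 'y': 'b', 'c': 'c'}
```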
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2013 Canonical Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from nova import exception
from nova import test
from nova.virt.vmwareapi import fake
from nova.virt.vmwareapi import vm_util
class fake_session(object):
def __init__(self, ret=None):
self.ret = ret
def _call_method(self, *args):
return self.ret
class VMwareVMUtilTestCase(test.TestCase):
def setUp(self):
super(VMwareVMUtilTestCase, self).setUp()
fake.reset()
def tearDown(self):
super(VMwareVMUtilTestCase, self).tearDown()
fake.reset()
def test_get_datastore_ref_and_name(self):
result = vm_util.get_datastore_ref_and_name(
fake_session([fake.Datastore()]))
self.assertEquals(result[1], "fake-ds")
self.assertEquals(result[2], 1024 * 1024 * 1024 * 1024)
self.assertEquals(result[3], 1024 * 1024 * 500 * 1024)
def test_get_datastore_ref_and_name_without_datastore(self):
self.assertRaises(exception.DatastoreNotFound,
vm_util.get_datastore_ref_and_name,
fake_session(), host="fake-host")
self.assertRaises(exception.DatastoreNotFound,
vm_util.get_datastore_ref_and_name,
fake_session(), cluster="fake-cluster")
def test_get_host_ref_from_id(self):
fake_host_sys = fake.HostSystem(
fake.ManagedObjectReference("HostSystem", "host-123"))
fake_host_id = fake_host_sys.obj.value
fake_host_name = "ha-host"
ref = vm_util.get_host_ref_from_id(
fake_session([fake_host_sys]), fake_host_id, ['name'])
self.assertIsInstance(ref, fake.HostSystem)
self.assertEqual(fake_host_id, ref.obj.value)
host_name = vm_util.get_host_name_from_host_ref(ref)
self.assertEquals(fake_host_name, host_name)
def test_get_host_name_for_vm(self):
fake_vm = fake.ManagedObject(
"VirtualMachine", fake.ManagedObjectReference(
"vm-123", "VirtualMachine"))
fake_vm.propSet.append(
fake.Property('name', 'vm-123'))
vm_ref = vm_util.get_vm_ref_from_name(
fake_session([fake_vm]), 'vm-123')
self.assertIsNotNone(vm_ref)
fake_results = [
fake.ObjectContent(
None, [
fake.Property('runtime.host',
fake.ManagedObjectReference(
'host-123', 'HostSystem'))
])]
host_id = vm_util.get_host_id_from_vm_ref(
fake_session(fake_results), vm_ref)
self.assertEqual('host-123', host_id)
def test_property_from_property_set(self):
ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
MoRef = collections.namedtuple('Val', ['value'])
results_good = [
ObjectContent(propSet=[
DynamicProperty(name='name', val=MoRef(value='vm-123'))]),
ObjectContent(propSet=[
DynamicProperty(name='foo', val=MoRef(value='bar1')),
DynamicProperty(
name='runtime.host', val=MoRef(value='host-123')),
DynamicProperty(name='foo', val=MoRef(value='bar2')),
]),
ObjectContent(propSet=[
DynamicProperty(
name='something', val=MoRef(value='thing'))]), ]
results_bad = [
ObjectContent(propSet=[
DynamicProperty(name='name', val=MoRef(value='vm-123'))]),
ObjectContent(propSet=[
DynamicProperty(name='foo', val='bar1'),
DynamicProperty(name='foo', val='bar2'), ]),
ObjectContent(propSet=[
DynamicProperty(
name='something', val=MoRef(value='thing'))]), ]
prop = vm_util.property_from_property_set(
'runtime.host', results_good)
self.assertIsNotNone(prop)
value = prop.val.value
self.assertEqual('host-123', value)
prop2 = vm_util.property_from_property_set(
'runtime.host', results_bad)
self.assertIsNone(prop2)
prop3 = vm_util.property_from_property_set('foo', results_good)
self.assertIsNotNone(prop3)
val3 = prop3.val.value
self.assertEqual('bar1', val3)
prop4 = vm_util.property_from_property_set('foo', results_bad)
self.assertIsNotNone(prop4)
self.assertEqual('bar1', prop4.val)
| yrobla/nova | nova/tests/test_vmwareapi_vm_util.py | Python | apache-2.0 | 5,302 |
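test_property_from_property_set above effectively pins down the contract of `vm_util.property_from_property_set`: walk each ObjectContent's propSet in order and return the first property whose name matches, else None. The sketch below is that inferred contract, not nova's actual code:

```python
def property_from_property_set(property_name, property_set):
    """Return the first property with a matching name, scanning propSets in order."""
    for object_content in property_set:
        for prop in getattr(object_content, 'propSet', None) or []:
            if prop.name == property_name:
                return prop
    return None
```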
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import requests
from platformio import exception, util
def test_platformio_cli():
result = util.exec_command(["pio", "--help"])
assert result["returncode"] == 0
assert "Usage: pio [OPTIONS] COMMAND [ARGS]..." in result["out"]
def test_ping_internet_ips():
for host in util.PING_REMOTE_HOSTS:
requests.get("http://%s" % host, allow_redirects=False, timeout=2)
def test_api_internet_offline(without_internet, isolated_pio_home):
with pytest.raises(exception.InternetIsOffline):
util.get_api_result("/stats")
def test_api_cache(monkeypatch, isolated_pio_home):
api_kwargs = {"url": "/stats", "cache_valid": "10s"}
result = util.get_api_result(**api_kwargs)
assert result and "boards" in result
monkeypatch.setattr(util, "_internet_on", lambda: False)
assert util.get_api_result(**api_kwargs) == result
| platformio/platformio | tests/test_misc.py | Python | apache-2.0 | 1,489 |
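test_api_cache above verifies a fetch-once, serve-from-cache-when-offline behaviour: the first `get_api_result` call goes to the network, and after `util._internet_on` is monkeypatched to return False the cached payload must still come back. A generic sketch of that pattern (hypothetical names, not PlatformIO's implementation):

```python
_cache = {}

def get_cached(url, fetch, internet_on):
    """Fetch and cache while online; fall back to the cached payload when offline."""
    if internet_on():
        _cache[url] = fetch(url)
    if url not in _cache:
        raise RuntimeError('offline and nothing cached for %s' % url)
    return _cache[url]

online = True
first = get_cached('/stats', fetch=lambda url: {'boards': ['uno']}, internet_on=lambda: online)
online = False  # plays the role of monkeypatching _internet_on to return False
assert get_cached('/stats', fetch=lambda url: {'boards': ['uno']}, internet_on=lambda: online) == first
```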
../common/cgi_runtests.py
| ankurjimmy/catawampus | tr/vendor/tornado/maint/appengine/py27/cgi_runtests.py | Python | apache-2.0 | 25 |
from PIL import Image
import os.path,os
#import pickle
#import sqlite3
import hashlib
import time
import random
import logging
import copy
import threading
import itertools
from math import ceil
from enum import Enum
from copy import deepcopy
from lipyc.utility import recursion_protect
from lipyc.Version import Versionned
from lipyc.config import *
from lipyc.utility import check_ext, make_thumbnail
from tkinter import messagebox
class Album(Versionned): #subalbums not fully implemented
def __init__(self, id, scheduler, name=None, datetime=None):
super().__init__()
self.scheduler = scheduler
self.id = id
self.name = name
self.datetime = datetime if datetime else time.mktime(time.gmtime())
self.subalbums = set()
self.thumbnail = None
self.files = set() #order by id
self.inner_keys = [] #use for inner albums
def __deepcopy__(self, memo):
new = Album(self.id, self.scheduler, self.name, self.datetime)
new.subalbums = deepcopy(self.subalbums)
new.thumbnail = deepcopy(self.thumbnail)
new.files = deepcopy(self.files)
new.inner_keys = deepcopy(self.inner_keys)
return new
#for copy_to,add_to,move_to
def clone(self, new_id):
alb = self.__deepcopy__(None)
alb.inner_keys.clear()
alb.id = new_id
return alb
def pseudo_clone(self):
new = Album(self.id, self.scheduler, self.name, self.datetime)
if self.thumbnail:
self.scheduler.duplicate(self.thumbnail)
new.subalbums = self.subalbums
new.thumbnail = self.thumbnail
new.files = self.files
return new
def sql(self):
return (self.id, self.name, self.datetime,
'|'.join( [ str(alb.id) for alb in self.subalbums] ), self.thumbnail,
'|'.join( [ str(afile.id) for afile in self.files] ),
'|'.join(self.inner_keys) )
def rename(self, name):
self.name = name
def add_file(self, _file):
self.files.add(_file)
        if self.thumbnail is None and _file.thumbnail:
self.thumbnail = self.scheduler.duplicate_file( _file.thumbnail )
def remove_file(self, _file):
self.files.discard(_file)
@recursion_protect()
def remove_all(self):
for album in list(self.subalbums):
album.remove_all()
self.subalbums.clear()
for _file in list(self.files):
self.remove_file(_file)
self.files.clear()
def add_subalbum(self, album):
self.subalbums.add( album )
def remove_subalbum(self, album):
if album in self.subalbums:
if album.thumbnail :
self.scheduler.remove_file( album.thumbnail )
self.subalbums.discard( album )
@recursion_protect()
def export_to(self, path):
location = os.path.join(path, self.name)
if not os.path.isdir(location):
os.makedirs( location )
for _file in self.files:
_file.export_to(location)
for album in self.subalbums:
album.export_to( location )
@recursion_protect()
def lock_files(self):
for _file in self.files:
_file.io_lock.acquire()
for album in self.subalbums:
album.lock_files()
def set_thumbnail(self, location):
if self.thumbnail :
self.scheduler.remove_file(self.thumbnail)
        if not isinstance(location, str) or check_ext(location, img_exts):  # open file object
self.thumbnail = make_thumbnail(self.scheduler, location )
else:
            self.thumbnail = self.scheduler.add_file(location_album_default)  # size and md5 ought to be computed once and for all
def deep_files(self):
tmp = itertools.chain.from_iterable(map(Album.deep_files, self.subalbums))
return itertools.chain( self.files, tmp)
@recursion_protect(0)
    def __len__(self):  # number of files in dir and subdirs
return len(self.files) + sum( [len(a) for a in self.subalbums ] )
@recursion_protect(0)
def all_albums(self):
return itertools.chain( [self], *list(map( lambda x:x.all_albums(), self.subalbums )) )
@recursion_protect(0)
def all_files(self):
return set(itertools.chain( *list(map(lambda x:x.files, self.all_albums()))))
@recursion_protect(0)
def duplicate(self):
if self.thumbnail:
self.scheduler.duplicate_file(self.thumbnail)
for f in self.files:
f.duplicate()
for alb in self.subalbums:
alb.duplicate()
| severus21/LiPyc | src/Album.py | Python | apache-2.0 | 4,909 |
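Album.deep_files above flattens an album tree lazily with itertools.chain; running the same traversal on a tiny stand-in class shows what the chained iterator yields (illustrative only, no lipyc import):

```python
import itertools

class Node:
    """Minimal stand-in for Album: just .files and .subalbums."""
    def __init__(self, files, subalbums=()):
        self.files, self.subalbums = set(files), set(subalbums)

    def deep_files(self):
        nested = itertools.chain.from_iterable(n.deep_files() for n in self.subalbums)
        return itertools.chain(self.files, nested)

leaf = Node({'c.jpg'})
root = Node({'a.jpg', 'b.jpg'}, {leaf})
assert sorted(root.deep_files()) == ['a.jpg', 'b.jpg', 'c.jpg']
```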
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
from __future__ import division
import numpy as np
import mxnet as mx
import copy
import math
import random
import itertools
from distutils.version import LooseVersion
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
from mxnet.operator import *
from mxnet.base import py_str, MXNetError, _as_list
from common import assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied, assertRaises
from common import xfail_when_nonstandard_decimal_separator, with_environment
import pytest
import os
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
@pytest.mark.serial
def test_rnn_with_new_param():
rnn_modes = ['rnn_relu', 'rnn_tanh', 'gru', 'lstm']
ngates_ = [1, 1, 3, 4]
num_layers, input_size, seq_len, batch_size, state_size = 3, 128, 5, 64, 8
for bidirectional in [False, True]:
directions = 2 if bidirectional else 1
for mode, ngates in zip(rnn_modes, ngates_):
first_layer_size = (input_size * state_size + state_size * state_size + state_size * 2) * ngates
rest_layer_size = (state_size * directions * state_size + state_size * state_size + state_size * 2) \
* ngates * (num_layers - 1)
param_size = (first_layer_size + rest_layer_size) * directions
sym = mx.sym.RNN(mode=mode, num_layers=num_layers, bidirectional=bidirectional,
state_outputs=False, state_size=state_size, name='rnn')
bind_dict = {
'rnn_data': mx.ndarray.random.uniform(low=-1, high=1, shape=(seq_len, batch_size, input_size)),
'rnn_parameters': mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size)),
'rnn_state': mx.ndarray.zeros(shape=(num_layers * directions, batch_size, state_size))
}
if mode == 'lstm':
bind_dict['rnn_state_cell'] = mx.ndarray.zeros(
shape=(num_layers * directions, batch_size, state_size))
ex = sym._bind(default_context(), bind_dict)
ex.forward(is_train=True)
ex01 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex02 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex01, ex02, rtol=1e-2, atol=1e-4)
bind_dict['rnn_parameters'] = mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size))
ex.copy_params_from(bind_dict)
ex.forward(is_train=True)
ex03 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex04 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex03, ex04, rtol=1e-2, atol=1e-4)
@pytest.mark.serial
def test_lstm_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
CX = mx.sym.Variable('state_cell')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_cell=CX,
state_size=H, num_layers=5, mode='lstm', p=0.5, state_outputs=True, name='LSTM')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@pytest.mark.serial
def test_gru_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='gru', p=0.5, state_outputs=True, name='GRU')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@pytest.mark.serial
def test_rnntanh_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_tanh', p=0.5, state_outputs=True, name='RNN_TANH')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@pytest.mark.serial
def test_rnnrelu_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_relu', p=0.5, state_outputs=True, name='RNN_RELU')
exe = rnn._simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
def test_RNN_float64():
if default_context().device_type == 'gpu':
return
sym = mx.sym.RNN(
mx.sym.Variable('in'),
mx.sym.Variable('par'),
mx.sym.Variable('s'),
state_size = (2),
num_layers = 1,
mode = 'rnn_tanh'
)
dtype = 'float64'
explicit_grad = {
'in': mx.nd.ones([2, 1, 2], dtype=dtype),
'par': mx.nd.ones([12], dtype=dtype),
's': mx.nd.ones([1, 1, 2], dtype=dtype)
}
args_grad = explicit_grad
grad_req = 'write'
ex = sym._bind(default_context(),
{
'in': mx.nd.ones([2, 1, 2], dtype=dtype),
'par': mx.nd.ones([12], dtype=dtype),
's': mx.nd.ones([1, 1, 2], dtype=dtype)
},
args_grad = args_grad,
grad_req = grad_req
)
ex.forward()
ex.outputs[0].wait_to_read()
def np_softmax(x, axis=-1, temperature=1.0):
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x/temperature)
x /= np.sum(x, axis=axis, keepdims=True)
return x
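# Worked example for np_softmax above (illustrative): with x = [1.0, 2.0, 3.0] and
# temperature=1.0 the shifted exponentials are exp([-2, -1, 0]) ~= [0.135, 0.368, 1.0],
# which normalise to roughly [0.090, 0.245, 0.665]; raising the temperature flattens
# the distribution towards uniform.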
def check_elementwise_sum_with_shape(shape, n):
# forward
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.ElementWiseSum(*inputs, name='esum')
arr = [mx.nd.empty(shape) for i in range(n)]
arr_grad = [mx.nd.empty(shape) for i in range(n)]
for i in range(n):
arr[i][:] = np.random.uniform(-10, 10, shape)
exec1 = out._bind(default_context(),
args=arr,
args_grad=arr_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
out = sum(a.asnumpy() for a in arr)
assert_almost_equal(out, out1, rtol=1e-5, atol=1e-5)
out_grad = mx.nd.empty(shape)
out_grad[:] = np.random.uniform(-10, 10, shape)
# backward
exec1.backward([out_grad])
for a in arr_grad:
assert_almost_equal(a, out_grad, rtol=1e-5, atol=1e-5)
@pytest.mark.serial
def test_elementwise_sum():
nrepeat = 2
maxdim = 4
for repeat in range(nrepeat):
for dim in range(1, maxdim):
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
check_elementwise_sum_with_shape(shape, np.random.randint(1, 8))
def check_concat_with_shape(shapes, dimension, skip_second):
# if skip_second is True, second argument will not have gradient.
# it is to test #1130
n = len(shapes)
# forward
target_dim = 0
for shape in shapes:
target_dim += shape[dimension]
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.Concat(*inputs, name='conc',dim=dimension)
arr = [mx.nd.empty(shape) for shape in shapes]
for i in range(n):
arr[i][:] = shapes[i][dimension]
arr_np = [np.copy(narray.asnumpy()) for narray in arr]
arr_grad = [mx.nd.empty(shape) for shape in shapes]
dict_grad = {}
arg_names = out.list_arguments()
for name, g in zip(arg_names, arr_grad):
if not skip_second or name != 'arg1':
dict_grad[name] = g
args = out.list_arguments()
arg_shapes, out_shapes, aux_shapes = out.infer_shape(**dict(zip(args, shapes)))
out_grad = mx.nd.empty(out_shapes[0])
exec1 = out._bind(default_context(),
args=arr,
args_grad=dict_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
ret = np.concatenate([narray.asnumpy() for narray in arr], axis=dimension)
assert_almost_equal(out1, ret)
# backward
out1.copyto(out_grad)
out_grad[:] += 1
exec1.backward([out_grad])
for i, name in enumerate(arg_names):
if not skip_second or name != 'arg1':
grad = dict_grad[name]
np_grad = arr_np[i]
assert_almost_equal(grad, np_grad + 1)
def test_concat():
for dimension in range(4):
n = 2
merge = [2, 3, 4, 5, 6]
a = 2
b = 3
c = 4
# test 2D
if dimension<2:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a))
elif dimension == 1:
shapes.append((a, merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 2, True)
check_concat_with_shape(shapes, dimension - 2, False)
#test 3D
if dimension<3:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a,b))
elif dimension ==1:
shapes.append((a,merge[i],b))
elif dimension ==2:
shapes.append((a,b,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 3, True)
check_concat_with_shape(shapes, dimension - 3, False)
# test 4D
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i],a,b,c))
elif dimension == 1:
shapes.append((a,merge[i],b,c))
elif dimension ==2:
shapes.append((a,b,merge[i],c))
elif dimension ==3:
shapes.append((a,b,c,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 4, True)
check_concat_with_shape(shapes, dimension - 4, False)
def test_slice_channel():
def check_slice_channel(data_ndim, axis, num_outputs, squeeze_axis):
ins = []
if squeeze_axis:
shape = np.random.randint(2, 5, data_ndim).tolist()
shape[axis] = num_outputs
out_ele_shape = [ele for ele in shape]
del out_ele_shape[axis]
else:
shape = np.random.randint(1, 5, data_ndim).tolist()
shape[axis] *= num_outputs
out_ele_shape = [ele for ele in shape]
out_ele_shape[axis] //= num_outputs
data_npy = np.random.normal(size=shape)
out_grads_npy = [np.random.normal(size=out_ele_shape) for i in range(num_outputs)]
data = mx.sym.Variable('data')
sym = mx.sym.SliceChannel(data=data, num_outputs=num_outputs, axis=axis, squeeze_axis=squeeze_axis)
exe = sym._simple_bind(ctx=default_context(), data=data_npy.shape)
outputs = exe.forward(is_train=True, data=data_npy)
assert len(exe.outputs) == num_outputs
for i in range(num_outputs):
gt = data_npy.take(np.arange(i * shape[axis]/num_outputs,
                                         (i+1) * shape[axis]/num_outputs).astype(int), axis=axis)
if squeeze_axis:
assert_almost_equal(outputs[i], gt.reshape(outputs[i].shape))
else:
assert_almost_equal(outputs[i], gt)
# test backward
ograd = [mx.nd.array(ele, dtype=outputs[i].dtype) for i, ele in enumerate(out_grads_npy)]
exe.backward(out_grads=ograd)
if squeeze_axis:
assert_almost_equal(exe.grad_arrays[0],
np.concatenate([np.expand_dims(ele, axis=axis) for ele in out_grads_npy],
axis=axis))
else:
assert_almost_equal(exe.grad_arrays[0],
np.concatenate(out_grads_npy, axis=axis))
check_slice_channel(data_ndim=2, axis=1, num_outputs=3, squeeze_axis=True)
check_slice_channel(data_ndim=4, axis=2, num_outputs=3, squeeze_axis=False)
check_slice_channel(data_ndim=3, axis=-1, num_outputs=2, squeeze_axis=False)
check_slice_channel(data_ndim=5, axis=-2, num_outputs=3, squeeze_axis=True)
def test_python_op():
X = mx.symbol.Variable('X')
op = mx.operator.NumpyOp()
s = op.get_symbol(X, name='numpy_op')
x = mx.ndarray.ones((10))*10
dx = mx.ndarray.zeros((10))
dy = mx.ndarray.ones((10))
exec1 = s._bind(default_context(), args=[x], args_grad = {'X': dx})
exec1.forward(is_train=True)
assert_almost_equal(x, exec1.outputs[0])
exec1.backward(dy)
assert_almost_equal(dy, dx)
def test_swapaxes():
data = mx.symbol.Variable('data')
shape = (2, 3, 4)
data_tmp = np.ones(shape)
data_tmp[0] = 1
data_tmp[1] = 2
arr_data = mx.nd.array(data_tmp)
swap0 = mx.symbol.SwapAxis(data=data, dim1=0, dim2=2)
swap = mx.symbol.SwapAxis(data=swap0, dim1=1, dim2=2)
exe_c = swap._bind(default_context(), args=[arr_data])
exe_c.forward(is_train=True)
out = exe_c.outputs[0]
swap0_ = np.swapaxes(data_tmp, 0, 2)
swap_ = np.swapaxes(swap0_, 1, 2)
assert_almost_equal(out, swap_)
config = [((1, 1, 2), 0, 1),
((1, 1, 2), -1, -2),
((4, 5, 6, 7), 1, 1),
((4, 5, 6, 7), 2, 3),
((4, 5, 6, 7), -2, 2),
((4, 5, 6, 7), -2, -3)]
for shape, axis1, axis2 in config:
data_np = np.random.uniform(size=shape)
data_mx = mx.nd.array(data_np, dtype=data_np.dtype)
ret_np = np.swapaxes(data_np, axis1=axis1, axis2=axis2)
ret_mx = mx.symbol.SwapAxis(data, dim1=axis1, dim2=axis2)
exe_c = ret_mx._bind(default_context(), args=[data_mx])
exe_c.forward(is_train=True)
out = exe_c.outputs[0]
assert_almost_equal(out, ret_np)
@xfail_when_nonstandard_decimal_separator
def test_scalarop():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)*5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = 2 / (4-((1+data+1)*2/5)-0.8-(data!=0))
npout_1 = (4-((1+data_tmp+1)*2/5)-0.8-(data_tmp!=0))
npout = 2/npout_1
check_symbolic_forward(test, [data_tmp], [npout])
npout_grad = 2.*2/5
npout_grad = 2*npout_grad /(npout_1 *npout_1 )
check_symbolic_backward(test, [data_tmp], [np.ones(shape)*2], [npout_grad])
def test_scalar_pow():
data = mx.symbol.Variable('data')
shape = (1, 1)
data_tmp = np.ones(shape)
test = data ** 2
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [data_tmp ** 2])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)], [2 * data_tmp])
def test_symbol_pow():
shape = (1, 1)
data = mx.symbol.Variable('data')
data_tmp = np.ones(shape)*2
exp = mx.symbol.Variable('exp')
exp_tmp = np.ones(shape)*3
test = data**exp
check_numeric_gradient(test, [data_tmp, exp_tmp])
check_symbolic_forward(test, [data_tmp, exp_tmp], [data_tmp**exp_tmp])
data_dir = data_tmp**(exp_tmp - 1) * exp_tmp
exp_dir = data_tmp**(exp_tmp) * np.log(data_tmp)
check_symbolic_backward(test, [data_tmp, exp_tmp], [np.ones(shape)], [data_dir, exp_dir])
def test_fully_connected():
# Create data of given shape as a uniform distribution centered on 0.0
def random_data(shape, dtype=np.float32):
return mx.nd.random.uniform(low=-0.5,
high=0.5, shape=shape, dtype=dtype)
data = mx.sym.var("data")
fc_weight = mx.sym.var("weight")
fc_bias = mx.sym.var("bias")
fc = mx.sym.FullyConnected(data=data, weight=fc_weight, bias=fc_bias, num_hidden=10, no_bias=False, name='fc')
data = random_data(shape=(5, 5, 5, 13))
fc_weight = random_data(shape=(10, 325))
fc_bias = random_data(shape=(10))
fc_bias2 = random_data(shape=(10, 1))
data_np = data.asnumpy().reshape(5, 325)
fc_weight_np = np.transpose(fc_weight.asnumpy())
fc_bias_np = fc_bias.asnumpy()
res = np.dot(data_np, fc_weight_np) + fc_bias.asnumpy()
check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np}, {'fc_output': res})
check_numeric_gradient(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np})
# TODO: Fix Bug #15032 when bias has ndim > 1
#check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias2.asnumpy()}, {'fc_output': res})
def test_pow_fn():
shape = (3, 4)
exp = mx.symbol.Variable("exp")
x = np.ones(shape)*3
for y in [mx.sym.pow(2, exp), mx.sym.power(2, exp)]:
check_numeric_gradient(y, [x], numeric_eps=1E-3)
check_symbolic_forward(y, [x], [2**x])
check_symbolic_backward(y, [x], [np.ones(shape)], [np.log(2) * 2**x])
def test_relu():
def frelu(x):
return np.maximum(x, 0.0)
def frelu_grad(x):
return np.float32(1.0) * (x > np.float32(0.0))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.relu(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype('float32')
eps = 1e-4
# Avoid finite difference method inaccuracies due to discontinuous gradient at the origin.
# Here we replace small problematic inputs with 1.0. Repro issue with seed 97264195.
xa[abs(xa) < eps] = 1.0
ya = frelu(xa)
ga = frelu_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=eps)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga])
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
def test_leaky_relu():
def fleaky_relu(x, act_type, slope=0.25):
neg_indices = x < 0
out = x.copy()
if act_type == 'elu':
out[neg_indices] = slope * np.expm1(out[neg_indices])
elif act_type == 'leaky':
out[neg_indices] = slope * out[neg_indices]
return out
def fleaky_relu_grad(grad, x, y, act_type, slope=0.25):
neg_indices = x < 0
out = np.ones(x.shape)
if act_type == 'elu':
out[neg_indices] = y[neg_indices] + slope
elif act_type == 'leaky':
out[neg_indices] = slope
return out * grad
for ndim in range(1, 4):
shape = rand_shape_nd(ndim)
x = mx.symbol.Variable("x")
slp = 0.25
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
eps = 1e-4
rtol = 1e-2
atol = 1e-3
xa[abs(xa) < eps] = 1.0
for act_type in ['elu', 'leaky']:
y = mx.symbol.LeakyReLU(data=x, slope=slp, act_type=act_type)
ya = fleaky_relu(xa, slope=slp, act_type=act_type)
ga = fleaky_relu_grad(np.ones(shape), xa, ya, slope=slp, act_type=act_type)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape, dtype=dtype)], [ga], rtol=rtol, atol=atol, dtype=dtype)
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
def test_prelu():
def fprelu(x, gamma):
pos_indices = x > 0
out = x.copy()
if len(x.shape) == 4:
out = out.transpose(2,3,0,1)
out = np.multiply(out, gamma)
out = out.transpose(2,3,0,1)
else:
out = np.multiply(out, gamma)
out[pos_indices] = x[pos_indices]
return out
def fprelu_grad(x, y, gamma):
pos_indices = x > 0
if len(x.shape) == 4:
grad_x = np.multiply(np.ones(x.shape).transpose(2,3,0,1), gamma)
grad_x = grad_x.transpose(2,3,0,1)
else:
grad_x = np.multiply(np.ones(x.shape), gamma)
grad_gam = np.zeros(gamma.shape)
copy_x = x.copy()
copy_x[pos_indices] = 0.0
grad_x[pos_indices] = 1.0
if len(gamma.shape) > 1 and len(x.shape) != 4:
grad_gam = copy_x
elif len(gamma.shape) > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(2,3))
elif gamma.shape[0] == 1:
grad_gam = np.sum(np.sum(copy_x))
elif gamma.shape[0] > 1 and len(x.shape) != 4:
grad_gam = np.sum(copy_x, axis=0)
elif gamma.shape[0] > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(0,2,3))
return (grad_x, grad_gam)
x = mx.symbol.Variable("x")
gamma = mx.symbol.Variable("gamma")
for shape in [(3,4), (3,4,4,5)]:
for dtype in [np.float16, np.float32, np.float64]:
for gam in [np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype)]:
gam_full = np.array([gam, gam, gam])
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
rtol = 1e-2
atol = 1e-3
eps = 1e-4
xa[abs(xa) < eps] = 1.0
y = mx.symbol.LeakyReLU(data=x, gamma=gamma, act_type='prelu')
ya = fprelu(xa, gam)
ya_full = fprelu(xa, gam_full)
g_xa, g_gam = fprelu_grad(xa, ya, gamma=gam)
g_xa_full, g_gam_full = fprelu_grad(xa, ya_full, gamma=gam_full)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa, gam], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_numeric_gradient(y, [xa, gam_full], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam], [np.ones(ya.shape, dtype=dtype)],
[g_xa, g_gam], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam_full], [ya_full], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam_full], [np.ones(ya_full.shape, dtype=dtype)],
[g_xa_full, g_gam_full], rtol=rtol, atol=atol, dtype=dtype)
def test_selu():
alpha = 1.6732632423543772848170429916717
lamb = 1.0507009873554804934193349852946
def fselu(x):
neg_indices = x < 0
out = x.copy()
out[neg_indices] = alpha * np.expm1(out[neg_indices])
return out * lamb
def fselu_grad(grad, x, y):
neg_indices = x < 0
out = np.ones(x.shape).astype(x.dtype)
out[neg_indices] = y[neg_indices] + alpha
return out * lamb
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="selu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fselu(xa)
ga = fselu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape, dtype=dtype)], [ga], rtol=rtol, atol=atol, dtype=dtype)
def test_gelu():
CUBE_CONSTANT = 0.044715
ROOT_TWO_OVER_PI = 0.7978845608028654
def g(x):
return ROOT_TWO_OVER_PI * (x + CUBE_CONSTANT * np.power(x, 3))
def g_grad(x):
return ROOT_TWO_OVER_PI * (1.0 + 3.0 * CUBE_CONSTANT * np.power(x, 2))
def f(x):
return 1.0 + np.tanh(g(x))
def f_grad(x):
return (1.0 - np.tanh(g(x)) * np.tanh(g(x))) * g_grad(x)
def fgelu(x):
return 0.5 * x * f(x)
def fgelu_grad(grad, x, y):
return grad * (y / x + y * (1 - np.tanh(g(x))) * g_grad(x))
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="gelu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 2e-2, 1e-3) if dtype is np.float16 else (1e-4, 1e-3, 1e-5)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fgelu(xa)
ga = fgelu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
def test_sigmoid():
def fsigmoid(a):
return np.divide(1.0, (1.0 + np.exp(-a)))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.sigmoid(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsigmoid(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya * (1 - ya)])
def test_shape_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.shape_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.shape(xa)
yg = mx.nd.ones(ya)
exe = y._bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))
def test_size_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.size_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.size(xa)
yg = mx.nd.ones(ya)
exe = y._bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))
def test_hard_sigmoid():
def fhardsigmoid(a, alpha=0.2, beta=0.5):
return np.maximum(np.zeros(a.shape, dtype=a.dtype),
np.minimum(np.ones(a.shape, dtype=a.dtype), alpha*a+beta))
def fhardsigmoid_grad(a, out_grad, alpha=0.2, beta=0.5):
orig_out = fhardsigmoid(a, alpha, beta)
res = out_grad * alpha
res[orig_out <= 0.0] = 0.0
res[orig_out >= 1.0] = 0.0
return res
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.hard_sigmoid(x)
for dtype in [np.float16, np.float32, np.float64]:
if dtype is np.float16:
rtol = 1e-2
else:
rtol = 1e-3
atol = 1e-3
eps = 1e-3
xa = np.random.uniform(low=-3.0,high=3.0,size=shape).astype(dtype)
# function not differentiable at x=2.5 and -2.5
xa[abs(xa-2.5) < eps] -= 2 * eps
xa[abs(xa+2.5) < eps] += 2 * eps
ya = fhardsigmoid(xa)
grad_xa = fhardsigmoid_grad(xa, np.ones(shape))
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [grad_xa], rtol=rtol, atol=atol, dtype=dtype)
def test_softsign():
def fsoftsign(a):
return np.divide(a, (1.0 + np.abs(a)))
def fsoftsign_grad(a):
return np.divide(1.0, np.square((1.0 + np.abs(a))))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.softsign(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsoftsign(xa)
ya_grad = fsoftsign_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad])
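# Note on the reference gradient above: softsign(x) = x / (1 + |x|), so
# d/dx softsign(x) = 1 / (1 + |x|)**2, which is exactly what fsoftsign_grad computes.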
def test_binary_logic():
def _inner_test(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True):
x = mx.symbol.Variable("x")
y = mx.symbol.Variable("y")
z = logic_sym(x, y)
x_npy = np.random.randint(0, 4, size=x_shape).astype(np.float32)
y_npy = np.random.randint(0, 4, size=y_shape).astype(np.float32)
exe = z._simple_bind(ctx=default_context(), x=x_shape, y=y_shape)
mx_out = exe.forward(is_train=True, x=x_npy, y=y_npy)[0]
assert_almost_equal(mx_out, forward_gt(x_npy, y_npy))
exe.backward()
if test_scalar:
z_lscalar = logic_sym(1, y)
z_rscalar = logic_sym(x, 1)
exe_lscalar = z_lscalar._simple_bind(ctx=default_context(), y=y_shape)
exe_rscalar = z_rscalar._simple_bind(ctx=default_context(), x=x_shape)
mx_lscalar_out = exe_lscalar.forward(is_train=True, y=y_npy)[0]
mx_rscalar_out = exe_rscalar.forward(is_train=True, x=x_npy)[0]
assert_almost_equal(mx_lscalar_out, forward_gt(1, y_npy))
assert_almost_equal(mx_rscalar_out, forward_gt(x_npy, 1))
exe_lscalar.backward()
exe_rscalar.backward()
# Test the no-broadcasting binary logic ops + scalar logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: x == y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: x > y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: x >= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: x < y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: x <= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: x != y, x_shape=(10, 10), y_shape=(10, 10))
# Test the broadcasting binary logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: mx.sym.broadcast_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: mx.sym.broadcast_greater(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: mx.sym.broadcast_greater_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: mx.sym.broadcast_not_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
def test_unary_logic():
def reference(a, dtype):
return np.logical_not(a).astype(dtype)
shape = (3, 4)
xa = np.random.randint(-2, 2, size=shape).astype(np.float32)
mx_xa = mx.nd.array(xa)
mx_out = mx.nd.logical_not(mx_xa)
assert_almost_equal(mx_out, reference(xa, dtype=xa.dtype))
x = mx.sym.Variable('x')
y = mx.sym.logical_not(data=x)
exe = y._simple_bind(ctx=default_context(), x=shape)
sym_out = exe.forward(is_train=True, x=mx_xa)[0]
assert_almost_equal(sym_out, reference(xa, dtype=xa.dtype))
def test_embedding():
in_dim = 10
out_dim = 4
batch = 24
data = mx.sym.Variable("data")
embed = mx.sym.Embedding(data=data, input_dim=in_dim, output_dim=out_dim, name="embed")
exe_test = embed._simple_bind(default_context(), grad_req={'data': 'null', 'embed_weight': 'write'}, data=(batch,))
arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
np_data = np.random.randint(low=0, high=in_dim, size=batch)
np_weight = np.random.uniform(-0.01, 0.01, arg_map["embed_weight"].shape)
np_onehot = np.zeros((batch, in_dim))
np_onehot[np.arange(batch), np_data] = 1.0
# forward
arg_map["data"][:] = np_data
arg_map["embed_weight"][:] = np_weight
exe_test.forward(is_train=True)
# Non-zero atol required, as exposed by seed 781663739
rtol = 1e-5
atol = 1e-5
assert_almost_equal(exe_test.outputs[0], np.dot(np_onehot, np_weight), rtol=rtol, atol=atol)
# backward
np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
grad = mx.nd.zeros(np_grad.shape)
grad[:] = np_grad
exe_test.backward([grad])
assert_almost_equal(grad_map["embed_weight"], np.dot(np_onehot.T, np_grad), rtol=rtol, atol=atol)
# check ops handle duplicate input correctly.
def test_binary_op_duplicate_input():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = 5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
out_grad = mx.nd.empty(shape)
out_grad[:] = 1
square = data * data
exe_square = square._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_square.forward(is_train=True)
assert_almost_equal(exe_square.outputs[0], data_tmp * data_tmp)
exe_square.backward(out_grad)
assert_almost_equal(arr_grad, 2.0 * data_tmp)
def test_sign():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.sign(data)
exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.sign(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    # the gradient of sign() is zero everywhere it is defined
    npout_grad = np.zeros(shape)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def test_round_ceil_floor():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5.543
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]= 2
test = mx.sym.round(data) + mx.sym.ceil(data) + mx.sym.floor(data)
exe_test = test._bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.round(data_tmp) + np.ceil(data_tmp) + np.floor(data_tmp)
assert_almost_equal(out, npout)
def test_trunc():
data_tmp = np.random.rand(3, 4) * 10 - 5
arr_data = mx.nd.array(data_tmp)
data = mx.symbol.Variable('data')
test = mx.sym.trunc(data)
exe_test = test._bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
# 'trunc' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
# Repro issue with seed 1660190454
npout = np.trunc(np.float32(data_tmp))
assert_almost_equal(out, npout)
def test_rsqrt_cos_sin():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.rsqrt(data) + mx.sym.cos(data) + mx.sym.sin(data)
exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = 1/ np.sqrt(data_tmp) + np.cos(data_tmp) + np.sin(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
npout_grad = out_grad.asnumpy()
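# Analytic derivatives used below: d(rsqrt(x))/dx = -1/(2*x*sqrt(x)),
# d(cos(x))/dx = -sin(x), d(sin(x))/dx = cos(x).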
npout_grad = npout_grad * -(1.0 / (2.0 * data_tmp * np.sqrt(data_tmp))) + npout_grad * -1 * np.sin(data_tmp) + npout_grad * np.cos(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def test_maximum_minimum():
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp2 = np.random.rand(3,4)
data_tmp1[:] = 2
data_tmp2[:] = 3
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,data2) + mx.sym.minimum(data1,data2)
exe_test = test._bind(default_context(), args=[arr_data1,arr_data2], args_grad=[arr_grad1,arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.maximum(data_tmp1,data_tmp2) + np.minimum(data_tmp1,data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
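# maximum() routes the gradient to the larger input and minimum() to the smaller
# one, so each input receives the upstream gradient from exactly one of the two terms.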
mask1 = (data_tmp1 > data_tmp2).astype('float')
mask2 = (data_tmp1 < data_tmp2).astype('float')
npout_grad1 = npout_grad * mask1 + npout_grad * mask2
npout_grad2 = (npout_grad - npout_grad * mask1) + (npout_grad - npout_grad * mask2)
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
def test_maximum_minimum_scalar():
data1 = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp1[:] = 2
arr_data1 = mx.nd.array(data_tmp1)
arr_grad1 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,3) + mx.sym.maximum(9,data1) + mx.sym.minimum(5,data1) + mx.sym.minimum(data1,4)
exe_test = test._bind(default_context(), args=[arr_data1], args_grad=[arr_grad1])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.maximum(data_tmp1,3) + np.maximum(9,data_tmp1) + np.minimum(5,data_tmp1) + np.minimum(data_tmp1,4)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > 3).astype('float')
mask2 = (9 > data_tmp1).astype('float')
mask3 = (5 < data_tmp1).astype('float')
mask4 = (data_tmp1 < 4).astype('float')
npout_grad1 = npout_grad * mask1 + (npout_grad - npout_grad * mask2) + (npout_grad - npout_grad * mask3) + npout_grad * mask4
assert_almost_equal(arr_grad1, npout_grad1)
def test_abs():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.abs(data)
exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = abs(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * np.sign(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def check_deconvolution_forward_backward(input_shape, num_filter, kernel, stride, pad):
"""configure A: input --> conv --> deconv --> output.
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output, and the same weights between conv
and deconv;
If the input value of forward() and backwrad() is the same, then
the output value of them should also the same;
"""
assert input_shape[1] == num_filter
data = mx.sym.Variable(name="data")
conv = mx.sym.Convolution(
data=data, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
deconv = mx.sym.Deconvolution(
data=conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
input_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
out_grad = input_data
args = {}
args["data"] = input_data
args['conv_weight'] = args['deconv_weight'] = mx.random.normal(0, 1,
(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
args_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = deconv._bind(default_context(), args=args, args_grad=args_grad)
exe.forward(is_train=True)
out = exe.outputs[0]
exe.backward(out_grad)
assert_almost_equal(out, args_grad[0], rtol=1E-3, atol=1e-3)
args_grad_addto_npy = [np.random.normal(size=s) for s in arg_shapes]
args_grad_addto = [mx.nd.array(ele) for ele in args_grad_addto_npy]
exe = deconv._bind(default_context(), args=args, args_grad=args_grad_addto, grad_req="add")
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out + args_grad_addto_npy[0], args_grad_addto[0].asnumpy(), rtol=1e-3, atol=1e-3)
def check_deconvolution_gradient(input_shape, num_filter, pad):
"""configure A: input --> conv --> output.
configure B: input --> deconv --> output
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output;
During backward(), if the input of A equals output of B, and the output
of A equals input of B, then the grad of weight should be the same;
"""
ndim = len(pad)
stride = (1,) * ndim
kernel = tuple(2 * np.array(pad) + 1)
data_conv = mx.sym.Variable(name="data_conv")
conv = mx.sym.Convolution(
data=data_conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
data_deconv = mx.sym.Variable(name="data_deconv")
deconv = mx.sym.Deconvolution(
data=data_deconv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
conv_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
conv_args = {}
conv_args["data_conv"] = conv_data
conv_args['conv_weight'] = \
mx.random.normal(0, 1,(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
conv_args_grad = [mx.nd.zeros(conv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
exe_conv = conv._bind(default_context(), args=conv_args, args_grad=conv_args_grad)
exe_conv.forward(is_train=True)
conv_out_grad = mx.random.normal(0, 2, exe_conv.outputs[0].shape, ctx=mx.cpu()).copyto(default_context())
exe_conv.backward(conv_out_grad)
deconv_data = conv_out_grad
deconv_args = {}
deconv_args['data_deconv'] = deconv_data
deconv_args['deconv_weight'] = conv_args['conv_weight']
deconv_args_grad = [mx.nd.zeros(deconv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad_npy = [np.random.normal(size=deconv_data.shape),
np.random.normal(size=(num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad = [mx.nd.array(deconv_addto_args_grad_npy[0]),
mx.nd.array(deconv_addto_args_grad_npy[1])]
exe_deconv = deconv._bind(default_context(), args=deconv_args, args_grad=deconv_args_grad)
exe_deconv.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1], deconv_args_grad[1], rtol=1e-3, atol=1e-2)
# Test AddTo
exe_deconv_addto = deconv._bind(default_context(), args=deconv_args,
args_grad=deconv_addto_args_grad,
grad_req="add")
exe_deconv_addto.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv_addto.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy() + deconv_addto_args_grad_npy[1],
deconv_addto_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
def check_deconvolution_target_shape(input_shape, kernel, stride, pad, adj, target_shape=None):
data = mx.sym.Variable(name="data")
if target_shape:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5,
target_shape = target_shape)
else:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5)
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
default_target_size = 8
if target_shape is None:
target_shape = (default_target_size,) * len(kernel)
assert out_shapes[0] == (input_shape[0], 5) + target_shape
@pytest.mark.serial
def test_deconvolution():
# 2D
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
target_shape = (8,8),
pad = (99,99), # will be ignored
adj = (101,101), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
pad = (1,1),
adj = (1,1),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5,5),
num_filter = 1,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28,28),
num_filter = 3,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403, 403),
num_filter = 3,
kernel = (7,7),
stride = (5,5),
pad = (2,2)
)
check_deconvolution_gradient(
input_shape = (1,3,5,5),
num_filter = 3,
pad = (1,1)
)
check_deconvolution_gradient(
input_shape = (5,3,100,100),
num_filter = 3,
pad = (3,3)
)
# 1D
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
target_shape = (8,),
pad = (99,), # will be ignored
adj = (101,), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
pad = (1,),
adj = (1,),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5),
num_filter = 1,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28),
num_filter = 3,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403),
num_filter = 3,
kernel = (7,),
stride = (5,),
pad = (2,)
)
check_deconvolution_gradient(
input_shape = (1,3,5),
num_filter = 3,
pad = (1,)
)
check_deconvolution_gradient(
input_shape = (5,3,100),
num_filter = 3,
pad = (3,)
)
def test_deconvolution_forward_with_bias():
"""Check if deconvolution forward can work well with bias=True
"""
def check_deconvolution_forward_with_bias(shape=(1, 16, 5, 5), num_filter=32, num_group=1, kernel=(3, 3), pad=(1, 1)):
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
input_data = mx.random.uniform(-5, 5, shape, ctx=mx.cpu())
y = mx.sym.Deconvolution(data=x, weight=w, num_filter=num_filter, num_group=num_group, kernel=kernel, no_bias=False, pad=pad)
exe = y._simple_bind(ctx=mx.cpu(), x=shape, grad_req='null')
exe.arg_arrays[0][:] = np.random.normal(size=exe.arg_arrays[0].shape)
exe.arg_arrays[1][:] = np.random.normal(size=exe.arg_arrays[1].shape)
exe.forward(is_train=False)
o = exe.outputs[0]
t = o.asnumpy()
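# No numerical reference is compared here; this only verifies that forward
# with a bias term (no_bias=False) executes without error.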
check_deconvolution_forward_with_bias((1, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((32, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((1, 16, 5, 5), 32, 1, (3, 3), (1, 1))
check_deconvolution_forward_with_bias((32, 16, 5, 5), 32, 1, (3, 3), (1, 1))
def check_nearest_upsampling_with_shape(shapes, scale, root_scale):
arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context()) for i, shape in zip(range(len(shapes)), shapes)}
arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}
up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='nearest', scale=root_scale)
exe = up._bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
exe.backward(exe.outputs)
for k in range(len(shapes)):
name = 'arg_%d'%k
assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
def check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter):
def _init_bilinear(arr, f):
weight = np.zeros(np.prod(arr.shape), dtype='float32')
shape = arr.shape
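# Standard bilinear-upsampling kernel: c is the kernel center and each weight
# falls off linearly with the (x, y) distance from that center.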
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(np.prod(shape)):
x = i % shape[3]
y = (i // shape[3]) % shape[2]
weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
arr[:] = weight.reshape(shape)
return arr
up = mx.sym.UpSampling(mx.sym.Variable("data"),
mx.sym.Variable('weight'), sample_type='bilinear', scale=root_scale,
num_filter=num_filter, num_args=2)
arg_shapes, out_shapes, _ = up.infer_shape(data=data_shape)
arr = {'data': mx.random.uniform(-5, 5, data_shape, ctx=mx.cpu()).copyto(default_context()),
'weight': mx.nd.array(_init_bilinear(mx.ndarray.empty(arg_shapes[1]).asnumpy(), root_scale))}
arr_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = up._bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(exe.outputs)
target_shape = (data_shape[2] * root_scale, data_shape[3] * root_scale)
assert out.shape == data_shape[:2] + target_shape
def test_nearest_upsampling():
for root_scale in [1,2,3]:
for scale in [1,2,3]:
for num_shape in [1,2,3]:
for base in [1,2,3]:
shapes = [(1,3,base*root_scale*scale**(num_shape-1-i),base*root_scale*scale**(num_shape-1-i)) for i in range(num_shape)]
check_nearest_upsampling_with_shape(shapes, scale, root_scale)
def test_bilinear_upsampling():
rootscale = [2,3]
scales = [1,2,3]
filters = [1,2,3]
bases = [1,2,3]
for params in itertools.product(rootscale, scales, filters, bases):
root_scale, scale, num_filter, base = params
# bilinear upsampling takes only 1 data and 1 weight
# multi input mode is not applicable
dimension = base*root_scale*scale
kernel = 2 * root_scale - root_scale % 2
data_shape = (1, num_filter, dimension, dimension)
weight_shape = (1, num_filter, kernel, kernel)
check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter)
def test_batchnorm_training():
def check_batchnorm_training(stype):
for shape in [(2, 3), (2, 3, 2, 2), (2, 8, 2, 2)]:
data_tmp = np.random.normal(-0.1, 0.1, size=shape)
s = (shape[1],)
gamma = np.ones(s)
beta = np.ones(s)
gamma[1] = 3
beta[0] = 3
rolling_mean = np.random.uniform(size=s)
rolling_std = np.random.uniform(size=s)
data = mx.symbol.Variable('data', stype=stype)
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
# Test varying channel axis
dim = len(shape)
for chaxis in range(-dim, dim):
chaxis_true = chaxis
if chaxis < 0:
chaxis_true = dim + chaxis
shapex = shape
channel_count = shapex[chaxis_true]
data_tmp = np.random.normal(-0.1, 0.1, size=shapex)
gamma = np.ones(channel_count)
beta = np.ones(channel_count)
if channel_count > 1:
gamma[1] = 3
beta[0] = 3
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
xrolling_mean = np.random.uniform(size=channel_count)
xrolling_std = np.random.uniform(size=channel_count)
xmean_std = [mx.nd.array(xrolling_mean).tostype(stype),
mx.nd.array(xrolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
check_batchnorm_training('default')
@xfail_when_nonstandard_decimal_separator
@pytest.mark.parametrize('op_name', ['BatchNorm', 'SyncBatchNorm'])
@pytest.mark.parametrize('shape', [(4, 2), (4, 3, 4),
(4, 6, 4, 5), (4, 5, 6, 4, 5)])
@pytest.mark.parametrize('fix_gamma', [False, True])
@pytest.mark.parametrize('cudnn_off', [False, True])
@pytest.mark.parametrize('output_mean_var', [False, True])
def test_batchnorm(op_name, shape, fix_gamma, cudnn_off, output_mean_var):
if op_name == 'BatchNorm':
op = mx.nd.BatchNorm
elif op_name == 'SyncBatchNorm':
op = mx.nd.contrib.SyncBatchNorm
else:
raise ValueError(f'Not supported {op_name}')
momentum = 0.9
epsilon = 1e-5
def _test_batchnorm_impl(axis,
data_grad_req, gamma_grad_req, beta_grad_req):
kwargs = dict(output_mean_var=output_mean_var)
if op_name == 'SyncBatchNorm':
if axis != 1:
return
key = str(op) + str(shape) + str(axis)
kwargs.update(dict(key=key))
if cudnn_off:
return
else:
kwargs.update(dict(axis=axis, cudnn_off=cudnn_off))
nch = shape[axis]
if not fix_gamma:
bn_gamma = mx.nd.random.uniform(shape=(nch,))
bn_gamma.attach_grad(grad_req=gamma_grad_req)
else:
bn_gamma = mx.nd.ones(shape=(nch,))
bn_beta = mx.nd.random.uniform(shape=(nch,))
bn_beta.attach_grad(grad_req=beta_grad_req)
bn_running_mean = mx.nd.zeros(nch)
bn_running_var = mx.nd.ones(nch)
running_mean = mx.nd.zeros(nch)
running_var = mx.nd.ones(nch)
num_iters = 10
expand_shape = [1] * len(shape)
expand_shape[axis] = shape[axis]
data = mx.nd.random.uniform(shape=shape)
data.attach_grad(grad_req=data_grad_req)
adX, adW, adb = 0, 0, 0
is_train = data_grad_req != 'null' or \
(not fix_gamma and gamma_grad_req != 'null') or \
beta_grad_req != 'null'
for _ in range(num_iters):
if data_grad_req != 'add':
data = mx.nd.random.uniform(shape=shape)
data.attach_grad(grad_req=data_grad_req)
ograd = mx.nd.random.uniform(shape=shape)
with mx.autograd.record():
output = op(data, bn_gamma, bn_beta,
bn_running_mean, bn_running_var,
momentum=momentum, eps=epsilon,
fix_gamma=fix_gamma, **kwargs)
if output_mean_var:
output, output_mean, output_std = output
if is_train:
output.backward(ograd)
mx.nd.waitall()
data_mean = data.mean(
axis=axis, exclude=True, keepdims=True)
data_var = (data - data_mean).square().mean(axis=axis,
exclude=True,
keepdims=True)
target_output = (data - data_mean) / \
(data_var + epsilon).sqrt() * \
bn_gamma.reshape(expand_shape) + \
bn_beta.reshape(expand_shape)
# squeeze data_mean and data_var
data_mean_flat = data_mean.squeeze()
data_var_flat = data_var.squeeze()
running_mean = running_mean * momentum + \
data_mean_flat * (1 - momentum)
m = np.prod(shape) / shape[axis]
# cudnn uses m-1 in the denominator of its sample variance calculation, not m
sample_var_adjust = 1.0 if cudnn_off or fix_gamma else m / (m-1)
running_var = running_var * momentum + \
data_var_flat * sample_var_adjust * (1 - momentum)
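# Reference batch norm backward: with y = gamma * (x - mean) / sqrt(var + eps) + beta,
# dX accumulates the direct path plus the contributions through the batch mean and
# variance, while dW and db reduce the upstream gradient over all non-channel axes.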
W = bn_gamma.reshape(expand_shape)
dnx = ograd * W
xsm = data - data_mean
nd = 1.0 / mx.nd.sqrt(data_var + epsilon)
nx = xsm * nd
dvar = (dnx * xsm).sum(axis=axis, keepdims=True,
exclude=True) * (-0.5) * mx.nd.power(nd, 3)
dmean = -nd * dnx.sum(axis=axis, keepdims=True, exclude=True) - \
dvar * xsm.mean(axis=axis, keepdims=True,
exclude=True) * 2.0
dX = dnx * nd + dvar * xsm * (2.0 / m) + dmean * (1.0 / m)
dW = (ograd * nx).sum(axis=axis, exclude=True)
db = ograd.sum(axis=axis, exclude=True)
adX = dX if data_grad_req != 'add' else adX + dX
adW = dW if gamma_grad_req != 'add' else adW + dW
adb = db if beta_grad_req != 'add' else adb + db
atol, rtol = 5e-2, 5e-2
if output_mean_var:
assert_almost_equal(output_mean.asnumpy(),
data_mean_flat.asnumpy(),
atol=atol, rtol=rtol)
if op != mx.nd.contrib.SyncBatchNorm:
assert_almost_equal(output_std.asnumpy(),
(1.0 / (data_var_flat +
epsilon).sqrt()).asnumpy(),
atol=atol, rtol=rtol)
else:
assert_almost_equal(output_std.asnumpy(),
data_var_flat.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(output.asnumpy(), target_output.asnumpy(),
atol=atol, rtol=rtol)
if is_train:
assert_almost_equal(bn_running_mean.asnumpy(
), running_mean.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(bn_running_var.asnumpy(
), running_var.asnumpy(), atol=atol, rtol=rtol)
if data_grad_req != 'null':
assert_almost_equal(data.grad.asnumpy(),
adX.asnumpy(), atol=atol, rtol=rtol)
if not fix_gamma:
if gamma_grad_req != 'null':
assert_almost_equal(
bn_gamma.grad.asnumpy(), adW.asnumpy(),
atol=atol, rtol=rtol)
else:
assert((bn_gamma.asnumpy() == 1).all())
if beta_grad_req != 'null':
assert_almost_equal(
bn_beta.grad.asnumpy(), adb.asnumpy(), atol=atol, rtol=rtol)
grad_reqs = ['write'] if len(shape) != 4 else ['null', 'write', 'add']
for data_grad_req in grad_reqs:
for gamma_grad_req in grad_reqs:
if fix_gamma and gamma_grad_req != 'null':
continue
for beta_grad_req in grad_reqs:
for axis in range(len(shape)):
_test_batchnorm_impl(axis,
data_grad_req, gamma_grad_req, beta_grad_req)
def test_groupnorm():
acc_types = {'float16': 'float32', 'float32': 'float64', 'float64': 'float64'}
def x_hat_helper(x, num_groups, eps):
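# GroupNorm reference: reshape the channel axis into (num_groups, C // num_groups)
# and normalize each group over its channels and spatial positions.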
dtype = x.dtype
dshape = x.shape
assert len(dshape) == 4
acc_type = acc_types[str(dtype)]
new_shape = (dshape[0], num_groups, int(dshape[1] / num_groups), dshape[2], dshape[3])
new_moments_shape = (dshape[0], num_groups, 1, 1, 1)
data = x.reshape(new_shape)
mean = np.mean(data, axis=(2, 3, 4), keepdims=False, dtype=acc_type).astype(dtype)
std = np.sqrt(np.var(data, axis=(2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype) + eps)
x_hat = (data - mean.reshape(new_moments_shape)) / std.reshape(new_moments_shape)
return x_hat, mean, std
def np_groupnorm(data, gamma, beta, num_groups, eps):
dshape = data.shape
new_param_shape = (1, dshape[1], 1, 1)
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
out = x_hat.reshape(dshape) * gamma.reshape(new_param_shape) + beta.reshape(new_param_shape)
return out, mean, std
def np_groupnorm_grad(ograd, data, gamma, beta, mean, std, num_groups, eps):
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
new_shape = x_hat.shape
dshape = data.shape
dtype = data.dtype
new_moments_shape = (new_shape[0], num_groups, 1, 1, 1)
new_param_shape = (1, dshape[1], 1, 1)
acc_type = acc_types[str(dtype)]
ograd = ograd.reshape(new_shape)
data = data.reshape(new_shape)
gamma = gamma.reshape(new_param_shape)
beta = beta.reshape(new_param_shape)
mean = mean.reshape(new_moments_shape)
std = std.reshape(new_moments_shape)
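# Reference gradients: beta_grad and gamma_grad reduce over the batch and spatial
# axes; data_grad subtracts from the scaled upstream gradient its per-group mean
# and its projection onto x_hat, the usual normalization backward.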
beta_grad = np.sum(ograd, axis=(0, 3, 4), dtype=acc_type, keepdims=False).astype(dtype).flatten()
gamma_grad = np.sum(x_hat * ograd, axis=(0, 3, 4), dtype=acc_type, keepdims=False).astype(dtype).flatten()
x_hat_grad = ograd * gamma.reshape(1, num_groups, dshape[1] // num_groups, 1, 1)
ograd_mult = x_hat_grad / std
red_out = np.mean(ograd_mult, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = ograd_mult - red_out
red_out = np.mean(ograd_mult * x_hat, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = data_grad - x_hat * red_out
return data_grad.reshape(dshape), gamma_grad, beta_grad
batch_size = random.randint(1, 8)
num_groups = random.randint(2, 3)
num_channels = random.randint(2, 3) * num_groups
height = random.randint(1, 5)
width = random.randint(1, 5)
dshape = (batch_size, num_channels, height, width)
param_shape = (num_channels,)
temp_shape = (batch_size, num_groups, int(num_channels / num_groups), height, width)
np_data = np.random.uniform(0.2, 1.0, dshape)
np_gamma = np.random.uniform(-1.0, 1.0, param_shape)
np_beta = np.random.uniform(-1.0, 1.0, param_shape)
data_sym = mx.sym.Variable("data")
gamma_sym = mx.sym.Variable("gamma")
beta_sym = mx.sym.Variable("beta")
for dtype in [np.float16, np.float32, np.float64]:
eps = 1e-2 if dtype == np.float16 else 1e-5
mx_data = mx.nd.array(np_data, dtype=dtype)
mx_gamma = mx.nd.array(np_gamma, dtype=dtype)
mx_beta = mx.nd.array(np_beta, dtype=dtype)
np_out, np_mean, np_std = np_groupnorm(np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
num_groups=num_groups,
eps=eps)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=True)
check_symbolic_forward(mx_sym, [mx_data, mx_gamma, mx_beta], [np_out, np_mean, np_std],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-3 if dtype == np.float16 else 1e-4, dtype=dtype)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=False)
np_ograd = np.random.uniform(-1.0, 1.0, dshape).astype(dtype)
np_data_grad, np_gamma_grad, np_beta_grad = np_groupnorm_grad(np_ograd,
np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
np_mean, np_std,
num_groups, eps)
check_symbolic_backward(mx_sym, [mx_data, mx_gamma, mx_beta], [mx.nd.array(np_ograd, dtype=np_ograd.dtype)],
[np_data_grad, np_gamma_grad, np_beta_grad],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-2 if dtype == np.float16 else 1e-4, dtype=dtype)
def test_convolution_grouping():
for dim in [1, 2, 3]:
num_filter = 4
for num_group in [1, 2]:
kernel = (3,) * dim
shape = (1, 4) + (9,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group, kernel=kernel)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel)
for i in range(num_group)])
exe1 = y1._simple_bind(default_context(), x=shape)
exe2 = y2._simple_bind(default_context(), x=shape, w=(num_filter, shape[1]//num_group) + kernel, b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.float32(np.random.normal(size=arr1.shape))
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
@pytest.mark.skip(reason="Flaky test https://github.com/apache/incubator-mxnet/issues/14052")
def test_depthwise_convolution():
for dim in [1,2]:
for num_base in [1, 4, 16, 32, 64]:
for kernel_x in [3, 5]:
for stride_x in [1, 2]:
for pad_x in [0, 1]:
for in_size in [7, 32]:
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
num_group = num_base
shape = (2, num_base) + (in_size,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group,
kernel=kernel, stride=stride, pad=pad)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel,
stride=stride, pad=pad)
for i in range(num_group)])
dev = default_context()
exe1 = y1._simple_bind(dev, x=shape)
exe2 = y2._simple_bind(dev, x=shape, w=(num_filter, shape[1]//num_group)+kernel,
b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.random.normal(size=arr1.shape)
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
assert_allclose(arr1, arr2, rtol=1e-3, atol=1e-3)
def test_convolution_independent_gradients():
# NOTE(zixuanweeei): Flaky test tracked by https://github.com/apache/incubator-mxnet/issues/15603.
# GPU context will be enabled after figuring out the possible issue tracked at
# https://github.com/apache/incubator-mxnet/issues/15638.
ctx = mx.cpu()
atol = 1.0e-3
rtol = 1.0e-3
reqs = ["null", "write", "add"]
var_names = ["x", "w", "b"]
dims = [1, 2]
num_bases = [1, 8]
kernel_xs = [3, 5]
stride_xs = [1, 2]
pad_xs = [0, 1]
in_sizes = [7, 32]
no_biases = [True, False]
for dim, num_base, kernel_x, stride_x, pad_x , in_size, no_bias in \
itertools.product(dims, num_bases, kernel_xs, stride_xs, pad_xs, in_sizes, no_biases):
# Prepare params shape
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
x_shape = (2, num_base) + (in_size,) * dim
w_shape = (num_filter, num_base) + kernel
# Symbols definition
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b') if not no_bias else None
conv = mx.sym.Convolution(x, w, b, num_filter=num_filter,
kernel=kernel, stride=stride, pad=pad, no_bias=no_bias)
for req_kind in reqs:
# Binding args for conv with possible dependent gradients
base_args = {
'x': mx.nd.random.normal(shape=x_shape, ctx=ctx),
'w': mx.nd.random.normal(shape=w_shape, ctx=ctx),
'b': mx.nd.random.normal(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
args1 = copy.deepcopy(base_args)
grad1 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req1 = [req_kind] * 3
grad_req1 = dict(zip(var_names, grad_req1))
exe1 = conv._bind(ctx, args1, args_grad=grad1, grad_req=grad_req1)
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
for x_req, w_req, b_req in itertools.product(reqs, repeat=3):
# Binding args for conv with independent gradients
args2 = copy.deepcopy(base_args) # Deepcopy the same params of `exe1`
grad2 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req2 = {"x": x_req, "w": w_req, "b": b_req}
exe2 = conv._bind(ctx, args2, args_grad=grad2, grad_req=grad_req2)
exe2.forward(is_train=True)
np.testing.assert_allclose(exe1.outputs[0].asnumpy(),
exe2.outputs[0].asnumpy(), rtol=rtol, atol=atol)
exe2.backward(exe2.outputs[0])
for var_name in var_names:
if var_name == "b" and no_bias:
continue
if grad_req2[var_name] == "null":
exe2_var_grad = grad2[var_name].asnumpy()
np.testing.assert_allclose(exe2_var_grad,
np.zeros_like(exe2_var_grad), rtol=rtol, atol=atol)
if grad_req2[var_name] != grad_req1[var_name]:
continue
np.testing.assert_allclose(args1[var_name].asnumpy(),
args2[var_name].asnumpy(), rtol=rtol, atol=atol)
np.testing.assert_allclose(grad1[var_name].asnumpy(),
grad2[var_name].asnumpy(), rtol=rtol, atol=atol)
def gen_broadcast_data(idx):
# Manually set test cases
binary_op_data_shape = np.array(
[[[2, 5, 1, 30, 7], [1, 5, 448, 30, 1]],
[[10, 49, 1, 77, 17], [10, 1, 2, 1, 17]],
[[13, 2, 65, 2, 1], [13, 1, 65, 1, 225]],
[[9, 434, 4, 2, 37], [9, 1, 4, 1, 37]],
[[2, 52, 1, 4, 1], [1, 52, 60, 1, 37]],
[[1, 23, 7, 122, 50], [2, 1, 7, 1, 50]],
[[1, 17, 1, 5, 1], [22, 1, 2, 1, 28]],
[[29, 1, 2, 1, 8], [29, 22, 1, 130, 1]],
[[2, 36, 1, 427, 3], [1, 36, 11, 427, 1]],
[[1, 2, 1, 100, 7], [1, 2, 448, 100, 1]],
[[1, 2, 495, 77, 7], [1, 2, 1, 1, 7]],
[[1, 43, 65, 2, 1], [1, 43, 65, 1, 225]],
[[1, 92, 434, 2, 2], [1, 92, 1, 2, 2]],
[[1, 92, 1, 4, 1], [1, 92, 134, 1, 17]],
[[1, 53, 2, 122, 143], [1, 1, 2, 1, 143]],
[[1, 179, 1, 87, 17], [1, 179, 1, 1, 17]],
[[1, 1, 17, 5, 1], [1, 22, 1, 1, 28]],
[[1, 2, 1, 1, 8], [1, 2, 52, 430, 1]],
[[1, 163, 1, 22, 3], [1, 163, 116, 22, 1]],
[[1, 1, 44, 30, 7], [1, 1, 44, 30, 1]],
[[1, 1, 1, 1, 28], [1, 127, 1, 5, 28]],
[[1, 2, 394, 38, 1], [1, 2, 394, 38, 16]],
[[1, 10, 49, 77, 17], [1, 1, 1, 1, 17]],
[[1, 431, 6, 2, 225], [1, 1, 6, 2, 225]],
[[1, 15, 1, 28, 1], [1, 15, 1, 28, 463]],
[[1, 129, 2, 48, 96], [1, 129, 2, 1, 1]],
[[1, 1, 403, 17, 2], [1, 44, 403, 17, 2]],
[[1, 1, 65, 2, 22], [1, 1, 65, 1, 1]],
[[1, 24, 103, 17, 18], [1, 24, 1, 1, 1]],
[[1, 1, 1, 1, 2], [1, 24, 194, 50, 1]],
[[1, 1, 107, 84, 9], [1, 1, 1, 1, 1]]])
if idx < binary_op_data_shape.shape[0]:
l_shape = binary_op_data_shape[idx][0]
r_shape = binary_op_data_shape[idx][1]
else:
# Generate random data with ndim between 1 and 5 and every shape dim between 1 and 5
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
l_same_dim = np.random.randint(0, 5)
r_same_dim = np.random.randint(0, 5)
l_axis_flags = np.random.randint(0, 2, size=ndim)
r_axis_flags = np.random.randint(0, 2, size=ndim)
if l_same_dim == 4:
l_axis_flags = np.ones(ndim)
if r_same_dim == 4:
r_axis_flags = np.ones(ndim)
l_shape = shape.copy()
r_shape = shape.copy()
l_shape[np.where(l_axis_flags == 0)] = 1
r_shape[np.where(r_axis_flags == 0)] = 1
return [np.random.random(l_shape), np.random.random(r_shape)]
def gen_broadcast_data_int(idx):
d = gen_broadcast_data(idx)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def gen_binary_data(dummy):
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
#print("gen shape {}".format(shape))
return [np.random.random(shape), np.random.random(shape)]
def gen_binary_data_int(dummy):
d = gen_binary_data(dummy)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def check_binary_op_forward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5, mx_nd_func=None):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
y = symbol._bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])})
y.forward(is_train=True)
y = y.outputs[0].asnumpy()
x = baseline(d[0], d[1]).astype(y.dtype)
#np.set_printoptions(precision=20)
a = d[0]
b = d[1]
#print("a: {} {}".format(a.dtype, a))
#print("a: {} {}".format(b.dtype, b))
#print("x: {} {}".format(x.dtype, x))
#print("y: {} {}".format(y.dtype, y))
if mx_nd_func is not None:
d0 = mx.nd.array(d[0], dtype=d[0].dtype)
d1 = mx.nd.array(d[1], dtype=d[1].dtype)
assert_almost_equal(y, mx_nd_func(d0, d1).asnumpy(), rtol=rtol, atol=atol)
idx = np.abs(x-y) > atol+rtol*np.abs(x)
if idx.any():
import binascii
np.set_printoptions(precision=20)
logging.error('found precision problem:')
d[0] = np.broadcast_to(d[0], x.shape)
d[1] = np.broadcast_to(d[1], x.shape)
logging.error('input a: {}'.format(d[0][idx]))
logging.error('input b: {}'.format(d[1][idx]))
logging.error("output x: {} {}".format(x.dtype, x))
logging.error("output y: {} {}".format(y.dtype, y))
def ftohex(xs):
import struct
return list(map(lambda x: binascii.hexlify(struct.pack('d', x)), xs.flatten()))
logging.error('output x in baseline(a, b): {}'.format(x[idx]))
logging.error('output y in symbol(a, b): {}'.format(y[idx]))
logging.error('output x in baseline(a,b) hex: {}'.format(ftohex(x[idx])))
logging.error('output y in symbol(a,b) hex: {}'.format(ftohex(y[idx])))
logging.error('input a hex: {}'.format(ftohex(d[0][idx])))
logging.error('input a hex: {}'.format(ftohex(d[1][idx])))
logging.error('diff: {}'.format(np.abs(x-y)[idx] - atol-rtol*np.abs(x)[idx]))
assert_allclose(y, x, rtol=rtol, atol=atol)
def check_binary_op_backward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
out = np.random.random((d[0] + d[1]).shape)
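# reduce_op sums the full-shape gradient back down to each operand's broadcast
# shape, which is the expected backward behavior for broadcasting binary ops.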
def reduce_op(shape, x):
if shape == x.shape:
return x
keepdims_shape = list(x.shape)
for i in range(len(shape)):
if x.shape[i] != shape[i]:
keepdims_shape[i] = 1
x = np.sum(x, axis=i).reshape(keepdims_shape)
return x
baseline_grad1, baseline_grad2 = baseline(out, d[0], d[1])
x_1 = reduce_op(d[0].shape, baseline_grad1)
x_2 = reduce_op(d[1].shape, baseline_grad2)
y_1 = mx.nd.empty(d[0].shape)
y_2 = mx.nd.empty(d[1].shape)
y = symbol._bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])},
args_grad=[y_1, y_2])
o = y.forward(is_train=True)
y.backward([mx.nd.array(out, dtype=o[0].dtype)])
assert_allclose(y_1.asnumpy(), x_1, rtol=rtol, atol=atol)
assert_allclose(y_2.asnumpy(), x_2, rtol=rtol, atol=atol)
def test_binary_op():
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = a + b
check_binary_op_forward(c, lambda a, b: a + b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_binary_data)
def test_bminus(a, b):
c = a - b
check_binary_op_forward(c, lambda a, b: a - b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_binary_data)
def test_bmul(a, b):
c = a * b
check_binary_op_forward(c, lambda a, b: a * b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_binary_data)
def test_bdiv(a, b):
c = a / b
check_binary_op_forward(c, lambda a, b: a / b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_binary_data)
def test_bmod(a, b):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
#c = a % b
c = mx.sym.cast(a, dtype='float64') % mx.sym.cast(b, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data, rtol=0, atol=0)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.cast(a, dtype='int32') % mx.sym.cast(b, dtype='int32')
check_binary_op_forward(c, lambda a, b: a % b, gen_binary_data_int)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data_int)
def test_bpow(a, b):
c = a ** b
check_binary_op_forward(c, lambda a, b: a ** b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_binary_data)
def test_bneq(a, b):
c = a != b
# '!=' is sensitive to the precision of the comparison. Force numpy to match mxnet's float32.
# Issue exposed with seed 1644387363
check_binary_op_forward(c, lambda a, b: (np.float32(a) != np.float32(b)).astype(a.dtype), gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bneq(a, b)
def test_broadcast_binary_op():
def check_bmaxmin_gradient(test_sym, x, y, delta, rtol, atol):
"""This function ensures that checking the numerical gradient of
broadcast_max/min is not crossing the boundary y=x where there
is no gradient definition at those sigularities."""
x_max = np.max(x)
y = x_max + 2 * delta + np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
x_min = np.min(x)
y = x_min - 2 * delta - np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = mx.sym.broadcast_plus(a, b)
check_binary_op_forward(c, lambda a, b: a + b, gen_broadcast_data, mx_nd_func=mx.nd.add)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_broadcast_data)
def test_bminus(a, b):
c = mx.sym.broadcast_minus(a, b)
check_binary_op_forward(c, lambda a, b: a - b, gen_broadcast_data, mx_nd_func=mx.nd.subtract)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_broadcast_data)
def test_bmul(a, b):
c = mx.sym.broadcast_mul(a, b)
check_binary_op_forward(c, lambda a, b: a * b, gen_broadcast_data, mx_nd_func=mx.nd.multiply)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_broadcast_data)
def test_bdiv(a, b):
c = mx.sym.broadcast_div(a, b)
check_binary_op_forward(c, lambda a, b: a / b, gen_broadcast_data, mx_nd_func=mx.nd.divide)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_broadcast_data)
def test_bmod(a_, b_):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
a = mx.sym.cast(a_, dtype='float64')
b = mx.sym.cast(b_, dtype='float64')
# '%' is sensitive to precision near quotient boundaries; the float64 cast above
# plus atol=1 in the forward check keeps the comparison stable.
c = mx.sym.broadcast_mod(a, b)
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data, atol=1, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.broadcast_mod(mx.sym.cast(a, dtype='int32'), mx.sym.cast(b, dtype='int32'))
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data_int, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bpow(a, b):
c = mx.sym.broadcast_power(a, b)
check_binary_op_forward(c, lambda a, b: a ** b, gen_broadcast_data, mx_nd_func=mx.nd.power)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_broadcast_data)
def test_bequal(a, b):
c = mx.sym.broadcast_equal(a, b)
check_binary_op_forward(c, lambda a, b: (a == b).astype(a.dtype), gen_broadcast_data_int,
mx_nd_func=mx.nd.equal)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bmax(a, b):
c = mx.sym.broadcast_maximum(a, b)
check_binary_op_forward(c, lambda x, y: np.maximum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.maximum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bmin(a, b):
c = mx.sym.broadcast_minimum(a, b)
check_binary_op_forward(c, lambda x, y: np.minimum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.minimum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_band(a, b):
c = mx.sym.broadcast_logical_and(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_and(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_and)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bor(a, b):
c = mx.sym.broadcast_logical_or(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_or(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_or)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bxor(a, b):
c = mx.sym.broadcast_logical_xor(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_xor(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_xor)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bequal(a, b)
test_bmax(a, b)
test_bmin(a, b)
test_band(a, b)
test_bor(a, b)
test_bxor(a, b)
def test_run_convolution_dilated_impulse_response(dil=(1,1), kernel_shape=(3,3), verbose=False):
dim = len(dil)
assert(len(kernel_shape) == dim)
# Input for spike response
data_size = 33
data_shape = (1, 1) + (data_size,) * dim
center = (0,0) + (data_size // 2,) * dim
spike_imgs = np.zeros(shape=data_shape, dtype=np.float32)
spike_imgs[center] = 1.0
spike_img = mx.nd.array(spike_imgs)
spike_img2 = mx.nd.array(spike_imgs)
kernel_weights = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel_weights2 = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel = mx.symbol.Variable('kernel')
in_img = mx.symbol.Variable('input')
net = mx.symbol.Convolution(in_img, num_filter=1,kernel=kernel_shape, dilate=dil, no_bias="true", name='test_convolution')
net.list_arguments()
be = net._bind(default_context(), args={ 'input' : spike_img, 'test_convolution_weight' : kernel_weights},
args_grad={'input' : spike_img2, 'test_convolution_weight' : kernel_weights2 } )
be.forward(True)
out_o = be.outputs[0].asnumpy()
ndo = be.outputs[0]
out_grads = np.zeros(shape=be.outputs[0].shape, dtype=np.float32)
out_grads[center] = 1.0
out_grad = mx.nd.array(out_grads)
be.backward([out_grad])
vgrad = be.grad_arrays[0].asnumpy()
out = out_o.reshape(out_o.shape[2:])
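# A unit impulse convolved with an all-ones kernel copies the kernel into the
# output, so both the output and the propagated input gradient should sum to
# prod(kernel_shape).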
nz_loc = np.nonzero(out)
assert_allclose(np.sum(out),np.prod(kernel_shape),atol=1e-5)
assert_allclose(np.sum(vgrad),np.prod(kernel_shape),atol=1e-5)
# Now check whether the input gradient was computed correctly
input_grad = mx.nd.array(vgrad)
be = net._bind(default_context(), args={ 'input' : input_grad, 'test_convolution_weight' : kernel_weights})
be.forward(True)
out_o = be.outputs[0].asnumpy()
assert_allclose(out_o[center],np.prod(kernel_shape),atol=1e-5)
rnd_kernel_s = np.random.uniform(low=0.0, high=1.0, size=tuple([1,1]+list(kernel_shape))).astype(np.float32)
impulse_error = mx.nd.array(out_o/np.sum(out_o)) # This should be 1.0 at [0,0,16,16]
rnd_kernel = mx.nd.array(rnd_kernel_s)
rnd_kernel2 = mx.nd.array(rnd_kernel_s)
white_in = mx.nd.ones(shape=data_shape)
white_in2 = mx.nd.ones(shape=data_shape)
be = net._bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : rnd_kernel},
args_grad={'input' : white_in2, 'test_convolution_weight' : rnd_kernel2 } )
be.forward(True)
be.backward([impulse_error])
out_orig = be.outputs[0].asnumpy()
kernel_gradient = be.grad_arrays[1].asnumpy()
dkernel = mx.nd.array(rnd_kernel_s + kernel_gradient)
be = net._bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : dkernel})
be.forward(True)
out = be.outputs[0].asnumpy()
# Now do a simple check of the kernel gradient
assert(out[center] - np.sum(kernel_gradient) - out_orig[center] < 0.001)
def test_convolution_dilated_impulse_response():
# 1D
for dil in [ (1,), (2,), (3,) ]:
for ks in [ (1,), (2,), (3,), (4,)]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 2D
for dil in [ (1,1), (2,2), (3,3) ]:
for ks in [ (3,3), (4,4), (2,3), (3,2), (1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 3D
for dil in [ (1,1,1), (2,2,2), (3,3,3) ]:
for ks in [ (3,3,3), (4,4,4), (2,3,4), (3,2,4), (1,1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
@pytest.mark.serial
@pytest.mark.parametrize('src_shape,shape_args,reverse,dst_shape', [
((2, 3, 5, 5), (0, -1), False, (2, 75)),
((2, 3, 5, 5), (0, 0, -1), False, (2, 3, 25)),
((5, 3, 4, 5), (0, -1, 0), False, (5, 15, 4)),
((2, 3, 5, 4), (-1, 0, 0), False, (8, 3, 5)),
((2, 3, 5, 5), (0, 0, 0, 0), False, (2, 3, 5, 5)),
((2, 4, 5, 3), (-1, 2, 2, 1), False, (30, 2, 2, 1)),
((2, 3, 5, 6), (-2,), False, (2, 3, 5, 6)),
((2, 3, 5, 6), (6, 1, -2), False, (6, 1, 5, 6)),
((2, 3, 5, 6), (-3, -3), False, (6, 30)),
((2, 3, 5, 6), (-3, -1), False, (6, 30)),
((64,), (-4, 16, 4), False, (16, 4)),
((64,), (-4, 16, -1), False, (16, 4)),
((64, 1, 2, 3), (-4, 16, -1, -2), False, (16, 4, 1, 2, 3)),
((2, 3, 5, 5), (0, -1), True, (5, 30)),
((2, 3, 5, 5), (0, 0, -1), True, (3, 5, 10)),
((5, 3, 4, 5), (0, -1, 0), True, (3, 20, 5)),
((2, 3, 5, 4), (-1, 0, 0), True, (6, 5, 4)),
((2, 3, 4, 5), (3, -1, 0), True, (3, 8, 5)),
((2, 3, 5, 5), (5, 3, 0, -1), True, (5, 3, 5, 2)),
((2, 3, 5, 5), (0, 0, 0, 0), True, (2, 3, 5, 5)),
((2, 3, 5, 6), (-2,), True, (2, 3, 5, 6)),
((2, 3, 5, 6), (-2, 1, 30), True, (2, 3, 1, 30)),
((2, 3, 5, 6), (-3, -3), True, (6, 30)),
((64,), (16, 4, -4), True, (16, 4)),
((64,), (16, -1, -4), True, (16, 4)),
((1, 2, 3, 64), (-2, -1, 16, -4), True, (1, 2, 3, 4, 16))
])
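# Shape codes follow mx.sym.Reshape semantics: 0 keeps the corresponding input dim,
# -1 infers one dim, -2 copies all remaining dims, -3 merges two consecutive dims,
# and -4 splits one dim into two; reverse=True applies the codes from right to left.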
def test_reshape_new(src_shape, shape_args, reverse, dst_shape):
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, shape=shape_args, reverse=reverse)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=src_shape)
assert output_shape[0] == dst_shape, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
dat_npy = np.random.rand(*src_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net._simple_bind(default_context(), data=src_shape)
exe.arg_dict['data'][:] = dat_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - dat_npy.reshape(dst_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['data'].asnumpy() - grad_npy.reshape(src_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
for i in range(len(src_shape)):
holdout_src_shape = list(src_shape)
holdout_src_shape[i] = 0
holdout_src_shape = tuple(holdout_src_shape)
net = mx.sym.Variable('data')
net = mx.sym.elemwise_add(net.reshape(shape_args, reverse=reverse), mx.sym.ones(shape=dst_shape))
input_shape, output_shape, __ = net.infer_shape(data=holdout_src_shape)
assert output_shape[0] == dst_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
assert input_shape[0] == src_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
def test_reshape_old():
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, target_shape=(2, 0))
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=(2, 3, 5, 5))
assert(output_shape[0] == (2, 75))
# Test for Flatten
data = mx.sym.Variable("data")
net = mx.sym.Flatten(data)
exe = net._simple_bind(ctx=default_context(), data=(5, 4, 3, 7))
data_npy = np.random.normal(size=(5, 4, 3, 7))
out_grad_npy = np.random.normal(size=(5, 4 * 3 * 7))
outputs = exe.forward(is_train=True, data=data_npy)[0].asnumpy()
assert_allclose(outputs, data_npy.reshape((5, 4 * 3 * 7)))
exe.backward(out_grads=[mx.nd.array(out_grad_npy, ctx=default_context())])
assert_allclose(exe.grad_arrays[0].asnumpy(), out_grad_npy.reshape((5, 4, 3, 7)))
def test_reshape_like():
def test_reshape_like_new(lhs_shape, rhs_shape, lbeg, lend, rbeg, rend, dst_shape):
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs, lhs_begin=lbeg, lhs_end=lend, rhs_begin=rbeg, rhs_end=rend)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=lhs_shape, rhs=rhs_shape)
assert output_shape[0] == dst_shape, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
lhs_npy = np.random.rand(*lhs_shape)
rhs_npy = np.random.rand(*rhs_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net._simple_bind(default_context(), lhs=lhs_shape, rhs=rhs_shape)
exe.arg_dict['lhs'][:] = lhs_npy
exe.arg_dict['rhs'][:] = rhs_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - lhs_npy.reshape(dst_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['lhs'].asnumpy() - grad_npy.reshape(lhs_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
# Test new api (Using shape)
test_cases = [
[(30,), (15,2,4), 0, None, 0, 2, (15,2)],
[(30,), (15,2,4), None, 1, None, 2, (15,2)],
[(30,7), (15,2,4), 0, 1, 0, 2, (15,2,7)],
[(3,5), (1,15,4), 0, 2, 1, 2, (15,)],
[(3,5), (1,15,4), 0, None, 1, -1, (15,)],
[(30,12), (4,2,2,3), -1, None, 1, None, (30,2,2,3)],
[(1,1,7,3,1,1), (81,1,1,21), 1, -1, 1, None, (1,1,1,21,1)]
]
for test_case in test_cases:
test_reshape_like_new(*test_case)
# Test old api
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=(40, 30), rhs=(30,20,2))
assert(output_shape[0] == (30,20,2))
def test_reduce():
sample_num = 500
def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym, nan_prob=0,
test_exclude=True, test_none_axis=False):
for i in range(sample_num):
# Generate random data with ndim between 1 and 5 and every shape dim between 1 and 5
# Insert a NaN with probability equal to nan_prob
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
axis_num = np.random.randint(0, ndim, size=1)
axis_flags = np.random.randint(0, 2, size=ndim)
if test_exclude:
exclude = np.random.randint(0, 2)
else:
exclude = False
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
axes.append(axis)
if 0 == len(axes):
axes = None
elif 1 == len(axes):
axes = axes[0]
else:
axes = tuple(axes)
keepdims = np.random.randint(0, 2)
a = mx.symbol.Variable('a')
if axes is None:
if test_none_axis:
b = mx_reduce_sym(a, keepdims=keepdims, axis=axes)
else:
b = mx_reduce_sym(a, keepdims=keepdims)
elif exclude and isinstance(axes, tuple) and len(axes) < ndim:
naxes = [i for i in range(ndim) if i not in axes]
b = mx_reduce_sym(a, axis=naxes, keepdims=keepdims, exclude=True)
else:
b = mx_reduce_sym(a, axis=axes, keepdims=keepdims)
dat_npy = np.random.rand(*shape)
# Test with both negative and positive values (randomly). Avoid having both in the same
# test, which can be problematic for error checking due to near-zero values.
if np.random.rand() > 0.5:
dat_npy = -dat_npy
if nan_prob > 0:
dat_npy[np.random.rand(*shape) < nan_prob] = np.nan
sum_groundtruth = np.array(numpy_reduce_func(dat_npy, axis=axes, keepdims=keepdims))
if sum_groundtruth.shape == ():
sum_groundtruth = np.array([sum_groundtruth])
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.array(np.random.rand(*sum_groundtruth.shape))
keepdim_shape = np_reduce(dat_npy, axes, 1, np.sum).shape
grad_groundtruth = numpy_reduce_grad_func(outgrad=outgrad_npy, data=dat_npy,
outdata=sum_groundtruth,
axis=axes, keepdims=keepdims,
keepdim_shape=keepdim_shape)
net = b._bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
# check forward
assert_almost_equal_ignore_nan(net.outputs[0].asnumpy(), sum_groundtruth, rtol=1e-4, atol=1e-4)
net.backward(out_grads=mx.nd.array(outgrad_npy))
bc_grad_groundtruth = np.broadcast_to(grad_groundtruth, grad_nd.shape)
# check backward
assert_almost_equal_ignore_nan(grad_nd.asnumpy(), bc_grad_groundtruth, rtol=1e-4, atol=1e-4)
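# Each reduction below is paired with its analytic gradient: sum broadcasts the
# upstream gradient, mean also divides by the reduction size, prod/nanprod scale by
# outdata/data, nansum/nanprod zero the gradient at NaN inputs, max/min route the
# gradient to the extremal entries, and norm scales by data/outdata.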
test_none_axis = [True, False]
for test_none in test_none_axis:
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.sum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape),
mx.symbol.sum, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.mean),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape)/(data.size/outdata.size),
mx.symbol.mean, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.prod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data),
mx.symbol.prod, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nansum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape)),
mx.symbol.nansum, 0.3, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nanprod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape) *
(outdata.reshape(keepdim_shape) / data)),
mx.symbol.nanprod, 0.3, test_none_axis=test_none)
# grad of max and min are sensitive to the precision of the calculation.
# Force numpy to match mxnet's float32.
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.max),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.max)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.min),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.min)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.linalg.norm),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (data / outdata.reshape(keepdim_shape)),
mx.symbol.norm, test_exclude=False, test_none_axis=test_none)
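# test_broadcast checks broadcast_axis/broadcast_to/broadcast_like: the forward
# pass replicates the input along the broadcast axes, and the backward pass is
# expected to sum the output gradient over those axes (compared here against
# np_reduce(..., np.sum) with keepdims=True).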
def test_broadcast():
sample_num = 200
for i in range(sample_num):
        # Generate random data that has ndim between 1-5 and all the shape dims between 1-5
ndim = np.random.randint(1, 6)
target_shape = np.random.randint(1, 6, size=(ndim,))
axis = tuple(set(np.random.randint(0, ndim, np.random.randint(1, ndim + 1))))
shape = target_shape.copy()
size = tuple([shape[ele] for ele in axis])
for ele in axis:
shape[ele] = 1
target_shape_with_zero = list(target_shape)
for idx in range(len(target_shape_with_zero)):
if idx not in axis:
target_shape_with_zero[idx] = 0
break
a = mx.symbol.Variable('a')
sym_bcast_axis = mx.symbol.broadcast_axis(a, axis=axis, size=size)
sym_bcast_to = mx.symbol.broadcast_to(a, shape=tuple(target_shape))
sym_bcast_to_with_zero = mx.symbol.broadcast_to(a, shape=tuple(target_shape_with_zero))
sym_bcast_like = mx.symbol.broadcast_like(a, sym_bcast_to)
def test_broadcasting_ele(sym_bcast):
dat_npy = np.random.rand(*shape)
groundtruth = dat_npy
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.random.rand(*target_shape)
grad_groundtruth = np_reduce(outgrad_npy, axis=axis, keepdims=True,
numpy_reduce_func=np.sum)
net = sym_bcast._bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
assert (net.outputs[0].shape == target_shape).all()
assert_almost_equal(net.outputs[0], groundtruth, rtol=1e-4)
net.backward(out_grads=mx.nd.array(outgrad_npy))
assert_almost_equal(grad_nd, grad_groundtruth, rtol=1e-4)
test_broadcasting_ele(sym_bcast_axis)
test_broadcasting_ele(sym_bcast_to)
test_broadcasting_ele(sym_bcast_to_with_zero)
test_broadcasting_ele(sym_bcast_like)
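# test_transpose compares mx.nd.transpose against np.transpose for random axis
# permutations of 1-9 dimensional arrays; calling transpose without `axes`
# should reverse the axis order, matching numpy's default.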
def test_transpose():
for ndim in range(1, 10):
for t in range(5):
dims = list(np.random.randint(1, 5, size=ndim))
axes = list(range(ndim))
random.shuffle(axes)
axes = tuple(axes)
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
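# The next test builds permutations of the form axes[:n1] + axes[n2:] + axes[n1:n2],
# i.e. a contiguous block of axes is moved to the end. Such permutations are
# equivalent to a 2-D transpose of two flattened axis groups, which is presumably
# the "pseudo 2-D transpose" fast path the test name refers to; it is exercised
# for int8/half/int32/int64 inputs.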
@pytest.mark.serial
def test_pseudo2dtranspose():
def getTwoInts(mn, mx):
n1 = np.random.randint(mn, mx)
n2 = np.random.randint(mn, mx-1)
n2 = n2 if n2 < n1 else n2+1
return tuple(np.sort([n1, n2]))
def getTranspAxes(ndim):
axes = list(range(ndim))
n1, n2 = getTwoInts(0,ndim)
return tuple(axes[:n1]+axes[n2:]+axes[n1:n2])
for ndim in range(2, 7):
for dt in ['int8', 'half', 'int32', 'int64']:
for _ in range(5):
dims = list(np.random.randint(5, 20, size=ndim))
axes = getTranspAxes(ndim)
x = mx.nd.array(np.random.normal(size=dims), dtype=dt)
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
@pytest.mark.serial
def test_big_transpose():
n = [1]
d = list(np.random.randint(132, 160, size=1))
hw = list(np.random.randint(256, 320, size=2))
c = [10]
dims = n + d + hw + c
axes = (0,4,1,2,3)
x_np = np.random.normal(size=dims).astype('uint8')
x = mx.nd.array(x_np, dtype='uint8')
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x_np, axes=axes), y.asnumpy().astype('uint8'))
axes = (0,2,3,4,1)
z = mx.nd.transpose(y, axes=axes)
assert_allclose(x_np, z.asnumpy().astype('uint8'))
@pytest.mark.serial
def test_larger_transpose():
x = mx.nd.random.normal(shape=(50,51))
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
def test_expand_dims():
for ndim in range(1, 6):
for axis in range(-ndim + 1, ndim):
x = np.random.normal(size=list(np.random.randint(1, 10, size=ndim)))
y = mx.nd.array(x)
x1 = np.expand_dims(x, axis=axis)
y1 = mx.nd.expand_dims(y, axis=axis)
assert_allclose(x1, y1.asnumpy())
assert_allclose(x1.shape, y1.shape)
def test_crop():
for ndim in range(1, 6):
for t in range(5):
dims = []
begin = []
end = []
idx = []
for i in range(ndim):
d = random.randint(1, 5)
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if b == 0 and random.randint(0, 1):
b = None
elif b != 0 and random.randint(0, 1):
b -= d
if e == d and random.randint(0, 1):
e = None
elif e != d and random.randint(0, 1):
e -= d
dims.append(d)
begin.append(b)
end.append(e)
idx.append(slice(b, e))
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.crop(x, begin=tuple(begin), end=tuple(end))
assert_allclose(x.asnumpy()[idx], y.asnumpy())
vx = mx.sym.Variable('x')
vy = mx.sym.crop(vx, begin=tuple(begin), end=tuple(end))
check_numeric_gradient(vy, [x.asnumpy()])
def test_slice_axis():
for ndim in range(1, 6):
shape = np.random.randint(1, 11, size=(ndim,))
for t in range(ndim):
d = shape[t]
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if np.random.rand() > 0.6:
e = None
else:
if e < d and np.random.rand() > 0.5:
e = e - d
if np.random.rand() > 0.5:
b = b - d
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
idx[t] = slice(b, e)
X = mx.symbol.Variable('X')
x = mx.nd.array(np.random.normal(size=shape))
Y = mx.symbol.slice_axis(data=X, axis=t, begin=b, end=e)
xgrad = mx.nd.empty(x.shape)
exec1 = Y._bind(default_context(), args = [x], args_grad = {'X': xgrad})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
x_grad_npy = np.random.normal(size=x.shape)
xgrad = mx.nd.array(x_grad_npy)
exec2 = Y._bind(default_context(), args=[x], args_grad={'X': xgrad}, grad_req="add")
exec2.forward(is_train=True)
exec2.backward([exec2.outputs[0]])
xx = np.zeros(shape=x.shape, dtype=np.float32)
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5)
def test_slice_like():
for ndim in range(1, 6):
from_shape = np.random.randint(1, 11, size=(ndim,))
shape = [s + np.random.randint(0, 3) for s in from_shape]
for t in range(ndim):
if t > 0:
axes = np.random.randint(0, ndim, size=t).tolist()
else:
axes = []
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
if i in axes or not axes:
idx[i] = slice(0, from_shape[i])
if axes:
pos = np.random.randint(0, t)
if axes[pos] > 0:
axes[pos] -= ndim # negative index
X = mx.symbol.Variable('X')
X_1 = mx.symbol.Variable('X1')
x = mx.nd.array(np.random.normal(size=shape))
x1 = mx.nd.array(np.random.normal(size=from_shape))
Y = mx.symbol.slice_like(data=X, shape_like=X_1, axes=axes)
xgrad = mx.nd.empty(x.shape)
xgrad1 = mx.nd.empty(x1.shape)
exec1 = Y._bind(default_context(), args = [x, x1],
args_grad = {'X': xgrad, 'X1': xgrad1})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
assert_allclose(xgrad1.asnumpy(), mx.nd.zeros_like(xgrad1).asnumpy())
def test_slice_like_different_types():
x = [[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.]]
y = [[ 0., 0., 0.],
[ 0., 0., 0.]]
x = mx.nd.array(x)
y = mx.nd.array(y).astype('int32')
z = mx.nd.slice_like(x, y)
assert_allclose(z.asnumpy(), [[1,2,3],[5,6,7]])
def test_reshape_like_different_types():
x = mx.nd.zeros((2, 3))
y = mx.nd.array([[1, 2], [3, 4], [5, 6]])
y = mx.nd.array(y).astype('int32')
z = mx.nd.reshape_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0],[0,0]])
def test_broadcast_like_different_types():
x = mx.nd.zeros((2, 1))
y = mx.nd.ones((2, 2))
y = mx.nd.array(y).astype('int32')
z = mx.nd.broadcast_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0]])
assert x.dtype == z.dtype
def test_flip():
for ndim in range(1, 6):
for t in range(5):
dims = [random.randint(1,10) for i in range(ndim)]
axis = random.randint(0, ndim-1)
idx = [slice(None, None, -1) if i == axis else slice(None, None) for i in range(ndim)]
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.flip(x, axis=axis)
assert_allclose(x.asnumpy()[idx], y.asnumpy())
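# test_stn runs a SpatialTransformer whose localisation network is initialised
# so that the predicted affine transform is a pure 0.5x scaling
# (conv/fc weights zeroed, fc bias = [0.5, 0, 0, 0, 0.5, 0]). The output is then
# just the central crop of the input, which is what the forward check
# (data[:, :, h//4:h-h//4, w//4:w-w//4]) and the matching backward check assert.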
def test_stn():
import sys
np.set_printoptions(threshold=sys.maxsize)
num_filter = 2 # conv of loc net
kernel = (3, 3) # conv of loc net
num_hidden = 6 # fc of loc net
for n in [1, 2, 3, 4]:
for c in [1, 2, 3, 4]:
            for h in [5, 9, 13, 17]:  # for convenience of the test, the third and fourth input dims should be of the form 4x + 1
for w in [5, 9, 13, 17]:
data_shape = (n, c, h, w)
target_shape = (int((data_shape[2]+1)/2), int((data_shape[3]+1)/2))
data = mx.sym.Variable(name="data")
loc = mx.sym.Convolution(data=data, kernel=kernel, pad=(1, 1), num_filter=num_filter, name="loc_conv")
loc = mx.sym.Flatten(data=loc)
loc = mx.sym.FullyConnected(data=loc, num_hidden=num_hidden, name="loc_fc")
stn = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=target_shape,
transform_type="affine", sampler_type="bilinear")
arg_names = stn.list_arguments()
arg_shapes, out_shapes, _ = stn.infer_shape(data=data_shape)
# check shape
assert out_shapes[0] == (data_shape[0], data_shape[1], target_shape[0], target_shape[1])
dev = default_context()
args = {}
args['data'] = mx.random.normal(0, 1, data_shape, ctx=mx.cpu()).copyto(dev)
args['loc_conv_weight'] = mx.nd.zeros((num_filter, data_shape[1], kernel[0], kernel[1]), ctx=dev)
args['loc_conv_bias'] = mx.nd.zeros((num_filter,), ctx=dev)
args['loc_fc_weight'] = mx.nd.zeros((6, num_filter*data_shape[2]*data_shape[3]), ctx=dev)
args['loc_fc_bias'] = mx.nd.array([0.5, 0, 0, 0, 0.5, 0], ctx=dev)
grad_grad = [mx.nd.zeros(shape, ctx=dev) for shape in arg_shapes]
exe = stn._bind(dev, args=args, args_grad=grad_grad)
exe.forward(is_train=True)
out = exe.outputs[0]
# check forward
assert_almost_equal(out, args['data'].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
out_grad = mx.nd.ones(out.shape, ctx=dev)
exe.backward([out_grad])
# check backward
assert_almost_equal(out_grad, grad_grad[0].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
def test_stn_valid_sampling():
target_shape = (
28,
28,
)
src_shape = (
42,
42,
)
data = mx.sym.Variable(name="data")
loc = mx.sym.Variable(name="loc")
data_array = np.zeros((
1,
1,
) + src_shape)
# Have an ever so slight rotation.
loc_array = np.array(
[[9.03887e-05, 1.00015, 0.00174931, 1.0003, 0.000311901,
-0.000919065]])
stn = mx.sym.SpatialTransformer(
data=data,
loc=loc,
target_shape=target_shape,
transform_type="affine",
sampler_type="bilinear")
grad_req = {k: 'write' for k in stn.list_arguments()}
grads = {
'data': mx.nd.array(np.zeros_like(data_array)),
'loc': mx.nd.array(np.zeros_like(loc_array))
}
executor = stn._bind(
ctx=default_context(),
args={'data': mx.nd.array(data_array),
'loc': mx.nd.array(loc_array)},
grad_req=grad_req,
args_grad=grads)
executor.forward(is_train=True)
executor.backward(mx.nd.ones((
1,
1,
) + target_shape))
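# test_dot checks mx.sym.dot against numpy: forward C = A.dot(B) and the
# standard matrix-product gradients dA = dC.dot(B.T), dB = A.T.dot(dC).
# The transpose_a/transpose_b variants are additionally verified with the
# numeric gradient checker. float16 and 1-D inputs are only covered on GPU.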
def test_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
ndims = [2]
if ctx.device_type == 'gpu':
dtypes += ['float16']
ndims += [1]
# Test normal dot.
for ndim in ndims:
for data_type in dtypes:
tol = 1e-2 if data_type == 'float16' else 1e-3
for m in range(1, 5):
for k in range(1, 5):
                    if ndim == 1 and k != 1:
                        continue  # skip redundant combinations; the intended 1-D shapes do not involve k
for n in range(1, 5):
a_shape = (m, k) if ndim == 2 else (m,)
b_shape = (k, n) if ndim == 2 else (n,)
a_npy = np.random.normal(0, 1, (m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((m, k), dtype=data_type)
bgrad_npy = np.empty((k, n), dtype=data_type)
c_npy[:, :] = np.dot(a_npy[:, :], b_npy[:, :])
bgrad_npy[:, :] = np.dot(a_npy[:, :].T, ograd_npy[:, :])
agrad_npy[:, :] = np.dot(ograd_npy[:, :], b_npy[:, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.dot(a, b)
exe = c._simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape)
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0], c_npy, rtol=tol, atol=tol)
exe.backward(out_grads=[mx.nd.array(ograd_npy, mx.cpu()).astype(data_type)])
assert_almost_equal(exe.grad_dict['a'], agrad_npy, rtol=tol, atol=tol)
assert_almost_equal(exe.grad_dict['b'], bgrad_npy, rtol=tol, atol=tol)
# Test dot with transpose flag using gradient checker.
def dot_sym(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y)
def dot_sym_xT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True)
def dot_sym_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_b=True)
def dot_sym_xT_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True, transpose_b=True)
for data_type in dtypes:
for ashape, bshape in [((3, 4), (4, 5)), ((2, 3, 4), (4, 5, 6))]:
m1_npy = np.random.uniform(-1, 1, ashape)
m1_npy = m1_npy.astype(data_type)
m2_npy = np.random.uniform(-1, 1, bshape)
m2_npy = m2_npy.astype(data_type)
check_numeric_gradient(dot_sym(data_type), [m1_npy, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT(data_type), [m1_npy.T, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_yT(data_type), [m1_npy, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT_yT(data_type), [m1_npy.T, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
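# test_batch_dot does the same per batch slice: C[i] = A[i].dot(B[i]) with
# gradients dA[i] = dC[i].dot(B[i].T) and dB[i] = A[i].T.dot(dC[i]), for random
# transpose_a/transpose_b flags. It binds the symbol twice to cover both
# grad_req='write' and grad_req='add' (the latter must accumulate onto the
# pre-seeded gradient arrays).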
def test_batch_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
if ctx.device_type == 'gpu':
dtypes += ['float16']
for data_type in dtypes:
for batch_size in range(1, 5):
for m in range(1, 5):
for k in range(1, 5):
for n in range(1, 5):
transpose_a = (np.random.rand() > 0.5)
transpose_b = (np.random.rand() > 0.5)
a_npy = np.random.normal(0, 1, (batch_size, m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (batch_size, k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((batch_size, m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (batch_size, m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((batch_size, m, k), dtype=data_type)
bgrad_npy = np.empty((batch_size, k, n), dtype=data_type)
a_init_grad_npy = np.random.normal(size=(batch_size, m, k))
a_init_grad_npy = a_init_grad_npy.astype(data_type)
b_init_grad_npy = np.random.normal(size=(batch_size, k, n))
b_init_grad_npy = b_init_grad_npy.astype(data_type)
for i in range(batch_size):
c_npy[i, :, :] = np.dot(a_npy[i, :, :], b_npy[i, :, :])
bgrad_npy[i, :, :] = np.dot(a_npy[i, :, :].T, ograd_npy[i, :, :])
agrad_npy[i, :, :] = np.dot(ograd_npy[i, :, :], b_npy[i, :, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.batch_dot(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
if transpose_a:
a_npy = np.transpose(a_npy, axes=(0, 2, 1))
agrad_npy = np.transpose(agrad_npy, axes=(0, 2, 1))
a_init_grad_npy = np.transpose(a_init_grad_npy, axes=(0, 2, 1))
if transpose_b:
b_npy = np.transpose(b_npy, axes=(0, 2, 1))
bgrad_npy = np.transpose(bgrad_npy, axes=(0, 2, 1))
b_init_grad_npy = np.transpose(b_init_grad_npy, axes=(0, 2, 1))
exe = c._simple_bind(ctx=ctx,
a=a_npy.shape, b=b_npy.shape, grad_req='write')
exe_add = c._simple_bind(ctx=ctx,
a=a_npy.shape, b=b_npy.shape, grad_req='add')
exe_add.grad_dict['a'][:] = a_init_grad_npy
exe_add.grad_dict['b'][:] = b_init_grad_npy
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0], c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe.backward(out_grads=[mx.nd.array(ograd_npy, dtype=outputs[0].dtype, ctx=exe._ctx)])
assert_almost_equal(exe.grad_dict['a'], agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe.grad_dict['b'], bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe_add.forward(is_train=True, a=a_npy, b=b_npy)
exe_add.backward(out_grads=[mx.nd.array(ograd_npy, dtype=exe_add.outputs[0].dtype, ctx=exe._ctx)])
assert_almost_equal(exe_add.grad_dict['a'],
agrad_npy + a_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe_add.grad_dict['b'],
bgrad_npy + b_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
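# Reference implementation of the Correlation op used by test_correlation below.
# For every output position (i, j) and displacement channel, correlation_forward
# sums, over a kernel_size x kernel_size window and all input channels, either
# the product of the two zero-padded inputs (is_multiply=True) or their absolute
# difference (is_multiply=False), and divides by kernel_size**2 * channels.
# The output channel index encodes the (x, y) displacement within the
# neighborhood grid of radius max_displacement // stride2.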
def get_correlation(data1,data2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply):
img1 = mx.sym.Variable('img1')
img2 = mx.sym.Variable('img2')
return mx.sym.Correlation(data1=img1,data2=img2,kernel_size =kernel_size,max_displacement = max_displacement,
stride1 = stride1,stride2 = stride2,pad_size= pad_size,is_multiply = is_multiply)
def correlation_forward(data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp2 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp1[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]] = data1[:,:,:,:]
tmp2[:, :, pad_size:pad_size + data2.shape[2], pad_size:pad_size + data2.shape[3]] = data2[:,:,:,:]
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
                # (x1, y1) is the location in data1; (i, j) is the location in the output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
out[nbatch, top_channel, i, j] += tmp1[nbatch, channel,y1 + h, x1 + w] * tmp2[nbatch, channel, y2 + h,x2 + w]
else:
out[nbatch, top_channel, i, j] += abs(tmp1[nbatch, channel, y1 + h, x1 + w] - tmp2[nbatch, channel, y2 + h, x2 + w])
out /= float(kernel_size**2*data1.shape[1])
return out,tmp1,tmp2
def correlation_backward(out_grad,tmp1,tmp2,data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1_grad = np.zeros(tmp1.shape)
tmp2_grad = np.zeros(tmp2.shape)
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
                # (x1, y1) is the location in data1; (i, j) is the location in the output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*tmp2[nbatch, channel, y2 + h,x2 + w]
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*tmp1[nbatch, channel, y1 + h,x1 + w]
else:
sgn = 1 if (tmp1[nbatch, channel, y1 + h,x1 + w]>=tmp2[nbatch, channel, y2 + h,x2 + w]) else -1
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*sgn
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*(-sgn)
tmp1_grad = tmp1_grad / float(kernel_size**2*data1.shape[1])
tmp2_grad = tmp2_grad / float(kernel_size**2*data1.shape[1])
return tmp1_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],tmp2_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],
def unittest_correlation(data_shape,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply,dtype):
img1 = np.random.random(data_shape)
img1 = img1.astype(dtype)
img2 = np.random.random(data_shape)
img2 = img2.astype(dtype)
net1 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply)
net2 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply )
exe1 = net1._simple_bind(default_context(),img1=img1.shape,img2=img1.shape)
exe1.arg_dict['img1'][:] = img1
exe1.arg_dict['img2'][:] = img2
#cpu forward
exe1.forward(is_train=True)
# python forward
forward_result,tmp1,tmp2 = correlation_forward(img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# forward error
assert_almost_equal(exe1.outputs[0], forward_result, rtol=1e-4, atol=1e-4)
# out_grad
a = np.ones(forward_result.shape)
out_grad1 = mx.nd.array(a,default_context())
# cpu backward
exe1.backward(out_grads=out_grad1)
# python backward
grad1,grad2 = correlation_backward(a,tmp1,tmp2,img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# backward error
assert_almost_equal(exe1.grad_dict['img1'], grad1, rtol=1e-3, atol=1e-4)
assert_almost_equal(exe1.grad_dict['img2'], grad2, rtol=1e-3, atol=1e-4)
def test_correlation():
def test_infer_type(dtype):
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
corr = mx.sym.Correlation(data1=a, data2=b)
arg_type1, out_type1, _ = corr.infer_type(a=dtype)
        if arg_type1[0] != np.dtype(dtype) or arg_type1[1] != np.dtype(dtype) or out_type1[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from a is not as expected, "
                                            "Expected :%s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type1[0], arg_type1[1], out_type1[0]),
                                    names=['a', 'b'])
            raise AssertionError(msg)
        arg_type2, out_type2, _ = corr.infer_type(b=dtype)
        if arg_type2[0] != np.dtype(dtype) or arg_type2[1] != np.dtype(dtype) or out_type2[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from b is not as expected, "
                                            "Expected :%s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type2[0], arg_type2[1], out_type2[0]),
                                    names=['a', 'b'])
            raise AssertionError(msg)
for dtype in ['float16', 'float32']:
test_infer_type(dtype)
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 1,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 10,stride1 = 1,stride2 = 2,pad_size = 10,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,6,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,11,11), kernel_size = 5,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
with pytest.raises(MXNetError):
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 0,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype)
with pytest.raises(MXNetError):
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 0,pad_size = 5,is_multiply = False, dtype = dtype)
with pytest.raises(MXNetError):
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 0,pad_size = 5,is_multiply = True, dtype = dtype)
with pytest.raises(MXNetError):
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 0,stride2 = 1,pad_size = 4,is_multiply = True, dtype = dtype)
# Seed set because the test is not robust enough to operate on random data
@pytest.mark.seed(1234)
def test_roipooling():
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.ROIPooling(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1)
x1 = np.random.rand(4, 3, 12, 8).astype('float32')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2], [1, 3.1, 1.1, 5.2, 10.2], [0, 3, 3, 3, 3]], dtype='float32')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'write', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'add', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1E-4)
def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"):
# bind with label
X = mx.symbol.Variable('X', dtype=dtype)
Y = mx.symbol.Pad(data=X, mode=mode, pad_width=pad_width)
x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu(), dtype=dtype).copyto(xpu)
# numpy result
pad_grouped = list(zip(*[iter(list(pad_width))] * 2))
np_out = np.pad(x.asnumpy(), pad_grouped, mode)
# mxnet result
grad = mx.nd.empty(shape, ctx = xpu, dtype=dtype)
exec1 = Y._bind(xpu, args = [x], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0]
# compare numpy + mxnet
assert_almost_equal(out, np_out)
# grad check
check_numeric_gradient(Y, [x.asnumpy()], numeric_eps=1e-2, rtol=1e-2)
def test_pad():
ctx = default_context()
shape1 = (2, 3, 3, 5)
pad1 = (0, 0, 0, 0, 1, 2, 3, 4)
shape2 = (2, 3, 3, 5, 4)
pad2 = (0, 0, 0, 0, 1, 2, 3, 4, 3, 1)
# note: this op doesn't support ints yet. Add tests when supported
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
check_pad_with_shape(shape1, ctx, pad1, 'constant', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'edge', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'constant', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'edge', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'reflect', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'reflect', dtype)
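# Numpy reference for InstanceNorm: per sample and per channel, the mean and
# variance are taken over the spatial dimensions only, and the output is
# weight * (x - mean) / sqrt(var + eps) + bias, with weight/bias broadcast
# across the batch and spatial dimensions.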
def np_instance_norm(data, weight, bias, eps):
spatial_dims = data.shape[2::]
num_spatial_vals = np.prod(np.array(spatial_dims))
scale = 1/float(num_spatial_vals)
sum_axis = tuple(range(2, data.ndim))
mean = scale * np.sum(data, axis = sum_axis)
mean = np.reshape(np.repeat(mean, num_spatial_vals), data.shape)
var = scale * np.sum((data - mean)**2, axis = sum_axis)
var = np.reshape(np.repeat(var, num_spatial_vals), data.shape)
weightBatch = np.tile(weight, (data.shape[0], 1))
weightBatch = np.reshape(np.repeat(weightBatch, num_spatial_vals), data.shape)
biasBatch = np.tile(bias, (data.shape[0], 1))
biasBatch = np.reshape(np.repeat(biasBatch, num_spatial_vals), data.shape)
return weightBatch * (data - mean)/np.sqrt(var + eps) + biasBatch
def check_instance_norm_with_shape(shape, xpu):
# bind with label
eps = 0.001
X = mx.symbol.Variable('X')
G = mx.symbol.Variable('G')
B = mx.symbol.Variable('B')
Y = mx.symbol.InstanceNorm(data=X, beta=B, gamma=G, eps=eps)
x = mx.random.normal(0, 1, shape, ctx=mx.cpu()).copyto(xpu)
gamma = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
beta = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
np_out = np_instance_norm(x.asnumpy(), gamma.asnumpy(), beta.asnumpy(), eps)
exec1 = Y._bind(xpu, args = {'X':x, 'G':gamma, 'B':beta})
exec1.forward(is_train=False)
out = exec1.outputs[0]
assert_almost_equal(out, np_out, rtol=1e-4, atol=1e-4)
check_numeric_gradient(Y, {'X':x.asnumpy(), 'G':gamma.asnumpy(), 'B':beta.asnumpy()},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
def test_instance_normalization():
check_instance_norm_with_shape((1, 1, 1), default_context())
check_instance_norm_with_shape((2, 1, 2), default_context())
check_instance_norm_with_shape((2,4,5,6), default_context())
check_instance_norm_with_shape((3,3,2,3,2,1,1), default_context())
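# check_l2_normalization verifies L2Normalization for its three modes:
# 'channel' normalizes along axis 1, 'spatial' normalizes each (n, c) slice over
# the flattened spatial dimensions, and 'instance' normalizes each sample over
# everything but the batch axis. In all cases the numpy reference is
# out = x / (||x||_2 + eps).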
def check_l2_normalization(in_shape, mode, dtype, norm_eps=1e-10):
ctx = default_context()
data = mx.symbol.Variable('data')
out = mx.symbol.L2Normalization(data=data, mode=mode, eps=norm_eps)
in_data = np.random.uniform(-1, 1, in_shape).astype(dtype)
# calculate numpy results
if mode == 'channel':
assert in_data.ndim > 2
np_norm = np.linalg.norm(in_data, axis=1) + norm_eps
np_norm = np.repeat(1. / np.expand_dims(np_norm, axis=1), in_data.shape[1], axis=1)
np_out = np.multiply(in_data, np_norm)
elif mode == 'spatial':
assert in_data.ndim > 2
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], s[1], -1)), axis=2) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // (s[0] * s[1]), axis=2)
np_out = np.multiply(in_data, np_norm.reshape(s))
elif mode == 'instance':
assert in_data.ndim > 1
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], -1)), axis=1) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // s[0], axis=1)
np_out = np.multiply(in_data, np_norm.reshape(s))
else:
raise RuntimeError('Unknown l2 normalization mode')
exe = out._simple_bind(ctx=ctx, data=in_data.shape)
output = exe.forward(is_train=True, data=in_data)
# compare numpy + mxnet
    assert_almost_equal(exe.outputs[0], np_out, rtol=1e-2 if dtype == 'float16' else 1e-5, atol=1e-5)
# check gradient
check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=5e-3)
def test_l2_normalization():
for dtype in ['float16', 'float32', 'float64']:
for mode in ['channel', 'spatial', 'instance']:
nbatch = random.randint(1, 4)
nchannel = random.randint(3, 5)
height = random.randint(4, 6)
check_l2_normalization((nbatch, nchannel, height), mode, dtype)
width = random.randint(5, 7)
check_l2_normalization((nbatch, nchannel, height, width), mode, dtype)
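# check_layer_normalization compares LayerNorm against a numpy reference:
# out = gamma * (x - mean) / sqrt(var + eps) + beta with mean/var taken along
# `axis`. The analytic backward pass below uses w = out_grad * gamma / std and
# data_grad = w - mean(w) - centered * mean(w * centered), plus
# gamma_grad = sum(centered * out_grad) and beta_grad = sum(out_grad) over all
# other axes; both grad_req='write' and grad_req='add' are covered.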
def check_layer_normalization(in_shape, axis, eps, dtype=np.float32,
forward_check_eps=1E-3, backward_check_eps=1E-3,
npy_grad_check=True, finite_grad_check=True):
def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5):
if axis < 0:
axis += data.ndim
broadcast_shape = [1 for _ in range(data.ndim)]
broadcast_shape[axis] = data.shape[axis]
mean = data.mean(axis=axis, keepdims=True).astype(dtype)
var = data.var(axis=axis, keepdims=True).astype(dtype)
std = np.sqrt(var + dtype(eps)).astype(dtype)
out = np.reshape(gamma, broadcast_shape) * (data - mean) / std + \
np.reshape(beta, broadcast_shape)
return out
def npy_layer_norm_grad(data, gamma, out_grad, axis, eps):
if axis < 0:
axis += data.ndim
exclude_axis = tuple([ele for ele in range(data.ndim) if ele != axis])
data_mean = data.mean(axis=axis, keepdims=True)
data_var = data.var(axis=axis, keepdims=True)
data_std = np.sqrt(data_var + eps)
centered_data = (data - data_mean) / data_std
gamma_grad = (centered_data * out_grad).sum(axis=exclude_axis, keepdims=True)
beta_grad = out_grad.sum(axis=exclude_axis, keepdims=True)
w = out_grad * gamma.reshape([1 if i != axis else data.shape[axis] for i in range(data.ndim)])\
/ data_std
data_grad = w - w.mean(axis=axis, keepdims=True)\
- centered_data * (w * centered_data).mean(axis=axis, keepdims=True)
gamma_grad = gamma_grad.reshape((-1,))
beta_grad = beta_grad.reshape((-1,))
return data_grad, gamma_grad, beta_grad
ctx = default_context()
data = np.random.normal(0, 1, in_shape).astype(dtype)
gamma = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
beta = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
data_s = mx.symbol.Variable('data')
gamma_s = mx.symbol.Variable('gamma')
beta_s = mx.symbol.Variable('beta')
out_s = mx.symbol.LayerNorm(data=data_s, gamma=gamma_s, beta=beta_s, axis=axis, eps=eps)
exe = out_s._simple_bind(ctx, data=in_shape)
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
out_nd = exe.forward()[0]
out = npy_layer_norm(data, gamma, beta, axis, eps)
assert_almost_equal(out, out_nd, forward_check_eps, forward_check_eps)
if finite_grad_check:
for req in ['write', 'add']:
check_numeric_gradient(out_s, {'data': data, 'gamma': gamma, 'beta': beta},
grad_nodes={'data': req, 'gamma': req, 'beta': req},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
if npy_grad_check:
# Test for grad_req = write
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
exe = out_s._simple_bind(ctx, data=in_shape, grad_req='write')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad =\
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(), gt_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(), gt_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(), gt_beta_grad, backward_check_eps, backward_check_eps)
# Test for grad_req = add
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_data_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_gamma_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
init_beta_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
exe = out_s._simple_bind(ctx, data=in_shape, grad_req='add')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.grad_dict['data'][:] = init_data_grad
exe.grad_dict['gamma'][:] = init_gamma_grad
exe.grad_dict['beta'][:] = init_beta_grad
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad = \
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(),
gt_data_grad + init_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(),
gt_gamma_grad + init_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(),
gt_beta_grad + init_beta_grad, backward_check_eps, backward_check_eps)
def test_norm():
try:
import scipy
assert LooseVersion(scipy.__version__) >= LooseVersion('0.1')
from scipy.linalg import norm as sp_norm
except (AssertionError, ImportError):
print("Could not import scipy.linalg.norm or scipy is too old. "
"Falling back to numpy.linalg.norm which is not numerically stable.")
from numpy.linalg import norm as sp_norm
def l1norm(input_data, axis=0, keepdims=True):
return np.sum(abs(input_data), axis=axis, keepdims=keepdims)
def l2norm(input_data, axis=0, keepdims=True):
return sp_norm(input_data, axis=axis, keepdims=keepdims)
ctx = default_context()
data = mx.symbol.Variable('data')
in_data_dim = random_sample([2,3,4], 1)[0]
in_shape = rand_shape_nd(in_data_dim, dim=5)
epsilon = 1e-3
acc_type = {np.float16: np.float32, np.float32: np.float32, np.float64: np.float64,
np.int32: np.int32, np.int64: np.int64}
dtype_to_str = {np.float16: 'float16', np.float32: 'float32', np.float64: 'float64',
np.int32: 'int32', np.int64: 'int64'}
for enforce_safe_acc in ['1', '0']:
with environment('MXNET_SAFE_ACCUMULATION', enforce_safe_acc):
for order in [1, 2]:
for dtype in [np.float16, np.float32, np.float64]:
for i in range(in_data_dim):
for out_dtype in ['float32', 'float64']:
backward_dtype = np.float32 if out_dtype == 'float32' else np.float64
accumulation_type = acc_type[dtype]
if enforce_safe_acc == "0":
backward_dtype = dtype
out_dtype = dtype_to_str[dtype]
accumulation_type = dtype
skip_backward = 'int' in out_dtype
in_data = np.random.uniform(-1, 1, in_shape).astype(accumulation_type)
in_data[abs(in_data) < epsilon] = 2 * epsilon
norm_sym = mx.symbol.norm(data=data, ord=order, axis=i, out_dtype=out_dtype, keepdims=True)
                            npy_out = l1norm(in_data, i) if order == 1 else l2norm(in_data, i)
                            npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data.astype(dtype)], [npy_out.astype(out_dtype)],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=1e-4 if dtype == np.float16 else 1e-5, ctx=ctx, dtype=dtype)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data.astype(dtype)],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward], rtol=1e-3, atol=1e-5, ctx=ctx,
dtype=backward_dtype)
# Disable numeric gradient https://github.com/apache/incubator-mxnet/issues/11509
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
if i < in_data_dim-1:
norm_sym = mx.symbol.norm(data=data, ord=order, axis=(i, i+1), keepdims=True)
                                npy_out = l1norm(in_data, (i, i+1)) if order == 1 else l2norm(in_data, (i, i+1))
                                npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data], [npy_out.astype(dtype)],
rtol=1e-2 if dtype is np.float16 else 1e-3,
atol=1e-4 if dtype is np.float16 else 1e-5, ctx=ctx)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward.astype(out_dtype)],
rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype)
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
@pytest.mark.parametrize('enforce_safe_acc', ['1', '0'])
@pytest.mark.parametrize('dtype,forward_check_eps,backward_check_eps,in_shape_l,finite_grad_check_l', [
(np.float16, 1E-2, 1E-2, [(10, 6, 5), (10, 10)], [True, True]),
(np.float32, 1E-3, 1E-3, [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False]),
(np.float64, 1E-4, 1E-4, [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False])
])
def test_layer_norm(enforce_safe_acc, dtype, forward_check_eps, backward_check_eps,
in_shape_l, finite_grad_check_l):
with environment('MXNET_SAFE_ACCUMULATION', enforce_safe_acc):
for in_shape, finite_grad_check in zip(in_shape_l, finite_grad_check_l):
for axis in range(-len(in_shape), len(in_shape)):
for eps in [1E-2, 1E-3]:
if dtype == np.float16:
npy_grad_check = False
else:
npy_grad_check = True
check_layer_normalization(in_shape, axis, eps, dtype=dtype,
forward_check_eps=forward_check_eps,
backward_check_eps=backward_check_eps,
npy_grad_check=npy_grad_check,
finite_grad_check=finite_grad_check)
# Numpy Implementation of Sequence Ops
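# Each helper first moves the sequence axis to position 1 so data is laid out as
# [batch, seqlen, ...], then works per batch row using its length:
# SequenceLast picks element lengths[i] - 1, SequenceMask overwrites positions
# >= lengths[i] with `value`, and SequenceReverse reverses only the first
# lengths[i] steps. When lengths is None the full sequence length is used.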
def sequence_last_numpy(array, lengths, axis):
# create new array of dims [batch, seqlen, ...]
array2 = np.moveaxis(array, axis, 1)
dims = array2.shape
if lengths is None:
return array2[:, -1]
lengths = list(lengths)
return np.array([array2[i, int(lengths[i]) - 1] for i in range(dims[0])])
def sequence_mask_numpy(array, lengths, axis, value):
if lengths is None:
return array
arrayMask = array.copy()
# conform to [batch, seqlen, ...]
arrayMask = np.moveaxis(arrayMask, axis, 1)
shape = arrayMask.shape
lengths = list(lengths)
for i in range(shape[0]):
arrayMask[i, int(lengths[i]):] = value
return np.moveaxis(arrayMask, 1, axis)
def sequence_reverse_numpy(array, lengths, axis):
rarray = array.copy()
# conform to [batch, seqlen, ...]
rarray = np.moveaxis(rarray, axis, 1)
shape = rarray.shape
if lengths is None:
lengths = [shape[1]] * shape[0]
lengths = list(lengths)
for i in range(shape[0]):
j = int(lengths[i])
rarray[i,:j] = rarray[i,:j][::-1]
return np.moveaxis(rarray, 1, axis)
def check_sequence_func(ftype, mask_value=0, axis=0):
# bind with label
xpu = default_context()
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L') # lengths
shapes = [(3, 4), (1, 1), (3, 4, 3, 1, 1)]
for seqlenQ in [True, False]:
for ary_dtype in [np.float32]:
for idx_dtype in [np.int32, np.float32]:
for s in shapes:
x = mx.random.uniform(-1, 1, s, ctx=mx.cpu()).astype(ary_dtype).copyto(xpu)
batch = s[1] if (axis == 0) else s[0]
seqlen = s[axis]
l_np = np.random.randint(1, seqlen + 1, batch)
l = mx.nd.array(l_np, ctx=mx.cpu(), dtype=idx_dtype).copyto(xpu)
if not seqlenQ:
l_np = None
args = {'data':X, 'use_sequence_length':seqlenQ, "axis":axis}
if seqlenQ:
args['sequence_length'] = L
if ftype == "last":
Y = mx.symbol.SequenceLast(**args)
np_out = sequence_last_numpy(x.asnumpy(), l_np, axis)
elif ftype == "mask":
args['value'] = mask_value
Y = mx.symbol.SequenceMask(**args)
np_out = sequence_mask_numpy(x.asnumpy(), l_np, axis, mask_value)
elif ftype == "reverse":
Y = mx.symbol.SequenceReverse(**args)
np_out = sequence_reverse_numpy(x.asnumpy(), l_np, axis)
fargs = [x, l] if seqlenQ else [x]
gargs = [x.asnumpy(), l_np] if seqlenQ else [x.asnumpy()]
check_symbolic_forward(Y, fargs, [np_out], dtype="asnumpy")
check_numeric_gradient(Y, gargs, grad_nodes={'X':'write'},
numeric_eps=1e-2, rtol=1e-2)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'add'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'null'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
@pytest.mark.skip(reason="Flaky test: https://github.com/apache/incubator-mxnet/issues/11395")
def test_sequence_last():
check_sequence_func("last", axis=0)
check_sequence_func("last", axis=1)
def test_sequence_mask():
check_sequence_func("mask", axis = 0, mask_value=-2.3)
check_sequence_func("mask", axis = 1, mask_value=0.3)
def check_sequence_reverse(xpu):
# sample data
arr = np.array(
[[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr1 = np.array(
[[[ 13., 14., 15.],
[ 16., 17., 18.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]]])
arr2 = np.array(
[[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr3 = np.array(
[[[ 7., 8., 9.],
[ 16., 17., 18.]],
[[ 1., 2., 3.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 4., 5., 6.]]])
# test for matrix case
seq_len_1 = [1, 2, 2]
arr_4 = np.array([[7., 8., 9.], [16., 17., 5.4]], dtype=np.float32)
arr_5 = np.array([[7., 17., 5.4], [16., 8., 9.]], dtype=np.float32)
def test_wrapper(arr, xpu, sequence_length=None, use_sequence_length=False):
# MxNet symbol creation
seq = mx.sym.Variable('seq')
if sequence_length and use_sequence_length:
seq_len = mx.sym.Variable('seq_len')
else:
# ensure that both are disabled, not just one
seq_len=None
use_sequence_length=False
rev = mx.sym.SequenceReverse(data=seq, sequence_length=seq_len, use_sequence_length=use_sequence_length)
# MxNet symbol execution
if sequence_length:
bound = rev._bind(xpu, {'seq': mx.nd.array(arr), 'seq_len': mx.nd.array(sequence_length)})
else:
bound = rev._bind(xpu, {'seq': mx.nd.array(arr)})
fwd = bound.forward()
return fwd[0].asnumpy()
# test cases
assert_array_equal(test_wrapper(arr, xpu, use_sequence_length=False), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[3, 3], use_sequence_length=True), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 2], use_sequence_length=True), arr2)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 3], use_sequence_length=True), arr3)
assert_array_equal(test_wrapper(arr_4, xpu, sequence_length=seq_len_1, use_sequence_length=True), arr_5)
def test_sequence_reverse():
check_sequence_func("reverse", axis=0)
check_sequence_reverse(mx.cpu())
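# mathematical_core_binary / mathematical_core are small harnesses for binary
# and unary element-wise math operators: they fill the input(s) with a constant,
# run forward/backward through a bound executor and compare against the supplied
# numpy forward function and analytic derivative(s). For example, for hypot the
# two input gradients are x / hypot(x, y) and y / hypot(x, y), each scaled by
# the output gradient.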
def mathematical_core_binary(name,
forward_mxnet_call,
forward_numpy_call,
backward_numpy_call1,
backward_numpy_call2,
data1_init=2.,
data2_init=3.,
grad_init=2.):
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3, 4)
data_tmp2 = np.random.rand(3, 4)
data_tmp1[:] = data1_init
data_tmp2[:] = data2_init
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = forward_mxnet_call(data1, data2)
exe_test = test._bind(default_context(), args=[arr_data1, arr_data2], args_grad=[arr_grad1, arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp1, data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = grad_init
npout_grad1 = npout_grad * backward_numpy_call1(data_tmp1, data_tmp2)
npout_grad2 = npout_grad * backward_numpy_call2(data_tmp1, data_tmp2)
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
def mathematical_core(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
test = forward_mxnet_call(data)
exe_test = test._bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
npout_grad = out_grad.asnumpy()
temp = backward_numpy_call(data_tmp)
npout_grad = npout_grad * temp
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def test_special_functions_using_scipy():
try:
from scipy import special as scipy_special
    except ImportError:
print("Could not import scipy. Skipping unit tests for special functions")
return
# gamma
mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)
# gammaln
mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x), 0.5, 0.5)
# erf
mathematical_core("erf", lambda x: mx.sym.erf(x), lambda x: scipy_special.erf(x),
lambda x: 2.0 / math.sqrt(math.pi) * np.exp(-(x ** 2)), 0.5, 0.5)
# erfinv
mathematical_core("erfinv", lambda x: mx.sym.erfinv(x), lambda x: scipy_special.erfinv(x),
lambda x: 0.5 * math.sqrt(math.pi) * np.exp(scipy_special.erfinv(x) ** 2), 0.5, 0.5)
def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
test = forward_mxnet_call(data)
exe_test = test._bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
def test_mathematical():
# rsqrt
mathematical_core("rsqrt",
lambda x: mx.sym.rsqrt(x),
lambda x: 1 / np.sqrt(x),
lambda x: -(1.0 / (2.0 * x * np.sqrt(x))))
# tan
mathematical_core("tan", lambda x: mx.sym.tan(x), lambda x: np.tan(x), lambda x: np.tan(x) ** 2 + 1)
# arcsin
mathematical_core("arcsin", lambda x: mx.sym.arcsin(x), lambda x: np.arcsin(x),
lambda x: 1. / (1. - x ** 2) ** (1. / 2.), 0.5, 0.5)
# arccos
mathematical_core("arccos", lambda x: mx.sym.arccos(x), lambda x: np.arccos(x),
lambda x: -1. / (1. - x ** 2.) ** (1. / 2.), 0.5, 0.5)
# arctan
mathematical_core("arctan", lambda x: mx.sym.arctan(x), lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.), 0.5, 0.5)
# hypot
mathematical_core_binary("hypot",
lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
0.5, 0.5, 0.5)
# hypot scalar
mathematical_core("hypot scalar",
lambda x: mx.sym.hypot(x, 3),
lambda x: np.hypot(x, 3),
lambda x: x / np.hypot(x, 3),
0.5, 0.5)
# degrees
mathematical_core("degrees",
lambda x: mx.sym.degrees(x),
lambda x: np.degrees(x),
lambda x: 180./np.pi,
0.5, 0.5)
# radians
mathematical_core("radians",
lambda x: mx.sym.radians(x),
lambda x: np.radians(x),
lambda x: np.pi / 180.,
0.6, 1)
# sinh
mathematical_core("sinh", lambda x: mx.sym.sinh(x), lambda x: np.sinh(x), lambda x: np.cosh(x))
# cosh
mathematical_core("cosh", lambda x: mx.sym.cosh(x), lambda x: np.cosh(x), lambda x: np.sinh(x), 5, 5)
# tanh
mathematical_core("tanh", lambda x: mx.sym.tanh(x), lambda x: np.tanh(x), lambda x: 1. - np.tanh(x) ** 2, 0.5, 1)
# arcsinh
mathematical_core("arcsinh", lambda x: mx.sym.arcsinh(x), lambda x: np.arcsinh(x),
lambda x: 1./(x**2 + 1.)**(1./2.))
# arccosh
mathematical_core("arccosh", lambda x: mx.sym.arccosh(x), lambda x: np.arccosh(x),
lambda x: 1./(x**2 - 1.)**(1./2.))
# arctanh
mathematical_core("arctanh", lambda x: mx.sym.arctanh(x), lambda x: np.arctanh(x),
lambda x: -1./(x**2 - 1.), 0.5)
# log1p
mathematical_core("log1p", lambda x: mx.sym.log1p(x), lambda x: np.log1p(x),
lambda x: 1. / (1.0 + x), 0.5, 0.5)
# expm1
mathematical_core("expm1", lambda x: mx.sym.expm1(x), lambda x: np.expm1(x),
lambda x: np.exp(x), 0.5, 0.5)
# log10
mathematical_core("log10", lambda x: mx.sym.log10(x), lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)))
# log2
mathematical_core("log2", lambda x: mx.sym.log2(x), lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)))
# rint
rounding("rint", lambda x: mx.sym.rint(x), lambda x: np.rint(x))
# fix
rounding("fix", lambda x: mx.sym.fix(x), lambda x: np.fix(x))
def test_clip():
data = mx.symbol.Variable('data')
shape = (30, 30)
data_tmp = np.random.uniform(-1, 1, shape).astype('float32')
test = mx.sym.clip(data, a_max=0.6, a_min=-0.6)
check_symbolic_forward(test, [data_tmp], [np.clip(data_tmp, -0.6, 0.6)])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)],
[np.where(data_tmp <= 0.6, [1], [0]) * np.where(data_tmp >= -0.6, [1], [0])])
def test_init():
def test_basic_val_init(sym_func, np_func, shape, dtype):
x = sym_func(shape=shape, dtype=dtype)
exe = x._bind(default_context(), args=[], args_grad=[])
exe.forward(is_train=True)
assert_almost_equal(exe.outputs[0], np_func(shape=shape, dtype=dtype))
assert exe.outputs[0].asnumpy().dtype == dtype
def test_arange():
# General Random Tests
dtype_list = [np.float32, np.float64, np.int32, np.uint8]
config_list = [(10,),
(0, 10),
(5, 100, 4),
(50, -50, -2),
(-100, 100, 1),
(1.3, 456.6, 1.3)]
for dtype in dtype_list:
for config in config_list:
repeats = random.choice([1, 3])
np_out = np.repeat(np.arange(*config, dtype=dtype), repeats)
nd_out = mx.nd.arange(*config, repeat=repeats, dtype=dtype)
assert_almost_equal(np_out, nd_out)
def test_arange_inferstop():
s = mx.sym.arange(start=0, stop=None, infer_range=True)
s = mx.sym.elemwise_add(s, mx.sym.zeros(shape=[5]))
exe = s._bind(ctx=mx.cpu(), args={})
exe.forward()
assert_almost_equal(exe.outputs[0], np.array([0,1,2,3,4]))
def test_arange_like():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
axis_list = [0, -1]
for sh in shape_list:
for axis in axis_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0, axis=axis)
np_out = np.arange(start=0, stop=sh[axis])
assert_almost_equal(nd_out.asnumpy(), np_out)
def test_arange_like_without_axis():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
for sh in shape_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0)
np_out = np.arange(start=0, stop=val.size)
assert_almost_equal(nd_out.asnumpy(), np_out.reshape(sh))
test_basic_val_init(mx.sym.zeros, np.zeros, (3, 4), np.float32)
test_basic_val_init(mx.sym.ones, np.ones, 3, np.int32)
test_basic_val_init(mx.sym.ones, np.ones, (2, 2, 3), np.float16)
test_arange()
test_arange_inferstop()
test_arange_like()
test_arange_like_without_axis()
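# test_order covers sort/argsort/topk/argmax/argmin. gt_topk builds the ground
# truth from np.argsort / np.sort via np.take with mode='wrap', so negative
# indices select the largest elements; ret_typ can be 'value', 'indices' or a
# 0/1 'mask' marking the selected positions. The backward checks expect zero
# input gradients for the index/mask outputs.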
def test_order():
ctx = default_context()
def gt_topk(dat, axis, ret_typ, k, is_ascend):
if ret_typ == "indices":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
elif ret_typ == "value":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(np.sort(dat, axis=axis), axis=axis, indices=indices, mode='wrap')
else:
assert dat.shape == (5, 5, 5, 5)
assert axis is None or axis == 1
ret = np.zeros(dat.shape)
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
gt_argsort = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
if axis is None:
ret.ravel()[gt_argsort] = 1
else:
for i in range(5):
for j in range(5):
for k in range(5):
ret[i, gt_argsort[i, :, j, k], j, k] = 1
return ret
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(np.float32)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
def get_large_matrix():
data = np.array([np.arange(300096).astype(np.float32)])
data = np.repeat(data, 100, axis=0)
np.apply_along_axis(np.random.shuffle, 1, data)
return data
large_matrix_npy = get_large_matrix()
for axis in [1, 3, None]:
for is_ascend in [True, False]:
b = mx.sym.sort(a, axis=axis, is_ascend=is_ascend)
if axis is None:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=a_npy.size, is_ascend=is_ascend)
else:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=5, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, rtol=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5,
is_ascend=is_ascend)])
b = mx.sym.argsort(a, axis=1, is_ascend=False)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=5,
is_ascend=False)])
b = mx.sym.argmax(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=False)])
b = mx.sym.argmin(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=True)])
for dtype in [np.float16, np.float32, np.float64]:
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(dtype)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
for axis in [1, 3, None]:
K = [1, 3, 5, 7] if axis is None else [1, 3, 5]
for k in K:
for is_ascend in [True, False]:
b = mx.sym.topk(a, axis=axis, is_ascend=is_ascend, ret_typ="value", k=k)
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=k, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, rtol=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5, is_ascend=is_ascend)])
b = mx.sym.topk(a, axis=3, is_ascend=is_ascend, ret_typ="indices", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 3))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=3, ret_typ="indices", k=3,
                                                             is_ascend=is_ascend)])
b = mx.sym.topk(a, axis=1, is_ascend=True, ret_typ="mask", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="mask", k=3,
is_ascend=True)])
def test_blockgrad():
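    # BlockGrad should act as identity on the forward pass; the backward call just checks it runs without error.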
a = mx.sym.Variable('a')
b = mx.sym.BlockGrad(a)
exe = b._simple_bind(ctx=default_context(), a=(10, 10))
a_npy = np.random.rand(10, 10)
exe.forward(is_train=True, a=a_npy)
assert_almost_equal(exe.outputs[0], a_npy)
exe.backward() # No error if BlockGrad works
def test_take_autograd_req():
row_len = 2
col_len = 8
shape = (row_len, col_len)
sc = mx.nd.random.uniform(-1.0, 1.0, shape=shape, dtype="float32")
sc.attach_grad()
i = mx.nd.array([0], dtype="int64")
j = mx.nd.array([0], dtype="int64")
with mx.autograd.record(train_mode=True):
xs = []
for _ in range(row_len):
x_i = []
for _ in range(col_len):
x_ij = sc.take(i).squeeze(axis=0).take(j).squeeze(axis=0)
x_i.append(x_ij)
j = j + 1
i = i + 1
j = j - col_len # reset j
xs.append(mx.nd.stack(*x_i))
x = mx.nd.stack(*xs)
x = x.sum()
x.backward()
assert_almost_equal(np.ones(sc.grad.shape), sc.grad)
@pytest.mark.parametrize('mode,out_of_range', [
('clip', True),
('wrap', True),
('raise', False)
])
@pytest.mark.parametrize('data_ndim', range(1, 5))
@pytest.mark.parametrize('idx_ndim', range(1, 4))
def test_take(mode, out_of_range, data_ndim, idx_ndim):
def grad_helper(grad_in, axis, idx):
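        # Accumulate ones into the slice of grad_in selected by idx along `axis`, mirroring the expected gradient of take().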
if axis == 0:
if axis == len(grad_in.shape) - 1:
grad_in[idx] += 1.0
else:
grad_in[idx, :] += 1.0
elif axis == 1:
if axis == len(grad_in.shape) - 1:
grad_in[:, idx] += 1.0
else:
grad_in[:, idx, :] += 1.0
elif axis == 2:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, idx] += 1.0
else:
grad_in[:, :, idx, :] += 1.0
elif axis == 3:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, :, idx] += 1.0
else:
grad_in[:, :, :, idx, :] += 1.0
elif axis == 4:
grad_in[:, :, :, :, idx] += 1.0
else:
raise ValueError("axis %d is not supported..." % axis)
for axis in range(-data_ndim, data_ndim):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=1, high=5), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=1, high=5), )
data = mx.sym.Variable('a')
idx = mx.sym.Variable('indices')
idx = mx.sym.BlockGrad(idx)
result = mx.sym.take(a=data, indices=idx, axis=axis, mode=mode)
exe = result._simple_bind(default_context(), a=data_shape,
indices=idx_shape)
data_real = np.random.normal(size=data_shape).astype('float32')
if out_of_range:
idx_real = np.random.randint(low=-data_shape[axis], high=data_shape[axis], size=idx_shape)
if mode == 'raise':
idx_real[idx_real == 0] = 1
idx_real *= data_shape[axis]
else:
idx_real = np.random.randint(low=0, high=data_shape[axis], size=idx_shape)
if axis < 0:
axis += len(data_shape)
grad_out = np.ones((data_shape[0:axis] if axis > 0 else ()) + idx_shape + (data_shape[axis+1:] if axis < len(data_shape) - 1 else ()), dtype='float32')
grad_in = np.zeros(data_shape, dtype='float32')
exe.arg_dict['a'][:] = mx.nd.array(data_real)
exe.arg_dict['indices'][:] = mx.nd.array(idx_real)
exe.forward(is_train=True)
if out_of_range and mode == 'raise':
try:
mx_out = exe.outputs[0].asnumpy()
except MXNetError as e:
return
else:
# Did not raise exception
assert False, "did not raise %s" % MXNetError.__name__
assert_almost_equal(exe.outputs[0], np.take(data_real, idx_real, axis=axis, mode=mode))
for i in np.nditer(idx_real):
if mode == 'clip':
                i = np.clip(i, 0, data_shape[axis] - 1)
grad_helper(grad_in, axis, i)
exe.backward([mx.nd.array(grad_out)])
assert_almost_equal(exe.grad_dict['a'], grad_in)
def test_grid_generator():
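    # An identity affine transform should reproduce the pixel coordinate mesh once the grid is rescaled from [-1, 1] back to pixel units.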
# transform_type = affine
test_case = [(20,21),(4,3),(6,12),(15,17)]
for target_shape in test_case:
affine_matrix = mx.sym.Variable('affine')
grid = mx.sym.GridGenerator(data=affine_matrix,transform_type='affine', target_shape=target_shape)
exe = grid._simple_bind(ctx=default_context(), affine=(1,6), grad_req='write')
# check forward
exe.arg_dict['affine'][:] = np.array([[1.0,0,0,0,1.0,0]])
exe.forward(is_train=True)
output = exe.outputs[0]
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0]), np.arange(target_shape[1]))
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
tmp = np.zeros((3,target_shape[0]*target_shape[1]))
tmp[0] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) % target_shape[1]) * (2.0 / (target_shape[1]-1))
tmp[1] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) // target_shape[1]) * (2.0 / (target_shape[0]-1))
tmp[2] = 1
grad_est = np.dot(out_grad[0].reshape(2,target_shape[0]*target_shape[1]),tmp.T).reshape(1,6)
assert_almost_equal(exe.grad_dict['affine'], grad_est)
# check addto
exe = grid._simple_bind(ctx=default_context(), affine=(1,6), grad_req='add')
grid_grad_npy = np.random.normal(size=exe.grad_dict['affine'].shape)
exe.grad_dict['affine'][:] = grid_grad_npy
exe.arg_dict['affine'][:] = np.array([[1.0, 0, 0, 0, 1.0, 0]])
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['affine'], grad_est + grid_grad_npy)
# transform_type = warp
test_case = [(12,21),(4,3),(6,12)]
for target_shape in test_case:
flow = mx.sym.Variable('flow')
grid = mx.sym.GridGenerator(data=flow,transform_type='warp', target_shape=target_shape)
exe = grid._simple_bind(ctx=default_context(), flow=(1,2)+target_shape, grad_req='write')
# check forward
exe.arg_dict['flow'][:] = np.ones((1,2)+target_shape)
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0])+1, np.arange(target_shape[1])+1)
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
grad_est = np.zeros((1,2)+target_shape)
grad_est[0,0] = out_grad[0,0] / ((target_shape[1]-1.0) / 2.0)
grad_est[0,1] = out_grad[0,1] / ((target_shape[0]-1.0) / 2.0)
assert_almost_equal(exe.grad_dict['flow'], grad_est, rtol=1e-3)
# check addto
exe_add = grid._simple_bind(ctx=default_context(), flow=(1, 2) + target_shape, grad_req='add')
flow_grad_npy = np.random.normal(size=exe_add.grad_dict['flow'].shape)
exe_add.arg_dict['flow'][:] = np.ones((1, 2) + target_shape)
exe_add.grad_dict['flow'][:] = flow_grad_npy
exe_add.forward(is_train=True)
exe_add.backward(mx.nd.array(out_grad))
assert_almost_equal(exe_add.grad_dict['flow'], grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5)
def test_index2d():
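    # batch_take picks one element per row: r[i] == data[i, x[i]].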
for _ in range(30):
n = np.random.randint(1, 100)
m = np.random.randint(1, 500)
data = mx.random.uniform(-1, 1, shape=(n, m), ctx=default_context())
x = mx.nd.array(np.random.randint(0, m, size=n), ctx=default_context(), dtype='int32')
r = mx.nd.batch_take(data, x)
assert_almost_equal(r, data.asnumpy()[np.arange(n), x.asnumpy()])
def test_cast():
for srctype in [np.int32, np.float32, np.float16]:
for dsttype in [np.float32, np.int32, np.float16]:
x = mx.sym.Variable('x', dtype=srctype)
y = mx.sym.Cast(x, dtype=dsttype)
exe = y._simple_bind(ctx=default_context(), x=(10, 10))
assert exe.arg_arrays[0].dtype == srctype
X = np.random.uniform(-10, 10, size=(10, 10))
exe.arg_arrays[0][:] = X
exe.forward(is_train=True)
assert exe.outputs[0].dtype == dsttype
exe.backward(mx.nd.array(X, dtype=dsttype, ctx=default_context()))
assert_almost_equal(exe.outputs[0], X.astype(srctype).astype(dsttype), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_arrays[0], X.astype(dsttype).astype(srctype), rtol=1e-3, atol=1e-5)
def get_cast_op_data():
FP16_FRACTION_BITS = 10
FP32_FRACTION_BITS = 23
FP32_EXP_MIN = -126
FP32_EXP_MAX = 127
# generate test cases in the vicinity of representable float16 mantissas
# and mid-way between them, but over the full range of float32 exponents.
for sign_bit in [0, 1]:
for exponent in range(FP32_EXP_MIN - FP32_FRACTION_BITS - 1, FP32_EXP_MAX + 2):
denominator = 2**(FP16_FRACTION_BITS + 1)
for numerator in range(0, denominator):
fraction = numerator / float(denominator)
for y in [-1.0, 0.0, 1.0]:
small_delta = y / 2**FP32_FRACTION_BITS
val = (-1.0)**sign_bit * 2.0**exponent * (1.0 + fraction + small_delta)
yield val
# Add np.nan as a final data value to process
yield np.nan
# Test requires all platforms to round float32->float16 with same round-to-nearest-even policy.
def test_cast_float32_to_float16():
input_np = np.array(list(get_cast_op_data())).astype(np.float32)
# The intermediate cast to np.float64 below gets around a numpy rounding bug that is fixed
# as of numpy 1.17 by PR https://github.com/numpy/numpy/pull/12722
expected_output = input_np.astype(np.float64).astype(np.float16)
def check_cast(op, input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float32)
sym = op(x, dtype=np.float16)
ctx = default_context()
exe = sym._bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float32, ctx=ctx)})
assert exe.arg_arrays[0].dtype == np.float32
exe.forward(is_train=True)
assert exe.outputs[0].dtype == np.float16
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
check_cast(mx.sym.Cast, input_np, expected_output)
if default_context().device_type == 'gpu':
check_cast(mx.sym.amp_cast, input_np, expected_output)
def test_amp_multicast():
if default_context().device_type == 'cpu':
return
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res._bind(ctx, {'x': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx),
'y': mx.nd.random.uniform(shape=(3, 3), dtype=np.float32, ctx=ctx),
'z': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
out1, out2, out3 = exe.outputs
assert out1.asnumpy().dtype == np.float32
assert out2.asnumpy().dtype == np.float32
assert out3.asnumpy().dtype == np.float32
def check_amp_multicast(input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res._bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float16, ctx=ctx),
'y': mx.nd.array(input_np, dtype=np.float32, ctx=ctx),
'z': mx.nd.array(input_np, dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
sym_output = exe.outputs[0].asnumpy()
        for in_val, model_val, expected_val in zip(input_np, sym_output, expected_output):
            assert (model_val == expected_val) or \
                   (np.isnan(model_val) and np.isnan(expected_val)), \
                   'amp_multicast fp16->fp32 mismatch: with input value {}, model output = {}, expected = {}'.format(
                       in_val, model_val, expected_val)
input_np = np.array(list(get_cast_op_data()), dtype=np.float16)
expected_output = input_np.astype(np.float32)
check_amp_multicast(input_np, expected_output)
def test_all_finite():
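    # all_finite outputs 1 only when every element is finite; multi_all_finite ANDs this check across several arrays.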
data = mx.sym.Variable("data", dtype=np.float32)
data2 = mx.sym.Variable("data2", dtype=np.float32)
finite_arr = mx.nd.array([[0, 0]])
inf_arr = mx.nd.array([[np.inf, np.inf]])
z = mx.sym.all_finite(data)
ctx = default_context()
exe = z._bind(ctx, {'data': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
exe = z._bind(ctx, {'data': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z._bind(ctx, {'data': finite_arr, 'data2': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z._bind(ctx, {'data': finite_arr, 'data2': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
def test_repeat():
def test_repeat_forward():
ndim_max = 6 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
repeats = 3
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(0, ndim):
shape += (np.random.randint(1, size_max+1), )
a = np.random.random_sample(size=shape)
aa = np.repeat(a, repeats)
b = mx.nd.array(a, ctx=default_context())
bb = mx.nd.repeat(b, repeats)
assert_almost_equal(aa, bb)
for axis in range(0, ndim):
aa = np.repeat(a, repeats, axis)
bb = mx.nd.repeat(b, repeats, axis)
assert_almost_equal(aa, bb)
def test_repeat_backward(axis):
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=axis)
exe = test._bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * repeats)
if axis == 0:
npout_grad = npout_grad.reshape(n1 * repeats, n2)
elif axis == 1:
npout_grad = npout_grad.reshape(n1, n2 * repeats)
else:
raise RuntimeError("Invalid axis value")
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
if axis == 0:
for i in range(shape[0]):
for j in range(shape[1]):
k = i * repeats
expected_grad[i][j] = sum(npout_grad[k:k + repeats, j])
elif axis == 1:
for j in range(shape[1]):
for i in range(shape[0]):
k = j * repeats
expected_grad[i][j] = sum(npout_grad[i, k:k + repeats])
else:
raise RuntimeError("Invalid axis value")
assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
def test_repeat_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=0)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-3, rtol=1e-2)
test_repeat_forward()
test_repeat_backward(axis=0)
test_repeat_backward(axis=1)
test_repeat_numeric_gradient()
def test_reverse():
data = mx.symbol.Variable('data')
shape = (5, 5, 5)
data_tmp = np.random.uniform(-1, 1, shape)
test = mx.sym.reverse(data, axis=[1, 2])
grad = np.random.uniform(-1, 1, shape)
check_numeric_gradient(test, [data_tmp], numeric_eps=2E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp[:, ::-1, ::-1]])
check_symbolic_backward(test, [data_tmp], [grad], [grad[:, ::-1, ::-1]])
def test_tile():
def test_normal_case():
ndim_min = 1
ndim_max = 5 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
length_max = 3 # max length of reps
rep_max = 10 # max number of tiling in each dim
for ndim in range(ndim_min, ndim_max+1):
shape = []
for i in range(1, ndim+1):
shape.append(np.random.randint(1, size_max+1))
shape = tuple(shape)
a = np.random.randint(0, 100, shape)
b = mx.nd.array(a, dtype=a.dtype)
reps_len = np.random.randint(1, length_max+1)
reps_tuple = ()
for i in range(1, reps_len):
reps_tuple += (np.random.randint(1, rep_max), )
reps_array = np.asarray(reps_tuple)
a_tiled = np.tile(a, reps_array)
b_tiled = mx.nd.tile(b, reps_tuple).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_tensor():
shape = (2, 3, 0, 4)
with mx.np_shape():
a = np.array([], dtype=np.int32).reshape(shape)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
reps = (2, 4, 6)
a_tiled = np.tile(a, reps)
b_tiled = mx.nd.tile(b, reps).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_reps():
a = np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
a_tiled = np.tile(a, ())
b_tiled = mx.nd.tile(b, ()).asnumpy()
assert same(a_tiled, b_tiled)
def test_tile_backward():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
exe = test._bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * reps1 * reps2).reshape(n1 * reps1, n2 * reps2)
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
expected_grad[i][j] += sum(sum(npout_grad[i:(n1 * reps1):reps1, j:(n2 * reps2):reps2]))
assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
def test_tile_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-2, rtol=1e-2)
def test_invalid_reps():
data = mx.nd.arange(16).reshape((4, 4))
assert_exception(mx.nd.tile, MXNetError, data, (1, 2, -3))
assert_exception(mx.nd.tile, MXNetError, data, (1, 0, 3))
test_normal_case()
with mx.np_shape():
test_empty_tensor()
test_empty_reps()
test_tile_backward()
test_tile_numeric_gradient()
test_invalid_reps()
def test_one_hot():
def test_normal_case(index_type=np.int32):
ndim_max = 6
dim_size_max = 20
depth = int(dim_size_max / 2)
on_value = 1
off_value = 0
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(1, ndim+1):
shape += (np.random.randint(1, dim_size_max+1), )
indices = np.random.randint(-dim_size_max, dim_size_max+1,
size=np.prod(shape)).reshape(shape)
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=index_type),
depth=depth, dtype=np.int32)
expected_array = np.zeros((np.prod(shape), depth), dtype=np.int32)
expected_array[:] = off_value
indices_1d = indices.flatten()
row = 0
for idx in indices_1d:
if 0 <= idx < depth:
expected_array[row, idx] = on_value
row += 1
expected_array = expected_array.reshape(shape + (depth, ))
one_hot_array = mx_one_hot_array.asnumpy()
assert same(expected_array, one_hot_array)
def test_empty_indices():
shape = (2, 0, 9, 3)
with mx.np_shape():
indices = np.array([]).reshape(shape)
depth = 10
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32
).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth,))
assert same(expected_array, mx_one_hot_array)
def test_zero_depth():
shape = (2, 4, 9, 3)
indices = np.ones(shape)
depth = 0
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
assert same(expected_array, mx_one_hot_array)
test_normal_case(index_type=np.int32)
test_normal_case(index_type=np.float64)
test_normal_case(index_type=np.float32)
test_normal_case(index_type=np.float16)
with mx.np_shape():
test_empty_indices()
test_zero_depth()
def test_where():
def get_forward_expected_output(condition, x, y):
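        # NumPy reference for where(): condition either matches x elementwise or is a vector with one flag per row along the leading axis.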
original_shape = x.shape
out = np.zeros(original_shape)
if condition.shape == x.shape:
for index, c in np.ndenumerate(condition):
if c != 0:
out[index] = x[index]
else:
out[index] = y[index]
elif condition.shape == (x.shape[0], ):
s = x.shape
m = s[0]
n = int(np.prod(s)/s[0])
x2d = x.reshape((m, n))
y2d = y.reshape((m, n))
out = out.reshape((m, n))
for i in range(0, m):
if condition[i] != 0:
for j in range(0, n):
out[i, j] = x2d[i, j]
else:
for j in range(0, n):
out[i, j] = y2d[i, j]
else:
raise RuntimeError("Invalid condition shape for where op")
out = out.reshape(original_shape)
return out
def get_forward_inputs_same_shape(shape):
condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape)
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_forward_inputs_condition_vector(shape):
condition_np = np.random.randint(0, 2, shape[0])
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_backward_input(shape):
return np.random.randint(20, 30, np.prod(shape)).reshape(shape)
def get_backward_expected_outputs(grad_in, condition):
shape = grad_in.shape
grad_cond = np.zeros(condition.shape)
grad_x = np.empty(shape)
grad_y = np.empty(shape)
for index, c in np.ndenumerate(condition):
if 0 != c:
grad_x[index] = grad_in[index]
grad_y[index] = 0
else:
grad_x[index] = 0
grad_y[index] = grad_in[index]
return grad_cond, grad_x, grad_y
def test_where_helper(shape, same_shape):
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
out_expected = get_forward_expected_output(condition_np, x_np, y_np)
grad_in_np = get_backward_input(shape)
grad_expected_cond, grad_expected_x, grad_expected_y\
= get_backward_expected_outputs(grad_in_np, condition_np)
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
        grad_in_mx = mx.nd.array(grad_in_np, dtype=np.int64)
where_sym = mx.sym.where(condition, x, y)
# test req='write'
where_exe_write = where_sym._simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='write')
# test forward req='write'
outputs = where_exe_write.forward(is_train=True, condition=condition_np,
x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='write'
where_exe_write.backward(grad_in_mx.astype('float32'))
assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x)
assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y)
assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond)
# test req='add'
x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape)
y_grad_init = np.random.randint(40, 50, np.prod(shape)).reshape(shape)
where_exe_add = where_sym._simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='add')
where_exe_add.grad_dict['x'][:] = x_grad_init
where_exe_add.grad_dict['y'][:] = y_grad_init
# test forward req='add'
outputs = where_exe_add.forward(is_train=True, condition=condition_np, x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='add'
where_exe_add.backward(grad_in_mx.astype('float32'))
x_ograd = where_exe_add.grad_dict['x'].asnumpy()
y_ograd = where_exe_add.grad_dict['y'].asnumpy()
assert same(x_ograd, grad_expected_x+x_grad_init)
assert same(y_ograd, grad_expected_y+y_grad_init)
def test_where_numeric_gradient(shape, same_shape):
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y'])
def test_invalid_shape():
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
assert_exception(lambda: where_sym.eval(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
assert_exception(lambda: mx.nd.where(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
def test_1d_cond():
cond = mx.nd.array([1, 0, 1])
x = mx.nd.array([[2, 3], [4, 5], [6, 7]])
y = mx.nd.array([[7, 8], [9, 10], [10, 11]])
expect_out = np.array([[2, 3], [9, 10], [6, 7]])
out = mx.nd.where(cond, x, y).asnumpy()
        assert (expect_out == out).all()
test_where_helper((5, 9), True)
test_where_helper((5, 9), False)
test_where_helper((5, 7, 9), True)
test_where_helper((5, 7, 9), False)
test_where_helper((10, 8, 15, 3), True)
test_where_helper((10, 8, 15, 3), False)
test_where_numeric_gradient((5, 9), True)
test_where_numeric_gradient((5, 9), False)
test_where_numeric_gradient((5, 7, 9), True)
test_where_numeric_gradient((5, 7, 9), False)
test_invalid_shape()
test_1d_cond()
def test_softmin():
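    # softmin(x) equals softmax(-x); since each softmax slice sums to 1, the gradient of its sum w.r.t. the input is zero, hence the zero expected backward.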
for ndim in range(1, 5):
for dtype in [np.float16, np.float32, np.float64]:
rtol, atol = (1e-2, 5e-3) if dtype is np.float16 else (1e-3, 1e-3)
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape).astype(dtype)
data = data / 10 if dtype is np.float16 else data
sym = mx.sym.softmin(axis=axis)
expected_fwd = np_softmax(-data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], atol=atol, dtype=dtype)
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=rtol, atol=atol, grad_req=req, dtype=dtype)
if dtype is not np.float16:
check_numeric_gradient(sym, [data], rtol=rtol, atol=atol, dtype=dtype)
def test_new_softmax():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.softmax(axis=axis)
expected_fwd = np_softmax(data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd])
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=1e-2, atol=1e-3, grad_req=req)
check_numeric_gradient(sym, [data], rtol=1e-2, atol=1e-3)
def test_softmax_with_temperature():
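    # Softmax with temperature T: exp(x/T) / sum(exp(x/T)) along the chosen axis (axis 0 here).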
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
data = np.random.uniform(-2, 2, size=shape)
for temp in range(1, 11):
sym = mx.sym.softmax(axis=0, temperature=temp)
expected_fwd = np_softmax(data, axis=0, temperature=temp)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], rtol=0.05, atol=1e-3)
check_symbolic_backward(sym, [data], [np.ones(shape)], [expected_bwd], rtol=0.05, atol=1e-3)
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
def test_log_softmax():
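    # log_softmax should match log(softmax(x)); the 1e-20 below keeps the reference away from log(0).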
for ndim in range(1, 5):
for _ in range(5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(0, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.log_softmax(axis=axis-ndim)
check_symbolic_forward(sym, [data], [np.log(np_softmax(data, axis=axis)+1e-20)], rtol=1e-3, atol=1e-4)
check_numeric_gradient(sym, [data], rtol=1e-1, atol=1e-2)
def test_softmax_with_large_inputs():
def softmax_forward(input_data, true_output):
data = mx.sym.Variable('data')
out1 = data.softmax(axis=1)
exec1 = out1._bind(default_context(), args={'data': input_data})
exec1.forward()[0].wait_to_read()
ndarr = exec1.outputs[0][0][0][0]
assert_almost_equal(ndarr, true_output, rtol=1e-5, atol=1e-5)
softmax_forward(mx.nd.array([[[[-1e30,-1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[1e30,1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[-3.4e38,-3.4e38]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[3.4e38,3.4e38]]]]), np.array([1.0,1.0]))
@with_environment('MXNET_SAFE_ACCUMULATION', '1')
def test_softmax_dtype():
def check_dtypes_almost_equal(op_name,
atol, rtol,
grad_atol, grad_rtol,
idtype, ref_dtype, odtype=None):
op = getattr(mx.nd, op_name)
input_data = mx.random.uniform(shape=(100, 500))
dtype_input = input_data.astype(idtype)
ref_input = input_data.astype(ref_dtype)
dtype_input.attach_grad()
ref_input.attach_grad()
with mx.autograd.record():
dtype_softmax = op(dtype_input, axis=-1, dtype=odtype)
ref_softmax = op(ref_input, axis=-1, dtype=odtype)
assert_almost_equal(dtype_softmax, ref_softmax, rtol=rtol, atol=atol)
dtype_softmax.backward()
ref_softmax.backward()
assert_almost_equal(dtype_input.grad, ref_input.grad, rtol=grad_rtol, atol=grad_atol)
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64', 'float64')
def test_softmax_with_length():
def np_softmax_with_length(data, length):
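        # Reference: softmax along axis 1 restricted to the first length[i, j] entries; the padded tail stays zero.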
res = np.zeros(data.shape)
for i in range(length.shape[0]):
for j in range(length.shape[1]):
leng = int(length[i, j])
res[i, 0:leng, j] = np_softmax(data[i, 0:leng, j])
return res
ndim = 3
shape = rand_shape_nd(ndim, dim=10)
len_shape = list(shape)
del len_shape[1]
len_shape = tuple(len_shape)
for dtype in [np.float16, np.float32, np.float64]:
mx_data = rand_ndarray(shape, dtype=dtype)
np_data = mx_data.asnumpy()
np_length = np.random.randint(1, shape[1] + 1, len_shape)
mx_length = mx.nd.array(np_length, dtype=np.int32)
np_out = np_softmax_with_length(np_data, np_length)
data = mx.sym.Variable("data")
length = mx.sym.Variable("length")
mx_sym = mx.sym.softmax(data=data, length=length, use_length=True, axis=1)
location = {"data": mx_data, "length": mx_length}
rtol = 1e-2 if dtype == np.float16 else 1e-3
atol = 1e-4 if dtype == np.float16 else 1e-5
check_symbolic_forward(mx_sym, location, [np_out], rtol=rtol, atol=atol, dtype="asnumpy")
check_symbolic_backward(mx_sym, location, [np.ones(shape, dtype=dtype)],
[np.zeros(shape), np.zeros(len_shape, dtype=np.int32)],
rtol=1e-2, atol=2e-3 if dtype == np.float16 else 1e-3, dtype="asnumpy")
def test_pick():
def test_pick_helper(index_type=np.int32):
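        # Build the expected output via fancy indexing along `axis`; in 'wrap' mode indices are taken modulo the axis length.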
for mode in ['clip', 'wrap']:
ndim = np.random.randint(1, 5)
bshape = np.random.randint(1, 10, size=ndim)
axis = np.random.randint(0, ndim)
sshape = bshape.copy()
sshape[axis] = 1
data = np.random.uniform(-1, 1, size=bshape)
if mode == 'wrap':
index = np.random.randint(-2*bshape[axis], 2*bshape[axis], size=sshape)
else:
index = np.random.randint(0, bshape[axis], size=sshape)
exp = []
for i in range(ndim):
if i == axis:
if mode == 'wrap':
exp.append(index % bshape[axis])
else:
exp.append(index)
else:
ishape = [1 for _ in range(ndim)]
ishape[i] = bshape[i]
exp.append(np.arange(bshape[i]).reshape(ishape))
            expected = data[tuple(exp)]
data = mx.nd.array(data, dtype='float32')
index = mx.nd.array(index, dtype=index_type)
out = mx.nd.pick(data, index, axis=axis, keepdims=True, mode=mode)
assert_almost_equal(out.asnumpy(), expected)
data_holder = data
index_holder = index
data = mx.sym.Variable('data')
index = mx.sym.Variable('index')
sym = mx.sym.pick(data, index, axis=axis, keepdims=True, mode=mode)
check_numeric_gradient(sym, [data_holder, index_holder], grad_nodes=['data'])
test_pick_helper(np.int32)
test_pick_helper(np.float32)
def check_ctc_loss(acts, labels, loss_truth, contrib=False):
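    # Run CTC loss with and without gradient computation, check both agree, compare against the ground truth when given, and numerically verify the input gradient.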
in_var = mx.sym.Variable('input')
labels_var = mx.sym.Variable('labels')
if contrib:
ctc = mx.sym.contrib.ctc_loss(in_var, labels_var)
else:
ctc = mx.sym.ctc_loss(in_var, labels_var)
acts_nd = mx.nd.array(acts, ctx=default_context())
labels_nd = mx.nd.array(labels, ctx=default_context())
exe = ctc._bind(ctx=default_context(), args=[acts_nd, labels_nd])
# test forward with grad calc
exe.forward(is_train=True)
outTest = exe.outputs[0].copy()
# test forward without grad calc
exe.forward(is_train=False)
outTrain = exe.outputs[0]
# make sure losses calculated with both modes are the same
assert_almost_equal(outTest, outTrain)
# test against ground truth, if available
if loss_truth is not None:
assert_almost_equal(outTest, loss_truth)
# test grad
check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
def test_ctc_loss():
# Test 1: check that batches are same + check against Torch WarpCTC
acts = np.array([
[[1.2, 3.4, 1.2, -0.1, -2.34], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[0.1, 0.2, 0.3, 0.22, 0.123], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14, -13, -12, -11]]],
dtype=np.float32)
labels = np.array([[2, 3, 0], [2, 3, 0]])
true_loss = np.array([4.04789, 4.04789], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts, labels, true_loss, contrib=contrib)
# Test 2:
acts2 = np.array([
[[-5, -4, -3, -2, -1], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[-10, -9, -8, -7, -6], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14.2, -13.5, -12.2, -11.22]]], dtype=np.float32)
labels2 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.float32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts2, labels2, true_loss, contrib=contrib)
# Test 3: check use integer type as label
labels3 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.int32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts2, labels3, true_loss, contrib=contrib)
def test_ctc_loss_with_large_classes():
ctx = default_context()
num_classes = 6000
seq_len = 8
batch_size = 2
data = np.empty((num_classes, 0))
for i in range(seq_len * batch_size) :
row = np.roll(np.arange(num_classes, dtype=np.float32), i).reshape(num_classes, 1)
data = np.append(data, row/13, axis=1)
data = data.reshape(seq_len, batch_size, num_classes)
label = np.array([
[100, 200, 300, 400, 500, 0, 0, 0],
[1000, 2000, 3000, 4000, 0, 5000, 0, 0]], dtype=np.int32)
nd_data = mx.nd.array(data)
nd_label = mx.nd.array(label)
loss = mx.nd.ctc_loss(data=nd_data, label=nd_label)
expected_loss = np.array([688.02826, 145.34462])
assert_almost_equal(loss, expected_loss)
def test_ctc_loss_grad():
def check_ctc_loss_grad(blank_label, contrib=False): # from tf
vocab_size = 5
max_label_len = 5
        padding_mask = -1 + (blank_label == 'first')
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
inputs = [
np.vstack(
[input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
inputs = np.log(np.asarray(inputs, dtype=np.float32))
grad_truth = np.array([
np.vstack(
[gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
if blank_label == 'first':
inputs = np.roll(inputs, 1, axis=2)
grad_truth = np.roll(grad_truth, 1, axis=2)
labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
for x in [targets_0, targets_1]])+(blank_label == 'first'))
seq_lens = np.array([5, 5], dtype=np.int32)
label_lens = np.array([5, 4], dtype=np.int32)
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
with default_context():
data = mx.nd.array(inputs)
label = mx.nd.array(labels)
data.attach_grad()
with mx.autograd.record():
if contrib:
l = mx.contrib.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
else:
l = mx.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
l.backward()
assert_almost_equal(l, loss_truth, atol=1e-5, rtol=1e-5)
assert_almost_equal(data.grad, grad_truth, atol=1e-5, rtol=1e-5)
for contrib in [False, True]:
for label in ['first', 'last']:
check_ctc_loss_grad(label, contrib=contrib)
def test_quantization_op():
min0 = mx.nd.array([0.0])
max0 = mx.nd.array([1.0])
a = mx.nd.array([[0.1392, 0.5928], [0.6027, 0.8579]])
qa, min1, max1 = mx.nd.contrib.quantize(a, min0, max0, out_type='int8')
a_ = mx.nd.contrib.dequantize(qa, min1, max1, out_type='float32')
qa_real = mx.nd.array([[18, 75], [77, 109]])
a_real = mx.nd.array([[0.14173228, 0.5905512], [0.6062992, 0.8582677]])
print(a_.asnumpy())
print(a_real.asnumpy())
assert same(qa.asnumpy(), qa_real.asnumpy())
assert_almost_equal(a_.asnumpy(), a_real.asnumpy(), rtol=1e-2)
def test_index_copy():
x = mx.nd.zeros((5,3))
t = mx.nd.array([[1,2,3],[4,5,6],[7,8,9]])
index = mx.nd.array([0,4,2], dtype=np.int64)
tensor = mx.nd.array([[1,2,3],[0,0,0],[7,8,9],[0,0,0],[4,5,6]])
x_grad = mx.nd.array([[0,0,0],[1,1,1],[0,0,0],[1,1,1],[0,0,0]])
t_grad = mx.nd.array([[1,1,1],[1,1,1],[1,1,1]])
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
x.attach_grad()
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(x.grad.asnumpy(), x_grad.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
def test_boolean_mask():
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 1, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.array([[4, 5, 6]])
expected_grad = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
# test 0-size output
mx.set_np_shape(True)
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 0, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.zeros((0, 3))
expected_grad = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
mx.set_np_shape(False)
# test gradient
shape = (100, 30)
a = mx.nd.random.randint(0, 100, shape=shape)
a.attach_grad()
bi = mx.nd.random.randint(0, 100, shape=shape[0:1]) > 50
ci = mx.nd.random.randint(0, 100, shape=shape[0:1]) < 50
mx_grad = mx.nd.zeros_like(a)
mx.autograd.mark_variables([a], [mx_grad], grad_reqs='add')
T = 3
for _ in range(T):
with mx.autograd.record():
b = mx.nd.contrib.boolean_mask(a, bi)
c = mx.nd.contrib.boolean_mask(a, ci)
su = b.sum() + c.sum()
su.backward()
grad = (bi + ci).asnumpy().reshape((-1,) + (1,) * (len(shape)-1))
grad = np.tile(grad, (1,) + shape[1:])
# T times
grad *= T
assert_allclose(a.grad.asnumpy(), grad)
a_np = a.asnumpy()
assert same(b.asnumpy(), a_np[bi.asnumpy().astype('bool')])
assert same(c.asnumpy(), a_np[ci.asnumpy().astype('bool')])
def test_div_sqrt_dim():
data_tmp = np.random.normal(0, 1, (5, 10, 8))
data = mx.symbol.Variable('data')
test = mx.sym.contrib.div_sqrt_dim(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=1E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp / np.sqrt(data_tmp.shape[-1])])
# helper function to identify inputs likely to fail check_numeric_gradient tol test
# due to finite difference method inaccuracies or function discontinuities at the origin
def bad_input_finder(f, f_grad, dtype):
eps = default_numeric_eps()[np.dtype(dtype)]
rtol = default_rtols()[np.dtype(dtype)]
def expected_relative_error(x):
fd_gradient = (f(x+eps/2) - f(x-eps/2)) / eps
return abs(fd_gradient/f_grad(x) - 1)
def is_fd_problem_input(x):
return abs(x) < eps/2 or expected_relative_error(x) > rtol
return np.vectorize(is_fd_problem_input)
def test_reciprocal_op():
data_tmp = np.random.rand(3, 4).astype(np.float32) * 10 - 5
# Avoid possible division by 0 errors and finite difference method
# inaccuracies by replacing problem inputs with 1.0.
is_bad_input = bad_input_finder(np.reciprocal,
lambda x: -np.reciprocal(x)**2, np.float32)
data_tmp[is_bad_input(data_tmp)] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.reciprocal(data)
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [np.reciprocal(data_tmp)])
def test_cbrt_op():
data_tmp = np.random.rand(3, 4).astype(np.float32) * 10 - 5
# Avoid possible division by 0 errors and finite difference method
# inaccuracies by replacing problem inputs with 1.0.
is_bad_input = bad_input_finder(np.cbrt,
lambda x: 1./(3 * np.cbrt(x)**2), np.float32)
data_tmp[is_bad_input(data_tmp)] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.cbrt(data)
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [np.cbrt(data_tmp)])
def test_rcbrt_op():
data_tmp = np.random.rand(3, 4).astype(np.float32) * 10 - 5
# Avoid possible division by 0 errors and finite difference method
# inaccuracies by replacing problem inputs with 1.0.
is_bad_input = bad_input_finder(lambda x: 1./np.cbrt(x),
lambda x: -1./(3 * np.cbrt(x)**4), np.float32)
data_tmp[is_bad_input(data_tmp)] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.rcbrt(data)
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [1/np.cbrt(data_tmp)])
def test_custom_op():
class Sqr(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
if in_data[0].stype == 'default':
aux[0][:] = 1
self.assign(out_data[0], req[0], in_data[0]*in_data[0])
else:
inp = in_data[0]
csr_m = inp.data * inp.data
out = mx.nd.sparse.csr_matrix((csr_m, inp.indices, inp.indptr), shape=inp.shape)
self.assign(out_data[0], req[0], out)
if (in_data[0].stype == 'csr'):
assert(isinstance(out_data[0], mx.nd.sparse.CSRNDArray))
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 2 * mx.nd.sparse.elemwise_mul(in_data[0], out_grad[0]))
if in_data[0].stype == 'default':
assert (aux[0].asnumpy() == 1).all()
@mx.operator.register("sqr")
class SqrProp(mx.operator.CustomOpProp):
def __init__(self):
super(SqrProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['data']
def list_outputs(self):
return ['output']
def list_auxiliary_states(self):
return ['aux']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], [in_shape[0]]
def infer_type(self, in_type):
return in_type, [in_type[0]], [in_type[0]]
def infer_storage_type(self, in_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default']
return ['csr'], ['csr'], ['csr']
def infer_storage_type_backward(self, ograd_stype, in_stype,
out_stype, igrad_stype, aux_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default'], ['default'], ['default']
return ['default'], ['csr'], ['csr'], ['csr'], ['csr']
def create_operator(self, ctx, shapes, dtypes):
return Sqr()
data = mx.symbol.Variable('data')
aux = mx.symbol.Variable('aux')
op = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.cast(data, dtype='float64')
op = mx.symbol.cast(op, dtype='float32')
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.Variable('data', stype='csr')
aux = mx.symbol.Variable('aux')
op2 = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = x.tostype('csr')
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op2, [x], [aux], grad_stype_dict={"data": "csr"})
x2 = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
x2 = x2.tostype('csr')
aux2 = mx.nd.zeros_like(x2)
x2.attach_grad()
with mx.autograd.record():
output = mx.nd.Custom(x2, aux2, name='sqr', op_type='sqr')
output.backward()
expected_output = mx.nd.sparse.square(x2)
expected_grad = 2 * x2
rtol = 1e-4
atol = 1e-6
assert_almost_equal(output, expected_output, rtol=rtol, atol=atol)
assert_almost_equal(x2.grad, expected_grad, rtol=rtol, atol=atol)
# test for backward compatibility, i.e. the correctness of default implementation of
# infer storage in custom operator
class Mult(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult")
class MultProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return Mult()
lhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
rhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
lhs.attach_grad()
rhs.attach_grad()
with mx.autograd.record():
y = mx.nd.Custom(lhs, rhs, name='mult', op_type='mult')
y.backward()
assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
class MultNoGrad(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult_no_grad")
class MultNoGradProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultNoGradProp, self).__init__(need_top_grad=False)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return MultNoGrad()
def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
return ograd_stype, in_stype, out_stype, igrad_stype, aux_stype
with mx.autograd.record():
y2 = mx.nd.Custom(lhs, rhs, name="mult_no_grad", op_type="mult_no_grad")
y2.backward()
assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
class NoInputOp(mx.operator.CustomOp):
def __init__(self, length, depth):
super(NoInputOp, self).__init__()
self.output = np.ones(shape=(length, depth), dtype=np.float32)
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], self.output)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register("no_input_op")
class NoInputOpProp(mx.operator.CustomOpProp):
def __init__(self, length, depth):
super(NoInputOpProp, self).__init__()
self.length = int(length)
self.depth = int(depth)
def list_arguments(self):
return []
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return [], [(self.length, self.depth)], []
def infer_type(self, in_type):
return [], [np.float32], []
def create_operator(self, ctx, shapes, dtypes):
return NoInputOp(length=self.length, depth=self.depth)
with mx.autograd.record():
x = mx.nd.Custom(length=10, depth=10, op_type="no_input_op")
assert_almost_equal(x, np.ones(shape=(10, 10), dtype=np.float32))
@pytest.mark.skip(reason="Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/17467")
def test_custom_op_fork():
# test custom operator fork
# see https://github.com/apache/incubator-mxnet/issues/14396
class AdditionOP(mx.operator.CustomOp):
def __init__(self):
super(AdditionOP, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
out_data[0][:] = in_data[0] + in_data[1]
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
in_grad[0][:] = out_grad[0]
in_grad[1][:] = out_grad[0]
@mx.operator.register("AdditionOP")
class AdditionOPProp(mx.operator.CustomOpProp):
def __init__(self):
super(AdditionOPProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]]
def create_operator(self, ctx, shapes, dtypes):
return AdditionOP()
if not sys.platform.startswith('win'): # no fork in windows
def custom_add():
a = mx.nd.array([1, 2, 3])
b = mx.nd.array([4, 5, 6])
c = mx.nd.Custom(a, b, op_type='AdditionOP')
assert_almost_equal((a + b).asnumpy(), c.asnumpy())
custom_add()
from multiprocessing import Process
p = Process(target=custom_add)
p.daemon = True
p.start()
p.join(5)
assert not p.is_alive() and p.exitcode == 0
def _build_dot_custom(fun_forward, name):
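    # Register a minimal custom "dot" operator whose forward body is supplied by the caller; used below to exercise error propagation from custom ops.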
class Dot(mx.operator.CustomOp):
def __init__(self):
super(Dot, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
fun_forward(in_data, out_data)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register(name)
class DotProp(mx.operator.CustomOpProp):
def __init__(self):
super(DotProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [(in_shape[0][0], in_shape[1][1])]
def create_operator(self, ctx, shapes, dtypes):
return Dot()
def test_custom_op_exc():
# test except handling
# see https://github.com/apache/incubator-mxnet/pull/14693
# 1. error in python code
def custom_exc1():
def f(in_data, out_data):
assert False
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot1')
a = mx.nd.zeros((4, 1))
b = mx.nd.zeros((1, 4))
c = mx.nd.Custom(a, b, op_type='Dot1')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc1)
# 2. error in pushing operator to engine
def custom_exc2():
def f(in_data, out_data):
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot2')
a = mx.nd.zeros((4, 2))
b = mx.nd.zeros((1, 4))
# trigger error by invalid input shapes of operands
c = mx.nd.Custom(a, b, op_type='Dot2')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc2)
# 3. error in real execution
if default_context().device_type == 'cpu':
def custom_exc3():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# input to Cholesky factorization should be
# symmetric positive-definite, error will be
# triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
out_data[0].wait_to_read()
_build_dot_custom(f, 'Dot3')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot3')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc3)
def custom_exc4():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# input to Cholesky factorization should be
# symmetric positive-definite, error will be
# triggered in op execution on cpu
out_data[0][:] = mx.nd.linalg.potrf(dot)
_build_dot_custom(f, 'Dot4')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot4')
c.wait_to_read()
pytest.raises(MXNetError, custom_exc4)
def test_psroipooling():
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([168, 224], [168, 224]):
for grad_nodes in [['im_data']]:
spatial_scale = 0.0625
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1))
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1))
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
op = mx.sym.contrib.PSROIPooling(data=im_data_var, rois=rois_data_var, spatial_scale=spatial_scale,
group_size=num_group, pooled_size=num_group,
output_dim=num_classes, name='test_op')
rtol, atol = 1e-2, 1e-3
check_numeric_gradient(op, [im_data, rois_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes)
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
def test_deformable_convolution():
for num_batch in [1, 2]:
for num_channel_data, num_deformable_group in itertools.product([4, 8], [1, 2]):
for input_height, input_width in itertools.product([5, 6], [5, 6]):
for dilate in [(1, 1), (2, 2)]:
for grad_nodes in [['im_data'], ['offset_data'], ['weight']]:
output_height = input_height
output_width = input_width
im_data = np.random.rand(num_batch, num_channel_data, input_height, input_width)
offset_data = \
np.random.rand(num_batch, num_deformable_group * 3 * 3 * 2, output_height, output_width)\
* 0.8 + 0.1
weight = np.random.normal(0, 0.001, (num_channel_data, num_channel_data, 3, 3))
bias = np.zeros(num_channel_data)
im_data_var = mx.symbol.Variable(name="im_data").as_np_ndarray()
offset_data_var = mx.symbol.Variable(name="offset_data").as_np_ndarray()
weight_var = mx.symbol.Variable(name="weight").as_np_ndarray()
bias_var = mx.symbol.Variable(name="bias").as_np_ndarray()
op = mx.sym.npx.deformable_convolution(name='test_op', data=im_data_var,
offset=offset_data_var,
weight=weight_var, bias=bias_var,
num_filter=num_channel_data, pad=dilate,
kernel=(3, 3), stride=(1, 1), dilate=dilate,
num_deformable_group=num_deformable_group)
if grad_nodes[0] == 'offset_data':
# wider tolerance needed for coordinate differential
rtol, atol = 1.0, 1e-2
else:
rtol, atol = 0.05, 1e-3
                        # Currently we only have a GPU implementation
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, offset_data, weight, bias], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0), numeric_eps=1.0/64)
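# Re-samples the offsets used by deformable PSROIPooling so that no bilinear sampling location
# falls (almost) exactly on an integer grid point, where the interpolation is not differentiable
# and a numeric gradient check would be unreliable. Mirrors the forward pass to find such points.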
def _validate_sample_location(input_rois, input_offset, spatial_scale, pooled_w, pooled_h, sample_per_part, part_size, output_dim, num_classes, trans_std, feat_h, feat_w):
num_rois = input_rois.shape[0]
output_offset = input_offset.copy()
# simulate deformable psroipooling forward function
for roi_idx in range(num_rois):
sub_rois = input_rois[roi_idx, :].astype(np.float32)
img_idx, x0, y0, x1, y1 = int(sub_rois[0]), sub_rois[1], sub_rois[2], sub_rois[3], sub_rois[4]
roi_start_w = round(x0) * spatial_scale - 0.5
roi_start_h = round(y0) * spatial_scale - 0.5
roi_end_w = round(x1 + 1) * spatial_scale - 0.5
roi_end_h = round(y1 + 1) * spatial_scale - 0.5
roi_w, roi_h = roi_end_w - roi_start_w, roi_end_h - roi_start_h
bin_size_w, bin_size_h = roi_w / pooled_w, roi_h / pooled_h
sub_bin_size_w, sub_bin_size_h = bin_size_w / sample_per_part, bin_size_h / sample_per_part
for c_top in range(output_dim):
channel_each_cls = output_dim / num_classes
class_id = int(c_top / channel_each_cls)
for ph in range(pooled_h):
for pw in range(pooled_w):
part_h = int(math.floor(float(ph) / pooled_h * part_size))
part_w = int(math.floor(float(pw) / pooled_w * part_size))
trans_x = input_offset[roi_idx, class_id * 2, part_h, part_w] * trans_std
trans_y = input_offset[roi_idx, class_id * 2 + 1, part_h, part_w] * trans_std
bin_h_start, bin_w_start = ph * bin_size_h + roi_start_h, pw * bin_size_w + roi_start_w
need_check = True
while need_check:
pass_check = True
for ih in range(sample_per_part):
for iw in range(sample_per_part):
h = bin_h_start + trans_y * roi_h + ih * sub_bin_size_h
w = bin_w_start + trans_x * roi_w + iw * sub_bin_size_w
if w < -0.5 or w > feat_w - 0.5 or h < -0.5 or h > feat_h - 0.5:
continue
w = min(max(w, 0.1), feat_w - 1.1)
h = min(max(h, 0.1), feat_h - 1.1)
                                # if the following condition holds, the sampling location is not differentiable
# therefore we need to re-do the sampling process
if h - math.floor(h) < 1e-3 or math.ceil(h) - h < 1e-3 or w - math.floor(w) < 1e-3 or math.ceil(w) - w < 1e-3:
trans_x, trans_y = random.random() * trans_std, random.random() * trans_std
pass_check = False
break
if not pass_check:
break
if pass_check:
output_offset[roi_idx, class_id * 2 + 1, part_h, part_w] = trans_y / trans_std
output_offset[roi_idx, class_id * 2, part_h, part_w] = trans_x / trans_std
need_check = False
return output_offset
@pytest.mark.skip(reason="Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/11713")
def test_deformable_psroipooling():
sample_per_part = 4
trans_std = 0.1
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([160, 224], [160, 224]):
for grad_nodes in [['im_data'], ['offset_data']]:
spatial_scale = 0.0625
stride = int(1 / spatial_scale)
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1 - 2 * stride)) + stride
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1 - 2 * stride)) + stride
offset_data = np.random.rand(num_rois, 2*num_classes, num_group, num_group)
# at certain points, the bilinear interpolation function may be non-differentiable
# to avoid this, we check whether the input locates on the valid points
offset_data = _validate_sample_location(rois_data, offset_data, spatial_scale, num_group, num_group,
sample_per_part, num_group, num_classes, num_classes, trans_std, feat_height, feat_width)
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
op = mx.sym.contrib.DeformablePSROIPooling(data=im_data_var, rois=rois_data_var,
trans=offset_data_var, spatial_scale=spatial_scale,
sample_per_part=4, group_size=num_group,
pooled_size=num_group, output_dim=num_classes,
trans_std=0.1, no_trans=False, name='test_op')
rtol, atol = 1e-2, 1e-3
                    # Currently we only have a GPU implementation
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, rois_data, offset_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
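# Exercises linalg.gemm and linalg.gemm2 against NumPy references: all transpose_a/transpose_b
# combinations, batched (3x replicated) inputs and the `axis` parameter, with optional numeric
# gradient checks controlled by `grad_check`.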
def _gemm_test_helper(dtype, grad_check, rtol_fw = None, atol_fw = None,
rtol_bw = None, atol_bw = None, num_eps = None):
def np_random_data(shape, dtype=np.float32):
return np.random.uniform(low=-0.5,
high=0.5, size=shape).astype(dtype)
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
data3 = mx.symbol.Variable('data3')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
shape1 = (2, 3)
shape2 = (3, 2)
shape3 = (3, 3)
shape4 = (2, 2)
data_in1 = np_random_data(shape1, dtype)
data_in2 = np_random_data(shape2, dtype)
data_in3 = np_random_data(shape3, dtype)
data_in4 = np_random_data(shape4, dtype)
# Check all transpositions of gemm operator.
data_in1_t = np.transpose(data_in1)
data_in2_t = np.transpose(data_in2)
res_gemm = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [data_in1, data_in2, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in4])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True, transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in3])
res_gemm = 4. * np.dot(data_in1_t, data_in1) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in3])
res_gemm = 4. * np.dot(data_in1, data_in1_t) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in4])
# Check batch of gemm.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
c = rep_3x(data_in4, 2, 2)
r = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
r = rep_3x(r, 2, 2)
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [a, b, c], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b, c])
# Check for different axis that describes matrix rows.
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
c2 = np.copy(np.swapaxes(c, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = 0)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
c2 = np.copy(np.swapaxes(c, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = -3)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
# Check gemm2 operator same way as gemm.
res_gemm = 4. * np.dot(data_in1, data_in2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in1)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
res_gemm = 4. * np.dot(data_in1, data_in1_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
# Check batch of gemm2.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
r = rep_3x(4. * np.dot(data_in1, data_in2), 2, 2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [a, b], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b])
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = 0)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = -3)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
# Test gemm separately from other la-operators.
def test_gemm():
_gemm_test_helper(np.float64, True)
with environment('MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION', '0'):
_gemm_test_helper(np.float32, True)
if default_context().device_type == 'gpu':
with environment('MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION', '1'):
_gemm_test_helper(np.float32, True)
# Helper functions for test_laop
def _make_symm_symbol(a, ndims):
assert ndims >= 2
tr_shape = list(range(ndims))
tr_shape[-1] = ndims-2
tr_shape[-2] = ndims-1
tr_shape = tuple(tr_shape)
return 0.5 * (a + mx.sym.transpose(a, axes=tr_shape))
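# Masks `a` to its lower (or upper) triangle, including the diagonal, over the last two axes.
# The mask is accumulated from shifted identity matrices, so entry (r, c) is 1 exactly when
# c <= r; for upper-triangular output (lower=False) the mask is transposed before broadcasting.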
def _make_triangle_symm(a, ndims, m, lower, dtype=np.float32):
assert ndims >= 2
# The last two dimensions must both be m
# Create mask for lower triangle and diagonal
index = mx.sym.arange(start=0, stop=m, step=1, dtype=np.int32)
lt_mask = mx.sym.one_hot(index, depth=m, dtype=dtype)
for j in range(1, m):
part1 = mx.sym.zeros(shape=(j, m), dtype=dtype)
index = mx.sym.arange(start=0, stop=m-j, step=1, dtype=np.int32)
part2 = mx.sym.one_hot(index, depth=m, dtype=dtype)
lt_mask = lt_mask + mx.sym.concat(*[part1, part2], dim=0)
if not lower:
lt_mask = mx.sym.reshape(lt_mask, shape=(m, m))
lt_mask = mx.sym.transpose(lt_mask, axes=(1, 0))
shp = tuple([1]*(ndims-2) + [m, m])
lt_mask = mx.sym.reshape(lt_mask, shape=shp)
return mx.sym.broadcast_mul(a, lt_mask)
# @ankkhedia: Getting rid of fixed seed as flakiness could not be reproduced
# tracked at https://github.com/apache/incubator-mxnet/issues/11718
@xfail_when_nonstandard_decimal_separator
def test_laop():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 2e-6
rtol_bw = 1e-5
atol_bw = 1e-5
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
def check_fw_grad(sym, location, expected):
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
if grad_check == 1:
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
matrix = np.array([[9., 3., -6., 12.],
[3., 26., -7., -11.],
[-6., -7., 9., 7.],
[12., -11., 7., 65.]])
trian = np.array([[3., 0., 0., 0.],
[1., 5., 0., 0.],
[-2., -1., 2., 0.],
[4., -3., 6., 2.]])
pow = np.array([[2., 1., 1., 1.],
[1., 4., 1., 1.],
[1., 1., 8., 1.],
[1., 1., 1., 16.]])
inv = np.array([[8.95/3., 0.05/3., 2.65, -2.5/3.],
[0.05/3., 0.05, 0.05, 0.],
[2.65, 0.05, 2.5, -0.75],
[-2.5/3., 0., -0.75, 0.25]])
ident = np.eye(4)
shape = (4, 4, 1, 1)
ones = mx.nd.ones(shape).asnumpy()
for lower in [True, False]:
upper = not lower
# Tests with trivial 1x1 matrices.
data_in = np.random.uniform(1, 10, shape)
# test potrf
        # Note: the input has to be symmetrized for the gradient test to work
res_potrf = np.sqrt(data_in)
test_potrf = mx.sym.linalg.potrf(data1, lower=lower)
check_fw_grad(test_potrf, [data_in], [res_potrf])
# test potri
res_potri = np.divide(ones, data_in * data_in)
test_potri = mx.sym.linalg.potri(data1, lower=lower)
check_fw_grad(test_potri, [data_in], [res_potri])
# test trsm
trian_in = data_in * 7.
test_trsm = mx.sym.linalg.trsm(data1, data2, alpha=7., lower=lower)
check_fw_grad(test_trsm, [trian_in, data_in], [ones])
# test trmm
trian_in = np.divide(ones, trian_in)
test_trmm = mx.sym.linalg.trmm(data1, data2, alpha=7., transpose=True,
rightside=True, lower=lower)
check_fw_grad(test_trmm, [trian_in, data_in], [ones])
# test sumlogdiag
res_sumlogdiag = np.reshape(np.log(data_in), (4, 4))
test_sumlogdiag = mx.sym.linalg.sumlogdiag(data1)
check_fw_grad(test_sumlogdiag, [data_in], [res_sumlogdiag])
# more elaborate example of Cholesky factorization
low_trian = trian
if upper:
trian = np.transpose(trian)
# test potrf
test_potrf = mx.sym.linalg.potrf(_make_symm_symbol(data1, ndims=4), lower=lower)
a = rep_3x(matrix, 4, 4)
r = rep_3x(trian, 4, 4)
check_fw_grad(test_potrf, [a], [r])
#test potri
data1_ltri = _make_triangle_symm(
data1, ndims=4, m=4, lower=lower, dtype=dtype)
test_potri = mx.sym.linalg.potri(data1_ltri, lower=lower)
a = rep_3x(trian, 4, 4)
r = rep_3x(inv, 4, 4)
check_fw_grad(test_potri, [a], [r])
# test trsm
test_trsm = mx.sym.linalg.trsm(data1_ltri, data2, alpha=7., transpose=upper, lower=lower)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.transpose(low_trian), 4, 4)
check_fw_grad(test_trsm, [a, b], [r])
test_trsm2 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-2., rightside=True, transpose=lower, lower=lower)
r = rep_3x(-2. * low_trian, 4, 4)
check_fw_grad(test_trsm2, [a, b], [r])
test_trsm3 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=0.5, transpose=lower, lower=lower)
b = rep_3x(np.transpose(low_trian), 4, 4)
r = rep_3x(0.5 * ident, 4, 4)
check_fw_grad(test_trsm3, [a, b], [r])
test_trsm4 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-0.5, rightside=True, transpose=upper, lower=lower)
b = rep_3x(low_trian, 4, 4)
r = rep_3x(-0.5 * ident, 4, 4)
check_fw_grad(test_trsm4, [a, b], [r])
# test trmm
test_trmm = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=7., transpose=True, rightside=True, lower=lower)
a = [a, rep_3x(matrix, 4, 4)]
r = rep_3x(7. * np.dot(matrix, trian.T), 4, 4)
check_fw_grad(test_trmm, a, [r])
test_trmm2 = mx.sym.linalg.trmm(data1_ltri, data2, alpha=-2., lower=lower)
r = rep_3x(-2. * np.dot(trian, matrix), 4, 4)
check_fw_grad(test_trmm2, a, [r])
test_trmm3 = mx.sym.linalg.trmm(data1_ltri, data2, rightside=True, lower=lower)
r = rep_3x(np.dot(matrix, trian), 4, 4)
check_fw_grad(test_trmm3, a, [r])
test_trmm4 = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=1.2, transpose=True, lower=lower)
r = rep_3x(1.2 * np.dot(trian.T, matrix), 4, 4)
check_fw_grad(test_trmm4, a, [r])
# test sumlogdiag
r = np.reshape(np.tile(10. * np.log(np.array([2.])), 3), (3,))
check_fw_grad(test_sumlogdiag, [rep_3x(pow, 4, 4)], [r])
# Tests for operators linalg.syrk, linalg.gelqf
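# Combined symbol for the gelqf tests: outputs Q*Q^T (expected to be the identity, since Q has
# orthonormal rows) and L*Q (expected to reconstruct the input A from its LQ factorization).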
def _gelqf_combined_symbol(a):
q, l = mx.sym.linalg.gelqf(a)
q_qt = mx.sym.linalg.syrk(q, transpose=False, alpha=1., name='Q_times_Qt')
l_q = mx.sym.linalg.trmm(l, q, alpha=1., name='L_times_Q')
return mx.sym.Group([q_qt, l_q])
# NOTE: If we leave the unused output dangling, things break if dtype=np.float64. Namely, the
# backward gradient for the unused output is of dtype np.float32 then.
# ==> Very annoying!
def _gelqf_first_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(l), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(q, bogus_scal)
def _gelqf_second_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(q), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(l, bogus_scal)
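# Combined symbol for the syevd tests: outputs U*U^T (expected to be the identity) and
# U^T*diag(lam)*U (expected to reconstruct the symmetric input A).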
def _syevd_combined_symbol(a):
u, lam = mx.sym.linalg.syevd(a)
u_ut = mx.sym.linalg.syrk(u, transpose=False, alpha=1., name='U_times_Ut')
lam_u = mx.sym.broadcast_mul(mx.sym.reshape(lam, shape=(-2, 1)), u)
ut_lam_u = mx.sym.linalg.gemm2(u, lam_u, alpha=1., transpose_a=True,
transpose_b=False, name='Ut_L_U')
return mx.sym.Group([u_ut, ut_lam_u])
def test_laop_2():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
# Tests for linalg.syrk
mnalpha_lst = [(2, 3, 1.), (5, 3, -2.), (1, 6, 5.), (3, 3, 0.5), (4, 1, 10.), (1, 1, 1.)]
for m, n, alpha in mnalpha_lst:
#print('syrk: m={}, n={}, alpha={}'.format(m, n, alpha))
data_in1 = np.random.uniform(1, 10, (m, n))
res_syrk1 = alpha * np.dot(data_in1, data_in1.T)
test_syrk1 = mx.sym.linalg.syrk(data1, transpose=False, alpha=alpha)
check_fw(test_syrk1, [data_in1], [res_syrk1])
if grad_check == 1:
check_grad(test_syrk1, [data_in1])
res_syrk2 = alpha * np.dot(data_in1.T, data_in1)
test_syrk2 = mx.sym.linalg.syrk(data1, transpose=True, alpha=alpha)
check_fw(test_syrk2, [data_in1], [res_syrk2])
if grad_check == 1:
check_grad(test_syrk2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
r1_batch = rep_3x(res_syrk1, m, m)
check_fw(test_syrk1, [a_batch], [r1_batch])
if grad_check == 1:
check_grad(test_syrk1, [a_batch])
r2_batch = rep_3x(res_syrk2, n, n)
check_fw(test_syrk2, [a_batch], [r2_batch])
if grad_check == 1:
check_grad(test_syrk2, [a_batch])
# Tests for linalg.gelqf
    # Currently disabled on GPU as they need CUDA 8
    # and MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
test_gelqf2 = _gelqf_combined_symbol(data1) # Outputs (dot(Q, Q.T), dot(L, Q))
test_gelqf_q = _gelqf_first_output(data1) # Output Q (L is not dangling)
test_gelqf_l = _gelqf_second_output(data1) # Output L (Q is not dangling)
mn_lst = [(4, 4), (1, 1), (5, 20), (1, 10), (15, 50)]
for m, n in mn_lst:
#print('gelqf: m={}, n={}'.format(m, n))
data_in1 = np.random.normal(0., 10., (m, n))
res_eye = np.eye(m)
res_a = data_in1
check_fw(test_gelqf2, [data_in1], [res_eye, res_a])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [data_in1])
# A => L
check_grad(test_gelqf_l, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
reye_batch = rep_3x(res_eye, m, m)
ra_batch = a_batch
check_fw(test_gelqf2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [a_batch])
# A => L
check_grad(test_gelqf_l, [a_batch])
# Tests for operator linalg.syevd
def _syevd_first_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(lam), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(u, bogus_scal)
def _syevd_second_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(u), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(lam, bogus_scal)
def _syevd_forward(a):
lam, ut = np.linalg.eig(a)
ind = np.argsort(lam)
lam = lam[ind]
u = ut[:, ind].T
for i in range(0, a.shape[0]):
_syevd_forw_eigvec_sign(u[i])
return u, lam
def _syevd_forw_eigvec_sign(v):
ind = np.argmax(np.abs(v))
if v[ind] < 0.:
v[:] = -v
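# NumPy reference for the syevd backward pass: with G = grad_u * u^T, build the symmetric matrix
# M with M[i, i] = grad_l[i] and M[i, j] = M[j, i] = (G[i, j] - G[j, i]) / (2 * (l[i] - l[j]))
# for i != j; the gradient w.r.t. the input is then u^T * M * u.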
def _syevd_backward(grad_u, grad_l, u, l):
n = l.size
assert grad_l.size == n
assert grad_u.shape == (n, n)
assert u.shape == (n, n)
temp = np.dot(grad_u, u.T)
temp2 = np.diag(grad_l)
for i in range(1, n):
for j in range(0, i):
denom = 2. * (l[i] - l[j])
elem = (temp[i, j] - temp[j, i])/denom
temp2[i, j] = elem
temp2[j, i] = elem
temp3 = np.dot(u.T, temp2)
return np.dot(temp3, u)
# Seed set because the test is not robust enough to operate on random data
@pytest.mark.seed(1896893923)
def test_laop_3():
    # Currently disabled on GPU as syevd needs CUDA 8
    # and MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
dtype = np.float64
rtol_fw = 1e-6
atol_fw = 1e-6
num_eps = 1e-4
rtol_bw = 1e-2
atol_bw = 1e-2
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol_fw, atol=atol_fw, dtype=dtype)
# Tests for linalg.syevd
test_syevd2 = _syevd_combined_symbol(data1) # Outputs (U U^T, U^T (diag L) U)
data1_s2 = _make_symm_symbol(data1, ndims=2)
test_syevd_u_2 = _syevd_first_output(data1_s2)
test_syevd_l_2 = _syevd_second_output(data1_s2)
data1_s4 = _make_symm_symbol(data1, ndims=4)
test_syevd_u_4 = _syevd_first_output(data1_s4)
test_syevd_l_4 = _syevd_second_output(data1_s4)
n_lst = [4, 1, 2, 10, 14]
for n in n_lst:
#print('\n** syevd: n={}'.format(n))
data_in1 = np.random.normal(0., 10., (n, n))
data_in1 = 0.5 * (data_in1 + data_in1.T)
res_eye = np.eye(n)
res_a = data_in1
check_fw(test_syevd2, [data_in1], [res_eye, res_a])
# Check backward
grad_u = np.random.normal(0., 2., (n, n))
grad_l = np.random.normal(0., 2., (n,))
bw_u, bw_l = _syevd_forward(data_in1)
grad_a = _syevd_backward(grad_u, grad_l, bw_u, bw_l)
check_bw(mx.sym.linalg.syevd(data1), [data_in1], [grad_u, grad_l], [grad_a])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_2, [data_in1])
# A => L
check_grad(test_syevd_l_2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, n, n)
reye_batch = rep_3x(res_eye, n, n)
ra_batch = a_batch
check_fw(test_syevd2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_4, [a_batch])
# A => L
check_grad(test_syevd_l_4, [a_batch])
# @piyushghai - Removing the fixed seed for this test.
# Issue for flakiness is tracked at - https://github.com/apache/incubator-mxnet/issues/11721
def test_laop_4():
    # Currently disabled on GPU as syevd needs CUDA 8
    # and MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
rtol_fw = 1e-6
atol_fw = 1e-6
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected, dtype :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
a_np = np.array([[1., 2.], [2., 4.]])
u_np = np.array([[0.89442718, -0.44721359], [0.44721359, 0.89442718]])
l_np = np.array([0., 5.])
test_syevd = mx.sym.linalg.syevd(data1)
# float64
#print('float64')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float64)
# float32
#print('float32')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float32)
def test_laop_5():
# tests for diagonal and triangular matrix extraction and generation
data = mx.symbol.Variable('data')
# test complete range of small matrices to cover corner cases
for n in range(1, 5):
# test batched and non-batched processing
for b in range(3):
shape = (n, n) if b == 0 else (b, n, n)
data_in = np.random.uniform(1, 10, shape)
# test all legal offsets of the diagonal
for offs in range(1-n, n):
# test extraction of diagonal
test_diag = mx.sym.linalg.extractdiag(data, offset=offs)
res_diag = np.diagonal(data_in, offset=offs) if b==0 else np.diagonal(data_in, axis1=1, axis2=2, offset=offs)
check_symbolic_forward(test_diag, [data_in], [res_diag])
check_numeric_gradient(test_diag, [data_in])
# test generation of diagonal matrix
test_diag2 = mx.sym.linalg.makediag(data, offset=offs)
res_diag2 = None
if b == 0:
res_diag2 = np.diagflat(res_diag, k=offs)
else:
for i in range(b):
res = np.reshape(np.diagflat(res_diag[i], k=offs), (1, n, n))
res_diag2 = res if res_diag2 is None else np.concatenate((res_diag2, res), axis=0)
check_symbolic_forward(test_diag2, [res_diag], [res_diag2])
check_numeric_gradient(test_diag2, [res_diag])
# check both settings for parameter "lower" in case of zero offset
lower_vals = [True] if offs != 0 else [True, False]
for lower in lower_vals:
                    # test extraction of the triangle by doing a full roundtrip, as the intermediate
                    # extracted triangle has a different ordering than numpy's.
test_trian = mx.sym.linalg.extracttrian(data, offset=offs, lower=lower)
test_trian = mx.sym.linalg.maketrian(test_trian, offset=offs, lower=lower)
extracts_lower = (offs < 0) or ((offs == 0) and lower)
res_trian = None
if b == 0:
res_trian = np.tril(data_in, offs) if extracts_lower else np.triu(data_in, offs)
else:
for i in range(b):
res = np.tril(data_in[i], offs) if extracts_lower else np.triu(data_in[i], offs)
res = np.reshape(res, (1, n, n))
res_trian = res if res_trian is None else np.concatenate((res_trian, res), axis=0)
check_symbolic_forward(test_trian, [data_in], [res_trian])
check_numeric_gradient(test_trian, [data_in])
# Tests for linalg.inverse
@pytest.mark.skip(reason="Test crashes https://github.com/apache/incubator-mxnet/issues/15975")
def test_laop_6():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-4
atol_bw = 1e-6
data = mx.symbol.Variable('data')
check_fw = lambda sym, location, expected:\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
## det(I + dot(v, v.T)) = 1 + dot(v.T, v) >= 1, so it's always invertible;
## det is away from zero, so the value of logdet is stable
v = np.random.random(4)
a = np.eye(4) + np.outer(v, v)
a = np.tile(a, (3, 1, 1))
permute_mat = np.eye(4)[[1, 0, 2, 3]]
# test matrix inverse
r = np.eye(4)
r = np.tile(r, (3, 1, 1))
test_inverse = mx.sym.linalg.inverse(data)
test_eye = mx.sym.linalg.gemm2(data, test_inverse)
check_fw(test_eye, [a], [r])
check_grad(test_inverse, [a])
# test matrix determinant
# det
r = np.linalg.det(a)
test_det = mx.sym.linalg.det(data)
check_fw(test_det, [a], [r])
check_grad(test_det, [a])
# test slogdet
r1 = np.array([1., 1., 1.])
r2 = np.log(np.abs(np.linalg.det(a)))
test_sign, test_logabsdet = mx.sym.linalg.slogdet(data)
check_fw(test_sign, [a], [r1])
check_fw(test_sign, [np.dot(a, permute_mat)], [-r1])
check_fw(test_logabsdet, [a], [r2])
check_grad(test_logabsdet, [a])
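# Stacks a random number of random-shaped inputs along a random (possibly negative) axis and
# compares against np.stack, followed by a numeric gradient check.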
def test_stack():
for _ in range(100):
ndim = random.randint(1, 5)
axis = random.randint(0, ndim)
if random.randint(0, 1):
axis = axis - ndim - 1
nin = random.randint(1, 3)
dshape = [random.randint(1, 5) for _ in range(ndim)]
inputs = [np.random.uniform(size=dshape) for _ in range(nin)]
output = np.stack(inputs, axis=axis)
sym_ins = [mx.sym.var('x%d'%i) for i in range(nin)]
out = mx.sym.stack(*sym_ins, axis=axis)
check_symbolic_forward(out, inputs, [output])
check_numeric_gradient(out, inputs)
## TODO: this test fails intermittently when cudnn is on; cudnn is temporarily disabled until it gets fixed.
## tracked at https://github.com/apache/incubator-mxnet/issues/14288
def test_dropout():
def zero_count(array, ratio):
zeros = 0
for i in array:
if i == 0:
zeros += 1
elif math.isnan(i):
assert ratio == 1 # Only valid for ratio = 1
zeros += 1
return zeros
def check_correctness(executor, input, ratio):
input = input.ravel()
output = executor.outputs[0].asnumpy().ravel()
input_sum = np.sum(input)
output_sum = np.sum(output)
        # Sanity check on the test data: the input should contain no zeros
assert zero_count(input, ratio) == 0
# count number of zeroes in output
output_zeroes = zero_count(output, ratio)
# Hopefully should be within ratio/2 %
error = abs(output_sum - input_sum) / input_sum
if ratio == 1.0:
assert output_zeroes == len(input)
elif ratio > 0.2:
assert output_zeroes > 0
assert error < (ratio/2)
elif ratio == 0:
assert output_zeroes == 0
def check_dropout_ratio(ratio, shape, cudnn_off=True):
# test dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, cudnn_off=cudnn_off)
exe = y._simple_bind(ctx=default_context(), data=shape)
if ratio == 1:
max_value = float('nan')
else:
max_value = 1 if ratio == 0 else 1/ratio
if ratio == 1:
min_value = float('nan')
else:
min_value = 1 if ratio == 0 else 0
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
if not math.isnan(max_value):
assert exe.outputs[0].asnumpy().max() > 0
else:
assert math.isnan(exe.outputs[0].asnumpy().max())
if not math.isnan(min_value):
assert exe.outputs[0].asnumpy().min() == min_value
else:
assert math.isnan(exe.outputs[0].asnumpy().min())
check_correctness(exe, exe.arg_arrays[0].asnumpy(), ratio)
if ratio == 0.5:
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert (exe.outputs[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
# test permanent dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, mode='always', cudnn_off=cudnn_off)
exe = y._simple_bind(ctx=default_context(), data=shape)
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
def get_slice(x, axis, idx):
ix = ()
for i in range(x.ndim):
if i == axis:
ix += (idx,)
else:
ix += (slice(None, None, None),)
return x[ix]
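    # Dropout with `axes` shares a single mask across the listed axes, so every slice taken
    # along any of those axes must be identical; that is what check_dropout_axes asserts below.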
def check_dropout_axes(ratio, shape, axes, cudnn_off=True):
compactshape = list(shape)
for axis in axes:
compactshape[axis] = 1
compactx = mx.random.uniform(shape=tuple(compactshape))
broadcastx = compactx.broadcast_to(shape)
dropouty = mx.nd.Dropout(broadcastx, p=ratio, axes=axes, cudnn_off=cudnn_off)
for axis in axes:
target = get_slice(dropouty, axis, 0).asnumpy()
for i in range(1, shape[axis]):
assert(get_slice(dropouty, axis, i).asnumpy() == target).all()
def check_passthrough(ratio, shape, cudnn_off=True):
# test inference_mode forward and then backward
a = mx.random.uniform(shape=shape)
a.attach_grad()
with mx.autograd.record(train_mode=False):
b = mx.nd.Dropout(a, ratio, cudnn_off=cudnn_off) # dropout acts as identity
b.backward()
assert_almost_equal(a.grad.asnumpy(), mx.nd.ones_like(b).asnumpy())
shape = (100, 100)
check_dropout_ratio(0.5, shape)
check_dropout_ratio(0.0, shape)
check_dropout_ratio(1.0, shape)
check_dropout_ratio(0.75, shape)
check_dropout_ratio(0.25, shape)
# check_dropout_ratio(0.5, shape, cudnn_off=False)
# check_dropout_ratio(0.0, shape, cudnn_off=False)
# check_dropout_ratio(1.0, shape, cudnn_off=False)
# check_dropout_ratio(0.75, shape, cudnn_off=False)
# check_dropout_ratio(0.25, shape, cudnn_off=False)
check_passthrough(0.5, shape)
check_passthrough(0.0, shape)
check_passthrough(1.0, shape)
# check_passthrough(0.5, shape, cudnn_off=False)
# check_passthrough(0.0, shape, cudnn_off=False)
# check_passthrough(1.0, shape, cudnn_off=False)
nshape = (10, 10, 10, 10)
with mx.autograd.train_mode():
check_dropout_axes(0.25, nshape, axes = (0,))
check_dropout_axes(0.25, nshape, axes = (1,))
check_dropout_axes(0.25, nshape, axes = (2,))
check_dropout_axes(0.25, nshape, axes = (3,))
check_dropout_axes(0.25, nshape, axes = (0, 1))
check_dropout_axes(0.25, nshape, axes = (0, 2))
check_dropout_axes(0.25, nshape, axes = (0, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2))
check_dropout_axes(0.25, nshape, axes = (1, 3))
check_dropout_axes(0.25, nshape, axes = (2, 3))
check_dropout_axes(0.25, nshape, axes = (0, 1, 2))
check_dropout_axes(0.25, nshape, axes = (0, 2, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2, 3))
# check_dropout_axes(0.25, nshape, axes = (0,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (2,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (3,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 1), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 2), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1, 2), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (2, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 1, 2), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 2, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1, 2, 3), cudnn_off=False)
@pytest.mark.skip(reason="test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/11290")
def test_scatter_gather_nd():
def check(data, idx):
data.attach_grad()
with mx.autograd.record():
y = mx.nd.gather_nd(data, idx)
y.backward(y)
npidx = tuple(i.asnumpy() for i in idx)
assert (data.asnumpy()[npidx] == y.asnumpy()).all()
npdata = np.zeros_like(data.asnumpy())
npdata[npidx] = y.asnumpy()
assert (npdata == data.grad.asnumpy()).all()
assert (mx.nd._internal._backward_gather_nd(y, idx, shape=data.shape).asnumpy() == data.grad.asnumpy()).all()
for dtype in ['int32', 'int64', 'float16', 'float32', 'float64']:
data = mx.nd.arange(360, dtype=dtype).reshape((3,4,5,6))
idx = mx.nd.array([[1,1,2], [3, 3, 0], [3,2,1]], dtype='int32')
check(data, idx)
idx = mx.nd.array([[1,1,2], [3,3,0], [3,2,1], [5,2,4]], dtype='int32')
check(data, idx)
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [0, 1, 0]], dtype='int32')
assert (mx.nd.scatter_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [2, 3]]).all()
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [1, 1, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [0, 5]]).all()
data_npy = np.random.randint(0, 10, (100,))
data = mx.nd.array(data_npy, dtype=dtype)
idx = mx.nd.zeros(shape=(1, 100), dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data_npy.sum())
if dtype == 'int64':
data = mx.nd.array([2123162361283621, -31231236374787,
-112372937128970, -1378278798172378], dtype=dtype)
idx = mx.nd.array([[0, 0, 0, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data.asnumpy().sum())
def test_gather_nd_check_bound():
def _test_gather_nd_exception(data, indices):
output = mx.nd.gather_nd(data, indices).asnumpy()
# check if indices is out of bound
data = mx.nd.array([[0, 1, 2], [3, 4, 5]])
indices1 = mx.nd.array([[0, 1, 0], [0, 1, 3]])
indices2 = mx.nd.array([[0, 1, 0], [0, 1, -5]])
assertRaises(IndexError, _test_gather_nd_exception, data, indices1)
# IndexError: index 3 is out of bounds for axis 1 with size 3
assertRaises(IndexError, _test_gather_nd_exception, data, indices2)
# IndexError: index -5 is out of bounds for axis 1 with size 3
# check if the negative indices are wrapped correctly
indices1 = mx.nd.array([[0, 1, -1], [0, 1, -2]])
indices2 = mx.nd.array([[0, 1, 1], [0, 1, 1]])
data1 = mx.nd.gather_nd(data, indices1)
data2 = mx.nd.gather_nd(data, indices2)
assert_almost_equal(data1, data2, rtol=1e-5, atol=1e-5)
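# Compares a unary MXNet operator against NumPy reference functions for both the forward value
# and the backward gradient. The operator is wrapped as broadcast_add(op(data), zeros_like(data)),
# which leaves both the value and the gradient w.r.t. data unchanged.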
def compare_forw_backw_unary_op(
name, forward_mxnet_call, forward_numpy_call,
backward_numpy_call, shape, input_low, input_high, rtol, atol,
dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
data = mx.symbol.Variable(op_name + '_data', dtype=dtype)
# Comparison: Forward expression
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
res_np = forward_numpy_call(data_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_fw(op_ex, [data_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data_grad = backward_numpy_call(data_np) * res_grad
check_bw(op_ex, [data_np], [res_grad], [data_grad])
def finite_diff_unary_op(
name, forward_mxnet_call, shape, input_low, input_high, rtol, atol,
num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
data = mx.symbol.Variable('data', dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_grad(op_ex, [data_np])
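# NumPy reference for smooth L1: f(x) = 0.5 * (sigma * x)^2 for |x| < 1 / sigma^2 and
# |x| - 0.5 / sigma^2 otherwise; its derivative is sigma^2 * x inside that band and sign(x) outside.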
def np_smooth_l1(x, sigma):
issq = 1. / sigma / sigma
absx = np.abs(x)
temp = x * sigma
return np.where(absx < issq, 0.5 * (temp ** 2), absx - 0.5 * issq)
def np_smooth_l1_grad(x, sigma):
ssq = sigma * sigma
return np.where(np.abs(x) < 1. / ssq, x * ssq, np.sign(x))
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtypes)
# - Backward: Comparison to NumPy (several dtypes)
# - Finite difference tests (only dtype = float64)
# Seed set because the test is not robust enough to operate on random data
@pytest.mark.seed(192837465)
def test_unary_math_operators():
have_scipy = True
try:
from scipy import special as scipy_special
except:
print("Could not import scipy. Skipping unit tests for special functions")
have_scipy = False
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
rtol_less_l = [1e-6, 1e-5, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
atol_less_l = [1e-6, 1e-5, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'arccos' : [lambda x: mx.sym.arccos(x),
lambda x: np.arccos(x),
lambda x: -1. / np.sqrt(1. - x ** 2.),
-0.95, 0.95],
'arccosh': [lambda x: mx.sym.arccosh(x),
lambda x: np.arccosh(x),
lambda x: 1. / np.sqrt(x ** 2 - 1.),
1.05, 10.0],
'arcsin': [lambda x: mx.sym.arcsin(x),
lambda x: np.arcsin(x),
lambda x: 1. / np.sqrt(1. - x ** 2),
-0.95, 0.95],
'arcsinh': [lambda x: mx.sym.arcsinh(x),
lambda x: np.arcsinh(x),
lambda x: 1. / np.sqrt(x**2 + 1.),
-5.0, 5.0],
'arctan': [lambda x: mx.sym.arctan(x),
lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.),
-5.0, 5.0],
'arctanh': [lambda x: mx.sym.arctanh(x),
lambda x: np.arctanh(x),
lambda x: 1. / (1. - x ** 2),
-0.95, 0.95],
'cbrt': [lambda x: mx.sym.cbrt(x),
lambda x: np.cbrt(x),
lambda x: 1. / (3. * np.cbrt(x) ** 2),
-10.0, 10.0],
'cos': [lambda x: mx.sym.cos(x),
lambda x: np.cos(x),
lambda x: -np.sin(x),
-5.0, 5.0],
'cosh': [lambda x: mx.sym.cosh(x),
lambda x: np.cosh(x),
lambda x: np.sinh(x),
-2.0, 2.0],
'exp': [lambda x: mx.sym.exp(x),
lambda x: np.exp(x),
lambda x: np.exp(x),
-4.0, 4.0],
'expm1': [lambda x: mx.sym.expm1(x),
lambda x: np.expm1(x),
lambda x: np.exp(x),
-0.1, 0.1],
'log': [lambda x: mx.sym.log(x),
lambda x: np.log(x),
lambda x: 1. / x,
0.01, 100.0],
'log10': [lambda x: mx.sym.log10(x),
lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)),
0.01, 100.0],
'log2': [lambda x: mx.sym.log2(x),
lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)),
0.01, 100.0],
'log1p': [lambda x: mx.sym.log1p(x),
lambda x: np.log1p(x),
lambda x: 1. / (1. + x),
-0.1, 0.1],
'rcbrt': [lambda x: mx.sym.rcbrt(x),
lambda x: 1. / np.cbrt(x),
lambda x: -1. / (3. * x * np.cbrt(x)),
0.01, 100.0],
'reciprocal': [lambda x: mx.sym.reciprocal(x),
lambda x: 1. / x,
lambda x: -1. / (x ** 2),
0.01, 100.0],
'relu': [lambda x: mx.sym.relu(x),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'rsqrt': [lambda x: mx.sym.rsqrt(x),
lambda x: 1. / np.sqrt(x),
lambda x: -0.5 / (x * np.sqrt(x)),
0.01, 100.0],
'sigmoid': [lambda x: mx.sym.sigmoid(x),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.softsign(x),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
'sin': [lambda x: mx.sym.sin(x),
lambda x: np.sin(x),
lambda x: np.cos(x),
-5.0, 5.0],
'sinh': [lambda x: mx.sym.sinh(x),
lambda x: np.sinh(x),
lambda x: np.cosh(x),
-2.0, 2.0],
'sqrt': [lambda x: mx.sym.sqrt(x),
lambda x: np.sqrt(x),
lambda x: 0.5 / np.sqrt(x),
0.01, 100.0],
'tan': [lambda x: mx.sym.tan(x),
lambda x: np.tan(x),
lambda x: np.tan(x) ** 2 + 1.,
-1.5, 1.5],
'tanh': [lambda x: mx.sym.tanh(x),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'smooth_l1_sig1': [lambda x: mx.sym.smooth_l1(x, scalar=1.),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig_default': [lambda x: mx.sym.smooth_l1(x),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.),
lambda x: np_smooth_l1(x, 2.),
lambda x: np_smooth_l1_grad(x, 2.),
-1.0, 1.0]
}
if have_scipy:
unary_ops['gamma'] = [lambda x: mx.sym.gamma(x),
lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x),
0.01, 5.0]
unary_ops['gammaln'] = [lambda x: mx.sym.gammaln(x),
lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x),
0.01, 20.0]
# Loop over operators
for name, op in unary_ops.items():
        # Loop over dtypes
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
if name == 'gammaln' or name == 'gamma':
rtol = rtol_less_l[ind]
atol = atol_less_l[ind]
else:
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
def compare_forw_backw_binary_op(
name, forward_mxnet_call, forward_numpy_call,
backward1_numpy_call, backward2_numpy_call, shape, input1_low,
input1_high, input2_low, input2_high, rtol, atol, dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
data1 = mx.symbol.Variable(op_name + '_data1', dtype=dtype)
data2 = mx.symbol.Variable(op_name + '_data2', dtype=dtype)
# Comparison: Forward expression
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
res_np = forward_numpy_call(data1_np, data2_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_fw(op_ex, [data1_np, data2_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data1_grad = backward1_numpy_call(data1_np, data2_np) * res_grad
data2_grad = backward2_numpy_call(data1_np, data2_np) * res_grad
check_bw(op_ex, [data1_np, data2_np], [res_grad], [data1_grad, data2_grad])
def finite_diff_binary_op(
name, forward_mxnet_call, shape, input1_low, input1_high, input2_low,
input2_high, rtol, atol, num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
data1 = mx.symbol.Variable('data1', dtype=dtype)
data2 = mx.symbol.Variable('data2', dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_grad(op_ex, [data1_np, data2_np])
# Tests for binary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtypes)
# - Backward: Comparison to NumPy (several dtypes)
# - Finite difference tests (only dtype = float64)
def test_binary_math_operators():
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
binary_ops = {
'hypot' : [lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
-5.0, 5.0, -5.0, 5.0],
'pow': [lambda x, y: mx.sym.pow(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0],
'power': [lambda x, y: mx.sym.power(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0]
}
# Loop over operators
for name, op in binary_ops.items():
        # Loop over dtypes
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
compare_forw_backw_binary_op(
name, op[0], op[1], op[2], op[3], shape, op[4], op[5], op[6],
op[7], rtol_l[ind], atol_l[ind], dtype)
# Finite difference testing
finite_diff_binary_op(
name, op[0], shape, op[4], op[5], op[6], op[7], rtol_fd, atol_fd,
num_eps)
@pytest.mark.serial
def test_slice():
def test_slice_forward_backward(a, index):
a_np = a.asnumpy()
begin = []
end = []
step = []
for slice_i in index:
begin.append(slice_i.start)
end.append(slice_i.stop)
step.append(slice_i.step)
b = mx.nd.slice(a, begin=begin, end=end, step=step)
b_np = a_np[index]
assert same(b.asnumpy(), b_np)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=begin, end=end, step=step)
expected_in_grad = np.zeros_like(a_np)
expected_in_grad[index] = b_np
check_symbolic_backward(slice_sym, [a_np], [b_np], [expected_in_grad])
shape = (16, 14, 17, 20)
arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
index_list = [(slice(None),), (slice(None), slice(None)), (slice(1, 10),), (slice(1, 10), slice(3, 9)),
(slice(1, 10), slice(2, 5), slice(3, 6), slice(7, 10)),
(slice(1, 10, 2), slice(2, 9, 3), slice(3, 6, 5), slice(7, 10, 2)),
(slice(None, None, -1), slice(None, None, -1), slice(None, None, -1)),
(slice(10, 0, -2), slice(5, 2, -1), slice(7, None, 3), slice(None, 12, 4))]
for index in index_list:
test_slice_forward_backward(arr, index)
# check numeric gradient
in_data = np.arange(36).reshape(2, 2, 3, 3)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=[0, None], end=[1, None], step=[2, -1])
check_numeric_gradient(slice_sym, [in_data])
def test_slice_partial_infer():
def check_slice_partial_infer(data, begin, end, step, expected_out_shape):
out = mx.sym.slice(data, begin=begin, end=end, step=step)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
def check_slice_axis_partial_infer(data, axis, begin, end, expected_out_shape):
out = mx.sym.slice_axis(data, axis=axis, begin=begin, end=end)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
var1 = mx.sym.var(name="data", shape=(0, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (0, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (0, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (0, 3))
var1 = mx.sym.var(name="data", shape=(10, 0))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, 0))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, 0))
with mx.np_shape():
var1 = mx.sym.var(name="data", shape=(-1, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (-1, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (-1, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (-1, 3))
var1 = mx.sym.var(name='data', shape=(10, -1))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, -1))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, -1))
def test_float16_min_max():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/9007"""
a = mx.nd.array([np.finfo('float16').min, np.finfo('float16').max], dtype='float16')
assert a.dtype == np.float16
assert np.finfo('float16').min == mx.nd.min(a).asscalar()
assert np.finfo('float16').max == mx.nd.max(a).asscalar()
@mx.use_np_shape
def test_zero_size_min_max():
def min():
a = mx.nd.zeros(shape=(5, 0))
a.min()
def max():
a = mx.nd.zeros(shape=(5, 0))
a.max()
pytest.raises(MXNetError, min)
pytest.raises(MXNetError, max)
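# Compares mx.nd.squeeze against np.squeeze for scalar, tuple and default axis arguments and
# checks numeric gradients; as noted below, an all-singleton shape squeezes to (1,) rather than
# a 0-d array.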
def test_squeeze_op():
def check_squeeze_op(shape, axis=None):
data = mx.nd.random.uniform(low=-10.0, high=10.0, shape=shape)
if axis is None:
out = mx.nd.squeeze(data).asnumpy()
out_expected = np.squeeze(data.asnumpy())
else:
out = mx.nd.squeeze(data, axis=axis).asnumpy()
out_expected = np.squeeze(data.asnumpy(), axis=axis)
if out.shape == (1,): # as an exception (1, 1, 1) will be squeezed to (1,)
out_expected = np.squeeze(data.asnumpy(), axis=tuple([i for i in range(1, len(shape))]))
assert same(out, out_expected)
# check forward
check_squeeze_op((1, 5, 1, 3, 1), 0)
check_squeeze_op((1, 5, 1, 3, 1), 2)
check_squeeze_op((1, 5, 1, 3, 1), 4)
check_squeeze_op((1, 5, 1, 3, 1), (0, 4))
check_squeeze_op((1, 5, 1, 3, 1), (0, 2, 4))
check_squeeze_op((1, 5, 1, 3, 1))
check_squeeze_op((1, 1, 1, 1))
# check gradient
data = mx.symbol.Variable('data')
shape = (1, 2, 1, 3, 1)
data_tmp = np.ones(shape)
test = mx.sym.squeeze(data)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=2)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=(2, 4))
check_numeric_gradient(test, [data_tmp])
@pytest.mark.serial
def test_adaptive_avg_pool_op():
def py_adaptive_avg_pool(x, height, width):
# 2D per frame adaptive avg pool
def adaptive_avg_pool_frame(x, y):
isizeH, isizeW = x.shape
osizeH, osizeW = y.shape
for oh in range(osizeH):
istartH = int(np.floor(1.0 * (oh * isizeH) / osizeH))
iendH = int(np.ceil(1.0 * (oh + 1) * isizeH / osizeH))
kH = iendH - istartH
for ow in range(osizeW):
istartW = int(np.floor(1.0 * (ow * isizeW) / osizeW))
iendW = int(np.ceil(1.0 * (ow + 1) * isizeW / osizeW))
kW = iendW - istartW
xsum = 0
for ih in range(kH):
for iw in range(kW):
xsum += x[istartH+ih][istartW+iw]
y[oh][ow] = xsum / kH / kW
B,C,_,_ = x.shape
y = np.empty([B,C,height, width], dtype=x.dtype)
for b in range(B):
for c in range(C):
adaptive_avg_pool_frame(x[b][c], y[b][c])
return y
def check_adaptive_avg_pool_op(shape, output_height, output_width=None):
x = mx.nd.random.uniform(shape=shape)
if output_width is None:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=output_height)
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_height)
else:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(output_height, output_width))
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_width)
assert_almost_equal(y.asnumpy(), npy)
shape = (2, 2, 10, 10)
for i in range(1, 11):
check_adaptive_avg_pool_op(shape, i)
for j in range(1, 11):
check_adaptive_avg_pool_op(shape, i, j)
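# Compares contrib.BilinearResize2D against a pure-NumPy bilinear interpolation, forward and
# backward, with dedicated helpers for explicit sizes, scale factors, the align_corners flag
# and the various size-rounding modes ('odd_scale', 'to_even_*', 'to_odd_*', 'like').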
def test_bilinear_resize_op():
def py_bilinear_resize(x, outputHeight, outputWidth):
batch, channel, inputHeight, inputWidth = x.shape
if outputHeight == inputHeight and outputWidth == inputWidth:
return x
y = np.empty([batch, channel, outputHeight, outputWidth])
rheight = 1.0 * (inputHeight - 1) / (outputHeight - 1) if outputHeight > 1 else 0.0
rwidth = 1.0 * (inputWidth - 1) / (outputWidth - 1) if outputWidth > 1 else 0.0
for h2 in range(outputHeight):
h1r = 1.0 * h2 * rheight
h1 = int(np.floor(h1r))
h1lambda = h1r - h1
h1p = 1 if h1 < (inputHeight - 1) else 0
for w2 in range(outputWidth):
w1r = 1.0 * w2 * rwidth
w1 = int(np.floor(w1r))
w1lambda = w1r - w1
w1p = 1 if w1 < (inputWidth - 1) else 0
for b in range(batch):
for c in range(channel):
y[b][c][h2][w2] = (1-h1lambda)*((1-w1lambda)*x[b][c][h1][w1] + \
w1lambda*x[b][c][h1][w1+w1p]) + \
h1lambda*((1-w1lambda)*x[b][c][h1+h1p][w1] + \
w1lambda*x[b][c][h1+h1p][w1+w1p])
return y
def py_bilinear_resize_backward(x, incoming_grads, mode='size'):
data1 = np.zeros_like(x)
data2 = incoming_grads
batchsize = data1.shape[0]
channels = data1.shape[1]
height1 = data1.shape[2]
width1 = data1.shape[3]
height2 = data2.shape[2]
width2 = data2.shape[3]
rheight = float(height1 - 1) / (height2 - 1) if (height2 > 1) else 0
rwidth = float(width1 - 1) / (width2 - 1) if (width2 > 1) else 0
# special case: just copy
if height1 == height2 and width1 == width2:
data1 += data2
return [data1]
for h2 in range(0, height2):
for w2 in range(0, width2):
h1r = rheight * h2
h1 = int(h1r)
h1p = 1 if (h1 < height1 - 1) else 0
h1lambda = h1r - h1
h0lambda = 1 - h1lambda
#
w1r = rwidth * w2
w1 = int(w1r)
w1p = 1 if (w1 < width1 - 1) else 0
w1lambda = w1r - w1
w0lambda = 1 - w1lambda
#
for n in range(0, batchsize):
for c in range(0, channels):
d2val = data2[n][c][h2][w2]
data1[n][c][h1][w1] += h0lambda * w0lambda * d2val
data1[n][c][h1][w1 + w1p] += h0lambda * w1lambda * d2val
data1[n][c][h1 + h1p][w1] += h1lambda * w0lambda * d2val
data1[n][c][h1 + h1p][w1 + w1p] += h1lambda * w1lambda * d2val
if mode == 'like':
return data1, np.zeros_like(incoming_grads)
return [data1]
def check_bilinear_resize_op(shape, height, width):
x = mx.nd.random.uniform(shape=shape)
y = mx.nd.contrib.BilinearResize2D(x, height=height, width=width)
assert_almost_equal(y, py_bilinear_resize(x.asnumpy(), height, width))
x_scale = width / shape[-1]
y_scale = height / shape[-2]
y = mx.nd.contrib.BilinearResize2D(x, scale_height=y_scale, scale_width=x_scale)
assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
def check_bilinear_resize_align_corners_op():
img_shape = [1, 1, 3, 2]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
expected_data = {}
# align_corners = False
expected_data[0] = [
64.000, 56.000, 40.000, 32.000, 56.000, 52.000, 44.000, 40.000, 40.000, 44.000, 52.000, 56.000,
36.500, 45.625, 63.875, 73.000, 45.500, 56.875, 79.625, 91.000, 50.000, 62.500, 87.500, 100.000
]
# align_corners = True
expected_data[1] = [
64.000, 53.333, 42.667, 32.000, 51.200, 49.067, 46.933, 44.800, 38.400, 44.800, 51.200, 57.600,
35.600, 47.467, 59.333, 71.200, 42.800, 57.067, 71.333, 85.600, 50.000, 66.667, 83.333, 100.000
]
x = np.array(data, dtype=np.float32).reshape(img_shape)
x_nd = mx.nd.array(x)
y0 = np.array(expected_data[0]).reshape((1, 1, target_height, target_width))
y0_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=False)
assert_almost_equal(y0, y0_nd.asnumpy(), atol=1e-3)
y1 = np.array(expected_data[1]).reshape((1, 1, target_height, target_width))
y1_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=True)
assert_almost_equal(y1, y1_nd.asnumpy(), atol=1e-3)
def check_bilinear_resize_modes_op(shape, scale_height=None, scale_width=None, shape_1=None, mode=None):
x = mx.nd.random.uniform(shape=shape)
original_h = shape[2]
original_w = shape[3]
if mode == 'odd_scale':
assert scale_height is not None and scale_width is not None
new_h = int(original_h * scale_height) if (original_h % 2) == 0 else \
int((original_h - 1) * scale_height) + 1
new_w = int(original_w * scale_width) if (original_w % 2) == 0 \
else int((original_w - 1) * scale_width) + 1
y = mx.nd.contrib.BilinearResize2D(x, scale_height=scale_height,
scale_width=scale_width,
mode='odd_scale')
elif mode == 'to_even_down':
new_h = original_h if (original_h % 2) == 0 else original_h - 1
new_w = original_w if (original_w % 2) == 0 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_down')
elif mode == 'to_even_up':
new_h = original_h if (original_h % 2) == 0 else original_h + 1
new_w = original_w if (original_w % 2) == 0 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_up')
elif mode == 'to_odd_down':
new_h = original_h if (original_h % 2) == 1 else original_h - 1
new_w = original_w if (original_w % 2) == 1 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_down')
elif mode == 'to_odd_up':
new_h = original_h if (original_h % 2) == 1 else original_h + 1
new_w = original_w if (original_w % 2) == 1 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_up')
elif mode == 'like':
x_1 = mx.nd.random.uniform(shape=shape_1)
new_h = x_1.shape[2]
new_w = x_1.shape[3]
y = mx.nd.contrib.BilinearResize2D(x, x_1, mode='like')
new_shape_desired = np.array([shape[0], shape[1], new_h, new_w], dtype='int')
new_shape_got = np.array(y.shape, dtype='int')
data_sym = mx.sym.var('data')
data_np = x.asnumpy()
expected = py_bilinear_resize(data_np, new_h, new_w)
out_grads = np.ones([shape[0], shape[1], new_h, new_w])
expected_backward = py_bilinear_resize_backward(data_np, out_grads, mode)
assert_array_equal(new_shape_desired, new_shape_got, "Desired and got shapes are not equal. {} vs {}".format(
str(new_shape_desired.tolist()), str(new_shape_got.tolist())))
assert_almost_equal(y.asnumpy(), expected, 1e-3, 0)
if mode != 'like':
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, None, scale_height=scale_height, scale_width=scale_width, mode=mode)
check_symbolic_forward(resize_sym, [data_np], [expected], rtol=1e-3, atol=1e-5)
check_symbolic_backward(resize_sym, [data_np], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
check_numeric_gradient(resize_sym, [data_np], rtol=1e-2, atol=1e-4)
else:
data_sym_like = mx.sym.var('data_like')
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, data_sym_like, mode=mode)
            data_np_like = x_1.asnumpy()
            check_symbolic_forward(resize_sym, [data_np, data_np_like], [expected], rtol=1e-3, atol=1e-5)
            check_symbolic_backward(resize_sym, [data_np, data_np_like], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
            check_numeric_gradient(resize_sym, [data_np, data_np_like], rtol=1e-2, atol=1e-4)
shape = (2, 2, 10, 10)
check_bilinear_resize_op(shape, 5, 5)
check_bilinear_resize_op(shape, 10, 10)
check_bilinear_resize_op(shape, 15, 15)
check_bilinear_resize_op(shape, 3, 7)
check_bilinear_resize_op(shape, 13, 17)
shape = (2, 2, 20, 20)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape = (2, 2, 21, 21)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape_0 = (2, 2, 21, 21)
shape_1 = (2, 2, 10, 10)
check_bilinear_resize_modes_op(shape_0, shape_1=shape_1, mode='like')
check_bilinear_resize_modes_op(shape_1, shape_1=shape_0, mode='like')
check_bilinear_resize_align_corners_op()
def test_multi_proposal_op():
    # parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
threshold = 0.7
rpn_min_size = 16
batch_size = 20
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = np.float32)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = np.float32)
im_info = mx.nd.empty((batch_size, 3), dtype = np.float32)
cls_prob = mx.nd.array(np.random.random(cls_prob.shape))
bbox_pred = mx.nd.array(np.random.random(bbox_pred.shape))
for i in range(batch_size):
im_size = np.random.randint(100, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(70, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
def get_sub(arr, i):
new_shape = list(arr.shape)
new_shape[0] = 1
res = arr[i].reshape(new_shape)
return res
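    # Run Proposal on each sample separately and check that the batched
    # MultiProposal output matches (roi coordinates, batch indices and scores).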
def check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
single_proposal = []
single_score = []
for i in range(batch_size):
rois, score = mx.nd.contrib.Proposal(
cls_prob = get_sub(cls_prob, i),
bbox_pred = get_sub(bbox_pred, i),
im_info = get_sub(im_info, i),
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal.append(rois)
single_score.append(score)
multi_proposal, multi_score = mx.nd.contrib.MultiProposal(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal = mx.nd.stack(*single_proposal).reshape(multi_proposal.shape)
single_score = mx.nd.stack(*single_score).reshape(multi_score.shape)
single_proposal_np = single_proposal.asnumpy()
multi_proposal_np = multi_proposal.asnumpy()
single_score_np = single_score.asnumpy()
multi_score_np = multi_score.asnumpy()
# check rois x1,y1,x2,y2
assert np.allclose(single_proposal_np[:, 1:], multi_proposal_np[:, 1:])
# check rois batch_idx
for i in range(batch_size):
start = i * rpn_post_nms_top_n
end = start + rpn_post_nms_top_n
assert (multi_proposal_np[start:end, 0] == i).all()
# check score
assert np.allclose(single_score_np, multi_score_np)
def check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
im_info_sym = mx.sym.Variable('im_info')
cls_prob_sym = mx.sym.Variable('cls_prob')
bbox_pred_sym = mx.sym.Variable('bbox_pred')
sym = mx.sym.contrib.MultiProposal(
cls_prob = cls_prob_sym,
bbox_pred = bbox_pred_sym,
im_info = im_info_sym,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = False)
location = [cls_prob.asnumpy(), bbox_pred.asnumpy(), im_info.asnumpy()]
expected = [np.zeros_like(e) for e in location]
out_grads = [np.ones((rpn_post_nms_top_n, 5))]
check_symbolic_backward(sym, location, out_grads, expected)
check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
check_forward(rpn_pre_nms_top_n, 1500)
check_forward(1000, 500)
check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
def test_quadratic_function():
def f(x, a, b, c):
return a * x**2 + b * x + c
a = np.random.random_sample()
b = np.random.random_sample()
c = np.random.random_sample()
data = mx.symbol.Variable('data')
quad_sym = mx.sym.contrib.quadratic(data=data, a=a, b=b, c=c)
for dtype in [np.float16, np.float32, np.float64]:
tol = 1e-2 if dtype is np.float16 else 1e-5
for ndim in range(1, 6):
shape = rand_shape_nd(ndim, 5)
data_np = np.random.randn(*shape).astype(dtype)
expected = f(data_np, a, b, c)
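            # analytic gradient of the quadratic w.r.t. x: d/dx (a*x^2 + b*x + c) = 2*a*x + b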
backward_expected = 2 * a * data_np + b
# check imperative forward
output = mx.nd.contrib.quadratic(mx.nd.array(data_np), a=a, b=b, c=c)
assert_almost_equal(output, expected, rtol=tol, atol=tol)
# check forward
check_symbolic_forward(quad_sym, [data_np], [expected], rtol=tol, atol=tol)
# check backward
check_symbolic_backward(quad_sym, [data_np], [np.ones(expected.shape)],
[backward_expected], rtol=tol, atol=tol)
# check backward using finite difference
check_numeric_gradient(quad_sym, [data_np], atol=0.001)
def allclose_function(contexts):
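    # getRandom perturbs the base tolerance by up to +/- percent %, so every run
    # exercises slightly different rtol/atol values.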
def getRandom(base, percent = 1.):
return base * (1 + percent * (2 * np.random.random_sample() - 1.) / 100)
title = 'exp'
for ctx in contexts:
title += ' cpu' if ctx == mx.cpu() else ' gpu'
title += ' nElem shape'
num_ctx = len(contexts)
result = [False, False]
for dtype in [np.float16, np.float32, np.float64]:
rtol = getRandom(1e-2 if dtype is np.float16 else 1e-5)
atol = getRandom(1e-4 if dtype is np.float16 else 1e-7)
print('\nnumpy.{}: atol = {} rtol = {}'.format(dtype.__name__, atol, rtol))
print(title)
for ndim in range(1, 10):
shape = rand_shape_nd(ndim, 8)
a_np = np.random.randn(*shape).astype(dtype)
b_np = (a_np + np.random.randn(*shape).astype(dtype) / 10000000).astype(dtype)
expected = np.allclose(a_np, b_np, rtol, atol)
for n, ctx in enumerate(contexts):
a_ctx = mx.nd.array(a_np, dtype = dtype, ctx=ctx)
b_ctx = mx.nd.array(b_np, dtype = dtype, ctx=ctx)
output = mx.nd.contrib.allclose(a_ctx, b_ctx, rtol=rtol, atol=atol)
result[n] = output.asnumpy() == 1
if expected != result[n]:
                    # Report the elements considered "not close" on this side, together with the
                    # corresponding elements of the comparison CPU/GPU/Python vectors that were considered "close"
v_ctx = 'CPU' if ctx == mx.cpu() else 'GPU'
if expected:
v_cmp = 'Python'
a_b = a_ctx.asnumpy()
b_b = b_ctx.asnumpy()
a_g = np.asarray(a_np)
b_g = np.asarray(b_np)
else:
v_cmp = v_ctx
v_ctx = 'Python'
a_b = np.asarray(a_np)
b_b = np.asarray(b_np)
a_g = a_ctx.asnumpy()
b_g = b_ctx.asnumpy()
print('\n *** Violations found on %s, but not on %s side ***' % (v_ctx, v_cmp))
frmt = " a[{0:d}]: b[{0:d}]:" \
" abs(a[{0:d}]-b[{0:d}]) - atol + rtol*abs(b[{0:d}]):"
                    # An element violates the check when abs(a - b) >= atol + rtol * abs(b);
                    # collect the indices of all violations and the corresponding element values
bad_indexes = np.abs(a_b - b_b) >= atol + rtol * abs(b_b)
a_values = [a_b[bad_indexes], a_g[bad_indexes]]
b_values = [b_b[bad_indexes], b_g[bad_indexes]]
idx = np.asarray(np.where(bad_indexes == True))
idx = idx.reshape(1, idx.size)
idx_flat = np.asarray(np.where(bad_indexes.flatten() == True)).flatten()
for i in range(len(a_values[0])):
flat_idx = idx_flat[i]
print('{}: index = {} flat_index = {}'.format('%4d'%i, idx[i], flat_idx))
print(frmt.format(flat_idx))
for j in range(2):
diff = np.abs(a_values[j][i]-b_values[j][i]) - atol + rtol*abs(b_values[j][i])
print('{}: {} {} {}'.format('%6s'%v_ctx, a_values[j][i], b_values[j][i], diff))
if num_ctx == 1:
print(' {0:d} {1:d} {2:10d} {3:}'.format(expected, result[0], np.prod(shape), shape))
else:
print(' {0:d} {1:d} {2:d} {3:10d} {4:}'.format(expected, result[0], result[1], np.prod(shape), shape))
if expected != result[0] or num_ctx > 1 and expected != result[1]:
assert False
@pytest.mark.serial
def test_allclose_function():
allclose_function([default_context()])
def test_histogram():
def f(x, bins=10, range=None):
return np.histogram(x, bins, range=range)
for ndim in range(1, 6):
shape = rand_shape_nd(ndim)
x = rand_ndarray(shape, stype='default', dtype=np.float64)
mx_bins = mx.nd.array([-1.0, 0.5, 2.0, 4.5, 50.0], dtype=np.float64)
np_bins = mx_bins.asnumpy()
bin_cnt = random.randint(2, 10)
bin_range = (-2.5, 2.5)
mx_histo1, mx_bins1 = mx.nd.histogram(x, bins=bin_cnt, range=bin_range)
np_histo1, np_bins1 = f(x.asnumpy(), bins=bin_cnt, range=bin_range)
assert_almost_equal(mx_bins1, np_bins1)
assert_almost_equal(mx_histo1, np_histo1, rtol=1e-3, atol=1e-5)
mx_histo2, mx_bins2 = mx.nd.histogram(x, bins=mx_bins)
np_histo2, np_bins2 = f(x.asnumpy(), bins=np_bins)
assert_almost_equal(mx_histo2, np_histo2, rtol=1e-3, atol=1e-5)
assert_almost_equal(mx_bins2, np_bins2, rtol=1e-3, atol=1e-5)
data = mx.sym.Variable("data")
bins = mx.sym.Variable("bins")
histo1 = mx.sym.histogram(a=data, bins=bin_cnt, range=bin_range)
histo2 = mx.sym.histogram(a=data, bins=bins)
executor1 = histo1._bind(ctx=default_context(), args={"data" : x})
executor1.forward(is_train=False)
assert_almost_equal(np_histo1, executor1.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo1", "FORWARD_histo1"), equal_nan=False)
executor2 = histo2._bind(ctx=default_context(), args={"data" : x, "bins" : mx_bins})
executor2.forward(is_train=False)
assert_almost_equal(np_histo2, executor2.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo2", "FORWARD_histo2"), equal_nan=False)
@pytest.mark.skip(reason="test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/13915")
def test_activation():
shapes = [(9,), (9, 10), (9, 10, 10), (1, 9, 10, 10)]
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'relu': [lambda x: mx.sym.Activation(x, act_type='relu'),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'sigmoid': [lambda x: mx.sym.Activation(x, act_type='sigmoid'),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'tanh': [lambda x: mx.sym.Activation(x, act_type='tanh'),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'softrelu': [lambda x: mx.sym.Activation(x, act_type='softrelu'),
lambda x: np.log(1. + np.exp(x)),
lambda x: 1. - 1 / (1 + np.exp(x)),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.Activation(x, act_type='softsign'),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
}
# Loop over operators
for name, op in unary_ops.items():
# Loop over shapes
for shape in shapes:
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
@pytest.mark.serial
def test_ravel():
    # Be aware that check_symbolic_forward uses float arrays internally,
    # which limits the representable flat-index range. With dim==4 and data
    # in [0, ..., 100], precision issues can already break this test.
for dim in [1, 2, 3, 4]:
data = np.random.randint(50, size=(dim, 500))
shape = tuple(np.add(np.amax(data, axis=1), [1]))
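        # shape is one larger than the maximum index along each dimension, so
        # every column of `data` is a valid multi-index for ravel/unravel.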
a = mx.sym.Variable('a')
ravel_npy = np.ravel_multi_index(data, shape)
b = mx.sym.ravel_multi_index(a, shape=shape)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
# Test with leading dimension set to -1.
shape2 = shape
shape2 = (-1,)+shape[1:]
b = mx.sym.ravel_multi_index(a, shape=shape2)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape2)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
def test_unravel_index():
unravel_shape = (2, 10)
unravel_size = np.prod(unravel_shape)
for shape in [(10,), (2, 10), (3, 4, 5)]:
a = np.random.randint(0, unravel_size, size=shape)
b = np.stack(np.unravel_index(a, shape=unravel_shape), 0)
a_mx = mx.nd.array(a)
b_mx = mx.nd.unravel_index(a_mx, shape=unravel_shape)
assert_array_equal(b, b_mx.asnumpy())
def test_context_num_gpus():
try:
        # Note: the test runs on both GPU and CPU hosts, so we cannot assert
        # a specific number of GPUs here.
assert mx.context.num_gpus() >= 0
except mx.MXNetError as e:
        # Note: on a CPU-only host, CUDA sometimes cannot determine the number
        # of GPUs.
if str(e).find("CUDA") == -1:
raise e
@pytest.mark.serial
def test_op_roi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
        Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
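    # Bilinear interpolation helper that also returns the (y, x, weight) triples
    # needed to scatter gradients in the backward part of the reference below.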
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0), []
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
grad = [(y_low, x_low, w1), (y_low, x_high, w2),
(y_high, x_low, w3), (y_high, x_high, w4)
]
return val, grad
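    # NumPy reference for ROIAlign: computes the pooled output and the gradient
    # w.r.t. the input feature map (the gradient w.r.t. the ROIs stays zero).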
def roialign_forward_backward(data, rois, pooled_size, spatial_scale, sampling_ratio,
position_sensitive, dy):
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 5,\
ValueError(
'The length of the axis 1 of rois should be 5 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
C_out = C // PH // PW if position_sensitive else C
out = np.zeros((R, C_out, PH, PW), dtype=T)
dx = np.zeros_like(data)
drois = np.zeros_like(rois)
for r in range(R):
batch_ind = int(rois[r, 0])
sw, sh, ew, eh = rois[r, 1:5] * T(spatial_scale)
roi_w = T(max(ew - sw, 1.0))
roi_h = T(max(eh - sh, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
for c in range(C_out):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
c_in = c * PH * PW + ph * PW + pw if position_sensitive else c
for iy in range(roi_bin_grid_h):
y = sh + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
x = sw + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
v, g = bilinear_interpolate(
bdata[c_in], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
# compute grad
for qy, qx, qw in g:
assert_same_dtype(qw.dtype, T)
dx[batch_ind, c_in, qy, qx] += dy[r,
c, ph, pw] * qw / count
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out, [dx, drois]
def test_roi_align_value(sampling_ratio=0, position_sensitive=False):
ctx = default_context()
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
C = C * pooled_size[0] * pooled_size[1] if position_sensitive else C
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy - wh / 2, center_xy + wh / 2, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
data.attach_grad()
rois.attach_grad()
with mx.autograd.record():
output = mx.nd.contrib.ROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sample_ratio=sampling_ratio,
position_sensitive=position_sensitive)
C_out = C // pooled_size[0] // pooled_size[1] if position_sensitive else C
dy = mx.nd.random.uniform(-1, 1, (R, C_out) +
pooled_size, ctx=ctx, dtype=dtype)
output.backward(dy)
real_output, [dx, drois] = roialign_forward_backward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio,
position_sensitive, dy.asnumpy())
assert_almost_equal(output, real_output, atol=1e-3)
assert_almost_equal(data.grad, dx, atol=1e-3)
assert_almost_equal(rois.grad, drois, atol=1e-3)
# modified from test_roipooling()
def test_roi_align_autograd(sampling_ratio=0):
ctx = default_context()
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.contrib.ROIAlign(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1,
sample_ratio=sampling_ratio)
x1 = np.random.rand(4, 1, 12, 12).astype('float64')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2],
[1, 3.1, 1.1, 5.2, 10.2]], dtype='float64')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'write', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'add', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
test_roi_align_value()
test_roi_align_value(sampling_ratio=2)
test_roi_align_value(position_sensitive=True)
test_roi_align_autograd()
def test_op_rroi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
        Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0)
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
return val
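    # NumPy reference for rotated ROIAlign (forward only): sampling points are
    # rotated by the ROI angle around the ROI center before bilinear interpolation.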
def rroialign_forward(data, rois, pooled_size, spatial_scale, sampling_ratio):
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 6,\
ValueError(
'The length of the axis 1 of rois should be 6 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
out = np.zeros((R, C, PH, PW), dtype=T)
for r in range(R):
batch_ind = int(rois[r, 0])
roi_center_w, roi_center_h, roi_w, roi_h = rois[r, 1:5] * T(spatial_scale)
roi_theta = T(rois[r,5] * np.pi / 180.0)
roi_w = T(max(roi_w, 1.0))
roi_h = T(max(roi_h, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
roi_start_h = T(-roi_h / 2.0)
roi_start_w = T(-roi_w / 2.0)
for c in range(C):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
for iy in range(roi_bin_grid_h):
yy = roi_start_h + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
xx = roi_start_w + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
x = xx * np.cos(roi_theta, dtype=T) + yy * np.sin(roi_theta, dtype=T) + roi_center_w
y = yy * np.cos(roi_theta, dtype=T) - xx * np.sin(roi_theta, dtype=T) + roi_center_h
v = bilinear_interpolate(
bdata[c], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out
def test_rroi_align_value(sampling_ratio=-1):
ctx = default_context()
if ctx.device_type == 'gpu':
print('skipped testing rroi align for gpu since it is not supported yet')
return
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
theta = mx.nd.random.uniform(0, 180, (R,1), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy, wh, theta, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
output = mx.nd.contrib.RROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sampling_ratio=sampling_ratio)
real_output = rroialign_forward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio)
assert_almost_equal(output.asnumpy(), real_output, atol=1e-3)
test_rroi_align_value()
test_rroi_align_value(sampling_ratio=2)
def test_diag():
# Test 2d input
h = np.random.randint(2,9)
w = np.random.randint(2,9)
a_np = np.random.random((h, w)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
for k in [0, 1, -1, np.random.randint(-min(h,w) + 1, min(h,w))]:
assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
# invalid k
k = max(h,w) + 1
assertRaises(MXNetError, mx.nd.diag, a, k=k)
# Test 2d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# test 1d input
d = np.random.randint(2,9)
a_np = np.random.random((d))
a = mx.nd.array(a_np)
# k is random
k = np.random.randint(-d,d)
assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
    # Test 1d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d input
x1 = np.random.randint(3,9)
x2 = np.random.randint(3,9)
x3 = np.random.randint(3,9)
x4 = np.random.randint(3,9)
a_np = np.random.random((x1, x2, x3, x4)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
# k = 0, axis1=0, axis2=1
r = mx.nd.diag(data=a, k=0, axis1=0, axis2=1)
assert_almost_equal(r, np.diagonal(a_np, offset=0, axis1=0, axis2=1))
# k = 1, axis1=1, axis2=0
r = mx.nd.diag(data=a, k=1, axis1=1, axis2=0)
assert_almost_equal(r, np.diagonal(a_np, offset=1, axis1=1, axis2=0))
    # k = -1, axis1=1, axis2=3
r = mx.nd.diag(data=a, k=-1, axis1=1, axis2=3)
assert_almost_equal(r, np.diagonal(a_np, offset=-1, axis1=1, axis2=3))
# k = 2, axis1=-2, axis2=0
r = mx.nd.diag(data=a, k=2, axis1=-2, axis2=0)
assert_almost_equal(r, np.diagonal(a_np, offset=2, axis1=-2, axis2=0))
# Test 4d backward, k=0, axis1=3, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=0, axis1=3, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=1, axis1=1, axis2=2
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1, axis1=1, axis2=2)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-1, axis1=2, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1, axis1=2, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-2, axis1=1, axis2=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-2, axis1=1, axis2=-1)
check_numeric_gradient(diag_sym, [a_np])
@pytest.mark.serial
def test_depthtospace():
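    # NumPy reference: depth_to_space with block size b maps (N, C, H, W) to
    # (N, C // b**2, H * b, W * b) by moving channel blocks into the spatial dims.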
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
n = random.randint(1, 5)
c = block * block * rand_mul1
h = random.randint(1, 5)
w = random.randint(1, 5)
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.depth_to_space(data, block)
assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c // (block ** 2), h * block, w * block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.depth_to_space(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_depth_dim():
invalid_shape_inp = (n, block - 1, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_space_dim():
invalid_shape_inp = (n, block ** 2, 0, block + 1)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n , c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
test_invalid_depth_dim()
test_invalid_space_dim()
test_invalid_block_size()
@pytest.mark.serial
def test_spacetodepth():
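    # NumPy reference: space_to_depth with block size b maps (N, C, H, W) to
    # (N, C * b**2, H // b, W // b); it is the inverse of depth_to_space.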
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize])
tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
y = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
rand_mul2 = random.randint(1, 4)
n = random.randint(1, 5)
c = random.randint(1, 5)
h = block * rand_mul1
w = block * rand_mul2
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.space_to_depth(data, block)
assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c * (block ** 2), h // block, w // block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.space_to_depth(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_space_dim():
invalid_shape_inp = (n , c, block - 1, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n, c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_depth_dim():
invalid_shape_inp = (n, 0, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
test_invalid_space_dim()
test_invalid_block_size()
test_invalid_depth_dim()
def test_softmax_cross_entropy():
def f_sm_ce(data, label):
return np.sum(-np.log(data) * label)
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
sym = mx.sym.softmax_cross_entropy(data=data, label=label)
num_labels = random.randint(100, 200)
batch_size = random.randint(100, 200)
np_data = rand_ndarray((batch_size, num_labels), stype='default').asnumpy()
np_sm = np_softmax(np_data)
np_label = np.random.randint(0, num_labels, (batch_size, ))
np_one_hot_label = np.zeros((batch_size, num_labels))
np_one_hot_label[np.arange(batch_size), np_label] = 1.
check_symbolic_forward(sym, {'data' : np_data, 'label' : np_label}, [np.array([f_sm_ce(np_sm, np_one_hot_label)])], rtol=1e-3, atol=1e-5)
def test_split_v2():
dim = random.randint(2, 6)
shape = rand_shape_nd(dim)
axis = random.randint(-dim, dim-1)
axis_size = shape[axis]
samples = random.randint(0, axis_size - 1)
indices = sorted(random.sample([i for i in range(1, axis_size)], samples))
indices = tuple(indices)
mx_data = rand_ndarray(shape)
np_data = mx_data.asnumpy()
np_out = np.split(np_data, indices_or_sections=indices, axis=axis)
data = mx.sym.Variable("data")
sym = mx.sym.split_v2(data, indices_or_sections=indices, axis=axis)
check_symbolic_forward(sym, {"data": mx_data}, np_out, rtol=1e-3, atol=1e-5)
out_grad = [np.ones(arr.shape) for arr in np_out]
check_symbolic_backward(sym, {"data": mx_data}, out_grad, [np.concatenate(out_grad, axis=axis)])
def test_moments():
dim = random.randint(2, 5)
shape = rand_shape_nd(dim, dim=5)
axes = [i for i in range(dim)]
test_dims = random.sample(axes, random.randint(1, dim))
test_axes = tuple(sorted(test_dims))
np_a = np.random.uniform(-1.0, 1.0, shape)
a = mx.nd.array(np_a)
for keepdims in [True, False]:
eps = 1e-3
np_a[abs(np_a) < eps] = 2 * eps
np_mean = np.mean(np_a, axis=test_axes, keepdims=keepdims)
np_var = np.var(np_a, axis=test_axes, keepdims=keepdims)
mx_mean, mx_var = mx.nd.moments(a, keepdims=keepdims, axes=test_axes)
N = np_a.size / np_mean.size
mx_sym = mx.sym.Variable("data")
mx_moments = mx.sym.moments(mx_sym, axes=test_axes, keepdims=keepdims)
mx_test_sym = mx.sym.elemwise_add(mx_moments[0], mx_moments[1])
if len(np_mean.shape) == 0:
np_mean = np_mean.reshape(mx_mean.shape)
np_var = np_var.reshape(mx_var.shape)
assert np_mean.shape == mx_mean.shape
assert np_var.shape == mx_var.shape
check_symbolic_forward(mx_test_sym, [np_a], [np_mean + np_var], rtol=1e-3, atol=1e-5)
check_numeric_gradient(mx_test_sym, [np_a], numeric_eps=eps, rtol=1e-2, atol=2e-4)
def test_invalid_kernel_size():
invalid_kernel_size = 28
assert_exception(
mx.nd.Correlation,
MXNetError,
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=invalid_kernel_size)
def test_valid_kernel_size():
valid_kernel_size = 9
mx.nd.Correlation(
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=valid_kernel_size)
def test_valid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
output_data=mx.nd.Pooling(
input_data,
kernel=kernel,
stride=stride,
pad=(0,0,0),
pool_type='max',
name='pooling',
pooling_convention="same")
assert(math.ceil(input_data.shape[2]/stride) == output_data.shape[2])
def test_invalid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
pad = 2
assert_exception(
mx.nd.Pooling,
MXNetError,
input_data,
stride=stride,
kernel=kernel,
pad=pad,
pool_type='max',
name='pooling',
pooling_convention="same")
@pytest.mark.serial
def test_image_normalize():
# Part 1 - Test 3D input with 3D mean/std
shape_3d = (3, 28, 28)
mean = (0, 1, 2)
std = (3, 2, 1)
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
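    # Note: chained [:] slicing returns the same array, so an expression like
    # data_expected_3d[:][:][0] indexes channel 0 directly (axis 0 here).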
data_expected_3d[:][:][0] = data_expected_3d[:][:][0] / 3.0
data_expected_3d[:][:][1] = (data_expected_3d[:][:][1] - 1.0) / 2.0
data_expected_3d[:][:][2] = data_expected_3d[:][:][2] - 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][0] = 1 / 3.0
grad_expected_3d[:][:][1] = 1 / 2.0
grad_expected_3d[:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 2 - Test 4D input with 3D mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[0][:][:][0] = data_expected_4d[0][:][:][0] / 3.0
data_expected_4d[0][:][:][1] = (data_expected_4d[0][:][:][1] - 1.0) / 2.0
data_expected_4d[0][:][:][2] = data_expected_4d[0][:][:][2] - 2.0
data_expected_4d[1][:][:][0] = data_expected_4d[1][:][:][0] / 3.0
data_expected_4d[1][:][:][1] = (data_expected_4d[1][:][:][1] - 1.0) / 2.0
data_expected_4d[1][:][:][2] = data_expected_4d[1][:][:][2] - 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[0][:][:][0] = 1 / 3.0
grad_expected_4d[0][:][:][1] = 1 / 2.0
grad_expected_4d[0][:][:][2] = 1 / 1.0
grad_expected_4d[1][:][:][0] = 1 / 3.0
grad_expected_4d[1][:][:][1] = 1 / 2.0
grad_expected_4d[1][:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
# Part 3 - Test 3D input with scalar mean/std
shape_3d = (3, 28, 28)
mean = 1.0
std = 2.0
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][:] = (data_expected_3d[:][:][:] - 1.0) / 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 4 - Test 4D input with scalar mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[:][:][:][:] = (data_expected_4d[:][:][:][:] - 1.0) / 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[:][:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
@pytest.mark.serial
def test_index_array():
def test_index_array_default():
for shape in [(10,), (7, 5, 29), (5, 7, 11, 13, 17, 19)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(shape)
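            # np.mgrid builds one index grid per axis; stacking them on the last
            # axis yields, for every element, its coordinate vector, which is the
            # expected output of contrib.index_array.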
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_dim():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(())
expected = np.zeros((0,))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones((0, 0, 0))
expected = np.zeros((0, 0, 0, 3))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
def test_index_array_select_axes():
shape = (5, 7, 11, 13, 17, 19)
for axes in [(3,), (4, 1), (5, 1, 3), (-1,), (-5, -1, -3)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=axes)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)[..., axes]
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_select_axes_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=(2, 1))
input_array = np.ones((0, 0, 0, 0))
expected = np.zeros((0, 0, 2))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
test_index_array_default()
test_index_array_default_zero_dim()
test_index_array_default_zero_size()
test_index_array_select_axes()
test_index_array_select_axes_zero_size()
def test_scalar_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=())
assertRaises(MXNetError, mx.nd.ones, shape=())
with mx.np_shape():
data_mx = mx.nd.ones(shape=())
data_np = np.ones((), dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
def test_zero_size_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=(0, 1, 3, 0))
assertRaises(MXNetError, mx.nd.ones, shape=(0, 1, 3, 0))
with mx.np_shape():
data_mx = mx.nd.ones(shape=(0, 1, 0, 4))
data_np = np.ones(shape=data_mx.shape, dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
def test_concat_with_zero_size_tensor():
with mx.np_shape():
data1 = mx.nd.ones((0, 8, 12))
data2 = mx.nd.ones((3, 8, 12))
data3 = mx.nd.ones((0, 8, 12))
ret = mx.nd.Concat(data1, data2, data3, dim=0)
assert ret.shape == (3, 8, 12)
data1 = mx.nd.ones((0, 3, 10))
data2 = mx.nd.ones((0, 4, 10))
data3 = mx.nd.ones((0, 5, 10))
ret = mx.nd.Concat(data1, data2, data3, dim=1)
assert ret.shape == (0, 12, 10)
def test_np_shape_decorator():
@mx.use_np_shape
def check_scalar_one():
"""Generate scalar one tensor"""
return mx.nd.ones(shape=())
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
for active in [True, False]:
with mx.np_shape(active=active):
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
@mx.use_np_shape
def check_concat(shape1, shape2, axis):
data1 = mx.nd.ones(shape1)
data2 = mx.nd.ones(shape2)
ret = mx.nd.Concat(data1, data2, dim=axis)
expected_ret = np.concatenate((data1.asnumpy(), data2.asnumpy()), axis=axis)
assert ret.shape == expected_ret.shape
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
for active in [True, False]:
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
def test_add_n():
data_shape = (2, 2)
input_num = 5
data = [mx.nd.random.uniform(shape=data_shape) for i in range(input_num)]
rslt = mx.nd.zeros(shape=data_shape)
for i in range(input_num):
rslt += data[i]
add_n_rslt = mx.nd.add_n(*data, out=data[0])
assert_almost_equal(rslt.asnumpy(), add_n_rslt.asnumpy(), atol=1e-5)
def test_get_all_registered_operators():
ops = get_all_registered_operators()
assert isinstance(ops, list)
assert len(ops) > 0
assert 'Activation' in ops
def test_get_operator_arguments():
operator_arguments = get_operator_arguments('Activation')
assert isinstance(operator_arguments, OperatorArguments)
assert operator_arguments.names == ['data', 'act_type']
assert operator_arguments.types \
== ['NDArray-or-Symbol', "{'relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'}, required"]
assert operator_arguments.narg == 2
def test_transpose_infer_shape_back():
o1 = mx.sym.ones(shape=[2,3])
o2 = mx.sym.ones(shape=[-1,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b._bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
def test_transpose_infer_shape_mixed():
o1 = mx.sym.ones(shape=[2,-1])
o2 = mx.sym.ones(shape=[3,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b._bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
def test_sample_normal_default_shape():
# Test case from https://github.com/apache/incubator-mxnet/issues/16135
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]))
assert s.shape == (1,)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=())
assert s.shape == (1,)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=1)
assert s.shape == (1, 1)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=(1,))
assert s.shape == (1, 1)
def test_large_tensor_disabled_err_msg():
LARGE_X = 4300000000
MEDIUM_X = 1000000000
SMALL_Y = 1
shape = (2, LARGE_X)
def check_nd_array():
x = np.arange(0, LARGE_X)
assertRaises(MXNetError, mx.nd.array, x)
def check_nd_ones():
assertRaises(MXNetError, mx.nd.ones, shape)
def check_nd_zeros():
assertRaises(MXNetError, mx.nd.zeros, shape)
def check_nd_full():
val = 1
assertRaises(Exception, mx.nd.full, shape, val)
def check_nd_arange():
start = 0
stop = LARGE_X
assertRaises(Exception, mx.nd.arange, start, stop)
def check_nd_random():
shape = (2, LARGE_X)
def check_random_exp():
lam = 4
assertRaises(MXNetError, mx.nd.random_exponential, lam, shape)
def check_random_gamma():
alpha = 9
beta = 0.5
assertRaises(MXNetError, mx.nd.random_gamma, alpha, beta, shape)
def check_random_normal():
loc = 0
scale = 1
assertRaises(MXNetError, mx.nd.random_normal, loc, scale, shape)
def check_random_poisson():
lam = 4
            assertRaises(MXNetError, mx.nd.random_poisson, lam, shape)
def check_random_randint():
low = 0
high = 1000000
assertRaises(MXNetError, mx.nd.random_randint, low, high, shape)
        def check_random_uniform():
            low = 0
            high = 1
            assertRaises(MXNetError, mx.nd.random_uniform, low, high, shape)
def check_multihead_attention_selfatt(dtype):
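    # Pack per-head Q/K/V projection weights and biases into the interleaved
    # layout consumed by contrib.interleaved_matmul_selfatt_*, then check the
    # fused path against an explicit reshape/transpose/batch_dot implementation
    # (forward outputs, attention scores and gradients).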
def convert_weight(F, q_weight, k_weight, v_weight, num_heads):
q_weight = F.reshape(q_weight, shape=(num_heads, -1, 0), reverse=True)
k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
all_weights = F.concat(q_weight, k_weight, v_weight, dim=-2)
all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
return all_weights
def convert_bias(F, q_bias, k_bias, v_bias, num_heads):
q_bias = F.reshape(q_bias, shape=(num_heads, -1))
k_bias = F.reshape(k_bias, shape=(num_heads, -1))
v_bias = F.reshape(v_bias, shape=(num_heads, -1))
all_bias = F.stack(q_bias, k_bias, v_bias, axis=1)
all_bias = F.reshape(all_bias, shape=(-1,))
return all_bias
batch_size = 2
qkv_length = 7 # length of a sequence
qkv_dim = 9 # dimension of encoding
num_heads = 3 # number of attention head
head_dim = 5 # head size
out_dim = 13 * num_heads
qkv_units = num_heads * head_dim
arg_params = {
'qkv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
}
qkv = mx.sym.Variable('qkv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
qkv_weight = convert_weight(mx.sym, q_weight, k_weight, v_weight, num_heads)
qkv_bias = convert_bias(mx.sym, q_bias, k_bias, v_bias, num_heads)
qkv = mx.sym.transpose(qkv, axes=(1, 0, 2))
qkv_proj = mx.sym.FullyConnected(qkv, weight=qkv_weight, bias=qkv_bias, flatten=False,
num_hidden=qkv_units * 3, no_bias=False)
att_score = mx.sym.contrib.interleaved_matmul_selfatt_qk(
qkv_proj, heads=num_heads)
att_score = att_score + sonde
weighted_value = mx.sym.contrib.interleaved_matmul_selfatt_valatt(
qkv_proj, att_score, heads=num_heads)
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.transpose(output, axes=(1, 0, 2))
output = mx.sym.Group([output, att_score])
executor = output._simple_bind(ctx=default_context(),
qkv=(batch_size, qkv_length, qkv_dim),
q_weight=(qkv_units, qkv_dim),
q_bias=(qkv_units,),
k_weight=(qkv_units, qkv_dim),
k_bias=(qkv_units,),
v_weight=(qkv_units, qkv_dim),
v_bias=(qkv_units,),
type_dict={'qkv': dtype,
'q_weight': dtype,
'k_weight': dtype,
'v_weight': dtype,
'q_bias': dtype,
'k_bias': dtype,
'v_bias': dtype,
'sonde': dtype},
grad_req='write')
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_shape = executor.outputs[0].shape
output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
output_opti = executor.outputs[0].asnumpy()
att_score_opti = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype),
mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
qkv = mx.sym.Variable('qkv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
q = mx.sym.FullyConnected(qkv, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
k = mx.sym.FullyConnected(qkv, weight=k_weight, bias=k_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
v = mx.sym.FullyConnected(qkv, weight=v_weight, bias=v_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
q = mx.sym.contrib.div_sqrt_dim(q)
att_score = mx.sym.batch_dot(q, k, transpose_b=True)
att_score = att_score + sonde
v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
weighted_value = mx.sym.batch_dot(att_score, v)
weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
reverse=True)
weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.Group([output, att_score])
executor = output._simple_bind(ctx=default_context(),
qkv=(batch_size, qkv_length, qkv_dim),
type_dict={'qkv': dtype},
grad_req='write')
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_orig = executor.outputs[0].asnumpy()
att_score_orig = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype),
mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
for k in grads_opti.keys():
assert(grads_orig[k].dtype == grads_opti[k].dtype)
assert(grads_orig[k].shape == grads_opti[k].shape)
assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@assert_raises_cuda_not_satisfied(min_version='9.1')
@pytest.mark.serial
def test_multihead_attention_selfatt():
dtypes = ['float32']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for dtype in dtypes:
check_multihead_attention_selfatt(dtype=dtype)
def check_multihead_attention_encdec(dtype):
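    # Same consistency check as the self-attention case above, but with a
    # separate query input and the key/value projections packed together for
    # encoder-decoder attention (contrib.interleaved_matmul_encdec_*).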
def convert_weight(F, k_weight, v_weight, num_heads):
k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
all_weights = F.concat(k_weight, v_weight, dim=-2)
all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
return all_weights
def convert_bias(F, k_bias, v_bias, num_heads):
k_bias = F.reshape(k_bias, shape=(num_heads, -1))
v_bias = F.reshape(v_bias, shape=(num_heads, -1))
all_bias = F.stack(k_bias, v_bias, axis=1)
all_bias = F.reshape(all_bias, shape=(-1,))
return all_bias
batch_size = 2
qkv_length = 7 # length of a sequence
qkv_dim = 9 # dimension of encoding
num_heads = 3 # number of attention head
head_dim = 5 # head size
out_dim = 13 * num_heads
qkv_units = num_heads * head_dim
arg_params = {
'q': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'kv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
}
q = mx.sym.Variable('q')
kv = mx.sym.Variable('kv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
kv_weight = convert_weight(mx.sym, k_weight, v_weight, num_heads)
kv_bias = convert_bias(mx.sym, k_bias, v_bias, num_heads)
kv = mx.sym.transpose(kv, axes=(1, 0, 2))
kv_proj = mx.sym.FullyConnected(kv, weight=kv_weight, bias=kv_bias, flatten=False,
num_hidden=qkv_units * 2, no_bias=False)
q = mx.sym.transpose(q, axes=(1, 0, 2))
q_proj = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
att_score = mx.sym.contrib.interleaved_matmul_encdec_qk(
q_proj, kv_proj, heads=num_heads)
att_score = att_score + sonde
weighted_value = mx.sym.contrib.interleaved_matmul_encdec_valatt(
kv_proj, att_score, heads=num_heads)
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.transpose(output, axes=(1, 0, 2))
output = mx.sym.Group([output, att_score])
executor = output._simple_bind(ctx=default_context(),
q=(batch_size, qkv_length, qkv_dim),
kv=(batch_size, qkv_length, qkv_dim),
q_weight=(qkv_units, qkv_dim),
q_bias=(qkv_units,),
k_weight=(qkv_units, qkv_dim),
k_bias=(qkv_units,),
v_weight=(qkv_units, qkv_dim),
v_bias=(qkv_units,),
out_weight=(out_dim, qkv_units),
out_bias=(out_dim,),
type_dict={'q': dtype,
'kv': dtype,
'q_weight': dtype,
'q_bias': dtype,
'k_weight': dtype,
'k_bias': dtype,
'v_weight': dtype,
'v_bias': dtype,
'out_weight': dtype,
'out_bias': dtype,
},
grad_req='write')
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_shape = executor.outputs[0].shape
output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
output_opti = executor.outputs[0].asnumpy()
att_score_opti = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
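    # Reference implementation: recompute the same attention with explicit
    # reshape/transpose and batch_dot, then compare outputs and gradients.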
q = mx.sym.Variable('q')
kv = mx.sym.Variable('kv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
q = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
k = mx.sym.FullyConnected(kv, weight=k_weight, bias=k_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
v = mx.sym.FullyConnected(kv, weight=v_weight, bias=v_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
q = mx.sym.contrib.div_sqrt_dim(q)
att_score = mx.sym.batch_dot(q, k, transpose_b=True)
att_score = att_score + sonde
v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
weighted_value = mx.sym.batch_dot(att_score, v)
weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
reverse=True)
weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.Group([output, att_score])
executor = output._simple_bind(ctx=default_context(),
q=(batch_size, qkv_length, qkv_dim),
kv=(batch_size, qkv_length, qkv_dim),
type_dict={'q': dtype,
'kv': dtype},
grad_req='write')
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_orig = executor.outputs[0].asnumpy()
att_score_orig = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
for k in grads_opti.keys():
assert(grads_orig[k].dtype == grads_opti[k].dtype)
assert(grads_orig[k].shape == grads_opti[k].shape)
assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@assert_raises_cuda_not_satisfied(min_version='9.1')
@pytest.mark.serial
def test_multihead_attention_encdec():
dtypes = ['float32']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for dtype in dtypes:
check_multihead_attention_encdec(dtype=dtype)
@pytest.mark.serial
def test_im2col_col2im():
def compute_output_size(spatial, kernel, stride=1, dilate=1, pad=0):
pad_size = spatial + 2 * pad
dilated_kernel = dilate * (kernel - 1) + 1
return (pad_size - dilated_kernel) // stride + 1
def build_kwargs(kernel, stride=1, dilate=1, pad=0):
return {'kernel': (kernel, kernel),
'stride': (stride, stride),
'dilate': (dilate, dilate),
'pad': (pad, pad)}
# use im2col to compute convolution
def test_conv_compute(input_shape, num_filter, kernel, stride=1, dilate=1, pad=0):
batch_size = input_shape[0]
channel = input_shape[1]
kwargs = build_kwargs(kernel, stride, dilate, pad)
data = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(data, **kwargs)
w = mx.nd.uniform(shape=(num_filter, channel, kernel, kernel))
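        # im2col unfolds each receptive field into a column, so the convolution
        # reduces to a matrix multiplication with the flattened filters.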
c1 = mx.nd.dot(col.transpose((0, 2, 1)), w.reshape(num_filter, -1).T).transpose((0, 2, 1))
hos = compute_output_size(input_shape[2], kernel, stride, dilate, pad)
wos = compute_output_size(input_shape[3], kernel, stride, dilate, pad)
c1 = c1.reshape((batch_size, num_filter, hos, wos))
c2 = mx.nd.Convolution(data, num_filter=num_filter, weight=w, no_bias=True, **kwargs)
assert_almost_equal(c1.asnumpy(), c2.asnumpy(), rtol=1e-5, atol=1e-5)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2,
dilate = 2
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
# use composite of im2col and col2im to reconstruct image
def test_reconstruct(input_shape, kernel, stride=1, dilate=1, pad=0):
batch_size = input_shape[0]
channel = input_shape[1]
kwargs = build_kwargs(kernel, stride, dilate, pad)
data = mx.nd.uniform(shape=input_shape)
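        # col2im sums overlapping patches back into the image, so
        # col2im(im2col(x)) equals x scaled element-wise by the number of patches
        # covering each pixel, i.e. col2im(ones_like(col)) * x.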
col = mx.nd.im2col(data, **kwargs)
im1 = mx.nd.col2im(col, input_shape[2:], **kwargs)
im2 = mx.nd.col2im(mx.nd.ones_like(col), input_shape[2:], **kwargs) * data
assert_almost_equal(im1.asnumpy(), im2.asnumpy(), rtol=1e-5, atol=1e-5)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
# test gradient
# the grad of im2col is col2im, and vice versa
def test_grad(input_shape, kernel, stride=1, dilate=1, pad=0):
# im2col
data = mx.sym.Variable('data')
kwargs = build_kwargs(kernel, stride, dilate, pad)
sym = mx.sym.im2col(data, **kwargs)
im = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(im, **kwargs)
col_shape = col.shape
expected = mx.nd.col2im(col, input_shape[2:], **kwargs)
check_symbolic_backward(sym, [im.asnumpy()], [col.asnumpy()], [expected.asnumpy()])
# col2im
data = mx.sym.Variable('data')
sym = mx.sym.col2im(data, input_shape[2:], **kwargs)
col = mx.nd.uniform(shape=col_shape)
im = mx.nd.col2im(col, input_shape[2:], **kwargs)
expected = mx.nd.im2col(im, **kwargs)
check_symbolic_backward(sym, [col.asnumpy()], [im.asnumpy()], [expected.asnumpy()])
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
def test_elemwise_sum_for_gradient_accumulation():
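    # With a = 1 and b = 2, repeating b = b * a gives b = 2 * a**nrepeat, so the
    # gradient w.r.t. a must be 2 * nrepeat for both 'write' and 'add' grad_req.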
for nrepeat in range(1, 10):
stored_grad = dict()
for grad_req in ['write', 'add']:
a = mx.nd.array([1])
b = mx.nd.array([2])
if grad_req == 'write':
a.attach_grad(grad_req='write')
elif grad_req == 'add':
a.attach_grad(grad_req='add')
a.grad[:] = 0
with mx.autograd.record():
for _ in range(nrepeat):
b = b * a
b.backward()
stored_grad[grad_req] = a.grad.asscalar()
assert stored_grad['write'] == stored_grad['add']
assert stored_grad['write'] == 2 * nrepeat
def test_elementwise_ops_on_misaligned_input():
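    # Slicing the float16 arrays at an odd offset yields views whose data pointer
    # is not aligned to the width used by vectorized kernels.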
a = mx.nd.array([1,2,3,4], dtype='float16')
b = mx.nd.array([1,2,3,4], dtype='float16')
c = a[1:3]
d = b[1:3]
# Note: testing just elemwise_add since all elemwise_ops
# share the implementation
mx.nd.elemwise_add(c, d, out=c)
mx.nd.waitall()
a = mx.nd.array([1,2,3,4], dtype='float16')
b = mx.nd.array([1,2,3,4], dtype='float16')
c = a[0:3]
d = b[0:3]
mx.nd.elemwise_add(c, d, out=c)
mx.nd.waitall()
assert a[3].asscalar() == 4.0
@pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64'])
@pytest.mark.parametrize('lead_dim', [2, 3, 4, 6, 10])
@pytest.mark.parametrize('both_ways', [False, True])
def test_broadcast_ops_on_misaligned_input(dtype, lead_dim, both_ways):
shape = list(rand_shape_2d()) + [lead_dim]
small_shape = [shape[0], 1, lead_dim]
if both_ways:
# Broadcast in both ways [1, K, L] x [M, 1, L]
big_shape = [1, shape[1], lead_dim]
else:
big_shape = shape
size = np.product(shape)
small_size = np.product(small_shape)
big_size = np.product(big_shape)
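    # Slice starting at element 1 so the reshaped views are deliberately
    # misaligned with respect to vectorized loads.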
a = mx.nd.arange(5000)
b = mx.nd.arange(5000)
e = mx.nd.arange(5000)
c = a[1:big_size + 1].reshape(big_shape)
d = b[1:small_size + 1].reshape(small_shape)
f = e[1:size + 1].reshape(shape)
mx.nd.broadcast_add(c, d, out=f)
expected = c.asnumpy() + d.asnumpy()
mx.nd.waitall()
assert_almost_equal(f, expected)
@pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64'])
@pytest.mark.parametrize('lead_dim', [2, 3, 4, 6, 10])
@pytest.mark.parametrize('both_ways', [False, True])
def test_broadcast_ops_on_misaligned_input_oneside(dtype, lead_dim, both_ways):
shape = list(rand_shape_2d()) + [lead_dim]
small_shape = [shape[0], shape[1], 1]
if both_ways:
        # Broadcast in both ways [1, K, L] x [M, K, 1]
big_shape = [1, shape[1], lead_dim]
else:
big_shape = shape
size = np.product(shape)
small_size = np.product(small_shape)
big_size = np.product(big_shape)
a = mx.nd.arange(5000)
b = mx.nd.arange(5000)
e = mx.nd.arange(5000)
c = a[1:big_size + 1].reshape(big_shape)
d = b[1:small_size + 1].reshape(small_shape)
f = e[1:size + 1].reshape(shape)
mx.nd.broadcast_add(c, d, out=f)
expected = c.asnumpy() + d.asnumpy()
mx.nd.waitall()
assert_almost_equal(f, expected)
|
yajiedesign/mxnet
|
tests/python/unittest/test_operator.py
|
Python
|
apache-2.0
| 407,016
|
__author__ = 'Keiran'
from model.contact import Contact
import pytest
def test_contact_compare(app, orm):
with pytest.allure.step('Given a sorted contact list from DB'):
contacts_from_db = orm.get_contact_list()
sorted_contacts_from_db = list(sorted(contacts_from_db, key=Contact.id_or_max))
with pytest.allure.step('Given a sorted contact list from home page'):
contacts_from_home_page = app.contact.get_contact_list()
sorted_contacts_from_home_page = list(sorted(contacts_from_home_page, key=Contact.id_or_max))
    with pytest.allure.step('Then I compare these lists'):
for index in range(len(sorted_contacts_from_db)):
assert sorted_contacts_from_db[index] == sorted_contacts_from_home_page[index]
assert sorted_contacts_from_db[index].join_mails() == sorted_contacts_from_home_page[index].all_mails
assert sorted_contacts_from_db[index].join_phones() == sorted_contacts_from_home_page[index].all_phones
|
IKeiran/FPT-Sinyakov
|
test/test_contact_compare.py
|
Python
|
apache-2.0
| 993
|
import os
import time
import json
import pprint
from util import hook
def readConfig():
### Read config json and parse it
confJson = None
with open(os.getcwd() + '/antiFloodBotConfig.json', 'r') as confFile:
confJson = confFile.read()
return json.loads(confJson)
inputs = {} #store time (unixtimestamp in sec) of every entry sent by user in map where key is user nickname
kicked = [] #store nicknames of kicked users
conf = readConfig()
timeIntervalScope = conf['timeIntervalScope'] # interval when entries are collected [sec]
entryThreshold = conf['entryThreshold'] #how many entries are allowed in timeIntervalScope
logFile = conf['logFile']
@hook.event('PRIVMSG')
def antiFlood(inp, nick=None, msg=None, conn=None, chan=None):
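    # Sliding-window flood detection: keep a timestamp per message for each nick,
    # drop entries older than timeIntervalScope, and kick once the count reaches
    # entryThreshold (repeat offenders get banned before the kick).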
if (nick not in inputs):
inputs[nick] = []
currentTime = time.time()
timeThreshold = currentTime - timeIntervalScope
inputs[nick].append(currentTime)
    inputs[nick] = list(filter(lambda x: x > timeThreshold, inputs[nick]))  # drop entries older than timeIntervalScope seconds
    if len(inputs[nick]) >= entryThreshold:  # user exceeded the allowed message rate, kick them
explanationMessage = conf['kickMessage']
file = open(logFile, 'a')
file.write('Trying to kick %s on channel %s \n' % (nick, chan))
if nick in kicked:
explanationMessage = conf['banMessage']
out = "MODE %s +b %s" % (chan, nick)
conn.send(out)
file.write('%s is kicked with ban \n' % (nick))
out = "KICK %s %s : %s" % (chan, nick, explanationMessage)
conn.send(out)
kicked.append(nick)
file.close()
    # TODO:
    # if the same user joins again within 24 hours and keeps spamming, temp ban for XX time.
    # step 3) if the same user joins after the removal of the ban and spams, permanent ban.
@hook.event('PRIVMSG')
def paramDump(inp, nick=None, msg=None, conn=None, chan=None):
def saveToFile(file, label, obj):
file.write("===== " + label + " ======== \n")
file.write("type " + str(type (obj)) + " ========\n")
file.write("methods " + str(dir(obj)) + " ========\n")
file.write("properties ========\n")
pprint.pprint(obj, file)
file.write("\n\n\n")
file = open(logFile, 'a')
saveToFile(file, "inp", inp)
saveToFile(file, "nick", nick)
saveToFile(file, "msg", msg)
saveToFile(file, "chan", chan)
saveToFile(file, "conn", conn)
file.close()
@hook.event("004")
def onConnect(param, conn=None, raw=None):
conn.send("Antiflod bot is ready")
|
mrok/ircAntiFloodBot
|
src/antiFloodBot.py
|
Python
|
apache-2.0
| 2,562
|
# Copyright (c) 2013 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import threading
from huxley.consts import TestRunModes
from huxley.errors import TestError
from huxley.images import images_identical, image_diff
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
# Since we want consistent focus screenshots we steal focus
# when taking screenshots. To avoid races we lock during this
# process.
SCREENSHOT_LOCK = threading.RLock()
class TestStep(object):
def __init__(self, offset_time):
self.offset_time = offset_time
def execute(self, run):
raise NotImplementedError
class ClickTestStep(TestStep):
CLICK_ID = '_huxleyClick'
def __init__(self, offset_time, pos):
super(ClickTestStep, self).__init__(offset_time)
self.pos = pos
def execute(self, run):
print ' Clicking', self.pos
if run.d.name == 'phantomjs':
# PhantomJS 1.x does not support 'click()' so use Selenium
body = run.d.find_element_by_tag_name('body')
ActionChains(run.d).move_to_element_with_offset(body, self.pos[0], self.pos[1]).click().perform()
elif run.d.name == 'Safari':
el = run.d.execute_script('return document.elementFromPoint(%d, %d);' % (self.pos[0], self.pos[1]))
if el:
el.click()
else:
print ' warning, no element found at (%d, %d);' % (self.pos[0], self.pos[1])
else:
# Work around multiple bugs in WebDriver's implementation of click()
run.d.execute_script(
'document.elementFromPoint(%d, %d).click();' % (self.pos[0], self.pos[1])
)
run.d.execute_script(
'document.elementFromPoint(%d, %d).focus();' % (self.pos[0], self.pos[1])
)
class ScrollTestStep(TestStep):
SCROLL_OFFSET_ID = '_huxleyScroll'
def __init__(self, offset_time, pos):
super(ScrollTestStep, self).__init__(offset_time)
self.pos = pos
def execute(self, run):
print ' Scrolling', self.pos
run.d.execute_script(
'window.scrollTo(%d, %d);' % (self.pos[0], self.pos[1])
)
class KeyTestStep(TestStep):
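    # Maps JavaScript keyCodes captured during recording to Selenium Keys or plain
    # characters; a second table covers the shifted variants.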
KEYS_BY_JS_KEYCODE = {
33: Keys.PAGE_UP,
34: Keys.PAGE_DOWN,
35: Keys.END,
36: Keys.HOME,
37: Keys.LEFT,
38: Keys.UP,
39: Keys.RIGHT,
40: Keys.DOWN,
46: Keys.DELETE,
186: ";",
187: "=",
188: ",",
190: ".",
191: "/",
192: "`",
219: "[",
220: "\\",
221: "]",
222: "'",
}
KEYS_BY_JS_KEYCODE_SHIFT = dict(KEYS_BY_JS_KEYCODE.items() + {
48: ")",
49: "!",
50: "@",
51: "#",
52: "$",
53: "%",
54: "^",
55: "&",
56: "*",
57: "(",
186: ":",
187: "+",
188: "<",
190: ">",
191: "?",
192: "~",
219: "{",
220: "|",
221: "}",
222: "\"",
}.items())
KEY_ID = '_huxleyKey'
# param is [keyCode, shiftKey]
def __init__(self, offset_time, param):
super(KeyTestStep, self).__init__(offset_time)
# backwards compat. for old records where a string was saved
if isinstance(param, basestring):
self.key = param
else:
codes = self.KEYS_BY_JS_KEYCODE_SHIFT if param[1] else self.KEYS_BY_JS_KEYCODE
char = chr(param[0])
if not param[1]:
char = char.lower()
self.key = codes.get(param[0], char)
def execute(self, run):
if self.key == Keys.HOME:
print ' Scrolling to top'
run.d.execute_script('window.scrollTo(0, 0)')
elif self.key == Keys.END:
print ' Scrolling to bottom'
run.d.execute_script('window.scrollTo(0, document.body.clientHeight)')
else:
print ' Typing', self.key
id = run.d.execute_script('return document.activeElement.id;')
if id is None or id == '':
run.d.execute_script(
'document.activeElement.id = %r;' % self.KEY_ID
)
id = self.KEY_ID
run.d.find_element_by_id(id).send_keys(self.key)
class ScreenshotTestStep(TestStep):
def __init__(self, offset_time, run, index):
super(ScreenshotTestStep, self).__init__(offset_time)
self.index = index
def get_path(self, run):
return os.path.join(run.path, 'screenshot' + str(self.index) + '.png')
def execute(self, run):
print ' Taking screenshot', self.index
original = self.get_path(run)
new = os.path.join(run.path, 'last.png')
with SCREENSHOT_LOCK:
# Steal focus for a consistent screenshot
run.d.switch_to_window(run.d.window_handles[0])
# iOS insertion points are visible in screenshots
if run.d.name == 'Safari':
active = run.d.execute_script('a = document.activeElement; a.blur(); return a;')
if run.mode == TestRunModes.RERECORD:
run.d.save_screenshot(original)
else:
run.d.save_screenshot(new)
try:
if not images_identical(original, new, run.test.mask):
if run.save_diff:
diffpath = os.path.join(run.path, 'diff.png')
diff = image_diff(original, new, diffpath, run.diffcolor, run.test.mask)
raise TestError(
('Screenshot %s was different; compare %s with %s. See %s ' +
'for the comparison. diff=%r') % (
self.index, original, new, diffpath, diff
)
)
else:
raise TestError('Screenshot %s was different.' % self.index)
finally:
if not run.save_diff:
os.unlink(new)
|
lyft/huxley
|
huxley/steps.py
|
Python
|
apache-2.0
| 6,736
|
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import random
import six
from tempest.api.network import base
from tempest.common.utils import data_utils
from tempest import config
from tempest.lib import exceptions as lib_exc
from tempest import test
CONF = config.CONF
class NetworksTestDHCPv6(base.BaseNetworkTest):
_ip_version = 6
""" Test DHCPv6 specific features using SLAAC, stateless and
stateful settings for subnets. Also it shall check dual-stack
functionality (IPv4 + IPv6 together).
The tests include:
generating of SLAAC EUI-64 address in subnets with various settings
receiving SLAAC addresses in combinations of various subnets
receiving stateful IPv6 addresses
addressing in subnets with router
"""
@classmethod
def skip_checks(cls):
super(NetworksTestDHCPv6, cls).skip_checks()
msg = None
if not CONF.network_feature_enabled.ipv6:
msg = "IPv6 is not enabled"
elif not CONF.network_feature_enabled.ipv6_subnet_attributes:
msg = "DHCPv6 attributes are not enabled."
if msg:
raise cls.skipException(msg)
@classmethod
def resource_setup(cls):
super(NetworksTestDHCPv6, cls).resource_setup()
cls.network = cls.create_network()
def _remove_from_list_by_index(self, things_list, elem):
for index, i in enumerate(things_list):
if i['id'] == elem['id']:
break
del things_list[index]
def _clean_network(self):
body = self.ports_client.list_ports()
ports = body['ports']
for port in ports:
if (port['device_owner'].startswith('network:router_interface') and
port['device_id'] in [r['id'] for r in self.routers]):
self.routers_client.remove_router_interface(port['device_id'],
port_id=port['id'])
else:
if port['id'] in [p['id'] for p in self.ports]:
self.ports_client.delete_port(port['id'])
self._remove_from_list_by_index(self.ports, port)
body = self.subnets_client.list_subnets()
subnets = body['subnets']
for subnet in subnets:
if subnet['id'] in [s['id'] for s in self.subnets]:
self.subnets_client.delete_subnet(subnet['id'])
self._remove_from_list_by_index(self.subnets, subnet)
body = self.routers_client.list_routers()
routers = body['routers']
for router in routers:
if router['id'] in [r['id'] for r in self.routers]:
self.routers_client.delete_router(router['id'])
self._remove_from_list_by_index(self.routers, router)
def _get_ips_from_subnet(self, **kwargs):
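        # Create a subnet and a port with a random MAC, then return both the IP
        # actually assigned to the port and the EUI-64 address derived from the
        # subnet CIDR and that MAC, so callers can compare the two.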
subnet = self.create_subnet(self.network, **kwargs)
port_mac = data_utils.rand_mac_address()
port = self.create_port(self.network, mac_address=port_mac)
real_ip = next(iter(port['fixed_ips']), None)['ip_address']
eui_ip = data_utils.get_ipv6_addr_by_EUI64(subnet['cidr'],
port_mac).format()
return real_ip, eui_ip
@test.idempotent_id('e5517e62-6f16-430d-a672-f80875493d4c')
def test_dhcpv6_stateless_eui64(self):
# NOTE: When subnets configured with RAs SLAAC (AOM=100) and DHCP
# stateless (AOM=110) both for radvd and dnsmasq, port shall receive
# IP address calculated from its MAC.
for ra_mode, add_mode in (
('slaac', 'slaac'),
('dhcpv6-stateless', 'dhcpv6-stateless'),
):
kwargs = {'ipv6_ra_mode': ra_mode,
'ipv6_address_mode': add_mode}
real_ip, eui_ip = self._get_ips_from_subnet(**kwargs)
self._clean_network()
self.assertEqual(eui_ip, real_ip,
('Real port IP is %s, but shall be %s when '
'ipv6_ra_mode=%s and ipv6_address_mode=%s') % (
real_ip, eui_ip, ra_mode, add_mode))
@test.idempotent_id('ae2f4a5d-03ff-4c42-a3b0-ce2fcb7ea832')
def test_dhcpv6_stateless_no_ra(self):
# NOTE: When subnets configured with dnsmasq SLAAC and DHCP stateless
# and there is no radvd, port shall receive IP address calculated
# from its MAC and mask of subnet.
for ra_mode, add_mode in (
(None, 'slaac'),
(None, 'dhcpv6-stateless'),
):
kwargs = {'ipv6_ra_mode': ra_mode,
'ipv6_address_mode': add_mode}
kwargs = dict((k, v) for k, v in six.iteritems(kwargs) if v)
real_ip, eui_ip = self._get_ips_from_subnet(**kwargs)
self._clean_network()
self.assertEqual(eui_ip, real_ip,
                             ('Real port IP %s shall be equal to EUI-64 %s '
                              'when ipv6_ra_mode=%s, ipv6_address_mode=%s') % (
real_ip, eui_ip,
ra_mode if ra_mode else "Off",
add_mode if add_mode else "Off"))
@test.idempotent_id('81f18ef6-95b5-4584-9966-10d480b7496a')
def test_dhcpv6_invalid_options(self):
"""Different configurations for radvd and dnsmasq are not allowed"""
for ra_mode, add_mode in (
('dhcpv6-stateless', 'dhcpv6-stateful'),
('dhcpv6-stateless', 'slaac'),
('slaac', 'dhcpv6-stateful'),
('dhcpv6-stateful', 'dhcpv6-stateless'),
('dhcpv6-stateful', 'slaac'),
('slaac', 'dhcpv6-stateless'),
):
kwargs = {'ipv6_ra_mode': ra_mode,
'ipv6_address_mode': add_mode}
self.assertRaises(lib_exc.BadRequest,
self.create_subnet,
self.network,
**kwargs)
@test.idempotent_id('21635b6f-165a-4d42-bf49-7d195e47342f')
def test_dhcpv6_stateless_no_ra_no_dhcp(self):
# NOTE: If no radvd option and no dnsmasq option is configured
# port shall receive IP from fixed IPs list of subnet.
real_ip, eui_ip = self._get_ips_from_subnet()
self._clean_network()
self.assertNotEqual(eui_ip, real_ip,
('Real port IP %s equal to EUI-64 %s when '
                             'ipv6_ra_mode=Off and ipv6_address_mode=Off, '
'but shall be taken from fixed IPs') % (
real_ip, eui_ip))
@test.idempotent_id('4544adf7-bb5f-4bdc-b769-b3e77026cef2')
def test_dhcpv6_two_subnets(self):
# NOTE: When one IPv6 subnet configured with dnsmasq SLAAC or DHCP
# stateless and other IPv6 is with DHCP stateful, port shall receive
# EUI-64 IP addresses from first subnet and DHCP address from second
# one. Order of subnet creating should be unimportant.
for order in ("slaac_first", "dhcp_first"):
for ra_mode, add_mode in (
('slaac', 'slaac'),
('dhcpv6-stateless', 'dhcpv6-stateless'),
):
kwargs = {'ipv6_ra_mode': ra_mode,
'ipv6_address_mode': add_mode}
kwargs_dhcp = {'ipv6_address_mode': 'dhcpv6-stateful'}
if order == "slaac_first":
subnet_slaac = self.create_subnet(self.network, **kwargs)
subnet_dhcp = self.create_subnet(
self.network, **kwargs_dhcp)
else:
subnet_dhcp = self.create_subnet(
self.network, **kwargs_dhcp)
subnet_slaac = self.create_subnet(self.network, **kwargs)
port_mac = data_utils.rand_mac_address()
eui_ip = data_utils.get_ipv6_addr_by_EUI64(
subnet_slaac['cidr'],
port_mac
).format()
port = self.create_port(self.network, mac_address=port_mac)
real_ips = dict([(k['subnet_id'], k['ip_address'])
for k in port['fixed_ips']])
real_dhcp_ip, real_eui_ip = [real_ips[sub['id']]
for sub in [subnet_dhcp,
subnet_slaac]]
self.ports_client.delete_port(port['id'])
self.ports.pop()
body = self.ports_client.list_ports()
ports_id_list = [i['id'] for i in body['ports']]
self.assertNotIn(port['id'], ports_id_list)
self._clean_network()
self.assertEqual(real_eui_ip,
eui_ip,
'Real IP is {0}, but shall be {1}'.format(
real_eui_ip,
eui_ip))
msg = ('Real IP address is {0} and it is NOT on '
'subnet {1}'.format(real_dhcp_ip, subnet_dhcp['cidr']))
self.assertIn(netaddr.IPAddress(real_dhcp_ip),
netaddr.IPNetwork(subnet_dhcp['cidr']), msg)
@test.idempotent_id('4256c61d-c538-41ea-9147-3c450c36669e')
def test_dhcpv6_64_subnets(self):
# NOTE: When one IPv6 subnet configured with dnsmasq SLAAC or DHCP
# stateless and other IPv4 is with DHCP of IPv4, port shall receive
# EUI-64 IP addresses from first subnet and IPv4 DHCP address from
# second one. Order of subnet creating should be unimportant.
for order in ("slaac_first", "dhcp_first"):
for ra_mode, add_mode in (
('slaac', 'slaac'),
('dhcpv6-stateless', 'dhcpv6-stateless'),
):
kwargs = {'ipv6_ra_mode': ra_mode,
'ipv6_address_mode': add_mode}
if order == "slaac_first":
subnet_slaac = self.create_subnet(self.network, **kwargs)
subnet_dhcp = self.create_subnet(
self.network, ip_version=4)
else:
subnet_dhcp = self.create_subnet(
self.network, ip_version=4)
subnet_slaac = self.create_subnet(self.network, **kwargs)
port_mac = data_utils.rand_mac_address()
eui_ip = data_utils.get_ipv6_addr_by_EUI64(
subnet_slaac['cidr'],
port_mac
).format()
port = self.create_port(self.network, mac_address=port_mac)
real_ips = dict([(k['subnet_id'], k['ip_address'])
for k in port['fixed_ips']])
real_dhcp_ip, real_eui_ip = [real_ips[sub['id']]
for sub in [subnet_dhcp,
subnet_slaac]]
self._clean_network()
self.assertEqual(real_eui_ip,
eui_ip,
'Real IP is {0}, but shall be {1}'.format(
real_eui_ip,
eui_ip))
msg = ('Real IP address is {0} and it is NOT on '
'subnet {1}'.format(real_dhcp_ip, subnet_dhcp['cidr']))
self.assertIn(netaddr.IPAddress(real_dhcp_ip),
netaddr.IPNetwork(subnet_dhcp['cidr']), msg)
@test.idempotent_id('4ab211a0-276f-4552-9070-51e27f58fecf')
def test_dhcp_stateful(self):
# NOTE: With all options below, DHCPv6 shall allocate address from
# subnet pool to port.
for ra_mode, add_mode in (
('dhcpv6-stateful', 'dhcpv6-stateful'),
('dhcpv6-stateful', None),
(None, 'dhcpv6-stateful'),
):
kwargs = {'ipv6_ra_mode': ra_mode,
'ipv6_address_mode': add_mode}
kwargs = dict((k, v) for k, v in six.iteritems(kwargs) if v)
subnet = self.create_subnet(self.network, **kwargs)
port = self.create_port(self.network)
port_ip = next(iter(port['fixed_ips']), None)['ip_address']
self._clean_network()
msg = ('Real IP address is {0} and it is NOT on '
'subnet {1}'.format(port_ip, subnet['cidr']))
self.assertIn(netaddr.IPAddress(port_ip),
netaddr.IPNetwork(subnet['cidr']), msg)
@test.idempotent_id('51a5e97f-f02e-4e4e-9a17-a69811d300e3')
def test_dhcp_stateful_fixedips(self):
# NOTE: With all options below, port shall be able to get
# requested IP from fixed IP range not depending on
# DHCP stateful (not SLAAC!) settings configured.
for ra_mode, add_mode in (
('dhcpv6-stateful', 'dhcpv6-stateful'),
('dhcpv6-stateful', None),
(None, 'dhcpv6-stateful'),
):
kwargs = {'ipv6_ra_mode': ra_mode,
'ipv6_address_mode': add_mode}
kwargs = dict((k, v) for k, v in six.iteritems(kwargs) if v)
subnet = self.create_subnet(self.network, **kwargs)
ip_range = netaddr.IPRange(subnet["allocation_pools"][0]["start"],
subnet["allocation_pools"][0]["end"])
ip = netaddr.IPAddress(random.randrange(ip_range.first,
ip_range.last)).format()
port = self.create_port(self.network,
fixed_ips=[{'subnet_id': subnet['id'],
'ip_address': ip}])
port_ip = next(iter(port['fixed_ips']), None)['ip_address']
self._clean_network()
self.assertEqual(port_ip, ip,
("Port IP %s is not as fixed IP from "
"port create request: %s") % (
port_ip, ip))
@test.idempotent_id('98244d88-d990-4570-91d4-6b25d70d08af')
def test_dhcp_stateful_fixedips_outrange(self):
# NOTE: When port gets IP address from fixed IP range it
# shall be checked if it's from subnets range.
kwargs = {'ipv6_ra_mode': 'dhcpv6-stateful',
'ipv6_address_mode': 'dhcpv6-stateful'}
subnet = self.create_subnet(self.network, **kwargs)
ip_range = netaddr.IPRange(subnet["allocation_pools"][0]["start"],
subnet["allocation_pools"][0]["end"])
ip = netaddr.IPAddress(random.randrange(
ip_range.last + 1, ip_range.last + 10)).format()
self.assertRaises(lib_exc.BadRequest,
self.create_port,
self.network,
fixed_ips=[{'subnet_id': subnet['id'],
'ip_address': ip}])
@test.idempotent_id('57b8302b-cba9-4fbb-8835-9168df029051')
def test_dhcp_stateful_fixedips_duplicate(self):
# NOTE: When port gets IP address from fixed IP range it
# shall be checked if it's not duplicate.
kwargs = {'ipv6_ra_mode': 'dhcpv6-stateful',
'ipv6_address_mode': 'dhcpv6-stateful'}
subnet = self.create_subnet(self.network, **kwargs)
ip_range = netaddr.IPRange(subnet["allocation_pools"][0]["start"],
subnet["allocation_pools"][0]["end"])
ip = netaddr.IPAddress(random.randrange(
ip_range.first, ip_range.last)).format()
self.create_port(self.network,
fixed_ips=[
{'subnet_id': subnet['id'],
'ip_address': ip}])
self.assertRaisesRegexp(lib_exc.Conflict,
"object with that identifier already exists",
self.create_port,
self.network,
fixed_ips=[{'subnet_id': subnet['id'],
'ip_address': ip}])
def _create_subnet_router(self, kwargs):
subnet = self.create_subnet(self.network, **kwargs)
router = self.create_router(
router_name=data_utils.rand_name("routerv6-"),
admin_state_up=True)
port = self.create_router_interface(router['id'],
subnet['id'])
body = self.ports_client.show_port(port['port_id'])
return subnet, body['port']
@test.idempotent_id('e98f65db-68f4-4330-9fea-abd8c5192d4d')
def test_dhcp_stateful_router(self):
# NOTE: With all options below the router interface shall
# receive DHCPv6 IP address from allocation pool.
for ra_mode, add_mode in (
('dhcpv6-stateful', 'dhcpv6-stateful'),
('dhcpv6-stateful', None),
):
kwargs = {'ipv6_ra_mode': ra_mode,
'ipv6_address_mode': add_mode}
kwargs = dict((k, v) for k, v in six.iteritems(kwargs) if v)
subnet, port = self._create_subnet_router(kwargs)
port_ip = next(iter(port['fixed_ips']), None)['ip_address']
self._clean_network()
self.assertEqual(port_ip, subnet['gateway_ip'],
("Port IP %s is not as first IP from "
"subnets allocation pool: %s") % (
port_ip, subnet['gateway_ip']))
def tearDown(self):
self._clean_network()
super(NetworksTestDHCPv6, self).tearDown()
|
nuagenetworks/tempest
|
tempest/api/network/test_dhcp_ipv6.py
|
Python
|
apache-2.0
| 18,551
|
"""
WSGI config for server_proj project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import site
site.addsitedir('/path/to/spacescout_builds/server_proj/lib/python2.6/site-packages')
site.addsitedir('/path/to/spacescout_builds/server_proj')
#os.environ.setdefault("DJANGO_SETTINGS_MODULE", "server_proj.settings")
os.environ["DJANGO_SETTINGS_MODULE"] = "server_proj.settings"
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
uw-it-aca/scout-vagrant
|
provisioning/templates/sample.wsgi.py
|
Python
|
apache-2.0
| 1,364
|
#!/usr/bin/env python
#
# Copyright 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testlib
import logging
import splunklib.client as client
class Tests(testlib.SDKTestCase):
def setUp(self):
self.service = client.connect(**self.opts.kwargs)
self.storage_passwords = self.service.storage_passwords
def tearDown(self):
# Delete all passwords created by SDK tests
for sp in self.storage_passwords:
if "delete-me" in sp.username or "delete-me" in sp.realm:
sp.delete()
def test_create(self):
start_count = len(self.storage_passwords)
realm = testlib.tmpname()
username = testlib.tmpname()
p = self.storage_passwords.create("changeme", username, realm)
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, realm)
self.assertEqual(p.username, username)
self.assertEqual(p.clear_password, "changeme")
self.assertEqual(p.name, realm + ":" + username + ":")
p.delete()
self.assertEqual(start_count, len(self.storage_passwords))
def test_create_with_backslashes(self):
start_count = len(self.storage_passwords)
realm = "\\" + testlib.tmpname()
username = "\\" + testlib.tmpname()
# Prepends one escaped slash
p = self.storage_passwords.create("changeme", username, realm)
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, realm)
# Prepends one escaped slash
self.assertEqual(p.username, username)
self.assertEqual(p.clear_password, "changeme")
# Checks for 2 escaped slashes (Splunk encodes the single slash)
self.assertEqual(p.name, "\\" + realm + ":\\" + username + ":")
p.delete()
self.assertEqual(start_count, len(self.storage_passwords))
def test_create_with_slashes(self):
start_count = len(self.storage_passwords)
realm = "/" + testlib.tmpname()
username = "/" + testlib.tmpname()
        # Realm and username each have a forward slash prepended (no escaping needed)
p = self.storage_passwords.create("changeme", username, realm)
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, realm)
        # Forward slash is preserved as-is in the username
self.assertEqual(p.username, username)
self.assertEqual(p.clear_password, "changeme")
        # Forward slashes are not escaped in the entity name
self.assertEqual(p.name, realm + ":" + username + ":")
p.delete()
self.assertEqual(start_count, len(self.storage_passwords))
def test_create_norealm(self):
start_count = len(self.storage_passwords)
username = testlib.tmpname()
p = self.storage_passwords.create("changeme", username)
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, None)
self.assertEqual(p.username, username)
self.assertEqual(p.clear_password, "changeme")
self.assertEqual(p.name, ":" + username + ":")
p.delete()
self.assertEqual(start_count, len(self.storage_passwords))
def test_create_with_colons(self):
start_count = len(self.storage_passwords)
username = testlib.tmpname()
realm = testlib.tmpname()
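        # Colons inside username/realm are escaped with backslashes in the entity
        # name, since the name itself uses colons as the realm:username: delimiter.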
p = self.storage_passwords.create("changeme", username + ":end",
":start" + realm)
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, ":start" + realm)
self.assertEqual(p.username, username + ":end")
self.assertEqual(p.clear_password, "changeme")
self.assertEqual(p.name,
"\\:start" + realm + ":" + username + "\\:end:")
p.delete()
self.assertEqual(start_count, len(self.storage_passwords))
prefix = testlib.tmpname()
realm = prefix + ":r:e:a:l:m:"
user = ":u:s:e:r:"
p = self.storage_passwords.create("changeme", user, realm)
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, realm)
self.assertEqual(p.username, user)
self.assertEqual(p.clear_password, "changeme")
self.assertEqual(p.name,
prefix + "\\:r\\:e\\:a\\:l\\:m\\::\\:u\\:s\\:e\\:r\\::")
p.delete()
self.assertEqual(start_count, len(self.storage_passwords))
def test_create_crazy(self):
start_count = len(self.storage_passwords)
username = testlib.tmpname()
realm = testlib.tmpname()
p = self.storage_passwords.create("changeme",
username + ":end!@#$%^&*()_+{}:|<>?",
":start::!@#$%^&*()_+{}:|<>?" + realm)
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, ":start::!@#$%^&*()_+{}:|<>?" + realm)
self.assertEqual(p.username, username + ":end!@#$%^&*()_+{}:|<>?")
self.assertEqual(p.clear_password, "changeme")
self.assertEqual(p.name,
"\\:start\\:\\:!@#$%^&*()_+{}\\:|<>?" + realm + ":" + username + "\\:end!@#$%^&*()_+{}\\:|<>?:")
p.delete()
self.assertEqual(start_count, len(self.storage_passwords))
def test_read(self):
start_count = len(self.storage_passwords)
username = testlib.tmpname()
p = self.storage_passwords.create("changeme", username)
self.assertEqual(start_count + 1, len(self.storage_passwords))
for sp in self.storage_passwords:
self.assertTrue(p.name in self.storage_passwords)
# Name works with or without a trailing colon
self.assertTrue((":" + username + ":") in self.storage_passwords)
self.assertTrue((":" + username) in self.storage_passwords)
p.delete()
self.assertEqual(start_count, len(self.storage_passwords))
def test_update(self):
start_count = len(self.storage_passwords)
realm = testlib.tmpname()
username = testlib.tmpname()
p = self.storage_passwords.create("changeme", username, realm)
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, realm)
self.assertEqual(p.username, username)
self.assertEqual(p.clear_password, "changeme")
self.assertEqual(p.name, realm + ":" + username + ":")
p.update(password="Splunkeroo!")
self.assertEqual(p.clear_password, "changeme")
p.refresh()
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, realm)
self.assertEqual(p.username, username)
self.assertEqual(p.clear_password, "Splunkeroo!")
self.assertEqual(p.name, realm + ":" + username + ":")
p.delete()
self.assertEqual(start_count, len(self.storage_passwords))
def test_delete(self):
start_count = len(self.storage_passwords)
username = testlib.tmpname()
p = self.storage_passwords.create("changeme", username, "myrealm")
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.assertEqual(p.realm, "myrealm")
self.assertEqual(p.username, username)
self.assertEqual(p.clear_password, "changeme")
self.assertEqual(p.name, "myrealm:" + username + ":")
self.storage_passwords.delete(username, "myrealm")
self.assertEqual(start_count, len(self.storage_passwords))
self.storage_passwords.create("changeme", username, "myrealm")
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.storage_passwords.delete("myrealm:" + username + ":")
self.assertEqual(start_count, len(self.storage_passwords))
# Test named parameters
self.storage_passwords.create(password="changeme", username=username,
realm="myrealm")
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.storage_passwords.delete(username, "myrealm")
self.assertEqual(start_count, len(self.storage_passwords))
self.storage_passwords.create(password="changeme", username=username + "/foo",
realm="/myrealm")
self.assertEqual(start_count + 1, len(self.storage_passwords))
self.storage_passwords.delete(username + "/foo", "/myrealm")
self.assertEqual(start_count, len(self.storage_passwords))
if __name__ == "__main__":
try:
import unittest2 as unittest
except ImportError:
import unittest
unittest.main()
|
sullivanmatt/splunk-sdk-python
|
tests/test_storage_passwords.py
|
Python
|
apache-2.0
| 9,282
|
# Copyright 2016 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import unittest
import mock
from openhtf import plugs
from openhtf.core import base_plugs
from openhtf.core import monitors
from six.moves import queue
class EmptyPlug(base_plugs.BasePlug):
pass
class TestMonitors(unittest.TestCase):
def setUp(self):
super(TestMonitors, self).setUp()
self.test_state = mock.MagicMock(execution_uid='01234567890')
def provide_plugs(plug_map):
return {name: cls() for name, cls in plug_map}
self.test_state.plug_manager.provide_plugs = provide_plugs
def test_basics(self):
# Use a queue to ensure that we got at least 1 complete response. An Event
# would cause a race condition, so we'd need 2 Events, so a Queue is easier.
q = queue.Queue()
def monitor_func(test):
del test # Unused.
q.put(1)
return 1
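    # The monitored phase polls monitor_func every poll_interval_ms and records
    # its return value into the 'meas' measurement until the phase finishes.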
@monitors.monitors('meas', monitor_func, poll_interval_ms=100)
def phase(test):
del test # Unused.
while q.qsize() < 2:
time.sleep(0.1)
phase(self.test_state)
name, first_meas, _ = self.test_state.mock_calls[0]
# For some reason, self.test_state.test_api differs between what monitors.py
# gets and what the monitor-phase/monitored-phase get in 1/100 runs. As a
# result, we have to use test_state.mock_calls directly and just assert the
# name is correct.
assert name == 'test_api.measurements.meas.__setitem__'
# Measurement time is at the end of the monitor func, which can take
# upwards of 100 milliseconds depending on how busy the infrastructure is,
# so we only check that it's less than a second.
self.assertLessEqual(
first_meas[0], 100, msg='At time 0, there should be a call made.')
self.assertEqual(
1, first_meas[1], msg="And it should be the monitor func's return val")
def testPlugs(self):
q = queue.Queue()
@plugs.plug(empty=EmptyPlug)
def monitor(test, empty):
del test # Unused.
del empty # Unused.
q.put(2)
return 2
@monitors.monitors('meas', monitor, poll_interval_ms=100)
def phase(test):
del test # Unused.
while q.qsize() < 2:
time.sleep(0.1)
phase(self.test_state)
name, first_meas, _ = self.test_state.mock_calls[0]
assert name == 'test_api.measurements.meas.__setitem__'
# Measurement time is at the end of the monitor func, which can take
# upwards of 100 milliseconds depending on how busy the infrastructure is,
# so we only check that it's less than a second.
self.assertLessEqual(
first_meas[0], 100, msg='At time 0, there should be a call made.')
self.assertEqual(
2, first_meas[1], msg="And it should be the monitor func's return val")
|
google/openhtf
|
test/core/monitors_test.py
|
Python
|
apache-2.0
| 3,303
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# module builder script
#
import os, sys, shutil, tempfile, subprocess, platform
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
support_dir = os.path.join(template_dir, 'support')
sdk_dir = os.path.dirname(template_dir)
android_support_dir = os.path.join(sdk_dir, 'android')
sys.path.extend([sdk_dir, support_dir, android_support_dir])
from androidsdk import AndroidSDK
from manifest import Manifest
import traceback, uuid, time, thread, string, markdown
from os.path import join, splitext, split, exists
def run_pipe(args, cwd=None):
return subprocess.Popen(args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, cwd=cwd)
def print_emulator_line(line):
if line:
s = line.strip()
if s!='':
if s.startswith("["):
print s
else:
print "[DEBUG] %s" % s
sys.stdout.flush()
def run_python(args, cwd=None):
args.insert(0, sys.executable)
return run(args, cwd=cwd)
def run(args, cwd=None):
proc = run_pipe(args, cwd)
rc = None
while True:
print_emulator_line(proc.stdout.readline())
rc = proc.poll()
if rc!=None: break
return rc
def run_ant(project_dir):
build_xml = os.path.join(project_dir, 'build.xml')
ant = 'ant'
if 'ANT_HOME' in os.environ:
ant = os.path.join(os.environ['ANT_HOME'], 'bin', 'ant')
if platform.system() == 'Windows':
ant += '.bat'
ant_args = [ant, '-f', build_xml]
if platform.system() == 'Windows':
ant_args = ['cmd.exe', '/C'] + ant_args
else:
# wrap with /bin/sh in Unix, in some cases the script itself isn't executable
ant_args = ['/bin/sh'] + ant_args
run(ant_args, cwd=project_dir)
ignoreFiles = ['.gitignore', '.cvsignore', '.DS_Store'];
ignoreDirs = ['.git','.svn','_svn','CVS'];
android_sdk = None
def copy_resources(source, target):
if not os.path.exists(os.path.expanduser(target)):
os.makedirs(os.path.expanduser(target))
for root, dirs, files in os.walk(source):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles:
continue
from_ = os.path.join(root, file)
to_ = os.path.expanduser(from_.replace(source, target, 1))
to_directory = os.path.expanduser(split(to_)[0])
if not exists(to_directory):
os.makedirs(to_directory)
shutil.copyfile(from_, to_)
def is_ios(platform):
return platform == 'iphone' or platform == 'ipad' or platform == 'ios'
def is_android(platform):
return platform == 'android'
def stage(platform, project_dir, manifest, callback):
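	# Stage a throwaway project in a temp dir: generate it with project.py, copy in
	# the module's example app, patch tiapp.xml to reference the module, build the
	# module zip, then hand the generated project directory to the callback.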
dont_delete = True
dir = tempfile.mkdtemp('ti','m')
print '[DEBUG] Staging module project at %s' % dir
try:
name = manifest.name
moduleid = manifest.moduleid
version = manifest.version
script = os.path.join(template_dir,'..','project.py')
# create a temporary proj
create_project_args = [script, name, moduleid, dir, platform]
if is_android(platform):
create_project_args.append(android_sdk.get_android_sdk())
run_python(create_project_args)
gen_project_dir = os.path.join(dir, name)
gen_resources_dir = os.path.join(gen_project_dir, 'Resources')
# copy in our example source
copy_resources(os.path.join(project_dir,'example'), gen_resources_dir)
# patch in our tiapp.xml
tiapp = os.path.join(gen_project_dir, 'tiapp.xml')
xml = open(tiapp).read()
tiappf = open(tiapp,'w')
xml = xml.replace('<guid/>','<guid></guid>')
xml = xml.replace('</guid>','</guid>\n<modules>\n<module version="%s">%s</module>\n</modules>\n' % (version,moduleid))
# generate a guid since this is currently done by developer
guid = str(uuid.uuid4())
xml = xml.replace('<guid></guid>','<guid>%s</guid>' % guid)
tiappf.write(xml)
tiappf.close()
module_dir = os.path.join(gen_project_dir,'modules',platform)
if not os.path.exists(module_dir):
os.makedirs(module_dir)
module_zip_name = '%s-%s-%s.zip' % (moduleid.lower(), platform, version)
module_zip = os.path.join(project_dir, 'dist', module_zip_name)
if is_ios(platform):
module_zip = os.path.join(project_dir, module_zip_name)
script = os.path.join(project_dir,'build.py')
run_python([script])
elif is_android(platform):
run_ant(project_dir)
shutil.copy(module_zip, gen_project_dir)
callback(gen_project_dir)
except:
dont_delete = True
traceback.print_exc(file=sys.stderr)
sys.exit(1)
finally:
if not dont_delete: shutil.rmtree(dir)
def docgen(module_dir, dest_dir):
if not os.path.exists(dest_dir):
print "Creating dir: %s" % dest_dir
os.makedirs(dest_dir)
doc_dir = os.path.join(module_dir, 'documentation')
if not os.path.exists(doc_dir):
print "Couldn't find documentation file at: %s" % doc_dir
return
for file in os.listdir(doc_dir):
if file in ignoreFiles or os.path.isdir(os.path.join(doc_dir, file)):
continue
md = open(os.path.join(doc_dir, file), 'r').read()
html = markdown.markdown(md)
filename = string.replace(file, '.md', '.html')
filepath = os.path.join(dest_dir, filename)
print 'Generating %s' % filepath
open(filepath, 'w+').write(html)
# a simplified .properties file parser
def read_properties(file):
properties = {}
for line in file.read().splitlines():
line = line.strip()
if len(line) > 0 and line[0] == '#': continue
if len(line) == 0 or '=' not in line: continue
key, value = line.split('=', 1)
properties[key.strip()] = value.strip().replace('\\\\', '\\')
return properties
def main(args):
global android_sdk
# command platform project_dir
command = args[1]
platform = args[2]
project_dir = os.path.expanduser(args[3])
manifest = Manifest(os.path.join(project_dir, 'manifest'))
error = False
if is_android(platform):
build_properties = read_properties(open(os.path.join(project_dir, 'build.properties')))
android_sdk_path = os.path.dirname(os.path.dirname(build_properties['android.platform']))
android_sdk = AndroidSDK(android_sdk_path)
if command == 'run':
def run_callback(gen_project_dir):
script = os.path.abspath(os.path.join(template_dir,'..',platform,'builder.py'))
script_args = [script, 'run', gen_project_dir]
if is_android(platform):
script_args.append(android_sdk.get_android_sdk())
rc = run_python(script_args)
# run the project
if rc==1:
if is_ios(platform):
error = os.path.join(gen_project_dir,'build','iphone','build','build.log')
print "[ERROR] Build Failed. See: %s" % os.path.abspath(error)
else:
print "[ERROR] Build Failed."
stage(platform, project_dir, manifest, run_callback)
elif command == 'run-emulator':
if is_android(platform):
def run_emulator_callback(gen_project_dir):
script = os.path.abspath(os.path.join(template_dir, '..', platform, 'builder.py'))
run_python([script, 'run-emulator', gen_project_dir, android_sdk.get_android_sdk()])
stage(platform, project_dir, manifest, run_emulator_callback)
elif command == 'docgen':
if is_android(platform):
dest_dir = args[4]
docgen(project_dir, dest_dir)
if error:
sys.exit(1)
else:
sys.exit(0)
if __name__ == "__main__":
main(sys.argv)
|
arnaudsj/titanium_mobile
|
support/module/builder.py
|
Python
|
apache-2.0
| 7,110
|
"""
Asciimatics is a package to help people create full-screen text UIs (from interactive forms to
ASCII animations) on any platform. It is licensed under the Apache Software Foundation License 2.0.
"""
__author__ = 'Peter Brittain'
try:
from .version import version
except ImportError:
# Someone is running straight from the GIT repo - dummy out the version
version = "0.0.0"
__version__ = version
|
peterbrittain/asciimatics
|
asciimatics/__init__.py
|
Python
|
apache-2.0
| 414
|
#
# Copyright 2014 Infoxchange Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Helpers related to maintaining a registry of classes
"""
class Registry(dict):
"""
A registry class, used for registering services, drivers, etc.
This is not the registry itself. The registry itself is in
forklift.services, forklift.drivers, etc.
"""
def __call__(self, name):
"""
Use registry as a decorator to register Forklift services
"""
def inner(cls):
"""
Decorator
"""
self[name] = cls
return cls
return inner
|
infoxchange/docker-forklift
|
forklift/registry.py
|
Python
|
apache-2.0
| 1,142
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
# python documentation says xml parsing is insecure, but this should be safe usage because we're
# just running it on code in our repositories, not on externally acquired data.
from xml.dom.minidom import parse
from pants.backend.codegen.targets.jaxb_library import JaxbLibrary
from pants.backend.codegen.tasks.code_gen import CodeGen
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.address import SyntheticAddress
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.java.distribution.distribution import Distribution
from pants.util.dirutil import safe_mkdir
class JaxbGen(CodeGen, NailgunTask):
"""Generates java source files from jaxb schema (.xsd)."""
_CONFIG_SECTION = 'jaxb-gen'
def __init__(self, *args, **kwargs):
"""
:param context: inherited parameter from Task
:param workdir: inherited parameter from Task
"""
super(JaxbGen, self).__init__(*args, **kwargs)
self.gen_langs = set()
lang = 'java'
if self.context.products.isrequired(lang):
self.gen_langs.add(lang)
self.jar_location = os.path.join(Distribution.cached().home, '..', 'lib', 'tools.jar')
@property
def config_section(self):
return self._CONFIG_SECTION
def _compile_schema(self, args):
classpath = [self.jar_location]
java_main = 'com.sun.tools.internal.xjc.Driver'
return self.runjava(classpath=classpath, main=java_main, args=args, workunit_name='xjc')
def is_forced(self, lang):
return lang in self.gen_langs
def is_gentarget(self, target):
return isinstance(target, JaxbLibrary)
def prepare_gen(self, target):
pass
def genlang(self, lang, targets):
if lang != 'java':
raise TaskError('Unrecognized jaxb language: %s' % lang)
output_dir = os.path.join(self.workdir, 'gen-java')
safe_mkdir(output_dir)
cache = []
for target in targets:
if not isinstance(target, JaxbLibrary):
raise TaskError('Invalid target type "{class_type}" (expected JaxbLibrary)'
.format(class_type=type(target).__name__))
target_files = []
for source in target.sources_relative_to_buildroot():
path_to_xsd = source
output_package = target.package
if output_package is None:
output_package = self._guess_package(source)
output_package = self._correct_package(output_package)
output_directory = output_dir
safe_mkdir(output_directory)
args = ['-p', output_package, '-d', output_directory, path_to_xsd]
result = self._compile_schema(args)
if result != 0:
raise TaskError('xjc ... exited non-zero ({code})'.format(code=result))
target_files.append(self._sources_to_be_generated(target.package, path_to_xsd))
cache.append((target, target_files))
return cache
def genlangs(self):
return {'java': lambda t: t.is_jvm}
def createtarget(self, lang, gentarget, dependees):
predicates = self.genlangs()
languages = predicates.keys()
if not (lang in languages) or not (predicates[lang](gentarget)):
raise TaskError('Invalid language "{lang}" for task {task}'
.format(lang=lang, task=type(self).__name__))
to_generate = []
for source in gentarget.sources_relative_to_buildroot():
to_generate.extend(self._sources_to_be_generated(gentarget.package, source))
spec_path = os.path.join(os.path.relpath(self.workdir, get_buildroot()), 'gen-java')
address = SyntheticAddress(spec_path=spec_path, target_name=gentarget.id)
target = self.context.add_new_target(
address,
JavaLibrary,
derived_from=gentarget,
sources=to_generate,
provides=gentarget.provides,
dependencies=[],
excludes=gentarget.payload.excludes
)
for dependee in dependees:
dependee.inject_dependency(target.address)
return target
@classmethod
def _guess_package(self, path):
"""Used in genlang to actually invoke the compiler with the proper arguments, and in
createtarget (via _sources_to_be_generated) to declare what the generated files will be.
"""
package = ''
slash = path.rfind(os.path.sep)
com = path.rfind(os.path.join('', 'com', ''))
if com < 0 and path.find(os.path.join('com', '')) == 0:
package = path[:slash]
elif com >= 0:
package = path[com:slash]
package = package.replace(os.path.sep, ' ')
package = package.strip().replace(' ', '.')
return package
@classmethod
def _correct_package(self, package):
package = package.replace('/', '.')
package = re.sub(r'^\.+', '', package)
package = re.sub(r'\.+$', '', package)
if re.search(r'\.{2,}', package) is not None:
raise ValueError('Package name cannot have consecutive periods! (%s)' % package)
return package
@classmethod
def _sources_to_be_generated(self, package, path):
"""This method (or some variation of it) seems to be common amongst all implementations of
code-generating tasks.
As far as I can tell, its purpose is to peek into the relevant schema files and figure out what
the final output files will be. This is typically implemented with a variety of hacks,
accompanied by TODO's saying to do it properly in the future (see apache_thrift_gen.py and
protobuf_gen.py). The implementation in this file does it 'properly' using python's xml parser,
though I am making some assumptions about how .xsd's are supposed to be formatted, as that is
not a subject I am particularly informed about.
"""
doc = parse(path)
if package is None:
package = self._guess_package(path)
package = self._correct_package(package)
names = []
for root in doc.childNodes:
if re.match('.*?:schema$', root.nodeName, re.I) is not None:
for element in root.childNodes:
if element.nodeName != '#text' and element.attributes.has_key('name'):
name = element.attributes['name'].nodeValue
if len(name) == 0: continue
# enforce pascal-case class names
name = name[0:1].upper() + name[1:]
names.append(name)
names.append('ObjectFactory')
outdir = package.replace('.', '/')
return [os.path.join(outdir, '%s.java' % name) for name in names]
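# A minimal illustrative sketch of the package helpers above (they are classmethods, so no task
# instance is needed); the .xsd path below is hypothetical, and running this directly assumes
# pants and its dependencies are importable.
if __name__ == '__main__':
  _example_xsd = 'src/main/resources/com/example/schema/foo.xsd'
  # _guess_package keeps the directory segment starting at the last 'com' component, and
  # _correct_package normalises it to dotted form; expected output (POSIX separators):
  # com.example.schema
  print(JaxbGen._correct_package(JaxbGen._guess_package(_example_xsd)))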
|
tejal29/pants
|
src/python/pants/backend/codegen/tasks/jaxb_gen.py
|
Python
|
apache-2.0
| 6,748
|
import pytest
def test_delete_group(app):
    app.session.login(username="admin", password="secret")
    app.group.delete_first_group()
    app.session.logout()
if __name__ == '__main__':
    pytest.main(['test_del_group.py'])
|
Alex-Chizhov/python_training
|
home_work_6/test/test_del_group.py
|
Python
|
apache-2.0
| 201
|
import getpass
import json
import getopt
from genericpath import isfile
from os.path import sep
from pingdumb.main_module import url_type
def read_config():
f_path = "." + sep + "pingdumb.json"
if not isfile(f_path):
f = open(f_path, 'w')
conf = {
"url": "jellyms.kr",
"smtpServer": "smtp.gmail.com:587",
"smtpUser": "",
"toEmail": "",
"interval": 300,
}
f.write(json.dumps(conf))
f.close()
return conf
else:
f = open(f_path, 'r+b')
conf = json.loads(f.read().decode('utf-8'))
f.close()
return conf
def write_config(conf):
if 'smtpPw' in conf:
del conf['smtpPw']
f_path = "." + sep + "pingdumb.json"
f = open(f_path, 'w')
f.truncate()
f.write(json.dumps(conf))
f.close()
def input_conf(message, default):
value = input(message)
if not value:
return default
return value
def set_config():
configure = read_config()
url_for_test = input_conf(
"URL to test? (" + configure["url"] + ")", configure["url"]
)
url_for_test = url_type(url_for_test)
recv_mail = input_conf(
"Receive mail? (" + configure["toEmail"] + ")",
configure["toEmail"]
)
s_server = input_conf(
"SMTP server? (" + configure["smtpServer"] + ")",
configure["smtpServer"]
)
s_user = input_conf(
"SMTP Server username? (" + configure["smtpUser"] + ")",
configure["smtpUser"]
)
s_pw = getpass.getpass("SMTP Server password?", "")
interval = input_conf(
"interval of seconds? (" + str(configure["interval"]) + ")",
configure["interval"]
)
interval = int(interval)
configure["url"] = url_for_test
configure["toEmail"] = recv_mail
configure["smtpServer"] = s_server
configure["smtpUser"] = s_user
configure["smtpPw"] = s_pw
configure["interval"] = interval
return configure
def configure_to_tuple():
configure = read_config()
return configure["url"], configure["smtpServer"], \
configure["smtpUser"], configure["toEmail"], configure["interval"]
def extract_password_with_argv(argv):
opts, args = getopt.getopt(argv, 'p')
for o, a in opts:
if o == "-p":
return getpass.getpass("SMTP Server password", "")
|
kyunooh/pingdumb
|
pingdumb/conf.py
|
Python
|
apache-2.0
| 2,422
|
"""Cache util functions for ReSDKTables."""
import os
import pickle
import sys
from shutil import rmtree
from typing import Any
from resdk.__about__ import __version__
def _default_cache_dir() -> str:
"""Return default cache directory specific for the current OS.
Code originally from Orange3.misc.environ.
"""
if sys.platform == "darwin":
base = os.path.expanduser("~/Library/Caches")
elif sys.platform == "win32":
base = os.getenv("APPDATA", os.path.expanduser("~/AppData/Local"))
elif os.name == "posix":
base = os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
else:
base = os.path.expanduser("~/.cache")
return base
def cache_dir_resdk_base() -> str:
"""Return base ReSDK cache directory."""
return os.path.join(_default_cache_dir(), "ReSDK")
def cache_dir_resdk() -> str:
"""Return ReSDK cache directory."""
v = __version__
if "dev" in v:
# remove git commit hash
v = v[: v.find("dev") + 3]
base = os.path.join(cache_dir_resdk_base(), v)
if sys.platform == "win32":
# On Windows cache and data dir are the same.
# Microsoft suggest using a Cache subdirectory
return os.path.join(base, "Cache")
else:
return base
def clear_cache_dir_resdk() -> None:
"""Delete all cache files from the default cache directory."""
cache_dir = cache_dir_resdk_base()
if os.path.exists(cache_dir):
rmtree(cache_dir)
def load_pickle(pickle_file: str) -> Any:
"""Load object from the pickle file.
:param pickle_file: file path
:return: un-pickled object
"""
if os.path.exists(pickle_file):
with open(pickle_file, "rb") as handle:
return pickle.load(handle)
def save_pickle(obj: Any, pickle_file: str, override=False) -> None:
"""Save given object into a pickle file.
    :param obj: object to be pickled
    :param pickle_file: file path
    :param override: if True, override an existing file
:return:
"""
if not os.path.exists(pickle_file) or override:
with open(pickle_file, "wb") as handle:
pickle.dump(obj, handle, protocol=pickle.HIGHEST_PROTOCOL)
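# A minimal round-trip sketch, assuming write access to the ReSDK cache directory; the file
# name "example.pickle" is purely illustrative.
if __name__ == "__main__":
    _cache_dir = cache_dir_resdk()
    os.makedirs(_cache_dir, exist_ok=True)
    _target = os.path.join(_cache_dir, "example.pickle")
    save_pickle({"answer": 42}, _target, override=True)
    print(load_pickle(_target))  # -> {'answer': 42}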
|
genialis/resolwe-bio-py
|
src/resdk/utils/table_cache.py
|
Python
|
apache-2.0
| 2,205
|
import sys
import threading
import time
io_lock = threading.Lock()
blocker = threading.Lock()
def block(i):
t = threading.current_thread()
with io_lock:
print('{} with ident {} going to sleep'.format(
t.name, t.ident))
if i:
blocker.acquire() # acquired but never released
time.sleep(0.2)
with io_lock:
print(t.name, 'finishing')
return
# Create and start several threads that "block"
threads = [
threading.Thread(target=block, args=(i,))
for i in range(3)
]
for t in threads:
    t.daemon = True
t.start()
# Map the threads from their identifier to the thread object
threads_by_ident = dict((t.ident, t) for t in threads)
# Show where each thread is "blocked"
time.sleep(0.01)
with io_lock:
for ident, frame in sys._current_frames().items():
t = threads_by_ident.get(ident)
if not t:
# Main thread
continue
print('{} stopped in {} at line {} of {}'.format(
t.name, frame.f_code.co_name,
frame.f_lineno, frame.f_code.co_filename))
|
jasonwee/asus-rt-n14uhp-mrtg
|
src/lesson_runtime_features/sys_current_frames.py
|
Python
|
apache-2.0
| 1,093
|
# -*- coding: utf-8 -*-
import nose.tools as ns
import os
from os.path import join
from tempfile import gettempdir
from relshell.record import Record
from relshell.recorddef import RecordDef
from relshell.batch import Batch
from shellstreaming.core.batch_queue import BatchQueue
from shellstreaming.ostream.localfile import LocalFile
TEST_FILE = join(gettempdir(), 'shellstreaming_test_localfile.txt')
def teardown():
os.remove(TEST_FILE)
def test_localfile_usage():
# prepare input queue
q = BatchQueue()
for batch in _create_batches():
q.push(batch) # [fix] - Batch's output format has to be customized by user
q.push(None)
# run ostream
ostream = LocalFile(TEST_FILE, output_format='csv', input_queue=q)
ostream.join()
# check contents
with open(TEST_FILE) as f:
ns.eq_(f.read(),
'''"111"
"222"
"333"
'''
)
def _create_batches():
rdef = RecordDef([{'name': 'col0', 'type': 'INT'}])
return (
Batch(rdef, (Record(111), Record(222), )),
Batch(rdef, (Record(333), )),
)
|
laysakura/shellstreaming
|
test/ostream/test_localfile.py
|
Python
|
apache-2.0
| 1,074
|
#!/usr/bin/env python
import imp
import os
import sys
PYCART_DIR = ''.join(['python-', '.'.join(map(str, sys.version_info[:2]))])
try:
zvirtenv = os.path.join(os.environ['OPENSHIFT_HOMEDIR'], PYCART_DIR,
'virtenv', 'bin', 'activate_this.py')
execfile(zvirtenv, dict(__file__ = zvirtenv) )
except IOError:
pass
def run_gevent_server(app, ip, port=8181):
from gevent.pywsgi import WSGIServer
WSGIServer((ip, port), app).serve_forever()
def run_simple_httpd_server(app, ip, port=8181):
from wsgiref.simple_server import make_server
make_server(ip, port, app).serve_forever()
#
# IMPORTANT: Put any additional includes below this line. If placed above this
# line, it's possible required libraries won't be in your searchable path
#
#
# main():
#
if __name__ == '__main__':
ip = os.environ['OPENSHIFT_PYTHON_IP']
port = 8181
zapp = imp.load_source('application', 'wsgi/application')
# Use gevent if we have it, otherwise run a simple httpd server.
print 'Starting WSGIServer on %s:%d ... ' % (ip, port)
try:
run_gevent_server(zapp.application, ip, port)
except:
print 'gevent probably not installed - using default simple server ...'
run_simple_httpd_server(zapp.application, ip, port)
|
getupcloud/openshift-nginx-python-2.7
|
app.py
|
Python
|
apache-2.0
| 1,287
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2013 OpenStack Foundation
# Copyright 2013 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
from os.path import isfile
from os.path import join
import re
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import encodeutils
import sqlalchemy
from sqlalchemy import and_
from sqlalchemy.schema import MetaData
from sqlalchemy.sql import select
from glance.common import timeutils
from glance.i18n import _, _LE, _LI, _LW
LOG = logging.getLogger(__name__)
metadata_opts = [
cfg.StrOpt('metadata_source_path',
default='/etc/glance/metadefs/',
help=_("""
Absolute path to the directory where JSON metadefs files are stored.
Glance Metadata Definitions ("metadefs") are served from the database,
but are stored in files in the JSON format. The files in this
directory are used to initialize the metadefs in the database.
Additionally, when metadefs are exported from the database, the files
are written to this directory.
NOTE: If you plan to export metadefs, make sure that this directory
has write permissions set for the user being used to run the
glance-api service.
Possible values:
* String value representing a valid absolute pathname
Related options:
* None
""")),
]
CONF = cfg.CONF
CONF.register_opts(metadata_opts)
def get_metadef_namespaces_table(meta):
return sqlalchemy.Table('metadef_namespaces', meta, autoload=True)
def get_metadef_resource_types_table(meta):
return sqlalchemy.Table('metadef_resource_types', meta, autoload=True)
def get_metadef_namespace_resource_types_table(meta):
return sqlalchemy.Table('metadef_namespace_resource_types', meta,
autoload=True)
def get_metadef_properties_table(meta):
return sqlalchemy.Table('metadef_properties', meta, autoload=True)
def get_metadef_objects_table(meta):
return sqlalchemy.Table('metadef_objects', meta, autoload=True)
def get_metadef_tags_table(meta):
return sqlalchemy.Table('metadef_tags', meta, autoload=True)
def _get_resource_type_id(meta, name):
rt_table = get_metadef_resource_types_table(meta)
resource_type = (
select([rt_table.c.id]).
where(rt_table.c.name == name).
select_from(rt_table).
execute().fetchone())
if resource_type:
return resource_type[0]
return None
def _get_resource_type(meta, resource_type_id):
rt_table = get_metadef_resource_types_table(meta)
return (
rt_table.select().
where(rt_table.c.id == resource_type_id).
execute().fetchone())
def _get_namespace_resource_types(meta, namespace_id):
namespace_resource_types_table = (
get_metadef_namespace_resource_types_table(meta))
return (
namespace_resource_types_table.select().
where(namespace_resource_types_table.c.namespace_id == namespace_id).
execute().fetchall())
def _get_namespace_resource_type_by_ids(meta, namespace_id, rt_id):
namespace_resource_types_table = (
get_metadef_namespace_resource_types_table(meta))
return (
namespace_resource_types_table.select().
where(and_(
namespace_resource_types_table.c.namespace_id == namespace_id,
namespace_resource_types_table.c.resource_type_id == rt_id)).
execute().fetchone())
def _get_properties(meta, namespace_id):
properties_table = get_metadef_properties_table(meta)
return (
properties_table.select().
where(properties_table.c.namespace_id == namespace_id).
execute().fetchall())
def _get_objects(meta, namespace_id):
objects_table = get_metadef_objects_table(meta)
return (
objects_table.select().
where(objects_table.c.namespace_id == namespace_id).
execute().fetchall())
def _get_tags(meta, namespace_id):
tags_table = get_metadef_tags_table(meta)
return (
tags_table.select().
where(tags_table.c.namespace_id == namespace_id).
execute().fetchall())
def _get_resource_id(table, namespace_id, resource_name):
resource = (
select([table.c.id]).
where(and_(table.c.namespace_id == namespace_id,
table.c.name == resource_name)).
select_from(table).
execute().fetchone())
if resource:
return resource[0]
return None
def _clear_metadata(meta):
metadef_tables = [get_metadef_properties_table(meta),
get_metadef_objects_table(meta),
get_metadef_tags_table(meta),
get_metadef_namespace_resource_types_table(meta),
get_metadef_namespaces_table(meta),
get_metadef_resource_types_table(meta)]
for table in metadef_tables:
table.delete().execute()
LOG.info(_LI("Table %s has been cleared"), table)
def _clear_namespace_metadata(meta, namespace_id):
metadef_tables = [get_metadef_properties_table(meta),
get_metadef_objects_table(meta),
get_metadef_tags_table(meta),
get_metadef_namespace_resource_types_table(meta)]
namespaces_table = get_metadef_namespaces_table(meta)
for table in metadef_tables:
table.delete().where(table.c.namespace_id == namespace_id).execute()
namespaces_table.delete().where(
namespaces_table.c.id == namespace_id).execute()
def _populate_metadata(meta, metadata_path=None, merge=False,
prefer_new=False, overwrite=False):
if not metadata_path:
metadata_path = CONF.metadata_source_path
try:
if isfile(metadata_path):
json_schema_files = [metadata_path]
else:
json_schema_files = [f for f in os.listdir(metadata_path)
if isfile(join(metadata_path, f))
and f.endswith('.json')]
except OSError as e:
LOG.error(encodeutils.exception_to_unicode(e))
return
if not json_schema_files:
LOG.error(_LE("Json schema files not found in %s. Aborting."),
metadata_path)
return
namespaces_table = get_metadef_namespaces_table(meta)
namespace_rt_table = get_metadef_namespace_resource_types_table(meta)
objects_table = get_metadef_objects_table(meta)
tags_table = get_metadef_tags_table(meta)
properties_table = get_metadef_properties_table(meta)
resource_types_table = get_metadef_resource_types_table(meta)
for json_schema_file in json_schema_files:
try:
file = join(metadata_path, json_schema_file)
with open(file) as json_file:
metadata = json.load(json_file)
except Exception as e:
LOG.error(_LE("Failed to parse json file %(file_path)s while "
"populating metadata due to: %(error_msg)s"),
{"file_path": file,
"error_msg": encodeutils.exception_to_unicode(e)})
continue
values = {
'namespace': metadata.get('namespace'),
'display_name': metadata.get('display_name'),
'description': metadata.get('description'),
'visibility': metadata.get('visibility'),
'protected': metadata.get('protected'),
'owner': metadata.get('owner', 'admin')
}
db_namespace = select(
[namespaces_table.c.id]
).where(
namespaces_table.c.namespace == values['namespace']
).select_from(
namespaces_table
).execute().fetchone()
if db_namespace and overwrite:
LOG.info(_LI("Overwriting namespace %s"), values['namespace'])
_clear_namespace_metadata(meta, db_namespace[0])
db_namespace = None
if not db_namespace:
values.update({'created_at': timeutils.utcnow()})
_insert_data_to_db(namespaces_table, values)
db_namespace = select(
[namespaces_table.c.id]
).where(
namespaces_table.c.namespace == values['namespace']
).select_from(
namespaces_table
).execute().fetchone()
elif not merge:
LOG.info(_LI("Skipping namespace %s. It already exists in the "
"database."), values['namespace'])
continue
elif prefer_new:
values.update({'updated_at': timeutils.utcnow()})
_update_data_in_db(namespaces_table, values,
namespaces_table.c.id, db_namespace[0])
namespace_id = db_namespace[0]
for resource_type in metadata.get('resource_type_associations', []):
rt_id = _get_resource_type_id(meta, resource_type['name'])
if not rt_id:
val = {
'name': resource_type['name'],
'created_at': timeutils.utcnow(),
'protected': True
}
_insert_data_to_db(resource_types_table, val)
rt_id = _get_resource_type_id(meta, resource_type['name'])
elif prefer_new:
val = {'updated_at': timeutils.utcnow()}
_update_data_in_db(resource_types_table, val,
resource_types_table.c.id, rt_id)
values = {
'namespace_id': namespace_id,
'resource_type_id': rt_id,
'properties_target': resource_type.get(
'properties_target'),
'prefix': resource_type.get('prefix')
}
namespace_resource_type = _get_namespace_resource_type_by_ids(
meta, namespace_id, rt_id)
if not namespace_resource_type:
values.update({'created_at': timeutils.utcnow()})
_insert_data_to_db(namespace_rt_table, values)
elif prefer_new:
values.update({'updated_at': timeutils.utcnow()})
_update_rt_association(namespace_rt_table, values,
rt_id, namespace_id)
for name, schema in metadata.get('properties', {}).items():
values = {
'name': name,
'namespace_id': namespace_id,
'json_schema': json.dumps(schema)
}
property_id = _get_resource_id(
properties_table, namespace_id, name,
)
if not property_id:
values.update({'created_at': timeutils.utcnow()})
_insert_data_to_db(properties_table, values)
elif prefer_new:
values.update({'updated_at': timeutils.utcnow()})
_update_data_in_db(properties_table, values,
properties_table.c.id, property_id)
for object in metadata.get('objects', []):
values = {
'name': object['name'],
'description': object.get('description'),
'namespace_id': namespace_id,
'json_schema': json.dumps(
object.get('properties'))
}
object_id = _get_resource_id(objects_table, namespace_id,
object['name'])
if not object_id:
values.update({'created_at': timeutils.utcnow()})
_insert_data_to_db(objects_table, values)
elif prefer_new:
values.update({'updated_at': timeutils.utcnow()})
_update_data_in_db(objects_table, values,
objects_table.c.id, object_id)
for tag in metadata.get('tags', []):
values = {
'name': tag.get('name'),
'namespace_id': namespace_id,
}
tag_id = _get_resource_id(tags_table, namespace_id, tag['name'])
if not tag_id:
values.update({'created_at': timeutils.utcnow()})
_insert_data_to_db(tags_table, values)
elif prefer_new:
values.update({'updated_at': timeutils.utcnow()})
_update_data_in_db(tags_table, values,
tags_table.c.id, tag_id)
LOG.info(_LI("File %s loaded to database."), file)
LOG.info(_LI("Metadata loading finished"))
def _insert_data_to_db(table, values, log_exception=True):
try:
table.insert(values=values).execute()
except sqlalchemy.exc.IntegrityError:
if log_exception:
LOG.warning(_LW("Duplicate entry for values: %s"), values)
def _update_data_in_db(table, values, column, value):
try:
(table.update(values=values).
where(column == value).execute())
except sqlalchemy.exc.IntegrityError:
LOG.warning(_LW("Duplicate entry for values: %s"), values)
def _update_rt_association(table, values, rt_id, namespace_id):
try:
(table.update(values=values).
where(and_(table.c.resource_type_id == rt_id,
table.c.namespace_id == namespace_id)).execute())
except sqlalchemy.exc.IntegrityError:
LOG.warning(_LW("Duplicate entry for values: %s"), values)
def _export_data_to_file(meta, path):
if not path:
path = CONF.metadata_source_path
namespace_table = get_metadef_namespaces_table(meta)
namespaces = namespace_table.select().execute().fetchall()
pattern = re.compile(r'[\W_]+', re.UNICODE)
for id, namespace in enumerate(namespaces, start=1):
namespace_id = namespace['id']
namespace_file_name = pattern.sub('', namespace['display_name'])
values = {
'namespace': namespace['namespace'],
'display_name': namespace['display_name'],
'description': namespace['description'],
'visibility': namespace['visibility'],
'protected': namespace['protected'],
'resource_type_associations': [],
'properties': {},
'objects': [],
'tags': []
}
namespace_resource_types = _get_namespace_resource_types(meta,
namespace_id)
db_objects = _get_objects(meta, namespace_id)
db_properties = _get_properties(meta, namespace_id)
db_tags = _get_tags(meta, namespace_id)
resource_types = []
for namespace_resource_type in namespace_resource_types:
resource_type = _get_resource_type(
meta, namespace_resource_type['resource_type_id'])
resource_types.append({
'name': resource_type['name'],
'prefix': namespace_resource_type['prefix'],
'properties_target': namespace_resource_type[
'properties_target']
})
values.update({
'resource_type_associations': resource_types
})
objects = []
for object in db_objects:
objects.append({
"name": object['name'],
"description": object['description'],
"properties": json.loads(object['json_schema'])
})
values.update({
'objects': objects
})
properties = {}
for property in db_properties:
properties.update({
property['name']: json.loads(property['json_schema'])
})
values.update({
'properties': properties
})
tags = []
for tag in db_tags:
tags.append({
"name": tag['name']
})
values.update({
'tags': tags
})
try:
file_name = ''.join([path, namespace_file_name, '.json'])
if isfile(file_name):
LOG.info(_LI("Overwriting: %s"), file_name)
with open(file_name, 'w') as json_file:
json_file.write(json.dumps(values))
except Exception as e:
LOG.exception(encodeutils.exception_to_unicode(e))
LOG.info(_LI("Namespace %(namespace)s saved in %(file)s"), {
'namespace': namespace_file_name, 'file': file_name})
def db_load_metadefs(engine, metadata_path=None, merge=False,
prefer_new=False, overwrite=False):
meta = MetaData()
meta.bind = engine
if not merge and (prefer_new or overwrite):
LOG.error(_LE("To use --prefer_new or --overwrite you need to combine "
"of these options with --merge option."))
return
if prefer_new and overwrite and merge:
LOG.error(_LE("Please provide no more than one option from this list: "
"--prefer_new, --overwrite"))
return
_populate_metadata(meta, metadata_path, merge, prefer_new, overwrite)
def db_unload_metadefs(engine):
meta = MetaData()
meta.bind = engine
_clear_metadata(meta)
def db_export_metadefs(engine, metadata_path=None):
meta = MetaData()
meta.bind = engine
_export_data_to_file(meta, metadata_path)
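# A minimal driver sketch (illustrative only), assuming an existing glance database reachable
# through SQLAlchemy; the connection URI and paths below are placeholders.
if __name__ == '__main__':
    engine = sqlalchemy.create_engine('mysql+pymysql://glance:secret@localhost/glance')
    # Load every *.json metadef file from the source directory into the database.
    db_load_metadefs(engine, metadata_path='/etc/glance/metadefs/', merge=False)
    # Write one <display_name>.json file per namespace back out; the trailing slash matters
    # because the export path is joined by plain string concatenation above.
    db_export_metadefs(engine, metadata_path='/tmp/metadefs-export/')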
|
openstack/glance
|
glance/db/sqlalchemy/metadata.py
|
Python
|
apache-2.0
| 17,923
|
"""
Google Analytics template tags and filters.
"""
from __future__ import absolute_import
import re
from django.template import Library, Node, TemplateSyntaxError
from templatetags.utils import is_internal_ip, disable_html, get_required_setting
SCOPE_VISITOR = 1
SCOPE_SESSION = 2
SCOPE_PAGE = 3
PROPERTY_ID_RE = re.compile(r'^UA-\d+-\d+$')
SETUP_CODE = """
<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', '%(property_id)s']);
_gaq.push(['_trackPageview']);
%(commands)s
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>
"""
CUSTOM_VAR_CODE = "_gaq.push(['_setCustomVar', %(index)s, '%(name)s', " \
"'%(value)s', %(scope)s]);"
register = Library()
@register.tag
def google_analytics(parser, token):
"""
Google Analytics tracking template tag.
Renders Javascript code to track page visits. You must supply
your website property ID (as a string) in the
``GOOGLE_ANALYTICS_PROPERTY_ID`` setting.
"""
bits = token.split_contents()
if len(bits) > 1:
raise TemplateSyntaxError("'%s' takes no arguments" % bits[0])
return GoogleAnalyticsNode()
class GoogleAnalyticsNode(Node):
def __init__(self):
self.property_id = get_required_setting(
'GOOGLE_ANALYTICS_PROPERTY_ID', PROPERTY_ID_RE,
"must be a string looking like 'UA-XXXXXX-Y'")
def render(self, context):
commands = self._get_custom_var_commands(context)
html = SETUP_CODE % {'property_id': self.property_id,
'commands': " ".join(commands)}
if is_internal_ip(context, 'GOOGLE_ANALYTICS'):
html = disable_html(html, 'Google Analytics')
return html
def _get_custom_var_commands(self, context):
values = (context.get('google_analytics_var%s' % i)
for i in range(1, 6))
vars = [(i, v) for i, v in enumerate(values, 1) if v is not None]
commands = []
for index, var in vars:
name = var[0]
value = var[1]
try:
scope = var[2]
except IndexError:
scope = SCOPE_PAGE
commands.append(CUSTOM_VAR_CODE % locals())
return commands
def contribute_to_analytical(add_node):
GoogleAnalyticsNode() # ensure properly configured
add_node('head_bottom', GoogleAnalyticsNode)
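# A minimal rendering sketch (illustrative only); it assumes Django settings are configured
# with GOOGLE_ANALYTICS_PROPERTY_ID = 'UA-123456-1' and that this module is loadable as the
# 'google_analytics' template tag library. Custom variables are read from the context keys
# google_analytics_var1 .. google_analytics_var5 as (name, value[, scope]) tuples.
if __name__ == '__main__':
    from django.template import Context, Template
    _tpl = Template('{% load google_analytics %}{% google_analytics %}')
    print(_tpl.render(Context({'google_analytics_var1': ('section', 'storefront', SCOPE_PAGE)})))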
|
linkedin/indextank-service
|
storefront/templatetags/google_analytics.py
|
Python
|
apache-2.0
| 2,712
|
#!/usr/bin/env python
import glob
import cv2
import cv_bridge
import rospy
from sensor_msgs.msg import Image
from std_msgs.msg import Int32, Float32
import rospkg
import sys
class Animation:
def __init__(self, directory):
self.fnames = [fname for fname in glob.glob("%s/*" % directory)]
self.fnames.sort()
self.images = [cv2.imread(path) for path in self.fnames]
self.animation_timer = None
self.current_value = 0
self.current_idx = 0
self.set_velocity(1/20.0)
self.current_target = 99
self.image_publisher = rospy.Publisher("/robot/xdisplay", Image,
queue_size=10)
self.value_subscriber = rospy.Subscriber("/confusion/value/command", Int32, self.set_value)
        self.target_subscriber = rospy.Subscriber("/confusion/target/command", Int32, self.set_target)
self.value_publisher = rospy.Publisher("/confusion/value/state", Int32,
queue_size=10)
self.target_publisher = rospy.Publisher("/confusion/target/state", Int32,
queue_size=10)
self.timer = rospy.Timer(rospy.Duration(self.velocity), self.timer_cb)
def set_velocity(self, velocity):
if isinstance(velocity, Float32):
velocity = velocity.data
self.velocity = velocity
if (self.animation_timer != None):
self.animation_timer.shutdown()
def set_idx(self, idx):
        self.current_idx = idx
        self.current_value = int((float(idx) / len(self.images)) * 100)
        self.checkrep()
        return self.publish_image()
def set_value(self, value):
if isinstance(value, Int32):
print "setting value from topic"
value = value.data
self.current_value = value
self.current_idx = int((value / 100.0) * (len(self.images)))
print self.current_idx
self.checkrep()
return self.publish_image()
def checkrep(self):
assert 0 <= self.current_idx < len(self.images), self.current_idx
assert 0 <= self.current_value < 100, self.current_value
assert self.current_target == None or (0 <= self.current_target < 100), self.current_target
def publish_image(self):
msg = cv_bridge.CvBridge().cv2_to_imgmsg(self.image, encoding="bgr8")
self.image_publisher.publish(msg)
return self.images[self.current_idx]
def set_target(self, target):
if isinstance(target, Int32):
print "setting target from topic"
target = target.data
print "setting target", target
self.current_target = target
@property
def image(self):
return self.images[self.current_idx]
def publish_state(self):
self.value_publisher.publish(self.current_value)
self.target_publisher.publish(self.current_target)
def timer_cb(self, time):
self.animate()
#print "anime timer is running"
self.publish_state()
def animate(self):
if self.current_target != None:
print "target", self.current_target, self.current_value
if self.current_target < self.current_value:
self.set_value(self.current_value - 1)
elif self.current_target > self.current_value:
self.set_value(self.current_value + 1)
elif self.current_target == self.current_value:
#self.current_target = None
self.current_value = 0
else:
raise ValueError("No target: " + `self.target`)
def main():
rospy.init_node('animator_server', anonymous=True)
rate = rospy.Rate(30)
rospack = rospkg.RosPack()
path = sys.argv[1]
Animation(path)
while not rospy.is_shutdown():
rate.sleep()
if __name__ == "__main__":
main()
|
UCRoboticsLab/BaxterTictactoe
|
src/baxter_tictactoe/scripts/animator_server.py
|
Python
|
apache-2.0
| 4,046
|
import asyncio
import sys
import traceback
import pytest
from pykka import Future, Timeout, get_all
def run_async(coroutine):
loop = asyncio.get_event_loop()
f = asyncio.ensure_future(coroutine, loop=loop)
return loop.run_until_complete(f)
def test_base_future_get_is_not_implemented():
future = Future()
with pytest.raises(NotImplementedError):
future.get()
def test_base_future_set_is_not_implemented():
future = Future()
with pytest.raises(NotImplementedError):
future.set(None)
def test_base_future_set_exception_is_not_implemented():
future = Future()
with pytest.raises(NotImplementedError):
future.set_exception(None)
def test_set_multiple_times_fails(future):
future.set(0)
with pytest.raises(Exception):
future.set(0)
def test_get_all_blocks_until_all_futures_are_available(futures):
futures[0].set(0)
futures[1].set(1)
futures[2].set(2)
result = get_all(futures)
assert result == [0, 1, 2]
def test_get_all_raises_timeout_if_not_all_futures_are_available(futures):
futures[0].set(0)
futures[1].set(1)
# futures[2] is unset
with pytest.raises(Timeout):
get_all(futures, timeout=0)
def test_get_all_can_be_called_multiple_times(futures):
futures[0].set(0)
futures[1].set(1)
futures[2].set(2)
result1 = get_all(futures)
result2 = get_all(futures)
assert result1 == result2
def test_future_in_future_works(runtime):
inner_future = runtime.future_class()
inner_future.set("foo")
outer_future = runtime.future_class()
outer_future.set(inner_future)
assert outer_future.get().get() == "foo"
def test_get_raises_exception_with_full_traceback(runtime):
exc_class_get = None
exc_class_set = None
exc_instance_get = None
exc_instance_set = None
exc_traceback_get = None
exc_traceback_set = None
future = runtime.future_class()
try:
raise NameError("foo")
except NameError:
exc_class_set, exc_instance_set, exc_traceback_set = sys.exc_info()
future.set_exception()
# We could move to another thread at this point
try:
future.get()
except NameError:
exc_class_get, exc_instance_get, exc_traceback_get = sys.exc_info()
assert exc_class_set == exc_class_get
assert exc_instance_set == exc_instance_get
exc_traceback_list_set = list(reversed(traceback.extract_tb(exc_traceback_set)))
exc_traceback_list_get = list(reversed(traceback.extract_tb(exc_traceback_get)))
# All frames from the first traceback should be included in the
# traceback from the future.get() reraise
assert len(exc_traceback_list_set) < len(exc_traceback_list_get)
for i, frame in enumerate(exc_traceback_list_set):
assert frame == exc_traceback_list_get[i]
def test_future_supports_await_syntax(future):
async def get_value():
return await future
future.set(1)
assert run_async(get_value()) == 1
def test_future_supports_yield_from_syntax(future):
def get_value():
val = yield from future
return val
future.set(1)
assert run_async(get_value()) == 1
def test_filter_excludes_items_not_matching_predicate(future):
filtered = future.filter(lambda x: x > 10)
future.set([1, 3, 5, 7, 9, 11, 13, 15, 17, 19])
assert filtered.get(timeout=0) == [11, 13, 15, 17, 19]
def test_filter_on_noniterable(future):
filtered = future.filter(lambda x: x > 10)
future.set(1)
with pytest.raises(TypeError):
filtered.get(timeout=0)
def test_filter_preserves_the_timeout_kwarg(future):
filtered = future.filter(lambda x: x > 10)
with pytest.raises(Timeout):
filtered.get(timeout=0)
def test_filter_reuses_result_if_called_multiple_times(future, mocker):
raise_on_reuse_func = mocker.Mock(side_effect=[False, True, Exception])
filtered = future.filter(raise_on_reuse_func)
future.set([1, 2])
assert filtered.get(timeout=0) == [2]
assert filtered.get(timeout=0) == [2] # First result is reused
assert filtered.get(timeout=0) == [2] # First result is reused
def test_join_combines_multiple_futures_into_one(futures):
joined = futures[0].join(futures[1], futures[2])
futures[0].set(0)
futures[1].set(1)
futures[2].set(2)
assert joined.get(timeout=0) == [0, 1, 2]
def test_join_preserves_timeout_kwarg(futures):
joined = futures[0].join(futures[1], futures[2])
futures[0].set(0)
futures[1].set(1)
# futures[2] is unset
with pytest.raises(Timeout):
joined.get(timeout=0)
def test_map_returns_future_which_passes_result_through_func(future):
mapped = future.map(lambda x: x + 10)
future.set(30)
assert mapped.get(timeout=0) == 40
def test_map_works_on_dict(future):
# Regression test for issue #64
mapped = future.map(lambda x: x["foo"])
future.set({"foo": "bar"})
assert mapped.get(timeout=0) == "bar"
def test_map_does_not_map_each_value_in_futures_iterable_result(future):
# Behavior changed in Pykka 2.0:
# This used to map each value in the future's result through the func,
# yielding [20, 30, 40].
mapped = future.map(lambda x: x + 10)
future.set([10, 20, 30])
with pytest.raises(TypeError):
mapped.get(timeout=0)
def test_map_preserves_timeout_kwarg(future):
mapped = future.map(lambda x: x + 10)
with pytest.raises(Timeout):
mapped.get(timeout=0)
def test_map_reuses_result_if_called_multiple_times(future, mocker):
raise_on_reuse_func = mocker.Mock(side_effect=[10, Exception])
mapped = future.map(raise_on_reuse_func)
future.set(30)
assert mapped.get(timeout=0) == 10
assert mapped.get(timeout=0) == 10 # First result is reused
def test_reduce_applies_function_cumulatively_from_the_left(future):
reduced = future.reduce(lambda x, y: x + y)
future.set([1, 2, 3, 4])
assert reduced.get(timeout=0) == 10
def test_reduce_accepts_an_initial_value(future):
reduced = future.reduce(lambda x, y: x + y, 5)
future.set([1, 2, 3, 4])
assert reduced.get(timeout=0) == 15
def test_reduce_on_noniterable(future):
reduced = future.reduce(lambda x, y: x + y)
future.set(1)
with pytest.raises(TypeError):
reduced.get(timeout=0)
def test_reduce_preserves_the_timeout_kwarg(future):
reduced = future.reduce(lambda x, y: x + y)
with pytest.raises(Timeout):
reduced.get(timeout=0)
def test_reduce_reuses_result_if_called_multiple_times(future, mocker):
raise_on_reuse_func = mocker.Mock(side_effect=[3, 6, Exception])
reduced = future.reduce(raise_on_reuse_func)
future.set([1, 2, 3])
assert reduced.get(timeout=0) == 6
assert reduced.get(timeout=0) == 6 # First result is reused
assert reduced.get(timeout=0) == 6 # First result is reused
|
jodal/pykka
|
tests/test_future.py
|
Python
|
apache-2.0
| 6,910
|
# yellowbrick.model_selection
# Visualizers that wrap the model selection libraries of Scikit-Learn
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Fri Mar 30 10:36:12 2018 -0400
#
# ID: __init__.py [c5355ee] benjamin@bengfort.com $
"""
Visualizers that wrap the model selection libraries of Scikit-Learn
"""
##########################################################################
## Imports
##########################################################################
from .learning_curve import LearningCurve, learning_curve
from .validation_curve import ValidationCurve, validation_curve
from .cross_validation import CVScores, cv_scores
# RFECV and Feature Importances moved here as of YB v1.0
from .importances import FeatureImportances, feature_importances
from .rfecv import RFECV, rfecv
|
DistrictDataLabs/yellowbrick
|
yellowbrick/model_selection/__init__.py
|
Python
|
apache-2.0
| 818
|
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from collections import defaultdict
from structlog import get_logger
from ..utils import epoch
logger = get_logger()
class TimeSeries(list):
def __init__(self, name, start, end, step, values, consolidate='average'):
list.__init__(self, values)
self.name = name
self.start = start
self.end = end
self.step = step
self.consolidationFunc = consolidate
self.valuesPerPoint = 1
self.options = {}
self.pathExpression = name
def __eq__(self, other):
if isinstance(other, TimeSeries):
color_eq = True
if hasattr(self, 'color'):
if hasattr(other, 'color'):
color_eq = (self.color == other.color)
else:
color_eq = False
elif hasattr(other, 'color'):
color_eq = False
return ((self.name, self.start, self.step, self.consolidationFunc,
self.valuesPerPoint, self.options) ==
(other.name, other.start, other.step,
other.consolidationFunc, other.valuesPerPoint,
other.options)) and list.__eq__(self, other) and color_eq
return False
def __iter__(self):
if self.valuesPerPoint > 1:
return self.__consolidatingGenerator(list.__iter__(self))
else:
return list.__iter__(self)
def consolidate(self, valuesPerPoint):
self.valuesPerPoint = int(valuesPerPoint)
def __consolidatingGenerator(self, gen):
buf = []
for x in gen:
buf.append(x)
if len(buf) == self.valuesPerPoint:
while None in buf:
buf.remove(None)
if buf:
yield self.__consolidate(buf)
buf = []
else:
yield None
while None in buf:
buf.remove(None)
if buf:
yield self.__consolidate(buf)
else:
yield None
return
def __consolidate(self, values):
usable = [v for v in values if v is not None]
if not usable:
return None
if self.consolidationFunc == 'sum':
return sum(usable)
if self.consolidationFunc == 'average':
return float(sum(usable)) / len(usable)
if self.consolidationFunc == 'max':
return max(usable)
if self.consolidationFunc == 'min':
return min(usable)
raise Exception(
"Invalid consolidation function: '%s'" % self.consolidationFunc)
def __repr__(self):
return 'TimeSeries(name=%s, start=%s, end=%s, step=%s)' % (
self.name, self.start, self.end, self.step)
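# Worked example (illustrative): consolidating a six-point series down to two points with the
# default 'average' function.
#   ts = TimeSeries('example.metric', 0, 60, 10, [1, 2, 3, None, 5, 6])
#   ts.consolidate(3)   # three raw values per consolidated point
#   list(ts)            # -> [2.0, 5.5]
# Each bucket is averaged after None values are dropped: (1 + 2 + 3) / 3 = 2.0 and
# (5 + 6) / 2 = 5.5.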
class DataStore(object):
"""
Simple object to store results of multi fetches.
Also aids in looking up data by pathExpressions.
"""
def __init__(self):
self.paths = defaultdict(set)
self.data = defaultdict(list)
def get_paths(self, path_expr):
"""
Returns all paths found for path_expr
"""
return sorted(self.paths[path_expr])
def add_data(self, path, time_info, data, exprs):
"""
Stores data before it can be put into a time series
"""
        # Don't add if empty
if not nonempty(data):
for d in self.data[path]:
if nonempty(d['values']):
return
# Add data to path
for expr in exprs:
self.paths[expr].add(path)
self.data[path].append({
'time_info': time_info,
'values': data
})
def get_series_list(self, path_expr):
series_list = []
for path in self.get_paths(path_expr):
for data in self.data.get(path):
start, end, step = data['time_info']
series = TimeSeries(path, start, end, step, data['values'])
series.pathExpression = path_expr
series_list.append(series)
return series_list
def fetchData(requestContext, pathExprs):
from ..app import app
startTime = int(epoch(requestContext['startTime']))
endTime = int(epoch(requestContext['endTime']))
if 'now' in requestContext:
now = int(epoch(requestContext['now']))
else:
now = None
# Convert to list if given single path
if not isinstance(pathExprs, list):
pathExprs = [pathExprs]
data_store = DataStore()
multi_nodes = defaultdict(list)
single_nodes = []
path_to_exprs = defaultdict(list)
# Group nodes that support multiple fetches
for pathExpr in pathExprs:
for node in app.store.find(pathExpr, startTime, endTime):
if not node.is_leaf:
continue
if node.path not in path_to_exprs:
if hasattr(node, '__fetch_multi__'):
multi_nodes[node.__fetch_multi__].append(node)
else:
single_nodes.append(node)
path_to_exprs[node.path].append(pathExpr)
# Multi fetches
for finder in app.store.finders:
if not hasattr(finder, '__fetch_multi__'):
continue
nodes = multi_nodes[finder.__fetch_multi__]
if not nodes:
continue
try:
time_info, series = finder.fetch_multi(nodes, startTime, endTime,
now, requestContext)
except TypeError:
time_info, series = finder.fetch_multi(nodes, startTime, endTime)
for path, values in series.items():
data_store.add_data(path, time_info, values,
path_to_exprs[path])
# Single fetches
fetches = [
(node.path, node.fetch(startTime, endTime, now, requestContext))
for node in single_nodes
]
for path, results in fetches:
if not results:
logger.info("no results", path=path, start=startTime,
end=endTime)
continue
try:
time_info, values = results
except ValueError as e:
raise Exception("could not parse timeInfo/values from metric "
"'%s': %s" % (path, e))
data_store.add_data(path, time_info, values, path_to_exprs[path])
return data_store
def nonempty(series):
for value in series:
if value is not None:
return True
return False
|
brutasse/graphite-api
|
graphite_api/render/datalib.py
|
Python
|
apache-2.0
| 7,097
|
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
# -*- test-case-name: flocker.provision.test.test_install -*-
"""
Install flocker on a remote node.
"""
import posixpath
from textwrap import dedent
from urlparse import urljoin, urlparse
from effect import Func, Effect
import yaml
from zope.interface import implementer
from characteristic import attributes
from pyrsistent import PRecord, field
from ._libcloud import INode
from ._common import PackageSource, Variants
from ._ssh import (
run, run_from_args,
sudo, sudo_from_args,
put,
run_remotely
)
from ._effect import sequence
from flocker import __version__ as version
from flocker.cli import configure_ssh
from flocker.common.version import (
get_installable_version, get_package_key_suffix, is_release,
)
# A systemctl sub-command to start or restart a service. We use restart here
# so that if it is already running it gets restarted (possibly necessary to
# respect updated configuration) and because restart will also start it if it
# is not running.
START = "restart"
ZFS_REPO = {
'centos-7': "https://s3.amazonaws.com/archive.zfsonlinux.org/"
"epel/zfs-release.el7.noarch.rpm",
}
ARCHIVE_BUCKET = 'clusterhq-archive'
def get_repository_url(distribution, flocker_version):
"""
Return the URL for the repository of a given distribution.
For ``yum``-using distributions this gives the URL to a package that adds
entries to ``/etc/yum.repos.d``. For ``apt``-using distributions, this
gives the URL for a repo containing a Packages(.gz) file.
:param bytes distribution: The Linux distribution to get a repository for.
:param bytes flocker_version: The version of Flocker to get a repository
for.
:return bytes: The URL pointing to a repository of packages.
:raises: ``UnsupportedDistribution`` if the distribution is unsupported.
"""
distribution_to_url = {
# TODO instead of hardcoding keys, use the _to_Distribution map
# and then choose the name
'centos-7': "https://{archive_bucket}.s3.amazonaws.com/"
"{key}/clusterhq-release$(rpm -E %dist).noarch.rpm".format(
archive_bucket=ARCHIVE_BUCKET,
key='centos',
),
# This could hardcode the version number instead of using
# ``lsb_release`` but that allows instructions to be shared between
# versions, and for earlier error reporting if you try to install on a
# separate version. The $(ARCH) part must be left unevaluated, hence
# the backslash escapes (one to make shell ignore the $ as a
# substitution marker, and then doubled to make Python ignore the \ as
# an escape marker). The output of this value then goes into
# /etc/apt/sources.list which does its own substitution on $(ARCH)
# during a subsequent apt-get update
'ubuntu-14.04': 'https://{archive_bucket}.s3.amazonaws.com/{key}/'
'$(lsb_release --release --short)/\\$(ARCH)'.format(
archive_bucket=ARCHIVE_BUCKET,
key='ubuntu' + get_package_key_suffix(
flocker_version),
),
'ubuntu-15.04': 'https://{archive_bucket}.s3.amazonaws.com/{key}/'
'$(lsb_release --release --short)/\\$(ARCH)'.format(
archive_bucket=ARCHIVE_BUCKET,
key='ubuntu' + get_package_key_suffix(
flocker_version),
),
}
try:
return distribution_to_url[distribution]
except KeyError:
raise UnsupportedDistribution()
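# For example (illustrative): get_repository_url('centos-7', '1.3.0') yields the
# clusterhq-release RPM URL under the 'centos' key of the archive bucket, while an
# unrecognised distribution such as 'debian-8' raises UnsupportedDistribution.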
def get_repo_options(flocker_version):
"""
Get a list of options for enabling necessary yum repositories.
:param bytes flocker_version: The version of Flocker to get options for.
:return: List of bytes for enabling (or not) a testing repository.
"""
is_dev = not is_release(flocker_version)
if is_dev:
return ['--enablerepo=clusterhq-testing']
else:
return []
class UnsupportedDistribution(Exception):
"""
Raised if trying to support a distribution which is not supported.
"""
@attributes(['distribution'])
class DistributionNotSupported(NotImplementedError):
"""
Raised when the provisioning step is not supported on the given
distribution.
:ivar bytes distribution: The distribution that isn't supported.
"""
def __str__(self):
return "Distribution not supported: %s" % (self.distribution,)
@implementer(INode)
class ManagedNode(PRecord):
"""
A node managed by some other system (eg by hand or by another piece of
orchestration software).
"""
address = field(type=bytes, mandatory=True)
private_address = field(type=(bytes, type(None)),
initial=None, mandatory=True)
distribution = field(type=bytes, mandatory=True)
def task_client_installation_test():
"""
Check that the CLI is working.
"""
return run_from_args(['flocker-deploy', '--version'])
def install_cli_commands_yum(distribution, package_source):
"""
Install Flocker CLI on CentOS.
The ClusterHQ repo is added for downloading latest releases. If
``package_source`` contains a branch, then a BuildBot repo will also
be added to the package search path, to use in-development packages.
Note, the ClusterHQ repo is always enabled, to provide dependencies.
:param bytes distribution: The distribution the node is running.
:param PackageSource package_source: The source from which to install the
package.
:return: a sequence of commands to run on the distribution
"""
if package_source.branch:
# A development branch has been selected - add its Buildbot repo
use_development_branch = True
result_path = posixpath.join(
'/results/omnibus/', package_source.branch, distribution)
base_url = urljoin(package_source.build_server, result_path)
else:
use_development_branch = False
commands = [
sudo(command="yum install -y " + get_repository_url(
distribution=distribution,
flocker_version=get_installable_version(version))),
]
if use_development_branch:
repo = dedent(b"""\
[clusterhq-build]
name=clusterhq-build
baseurl=%s
gpgcheck=0
enabled=0
""") % (base_url,)
commands.append(put(content=repo,
path='/tmp/clusterhq-build.repo'))
commands.append(sudo_from_args([
'cp', '/tmp/clusterhq-build.repo',
'/etc/yum.repos.d/clusterhq-build.repo']))
repo_options = ['--enablerepo=clusterhq-build']
else:
repo_options = get_repo_options(
flocker_version=get_installable_version(version))
if package_source.os_version:
package = 'clusterhq-flocker-cli-%s' % (package_source.os_version,)
else:
package = 'clusterhq-flocker-cli'
# Install Flocker CLI and all dependencies
commands.append(sudo_from_args(
["yum", "install"] + repo_options + ["-y", package]))
return sequence(commands)
def install_cli_commands_ubuntu(distribution, package_source):
"""
Install flocker CLI on Ubuntu.
The ClusterHQ repo is added for downloading latest releases. If
``package_source`` contains a branch, then a BuildBot repo will also
be added to the package search path, to use in-development packages.
Note, the ClusterHQ repo is always enabled, to provide dependencies.
:param bytes distribution: The distribution the node is running.
:param PackageSource package_source: The source from which to install the
package.
:return: a sequence of commands to run on the distribution
"""
if package_source.branch:
# A development branch has been selected - add its Buildbot repo
use_development_branch = True
result_path = posixpath.join(
'/results/omnibus/', package_source.branch, distribution)
base_url = urljoin(package_source.build_server, result_path)
else:
use_development_branch = False
commands = [
# Minimal images often have cleared apt caches and are missing
# packages that are common in a typical release. These commands
# ensure that we start from a good base system with the required
# capabilities, particularly that the add-apt-repository command
# and HTTPS URLs are supported.
# FLOC-1880 will ensure these are necessary and sufficient.
sudo_from_args(["apt-get", "update"]),
sudo_from_args([
"apt-get", "-y", "install", "apt-transport-https",
"software-properties-common"]),
# Add ClusterHQ repo for installation of Flocker packages.
sudo(command='add-apt-repository -y "deb {} /"'.format(
get_repository_url(
distribution=distribution,
flocker_version=get_installable_version(version))))
]
if use_development_branch:
# Add BuildBot repo for running tests
commands.append(sudo_from_args([
"add-apt-repository", "-y", "deb {} /".format(base_url)]))
# During a release, the ClusterHQ repo may contain packages with
# a higher version number than the Buildbot repo for a branch.
# Use a pin file to ensure that any Buildbot repo has higher
# priority than the ClusterHQ repo.
buildbot_host = urlparse(package_source.build_server).hostname
commands.append(put(dedent('''\
Package: *
Pin: origin {}
Pin-Priority: 900
'''.format(buildbot_host)), '/tmp/apt-pref'))
commands.append(sudo_from_args([
'mv', '/tmp/apt-pref', '/etc/apt/preferences.d/buildbot-900']))
# Update to read package info from new repos
commands.append(sudo_from_args(["apt-get", "update"]))
if package_source.os_version:
package = 'clusterhq-flocker-cli=%s' % (package_source.os_version,)
else:
package = 'clusterhq-flocker-cli'
# Install Flocker CLI and all dependencies
commands.append(sudo_from_args([
'apt-get', '-y', '--force-yes', 'install', package]))
return sequence(commands)
_task_install_commands = {
'centos-7': install_cli_commands_yum,
'ubuntu-14.04': install_cli_commands_ubuntu,
'ubuntu-15.04': install_cli_commands_ubuntu,
}
def task_install_cli(distribution, package_source=PackageSource()):
"""
Install flocker CLI on a distribution.
The ClusterHQ repo is added for downloading latest releases. If
``package_source`` contains a branch, then a BuildBot repo will also
be added to the package search path, to use in-development packages.
Note, the ClusterHQ repo is always enabled, to provide dependencies.
:param bytes distribution: The distribution the node is running.
:param PackageSource package_source: The source from which to install the
package.
:return: a sequence of commands to run on the distribution
"""
return _task_install_commands[distribution](distribution, package_source)
def install_cli(package_source, node):
"""
Return an effect to run the CLI installation tasks on a remote node.
:param package_source: Package source description
:param node: Remote node description
"""
return run_remotely(
node.get_default_username(), node.address,
task_install_cli(node.distribution, package_source))
def task_configure_brew_path():
"""
Configure non-interactive shell to use all paths.
By default, OSX provides a minimal $PATH, for programs run via SSH. In
particular /usr/local/bin (which contains `brew`) isn't in the path. This
configures the path to have it there.
"""
return put(
path='.bashrc',
content=dedent("""\
if [ -x /usr/libexec/path_helper ]; then
eval `/usr/libexec/path_helper -s`
fi
"""))
def task_test_homebrew(recipe):
"""
The commands used to install a Homebrew recipe for Flocker and test it.
This taps the ClusterHQ/tap tap, which means that Homebrew looks in the
ClusterHQ/homebrew-tap GitHub repository for any recipe name given.
:param bytes recipe: The name of a recipe in a either the official Homebrew
tap or ClusterHQ/tap, or a URL pointing to a recipe.
:return Effect: Commands used to install a Homebrew recipe for Flocker and
test it.
"""
return sequence([
run_from_args(['brew', 'tap', 'ClusterHQ/tap']),
run("brew update"),
run("brew install {recipe}".format(recipe=recipe)),
run("brew test {recipe}".format(recipe=recipe)),
])
def task_install_ssh_key():
"""
Install the authorized ssh keys of the current user for root as well.
"""
return sequence([
sudo_from_args(['cp', '.ssh/authorized_keys',
'/root/.ssh/authorized_keys']),
])
def task_upgrade_kernel(distribution):
"""
Upgrade kernel.
"""
if distribution == 'centos-7':
return sequence([
run_from_args([
"yum", "install", "-y", "kernel-devel", "kernel"]),
run_from_args(['sync']),
])
elif distribution == 'ubuntu-14.04':
# Not required.
return sequence([])
else:
raise DistributionNotSupported(distribution=distribution)
def task_disable_selinux(distribution):
"""
Disable SELinux for this session and permanently.
XXX: Remove this when we work out suitable SELinux settings.
See https://clusterhq.atlassian.net/browse/FLOC-619.
"""
if distribution in ('centos-7',):
return sequence([
run("if selinuxenabled; then setenforce 0; fi"),
run("test -e /etc/selinux/config && "
"sed --in-place='.preflocker' "
"'s/^SELINUX=.*$/SELINUX=disabled/g' "
"/etc/selinux/config"),
])
elif distribution in ('ubuntu-14.04',):
# Ubuntu does not have SELinux enabled
return sequence([])
else:
raise DistributionNotSupported(distribution=distribution)
def _remove_private_key(content):
"""
Remove most of the contents of a private key file for logging.
"""
prefix = '-----BEGIN PRIVATE KEY-----'
suffix = '-----END PRIVATE KEY-----'
start = content.find(prefix)
if start < 0:
# no private key
return content
# Keep prefix, subsequent newline, and 4 characters at start of key
trim_start = start + len(prefix) + 5
end = content.find(suffix, trim_start)
if end < 0:
end = len(content)
# Keep suffix and previous 4 characters and newline at end of key
trim_end = end - 5
if trim_end <= trim_start:
# strangely short key, keep all content
return content
return content[:trim_start] + '...REMOVED...' + content[trim_end:]
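# Worked example (illustrative): for key material "ABCD....WXYZ" the filter keeps the BEGIN
# line, the first four and last four characters of the key, and the END line:
#   -----BEGIN PRIVATE KEY-----
#   ABCD...REMOVED...WXYZ
#   -----END PRIVATE KEY-----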
def task_install_control_certificates(ca_cert, control_cert, control_key):
"""
Install certificates and private key required by the control service.
:param FilePath ca_cert: Path to CA certificate on local machine.
:param FilePath control_cert: Path to control service certificate on
local machine.
:param FilePath control_key: Path to control service private key
        on local machine.
"""
# Be better if permissions were correct from the start.
# https://clusterhq.atlassian.net/browse/FLOC-1922
return sequence([
run('mkdir -p /etc/flocker'),
run('chmod u=rwX,g=,o= /etc/flocker'),
put(path="/etc/flocker/cluster.crt", content=ca_cert.getContent()),
put(path="/etc/flocker/control-service.crt",
content=control_cert.getContent()),
put(path="/etc/flocker/control-service.key",
content=control_key.getContent(),
log_content_filter=_remove_private_key),
])
def task_install_node_certificates(ca_cert, node_cert, node_key):
"""
Install certificates and private key required by a node.
:param FilePath ca_cert: Path to CA certificate on local machine.
:param FilePath node_cert: Path to node certificate on
local machine.
:param FilePath node_key: Path to node private key
        on local machine.
"""
# Be better if permissions were correct from the start.
# https://clusterhq.atlassian.net/browse/FLOC-1922
return sequence([
run('mkdir -p /etc/flocker'),
run('chmod u=rwX,g=,o= /etc/flocker'),
put(path="/etc/flocker/cluster.crt", content=ca_cert.getContent()),
put(path="/etc/flocker/node.crt",
content=node_cert.getContent()),
put(path="/etc/flocker/node.key",
content=node_key.getContent(),
log_content_filter=_remove_private_key),
])
def task_enable_docker(distribution):
"""
Start docker and configure it to start automatically.
"""
if distribution in ('centos-7',):
return sequence([
run_from_args(["systemctl", "enable", "docker.service"]),
run_from_args(["systemctl", "start", "docker.service"]),
])
elif distribution == 'ubuntu-14.04':
# Ubuntu enables docker service during installation
return sequence([])
else:
raise DistributionNotSupported(distribution=distribution)
def open_firewalld(service):
"""
Open firewalld port for a service.
:param str service: Name of service.
"""
return sequence([
run_from_args(command + [service])
for command in [['firewall-cmd', '--permanent', '--add-service'],
['firewall-cmd', '--add-service']]])
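# For example, open_firewalld('flocker-control-api') sequences the two commands
#   firewall-cmd --permanent --add-service flocker-control-api
#   firewall-cmd --add-service flocker-control-api
# so the service port is opened both in the persistent configuration and in the
# currently running firewalld.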
def open_ufw(service):
"""
Open ufw port for a service.
:param str service: Name of service.
"""
return sequence([
run_from_args(['ufw', 'allow', service])
])
def task_enable_flocker_control(distribution):
"""
Enable flocker-control service.
"""
if distribution in ('centos-7',):
return sequence([
run_from_args(['systemctl', 'enable', 'flocker-control']),
run_from_args(['systemctl', START, 'flocker-control']),
])
elif distribution == 'ubuntu-14.04':
# Since the flocker-control service is currently installed
# alongside the flocker-dataset-agent service, the default control
# service configuration does not automatically start the
# service. Here, we provide an override file to start it.
return sequence([
put(
path='/etc/init/flocker-control.override',
content=dedent('''\
start on runlevel [2345]
stop on runlevel [016]
'''),
),
run("echo 'flocker-control-api\t4523/tcp\t\t\t# Flocker Control API port' >> /etc/services"), # noqa
run("echo 'flocker-control-agent\t4524/tcp\t\t\t# Flocker Control Agent port' >> /etc/services"), # noqa
run_from_args(['service', 'flocker-control', 'start']),
])
else:
raise DistributionNotSupported(distribution=distribution)
def task_open_control_firewall(distribution):
"""
Open the firewall for flocker-control.
"""
if distribution in ('centos-7',):
open_firewall = open_firewalld
elif distribution == 'ubuntu-14.04':
open_firewall = open_ufw
else:
raise DistributionNotSupported(distribution=distribution)
return sequence([
open_firewall(service)
for service in ['flocker-control-api', 'flocker-control-agent']
])
# Set of dataset fields which are *not* sensitive. Only fields in this
# set are logged. This should contain everything except usernames and
# passwords (or equivalents). Implemented as a whitelist in case new
# security fields are added.
_ok_to_log = frozenset((
'auth_plugin',
'auth_url',
'backend',
'region',
'zone',
))
def _remove_dataset_fields(content):
"""
Remove non-whitelisted fields from dataset for logging.
"""
content = yaml.safe_load(content)
dataset = content['dataset']
for key in dataset:
if key not in _ok_to_log:
dataset[key] = 'REMOVED'
return yaml.safe_dump(content)
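# Illustrative example (hypothetical values): an agent.yml fragment such as
#
#   dataset:
#     backend: openstack
#     region: RegionOne
#     password: hunter2
#
# is logged with `password: REMOVED`, while the whitelisted `backend` and
# `region` fields pass through untouched.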
def task_configure_flocker_agent(control_node, dataset_backend,
dataset_backend_configuration):
"""
Configure the flocker agents by writing out the configuration file.
:param bytes control_node: The address of the control agent.
:param DatasetBackend dataset_backend: The volume backend the nodes are
configured with.
:param dict dataset_backend_configuration: The backend specific
configuration options.
"""
dataset_backend_configuration = dataset_backend_configuration.copy()
dataset_backend_configuration.update({
u"backend": dataset_backend.name,
})
put_config_file = put(
path='/etc/flocker/agent.yml',
content=yaml.safe_dump(
{
"version": 1,
"control-service": {
"hostname": control_node,
"port": 4524,
},
"dataset": dataset_backend_configuration,
},
),
log_content_filter=_remove_dataset_fields
)
return sequence([put_config_file])
def task_enable_flocker_agent(distribution):
"""
Enable the flocker agents.
:param bytes distribution: The distribution name.
"""
if distribution in ('centos-7',):
return sequence([
run_from_args(['systemctl', 'enable', 'flocker-dataset-agent']),
run_from_args(['systemctl', START, 'flocker-dataset-agent']),
run_from_args(['systemctl', 'enable', 'flocker-container-agent']),
run_from_args(['systemctl', START, 'flocker-container-agent']),
])
elif distribution == 'ubuntu-14.04':
return sequence([
run_from_args(['service', 'flocker-dataset-agent', 'start']),
run_from_args(['service', 'flocker-container-agent', 'start']),
])
else:
raise DistributionNotSupported(distribution=distribution)
def task_create_flocker_pool_file():
"""
Create a file-back zfs pool for flocker.
"""
return sequence([
run('mkdir -p /var/opt/flocker'),
run('truncate --size 10G /var/opt/flocker/pool-vdev'),
run('zpool create flocker /var/opt/flocker/pool-vdev'),
])
def task_install_zfs(distribution, variants=set()):
"""
Install ZFS on a node.
:param bytes distribution: The distribution the node is running.
    :param set variants: The set of variant configurations to use when
        installing ZFS.
"""
commands = []
if distribution == 'ubuntu-14.04':
commands += [
# ZFS not available in base Ubuntu - add ZFS repo
run_from_args([
"add-apt-repository", "-y", "ppa:zfs-native/stable"]),
]
commands += [
# Update to read package info from new repos
run_from_args([
"apt-get", "update"]),
# Package spl-dkms sometimes does not have libc6-dev as a
# dependency, add it before ZFS installation requires it.
# See https://github.com/zfsonlinux/zfs/issues/3298
run_from_args(["apt-get", "-y", "install", "libc6-dev"]),
run_from_args(['apt-get', '-y', 'install', 'zfsutils']),
]
elif distribution in ('centos-7',):
commands += [
run_from_args(["yum", "install", "-y", ZFS_REPO[distribution]]),
]
if distribution == 'centos-7':
commands.append(
run_from_args(["yum", "install", "-y", "epel-release"]))
if Variants.ZFS_TESTING in variants:
commands += [
run_from_args(['yum', 'install', '-y', 'yum-utils']),
run_from_args([
'yum-config-manager', '--enable', 'zfs-testing'])
]
commands += [
run_from_args(['yum', 'install', '-y', 'zfs']),
]
else:
raise DistributionNotSupported(distribution)
return sequence(commands)
def configure_zfs(node, variants):
"""
Configure ZFS for use as a Flocker backend.
:param INode node: The node to configure ZFS on.
    :param set variants: The set of variant configurations to use when
        configuring ZFS.
:return Effect:
"""
return sequence([
run_remotely(
username='root',
address=node.address,
commands=task_upgrade_kernel(
distribution=node.distribution),
),
node.reboot(),
run_remotely(
username='root',
address=node.address,
commands=sequence([
task_install_zfs(
distribution=node.distribution,
variants=variants),
task_create_flocker_pool_file(),
]),
),
Effect(
Func(lambda: configure_ssh(node.address, 22))),
])
def _uninstall_flocker_ubuntu1404():
"""
Return an ``Effect`` for uninstalling the Flocker package from an Ubuntu
14.04 machine.
"""
return run_from_args([
b"apt-get", b"remove", b"-y", b"--purge", b"clusterhq-python-flocker",
])
def _uninstall_flocker_centos7():
"""
Return an ``Effect`` for uninstalling the Flocker package from a CentOS 7
machine.
"""
return sequence([
run_from_args([
b"yum", b"erase", b"-y", b"clusterhq-python-flocker",
]),
run_from_args([
b"yum", b"erase", b"-y", b"clusterhq-release",
]),
])
_flocker_uninstallers = {
"ubuntu-14.04": _uninstall_flocker_ubuntu1404,
"centos-7": _uninstall_flocker_centos7,
}
def task_uninstall_flocker(distribution):
"""
Return an ``Effect`` for uninstalling the Flocker package from the given
distribution.
"""
return _flocker_uninstallers[distribution]()
def uninstall_flocker(nodes):
"""
Return an ``Effect`` for uninstalling the Flocker package from all of the
given nodes.
"""
return _run_on_all_nodes(
nodes,
task=lambda node: task_uninstall_flocker(node.distribution)
)
def task_install_flocker(
distribution=None,
package_source=PackageSource(),
):
"""
Install flocker cluster on a distribution.
The ClusterHQ repo is added for downloading latest releases. If
``package_source`` contains a branch, then a BuildBot repo will also
be added to the package search path, to use in-development packages.
Note, the ClusterHQ repo is always enabled, to provide dependencies.
:param bytes distribution: The distribution the node is running.
:param PackageSource package_source: The source from which to install the
package.
:raises: ``UnsupportedDistribution`` if the distribution is unsupported.
"""
if package_source.branch:
# A development branch has been selected - add its Buildbot repo
use_development_branch = True
result_path = posixpath.join(
'/results/omnibus/', package_source.branch, distribution)
base_url = urljoin(package_source.build_server, result_path)
else:
use_development_branch = False
if distribution in ('ubuntu-14.04', 'ubuntu-15.04'):
commands = [
# Ensure add-apt-repository command and HTTPS URLs are supported
# FLOC-1880 will ensure these are necessary and sufficient
run_from_args([
"apt-get", "-y", "install", "apt-transport-https",
"software-properties-common"]),
# Add Docker repo for recent Docker versions
run_from_args([
"add-apt-repository", "-y", "ppa:james-page/docker"]),
# Add ClusterHQ repo for installation of Flocker packages.
run(command='add-apt-repository -y "deb {} /"'.format(
get_repository_url(
distribution=distribution,
flocker_version=get_installable_version(version)))),
]
if use_development_branch:
# Add BuildBot repo for testing
commands.append(run_from_args([
"add-apt-repository", "-y", "deb {} /".format(base_url)]))
# During a release, the ClusterHQ repo may contain packages with
# a higher version number than the Buildbot repo for a branch.
# Use a pin file to ensure that any Buildbot repo has higher
# priority than the ClusterHQ repo.
buildbot_host = urlparse(package_source.build_server).hostname
commands.append(put(
dedent('''\
Package: *
Pin: origin {}
Pin-Priority: 900
'''.format(buildbot_host)),
'/etc/apt/preferences.d/buildbot-900'))
commands += [
# Update to read package info from new repos
run_from_args([
"apt-get", "update"]),
]
if package_source.os_version:
package = 'clusterhq-flocker-node=%s' % (
package_source.os_version,)
else:
package = 'clusterhq-flocker-node'
# Install Flocker node and all dependencies
commands.append(run_from_args([
'apt-get', '-y', '--force-yes', 'install', package]))
return sequence(commands)
elif distribution in ('centos-7',):
commands = [
run(command="yum clean all"),
run(command="yum install -y " + get_repository_url(
distribution=distribution,
flocker_version=get_installable_version(version)))
]
if use_development_branch:
repo = dedent(b"""\
[clusterhq-build]
name=clusterhq-build
baseurl=%s
gpgcheck=0
enabled=0
""") % (base_url,)
commands.append(put(content=repo,
path='/etc/yum.repos.d/clusterhq-build.repo'))
repo_options = ['--enablerepo=clusterhq-build']
else:
repo_options = get_repo_options(
flocker_version=get_installable_version(version))
if package_source.os_version:
package = 'clusterhq-flocker-node-%s' % (
package_source.os_version,)
else:
package = 'clusterhq-flocker-node'
commands.append(run_from_args(
["yum", "install"] + repo_options + ["-y", package]))
return sequence(commands)
else:
raise UnsupportedDistribution()
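# A minimal usage sketch (hypothetical values; `version` and the repository
# helpers are defined elsewhere in this module): installing the latest release
# packages on CentOS 7 with no Buildbot branch selected:
#
#   install_effect = task_install_flocker(
#       distribution='centos-7',
#       package_source=PackageSource(),
#   )
#
# Supplying a PackageSource with `branch` set additionally registers the build
# server repository (and, on Ubuntu, pins it above the ClusterHQ repo).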
ACCEPTANCE_IMAGES = [
"postgres:latest",
"clusterhq/mongodb:latest",
"clusterhq/flask",
"clusterhq/flaskenv",
"busybox",
]
def task_pull_docker_images(images=ACCEPTANCE_IMAGES):
"""
Pull docker images.
:param list images: List of images to pull. Defaults to images used in
acceptance tests.
"""
return sequence([
run_from_args(['docker', 'pull', image]) for image in images
])
def task_enable_updates_testing(distribution):
"""
Enable the distribution's proposed updates repository.
:param bytes distribution: See func:`task_install_flocker`
"""
raise DistributionNotSupported(distribution=distribution)
def task_enable_docker_head_repository(distribution):
"""
Enable the distribution's repository containing in-development docker
builds.
:param bytes distribution: See func:`task_install_flocker`
"""
if distribution == "centos-7":
return sequence([
put(content=dedent("""\
[virt7-testing]
name=virt7-testing
baseurl=http://cbs.centos.org/repos/virt7-testing/x86_64/os/
enabled=1
gpgcheck=0
"""),
path="/etc/yum.repos.d/virt7-testing.repo")
])
else:
raise DistributionNotSupported(distribution=distribution)
def provision(distribution, package_source, variants):
"""
Provision the node for running flocker.
This drives all the common node installation steps in:
* http://doc-dev.clusterhq.com/gettingstarted/installation.html
:param bytes distribution: See func:`task_install_flocker`
:param PackageSource package_source: See func:`task_install_flocker`
:param set variants: The set of variant configurations to use when
provisioning
"""
commands = []
if Variants.DISTRO_TESTING in variants:
commands.append(task_enable_updates_testing(distribution))
if Variants.DOCKER_HEAD in variants:
commands.append(task_enable_docker_head_repository(distribution))
commands.append(
task_install_flocker(
package_source=package_source, distribution=distribution))
    if distribution in ('centos-7',):
commands.append(task_disable_selinux(distribution))
commands.append(task_enable_docker(distribution))
return sequence(commands)
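# Reading of the composition above (a sketch, not additional behaviour): for a
# plain CentOS 7 node with no variants selected, provision() sequences
# task_install_flocker -> task_disable_selinux -> task_enable_docker; Ubuntu
# 14.04 skips the SELinux step.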
def _run_on_all_nodes(nodes, task):
"""
Run some commands on some nodes.
:param nodes: An iterable of ``Node`` instances where the commands should
be run.
:param task: A one-argument callable which is called with each ``Node`` and
should return the ``Effect`` to run on that node.
:return: An ``Effect`` that runs the commands on a group of nodes.
"""
return sequence(list(
run_remotely(
username='root',
address=node.address,
commands=task(node),
)
for node in nodes
))
def install_flocker(nodes, package_source):
"""
Return an ``Effect`` that installs a certain version of Flocker on the
given nodes.
:param nodes: An iterable of ``Node`` instances on which to install
Flocker.
:param PackageSource package_source: The version of Flocker to install.
:return: An ``Effect`` which installs Flocker on the nodes.
"""
return _run_on_all_nodes(
nodes,
task=lambda node: task_install_flocker(
distribution=node.distribution,
package_source=package_source,
)
)
def configure_cluster(cluster, dataset_backend_configuration):
"""
Configure flocker-control, flocker-dataset-agent and
flocker-container-agent on a collection of nodes.
:param Cluster cluster: Description of the cluster to configure.
:param dict dataset_backend_configuration: Configuration parameters to
supply to the dataset backend.
"""
return sequence([
run_remotely(
username='root',
address=cluster.control_node.address,
commands=sequence([
task_install_control_certificates(
cluster.certificates.cluster.certificate,
cluster.certificates.control.certificate,
cluster.certificates.control.key),
task_enable_flocker_control(cluster.control_node.distribution),
]),
),
sequence([
sequence([
run_remotely(
username='root',
address=node.address,
commands=sequence([
task_install_node_certificates(
cluster.certificates.cluster.certificate,
certnkey.certificate,
certnkey.key),
task_configure_flocker_agent(
control_node=cluster.control_node.address,
dataset_backend=cluster.dataset_backend,
dataset_backend_configuration=(
dataset_backend_configuration
),
),
task_enable_flocker_agent(
distribution=node.distribution,
)]),
),
]) for certnkey, node
in zip(cluster.certificates.nodes, cluster.agent_nodes)
])
])
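# Note on the pairing above: configure_cluster zips cluster.certificates.nodes
# with cluster.agent_nodes, so the two sequences are expected to be in matching
# order; each agent node receives the node certificate at the same position.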
|
Azulinho/flocker
|
flocker/provision/_install.py
|
Python
|
apache-2.0
| 36,812
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack.compute.plugins.v3 import admin_password
from nova.compute import api as compute_api
from nova import exception
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
def fake_get(self, context, id):
return {'uuid': id}
def fake_get_non_existed(self, context, id):
raise exception.InstanceNotFound(instance_id=id)
def fake_set_admin_password(self, context, instance, password=None):
pass
def fake_set_admin_password_failed(self, context, instance, password=None):
raise exception.InstancePasswordSetFailed(instance=instance, reason='')
def fake_set_admin_password_non_implement(self, context, instance,
password=None):
raise NotImplementedError()
class AdminPasswordTest(test.NoDBTestCase):
def setUp(self):
super(AdminPasswordTest, self).setUp()
self.stubs.Set(compute_api.API, 'set_admin_password',
fake_set_admin_password)
self.stubs.Set(compute_api.API, 'get', fake_get)
self.app = fakes.wsgi_app_v3(init_only=('servers',
admin_password.ALIAS))
def _make_request(self, url, body):
req = webob.Request.blank(url)
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.content_type = 'application/json'
res = req.get_response(self.app)
return res
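    # _make_request drives the full WSGI app: each test below POSTs a JSON body
    # such as {'change_password': {'admin_password': 'test'}} to
    # /v3/servers/<id>/action and asserts only on the resulting status code.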
def test_change_password(self):
url = '/v3/servers/1/action'
body = {'change_password': {'admin_password': 'test'}}
res = self._make_request(url, body)
self.assertEqual(res.status_int, 204)
def test_change_password_empty_string(self):
url = '/v3/servers/1/action'
body = {'change_password': {'admin_password': ''}}
res = self._make_request(url, body)
self.assertEqual(res.status_int, 204)
def test_change_password_with_non_implement(self):
url = '/v3/servers/1/action'
body = {'change_password': {'admin_password': 'test'}}
self.stubs.Set(compute_api.API, 'set_admin_password',
fake_set_admin_password_non_implement)
res = self._make_request(url, body)
self.assertEqual(res.status_int, 501)
def test_change_password_with_non_existed_instance(self):
url = '/v3/servers/1/action'
body = {'change_password': {'admin_password': 'test'}}
self.stubs.Set(compute_api.API, 'get', fake_get_non_existed)
res = self._make_request(url, body)
self.assertEqual(res.status_int, 404)
def test_change_password_with_non_string_password(self):
url = '/v3/servers/1/action'
body = {'change_password': {'admin_password': 1234}}
res = self._make_request(url, body)
self.assertEqual(res.status_int, 400)
def test_change_password_failed(self):
url = '/v3/servers/1/action'
body = {'change_password': {'admin_password': 'test'}}
self.stubs.Set(compute_api.API, 'set_admin_password',
fake_set_admin_password_failed)
res = self._make_request(url, body)
self.assertEqual(res.status_int, 409)
def test_change_password_without_admin_password(self):
url = '/v3/servers/1/action'
body = {'change_password': {}}
res = self._make_request(url, body)
self.assertEqual(res.status_int, 400)
def test_change_password_none(self):
url = '/v3/servers/1/action'
body = {'change_password': None}
res = self._make_request(url, body)
self.assertEqual(res.status_int, 400)
class AdminPasswordXMLTest(test.NoDBTestCase):
def setUp(self):
super(AdminPasswordXMLTest, self).setUp()
self.deserializer = admin_password.ChangePasswordDeserializer()
def test_change_password_deserializer(self):
request = '<change_password admin_password="1"></change_password>'
expected = {'body': {'change_password': {'admin_password': '1'}}}
res = self.deserializer.default(request)
self.assertEqual(res, expected)
def test_change_password_deserializer_without_admin_password(self):
request = '<change_password></change_password>'
expected = {'body': {'change_password': None}}
res = self.deserializer.default(request)
self.assertEqual(res, expected)
def test_change_pass_no_pass(self):
request = """<?xml version="1.0" encoding="UTF-8"?>
<change_password
xmlns="http://docs.openstack.org/compute/api/v1.1"/> """
request = self.deserializer.default(request)
expected = {
"change_password": None
}
self.assertEqual(request['body'], expected)
def test_change_pass_empty_pass(self):
request = """<?xml version="1.0" encoding="UTF-8"?>
<change_password
xmlns="http://docs.openstack.org/compute/api/v1.1"
admin_password=""/> """
request = self.deserializer.default(request)
expected = {
"change_password": {
"admin_password": "",
},
}
self.assertEqual(request['body'], expected)
|
sacharya/nova
|
nova/tests/api/openstack/compute/plugins/v3/test_admin_password.py
|
Python
|
apache-2.0
| 5,987
|
'''-------------------------------------------------------------------------
Copyright IBM Corp. 2015, 2015 All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
 limitations under the License.
-------------------------------------------------------------------------'''
'''
IMPORTANT: Make sure the variables AUTH_IP and KEYSTONE_IP point to the system
you are testing!!!
'''
'''------------------------------------------------------------------------'''
# Establishing Swift connection, user ID, etc
PROXY_PROTOCOL = 'HTTP'
AUTH_PROTOCOL = 'HTTP'
DEV_AUTH_IP = '9.26.19.179'
AUTH_IP = DEV_AUTH_IP
PROXY_PORT = '80'
AUTH_PORT = '5000'
ACCOUNT = 'service'
USER_NAME = 'swift'
PASSWORD = 'passw0rd'
|
hroumani/genericStorletStore
|
storletDeploy/sys_test_params.py
|
Python
|
apache-2.0
| 1,153
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from quantum.openstack.common import cfg
meta_plugin_opts = [
cfg.StrOpt('plugin_list', default='',
help=_("List of plugins to load")),
cfg.StrOpt('l3_plugin_list', default='',
help=_("List of L3 plugins to load")),
cfg.StrOpt('default_flavor', default='',
help=_("Default flavor to use")),
cfg.StrOpt('default_l3_flavor', default='',
help=_("Default L3 flavor to use")),
cfg.StrOpt('supported_extension_aliases', default='',
help=_("Supported extension aliases")),
cfg.StrOpt('extension_map', default='',
help=_("A list of extensions, per plugin, to load.")),
]
proxy_plugin_opts = [
cfg.StrOpt('admin_user',
help=_("Admin user")),
cfg.StrOpt('admin_password',
help=_("Admin password")),
cfg.StrOpt('admin_tenant_name',
help=_("Admin tenant name")),
cfg.StrOpt('auth_url',
help=_("Authentication URL")),
cfg.StrOpt('auth_strategy', default='keystone',
help=_("The type of authentication to use")),
cfg.StrOpt('auth_region',
help=_("Authentication region")),
]
cfg.CONF.register_opts(meta_plugin_opts, "META")
cfg.CONF.register_opts(proxy_plugin_opts, "PROXY")
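# With the groups registered above, other modules read these options as
# attributes of the global config object, e.g. (a sketch of typical
# oslo.config-style usage):
#
#   plugin_list = cfg.CONF.META.plugin_list
#   admin_user = cfg.CONF.PROXY.admin_user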
|
rossella/neutron
|
quantum/plugins/metaplugin/common/config.py
|
Python
|
apache-2.0
| 1,981
|
# coding: utf-8
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubevirt
from kubevirt.rest import ApiException
from kubevirt.models.k8s_io_apimachinery_pkg_apis_meta_v1_root_paths import K8sIoApimachineryPkgApisMetaV1RootPaths
class TestK8sIoApimachineryPkgApisMetaV1RootPaths(unittest.TestCase):
""" K8sIoApimachineryPkgApisMetaV1RootPaths unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testK8sIoApimachineryPkgApisMetaV1RootPaths(self):
"""
Test K8sIoApimachineryPkgApisMetaV1RootPaths
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubevirt.models.k8s_io_apimachinery_pkg_apis_meta_v1_root_paths.K8sIoApimachineryPkgApisMetaV1RootPaths()
pass
if __name__ == '__main__':
unittest.main()
|
kubevirt/client-python
|
test/test_k8s_io_apimachinery_pkg_apis_meta_v1_root_paths.py
|
Python
|
apache-2.0
| 1,105
|
#!/usr/bin/env python
# # -*- coding: utf-8 -*-
"""
title : __main__.py
description : entry point for core sira component
usage : python sira [OPTIONS]
-h Display this usage message
-d [input_directory] Specify the directory with the required
config and model files
-s Run simulation
-f Conduct model fitting. Must be done
after a complete run with `-s` flag
-l Conduct loss analysis. Must be done
after a complete run with `-s` flag
-v [LEVEL] Choose `verbose` mode, or choose logging
level DEBUG, INFO, WARNING, ERROR, CRITICAL
python_version : 3.7
"""
from __future__ import print_function
import sys
import numpy as np
np.seterr(divide='print', invalid='raise')
import time
import re
from colorama import init, Fore, Back, Style
init()
import os
import argparse
from sira.logger import configure_logger
import logging
import logging.config
from sira.configuration import Configuration
from sira.scenario import Scenario
from sira.modelling.hazard import HazardsContainer
from sira.model_ingest import ingest_model
from sira.simulation import calculate_response
from sira.modelling.system_topology import SystemTopology
from sira.infrastructure_response import (
write_system_response,
plot_mean_econ_loss,
pe_by_component_class
)
from sira.fit_model import fit_prob_exceed_model
from sira.loss_analysis import run_scenario_loss_analysis
def main():
# define arg parser
parser = argparse.ArgumentParser(
prog='sira', description="run sira", add_help=True)
# [Either] Supply config file and model file directly:
parser.add_argument("-c", "--config_file", type=str)
parser.add_argument("-m", "--model_file", type=str)
# [Or] Supply only the directory where the input files reside
parser.add_argument("-d", "--input_directory", type=str)
# Tell the code what tasks to do
parser.add_argument(
"-s", "--simulation", action='store_true', default=False)
parser.add_argument(
"-f", "--fit", action='store_true', default=False)
parser.add_argument(
"-l", "--loss_analysis", action='store_true', default=False)
parser.add_argument(
"-v", "--verbose", dest="loglevel", type=str,
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
default="INFO",
help="Choose option for logging level from: \n"+
"DEBUG, INFO, WARNING, ERROR, CRITICAL.")
args = parser.parse_args()
# error handling
if args.input_directory and (args.config_file or args.model_file):
parser.error("--input_directory and [--config_file and --model_file]"
" are mutually exclusive ...")
sys.exit(2)
# error handling
if not any([args.simulation, args.fit, args.loss_analysis]):
parser.error(
"\nAt least one of these three flags is required:\n"
" --simulation (-s) or --fit (-f) or --loss_analysis (-s).\n"
" The options for fit or loss_analysis requires the -s flag, "
" or a previous completed run with the -s flag.")
sys.exit(2)
proj_root_dir = args.input_directory
if not os.path.isdir(proj_root_dir):
print("Invalid path supplied:\n {}".format(proj_root_dir))
sys.exit(1)
proj_input_dir = os.path.join(proj_root_dir, "input")
config_file_name = None
model_file_name = None
for fname in os.listdir(proj_input_dir):
confmatch = re.search(r"(?i)^config.*\.json$", fname)
if confmatch is not None:
config_file_name = confmatch.string
modelmatch = re.search(r"(?i)^model.*\.json$", fname)
if modelmatch is not None:
model_file_name = modelmatch.string
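    # The case-insensitive patterns above accept e.g. "config.json" or
    # "Config_sysX.JSON", and "model.json" or "MODEL_plant.json", but not
    # "my_config.json" (names must start with "config"/"model"); if several
    # files match, the last one encountered wins.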
if config_file_name is None:
parser.error(
"Config file not found. "
"A valid config file name must begin with the term `config`, "
"and must be a JSON file.\n")
sys.exit(2)
if model_file_name is None:
parser.error(
"Model file not found. "
"A valid model file name must begin the term `model`, "
"and must be a JSON file.\n")
sys.exit(2)
args.config_file = os.path.join(proj_input_dir, config_file_name)
args.model_file = os.path.join(proj_input_dir, model_file_name)
args.output = os.path.join(args.input_directory, "output")
if not os.path.isfile(args.config_file):
parser.error(
"Unable to locate config file "+str(args.config_file)+" ...")
sys.exit(2)
if not os.path.isfile(args.model_file):
parser.error(
"Unable to locate model file "+str(args.model_file)+" ...")
sys.exit(2)
args.output = os.path.join(
os.path.dirname(os.path.dirname(args.config_file)), "output")
try:
if not os.path.exists(args.output):
os.makedirs(args.output)
except Exception:
parser.error(
"Unable to create output folder " + str(args.output) + " ...")
sys.exit(2)
# ---------------------------------------------------------------------
# Set up logging
# ---------------------------------------------------------------------
timestamp = time.strftime('%Y.%m.%d %H:%M:%S')
log_path = os.path.join(args.output, "log.txt")
configure_logger(log_path, args.loglevel)
rootLogger = logging.getLogger(__name__)
print("\n")
rootLogger.info(Fore.GREEN +
'Simulation initiated at: {}\n'.format(timestamp) +
Fore.RESET)
# ---------------------------------------------------------------------
# Configure simulation model.
# Read data and control parameters and construct objects.
# ---------------------------------------------------------------------
config = Configuration(args.config_file, args.model_file, args.output)
scenario = Scenario(config)
hazards = HazardsContainer(config)
infrastructure = ingest_model(config)
# ---------------------------------------------------------------------
# SIMULATION
# Get the results of running a simulation
# ---------------------------------------------------------------------
# response_list = [
# {}, # [0] hazard level vs component damage state index
# {}, # [1] hazard level vs infrastructure output
# {}, # [2] hazard level vs component response
# {}, # [3] hazard level vs component type response
# [], # [4] array of infrastructure output per sample
# [], # [5] array of infrastructure econ loss per sample
# {}, # [6] hazard level vs component class dmg level pct
# {}] # [7] hazard level vs component class expected damage index
if args.simulation:
response_list = calculate_response(hazards, scenario, infrastructure)
# ---------------------------------------------------------------------
# Post simulation processing.
# After the simulation has run the results are aggregated, saved
# and the system fragility is calculated.
# ---------------------------------------------------------------------
write_system_response(response_list, infrastructure, scenario, hazards)
economic_loss_array = response_list[5]
plot_mean_econ_loss(scenario, economic_loss_array, hazards)
if config.HAZARD_INPUT_METHOD == "hazard_array":
pe_by_component_class(
response_list, infrastructure, scenario, hazards)
# ---------------------------------------------------------------------
# Visualizations
# Construct visualization for system topology
# ---------------------------------------------------------------------
sys_topology_view = SystemTopology(infrastructure, scenario)
sys_topology_view.draw_sys_topology(viewcontext="as-built")
rootLogger.info('Simulation completed...')
# -------------------------------------------------------------------------
# FIT MODEL ANALYSIS
# -------------------------------------------------------------------------
if args.fit:
args.pe_sys = None
existing_models = [
"potablewatertreatmentplant", "pwtp",
"wastewatertreatmentplant", "wwtp",
"watertreatmentplant", "wtp",
"powerstation",
"substation",
"potablewaterpumpstation"
]
if infrastructure.system_class.lower() == 'powerstation':
args.pe_sys = os.path.join(
config.RAW_OUTPUT_DIR, 'pe_sys_econloss.npy')
elif infrastructure.system_class.lower() == 'substation':
args.pe_sys = os.path.join(
config.RAW_OUTPUT_DIR, 'pe_sys_cpfailrate.npy')
elif infrastructure.system_class.lower() in existing_models:
args.pe_sys = os.path.join(
config.RAW_OUTPUT_DIR, 'pe_sys_econloss.npy')
if args.pe_sys is not None:
rootLogger.info('Start: Attempting to fit MODEL to simulation '
'data...')
hazard_scenarios = hazards.hazard_scenario_list
sys_limit_states = infrastructure.get_system_damage_states()
pe_sys = np.load(args.pe_sys)
# Calculate & Plot Fitted Models
fit_prob_exceed_model(
hazard_scenarios,
pe_sys,
sys_limit_states,
config.OUTPUT_PATH,
config)
rootLogger.info('End: Model fitting complete.')
else:
rootLogger.error("Input pe_sys file not found: " +
str(args.output))
# -------------------------------------------------------------------------
# SCENARIO LOSS ANALYSIS
# -------------------------------------------------------------------------
if args.loss_analysis:
args.ct = os.path.join(config.OUTPUT_PATH, 'comptype_response.csv')
args.cp = os.path.join(config.OUTPUT_PATH, 'component_response.csv')
if args.ct is not None and args.cp is not None:
run_scenario_loss_analysis(
scenario, hazards, infrastructure, config, args.ct, args.cp)
else:
if args.ct is None:
rootLogger.error("Input files not found: " + str(args.ct))
if args.cp is None:
rootLogger.error("Input files not found: " + str(args.cp))
rootLogger.info('RUN COMPLETE.\n')
rootLogger.info("Config file used : " + args.config_file)
rootLogger.info("Model file used : " + args.model_file)
rootLogger.info("Outputs saved in : " +
Fore.YELLOW + args.output + Fore.RESET + '\n')
if __name__ == "__main__":
main()
|
GeoscienceAustralia/sifra
|
__main__.py
|
Python
|
apache-2.0
| 11,130
|
import unittest
import mock
from mopidy_playbackdefaults import PlaybackDefaultsFrontend
class PlaybackDefaultsFrontendTest(unittest.TestCase):
def test_no_settings(self):
config = {'playbackdefaults': {'default_random': '', 'default_repeat': '', 'default_consume': '', 'default_single': ''}}
core = mock.Mock()
self.assertEqual(core.tracklist.set_random.call_count, 0)
self.assertEqual(core.tracklist.set_repeat.call_count, 0)
self.assertEqual(core.tracklist.set_consume.call_count, 0)
self.assertEqual(core.tracklist.set_single.call_count, 0)
PlaybackDefaultsFrontend(config, core)
self.assertEqual(core.tracklist.set_random.call_count, 0)
self.assertEqual(core.tracklist.set_repeat.call_count, 0)
self.assertEqual(core.tracklist.set_consume.call_count, 0)
self.assertEqual(core.tracklist.set_single.call_count, 0)
def test_random(self):
config = {'playbackdefaults': {'default_random': '', 'default_repeat': '', 'default_consume': '', 'default_single': ''}}
core = mock.Mock()
self.assertEqual(core.tracklist.set_random.call_count, 0)
self.assertEqual(core.tracklist.set_repeat.call_count, 0)
self.assertEqual(core.tracklist.set_consume.call_count, 0)
self.assertEqual(core.tracklist.set_single.call_count, 0)
config['playbackdefaults']['default_random'] = True
PlaybackDefaultsFrontend(config, core)
core.tracklist.set_random.assert_called_once_with(True)
config['playbackdefaults']['default_random'] = False
PlaybackDefaultsFrontend(config, core)
self.assertEqual(core.tracklist.set_random.call_count, 2)
core.tracklist.set_random.assert_called_with(False)
self.assertEqual(core.tracklist.set_repeat.call_count, 0)
self.assertEqual(core.tracklist.set_consume.call_count, 0)
self.assertEqual(core.tracklist.set_single.call_count, 0)
def test_repeat(self):
config = {'playbackdefaults': {'default_random': '', 'default_repeat': '', 'default_consume': '', 'default_single': ''}}
core = mock.Mock()
self.assertEqual(core.tracklist.set_random.call_count, 0)
self.assertEqual(core.tracklist.set_repeat.call_count, 0)
self.assertEqual(core.tracklist.set_consume.call_count, 0)
self.assertEqual(core.tracklist.set_single.call_count, 0)
config['playbackdefaults']['default_repeat'] = True
PlaybackDefaultsFrontend(config, core)
core.tracklist.set_repeat.assert_called_once_with(True)
config['playbackdefaults']['default_repeat'] = False
PlaybackDefaultsFrontend(config, core)
self.assertEqual(core.tracklist.set_repeat.call_count, 2)
core.tracklist.set_repeat.assert_called_with(False)
self.assertEqual(core.tracklist.set_random.call_count, 0)
self.assertEqual(core.tracklist.set_consume.call_count, 0)
self.assertEqual(core.tracklist.set_single.call_count, 0)
def test_consume(self):
config = {'playbackdefaults': {'default_random': '', 'default_repeat': '', 'default_consume': '', 'default_single': ''}}
core = mock.Mock()
self.assertEqual(core.tracklist.set_random.call_count, 0)
self.assertEqual(core.tracklist.set_repeat.call_count, 0)
self.assertEqual(core.tracklist.set_consume.call_count, 0)
self.assertEqual(core.tracklist.set_single.call_count, 0)
config['playbackdefaults']['default_consume'] = True
PlaybackDefaultsFrontend(config, core)
core.tracklist.set_consume.assert_called_once_with(True)
config['playbackdefaults']['default_consume'] = False
PlaybackDefaultsFrontend(config, core)
self.assertEqual(core.tracklist.set_consume.call_count, 2)
core.tracklist.set_consume.assert_called_with(False)
self.assertEqual(core.tracklist.set_random.call_count, 0)
self.assertEqual(core.tracklist.set_repeat.call_count, 0)
self.assertEqual(core.tracklist.set_single.call_count, 0)
def test_single(self):
config = {'playbackdefaults': {'default_random': '', 'default_repeat': '', 'default_consume': '', 'default_single': ''}}
core = mock.Mock()
self.assertEqual(core.tracklist.set_random.call_count, 0)
self.assertEqual(core.tracklist.set_repeat.call_count, 0)
self.assertEqual(core.tracklist.set_consume.call_count, 0)
self.assertEqual(core.tracklist.set_single.call_count, 0)
config['playbackdefaults']['default_single'] = True
PlaybackDefaultsFrontend(config, core)
core.tracklist.set_single.assert_called_once_with(True)
config['playbackdefaults']['default_single'] = False
PlaybackDefaultsFrontend(config, core)
self.assertEqual(core.tracklist.set_single.call_count, 2)
core.tracklist.set_single.assert_called_with(False)
self.assertEqual(core.tracklist.set_random.call_count, 0)
self.assertEqual(core.tracklist.set_repeat.call_count, 0)
self.assertEqual(core.tracklist.set_consume.call_count, 0)
|
DavisNT/mopidy-playbackdefaults
|
tests/test_frontend.py
|
Python
|
apache-2.0
| 5,145
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
class TestFakeQuantizeOp(OpTest):
def setUp(self):
self.op_type = "fake_quantize_abs_max"
self.attrs = {'bit_length': 8}
self.inputs = {'X': np.random.random((124, 240)).astype("float32"), }
scale = np.max(np.abs(self.inputs['X'])).astype("float32")
self.outputs = {
'Out': np.round(self.inputs['X'] / scale * (
(1 << (self.attrs['bit_length'] - 1)) - 1)),
'OutScale': np.array(scale).astype("float32"),
}
def test_check_output(self):
self.check_output()
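# Worked example of the abs-max scheme checked above: with bit_length=8 the
# levels span [-127, 127], so an element x=0.5 with scale=max(|X|)=1.0 maps to
# round(0.5 / 1.0 * 127) = round(63.5) = 64 (numpy rounds half to even).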
class TestFakeChannelWiseQuantizeOp(OpTest):
def setUp(self):
self.op_type = "fake_channel_wise_quantize_abs_max"
self.attrs = {'bit_length': 8}
self.inputs = {
'X': np.random.random((4, 3, 64, 64)).astype("float32"),
}
scales = []
for i in range(self.inputs['X'].shape[0]):
scales.append(np.max(np.abs(self.inputs['X'][i])).astype("float32"))
outputs = self.inputs['X'].copy()
for i, scale in enumerate(scales):
outputs[i] = np.round(outputs[i] / scale * (
(1 << (self.attrs['bit_length'] - 1)) - 1))
self.outputs = {
'Out': outputs,
'OutScale': np.array(scales).astype("float32"),
}
def test_check_output(self):
self.check_output()
class TestFakeQuantizeRangeAbsMaxOp(OpTest):
def setUp(self):
self.op_type = "fake_quantize_range_abs_max"
self.attrs = {
'bit_length': int(5),
'window_size': int(1),
'is_test': False
}
x = (np.random.random((8, 16, 7, 7)) - 0.5) * 10
x = x.astype("float32")
self.inputs = {
'X': x,
'Iter': np.zeros(1).astype("int64"),
'InScale': np.zeros(1).astype("float32")
}
scale = np.max(np.abs(self.inputs['X'])).astype("float32")
out_scales = np.zeros(self.attrs['window_size']).astype("float32")
out_scales[0] = scale
self.outputs = {
'Out': np.round(self.inputs['X'] / scale * (
(1 << (self.attrs['bit_length'] - 1)) - 1)),
'OutScale': scale,
'OutScales': out_scales,
}
def test_check_output(self):
self.check_output()
class TestFakeQuantizeMovingOp(OpTest):
def setUp(self):
self.op_type = "fake_quantize_moving_average_abs_max"
self.attrs = {
'bit_length': int(5),
'moving_rate': float(0.9),
'is_test': False
}
accum = np.zeros(1).astype("float32")
accum[0] = 1
state = np.zeros(1).astype("float32")
state[0] = 1
scale = np.zeros(1).astype("float32")
scale[0] = 0.001
self.inputs = {
'X': np.random.random((8, 16, 7, 7)).astype("float32"),
'InScale': scale,
'InAccum': accum,
'InState': state,
}
out_accum = np.zeros(1).astype("float32")
out_state = np.zeros(1).astype("float32")
out_scale = np.zeros(1).astype("float32")
out_accum[0] = self.attrs['moving_rate'] * accum[0] + np.max(
np.abs(self.inputs['X'])).astype("float32")
out_state[0] = self.attrs['moving_rate'] * state[0] + 1
out_scale = out_accum / out_state
self.outputs = {
'Out': np.round(self.inputs['X'] / out_scale * (
(1 << (self.attrs['bit_length'] - 1)) - 1)),
'OutAccum': out_accum,
'OutState': out_state,
'OutScale': out_scale,
}
def test_check_output(self):
self.check_output()
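# The scale checked above is an exponential moving average of the per-batch
# abs-max: accum' = rate*accum + max|X|, state' = rate*state + 1 and
# scale' = accum'/state'. With rate=0.9, accum=state=1 and max|X| close to 1.0,
# this yields scale' close to (0.9 + 1.0) / 1.9 = 1.0.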
class TestFakeQuantizeRangeAbsMaxOp2(OpTest):
def setUp(self):
self.op_type = "fake_quantize_range_abs_max"
self.attrs = {
'bit_length': int(8),
'window_size': int(1),
'is_test': True
}
x = (np.random.random((8, 16, 7, 7)) - 0.5) * 10
x = x.astype("float32")
scale = np.max(np.abs(x)).astype("float32") - 1.0
out_scales = np.zeros(self.attrs['window_size']).astype("float32")
out_scales[0] = scale
self.inputs = {
'X': x,
'Iter': np.zeros(1).astype("int64"),
'InScale': scale.astype("float32")
}
xs = np.clip(x, -scale, scale)
qs = np.round(xs / scale * ((1 << (self.attrs['bit_length'] - 1)) - 1))
self.outputs = {
'Out': qs,
'OutScale': scale.astype("float32"),
'OutScales': out_scales,
}
def test_check_output(self):
self.check_output(no_check_set=set(['OutScale', 'OutScales']))
if __name__ == "__main__":
unittest.main()
|
baidu/Paddle
|
python/paddle/fluid/tests/unittests/test_fake_quantize_op.py
|
Python
|
apache-2.0
| 5,508
|
#!/usr/bin/env python
# coding=utf-8
#
# Copyright (c) 2013-2015 First Flamingo Enterprise B.V.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# TAToolsHandler.py
# firstflamingo/treinenaapje
#
# Created by Berend Schotanus on 06-Feb-13.
#
from google.appengine.ext import db
from google.appengine.api import memcache
import webapp2
import math, logging
from datetime import datetime, timedelta
from ffe import markup
from ffe.ffe_time import mark_cet, utc_from_cet, minutes_from_string, string_from_minutes
from TASeries import TASeries
from TAMission import TAMission, round_mission_offset
from TAStop import TAStop, StopStatuses
from TAScheduledPoint import Direction
MENU_LIST = (('Home', '/console'),
('Series', '/console/series?page=1'),
('Missies zonder serie', '/console/missions?kind=orphans&page=1'),
('Stations', '/console/stations?page=1'),
('Rapportage', '/console/report'))
FIRST_HALF = 0
SECND_HALF = 1
ORD_LABEL = ['Eerste', 'Tweede']
# URL Handlers
class RepatternHandler(webapp2.RequestHandler):
def get(self):
        series = TASeries.get(self.request.get('series'))
self.results = [{}, {}]
self.analyzeStops()
self.doc = ToolsDocument('Patroontijden voor serie %s' % series.name)
form = markup.form('/tools/repattern', 'post')
form.add(markup.input('hidden', 'series', self.request.get('series')))
form.add(markup.heading(2, 'Heenrichting'))
form.add(self.patternTimeTable(series, Direction.up))
form.add(markup.heading(2, 'Terugrichting'))
form.add(self.patternTimeTable(series, Direction.down))
form.add(markup.input('submit', value='pas aan'))
self.doc.main.add(form)
self.response.out.write(self.doc.write())
def post(self):
        series = TASeries.get(self.request.get('series'))
self.doc = ToolsDocument('Aangepaste tijden voor serie %s' % series.name)
processedObjects = []
processedPoints = {}
table = self.doc.add_table('changestable', ['Station', 'A', 'V', 'A', 'V', ''])
for index in range(len(series.points)):
point = series.points[index]
oldTimes = point.scheduled_times
upArrival = self.request.get('arr_%d_%d' % (Direction.up, index))
upDeparture = self.request.get('dep_%d_%d' % (Direction.up, index))
downArrival = self.request.get('arr_%d_%d' % (Direction.down, index))
downDeparture = self.request.get('dep_%d_%d' % (Direction.down, index))
newTimes = (minutes_from_string(upArrival),
minutes_from_string(upDeparture),
minutes_from_string(downArrival),
minutes_from_string(downDeparture))
row = table.add_row()
row.add_to_cell(0, point.stationName)
row.add_to_cell(1, upArrival)
row.add_to_cell(2, upDeparture)
row.add_to_cell(3, downArrival)
row.add_to_cell(4, downDeparture)
if oldTimes != newTimes:
point.scheduled_times = newTimes
processedPoints[point.id] = point
processedObjects.append(point)
row.add_to_cell(5, 'aangepast')
series.cache_set()
memcache.set_multi(processedPoints, namespace='TAScheduledPoint')
db.put(processedObjects)
self.response.out.write(self.doc.write())
def patternTimeTable(self, series, direction):
table = markup.HTMLTable('timetable_%d' % direction, ['Station', 'A', 'V', 'meting', '#', 'delta', 'A', 'V'])
indexes = range(len(series.points))
if direction == Direction.down: indexes.reverse()
for index in indexes:
point = series.points[index]
station = point.station
planArrival, planDeparture = point.times_in_direction(direction)
row = table.add_row()
row.add_to_cell(0, station.name)
row.add_to_cell(1, string_from_minutes(planArrival))
row.add_to_cell(2, string_from_minutes(planDeparture))
stationDict = self.results[direction].get(station.id, None)
if stationDict == None:
departure, count = ('-', '-')
delta = 0
else:
departure, count = mostCommonItem(stationDict['v'])
delta = departure - planDeparture
departure = string_from_minutes(departure)
row.add_to_cell(3, departure)
row.add_to_cell(4, count)
row.add_to_cell(5, delta)
row.add_to_cell(6, markup.input('text', 'arr_%d_%d' % (direction, index), string_from_minutes(planArrival + delta), size=4))
row.add_to_cell(7, markup.input('text', 'dep_%d_%d' % (direction, index), string_from_minutes(planDeparture + delta), size=4))
return table
def analyzeStops(self):
series_id = self.request.get('series')
query = db.Query(TAArchivedMission).filter('series_id =', series_id)
for mission in query.fetch(50):
if mission.up: direction = Direction.up
else: direction = Direction.down
for stop in mission.stopsList:
stopKey = stop.station_id
if stop.status == StopStatuses.planned:
departureHist = self.histogram(direction, stopKey, 'v')
difference = utc_from_cet(stop.departure) - correctedOffsetUTC(mission)
self.addDataToHistogram(departureHist, difference.seconds // 60)
delayHist = self.histogram(direction, stopKey, 'dv')
self.addDataToHistogram(delayHist, int(stop.delay_dep))
platformHist = self.histogram(direction, stopKey, 'p')
self.addDataToHistogram(platformHist, stop.platform)
def stopDictionary(self, direction, stopKey):
dictionary = self.results[direction].get(stopKey, None)
if dictionary == None:
dictionary = dict()
self.results[direction][stopKey] = dictionary
return dictionary
def histogram(self, direction, stopKey, dataKey):
stopDictionary = self.stopDictionary(direction, stopKey)
dictionary = stopDictionary.get(dataKey, None)
if dictionary == None:
dictionary = dict()
stopDictionary[dataKey] = dictionary
return dictionary
def addDataToHistogram(self, histogram, key):
histogram[key] = histogram.get(key, 0) + 1
class ReoffsetHandler(webapp2.RequestHandler):
tableTitles = ('tijd', 'aantal', 'perc.')
tableFormat = (':%02d', '%d', '%.1f%%')
def get(self):
        series = TASeries.get(self.request.get('series'))
self.doc = ToolsDocument('Herschik offsets serie %s' % series.name)
self.writeReport(series)
self.response.out.write(self.doc.write())
def post(self):
series = TASeries.get(self.request.get('series'))
self.deltaOffset = [int(self.request.get('offset_up')), int(self.request.get('offset_down'))]
self.round = [int(self.request.get('round_up')), int(self.request.get('round_down'))]
self.processedObjects = []
self.processedMissions = {}
self.processedPoints = {}
self.doc = ToolsDocument('Aangepaste offsets serie %s' % series.name)
self.doc.main.add(markup.heading(2, 'Aangepaste patroontijden'))
self.processPoints(series)
self.doc.main.add(markup.heading(2, 'Aangepaste offsettijden'))
table = self.doc.add_table('adapted_missions', ['Missie', 'Offset'])
self.processMissions(series.all_mission_ids(Direction.up), Direction.up, table)
self.processMissions(series.all_mission_ids(Direction.down), Direction.down, table)
series.cache_set()
self.saveChanges()
# self.writeReport(series)
self.response.out.write(self.doc.write())
def writeReport(self, series):
self.departure = [series.first_point.upDeparture, series.last_point.downDeparture]
self.startStation = [series.first_point.stationName, series.last_point.stationName]
self.foundOffset = [None, None]
self.doc.main.add(markup.heading(2, 'Heenrichting'))
self.analyzeOffset(series.all_mission_ids(Direction.up))
self.reportOffset(FIRST_HALF, Direction.up)
self.reportOffset(SECND_HALF, Direction.up)
self.doc.main.add(markup.heading(2, 'Terugrichting'))
self.analyzeOffset(series.all_mission_ids(Direction.down))
self.reportOffset(FIRST_HALF, Direction.down)
self.reportOffset(SECND_HALF, Direction.down)
if self.foundOffset[Direction.up] or self.foundOffset[Direction.down]:
self.doc.main.add(markup.heading(2, 'Aanpassen'))
self.proposeChanges()
def analyzeOffset(self, missionIDs):
self.offset = [None, None]
self.data=[[], []]
firstHalfHist = dict()
firstHalfItems = 0
secondHalfHist = dict()
secondHalfItems = 0
for missionID in missionIDs:
mission = TAMission.get(missionID)
num = mission.number
if bool(num % 2): num -= 1
key = mission.offset.minute
if bool(num % 4):
firstHalfHist[key] = firstHalfHist.get(key, 0) + 1
firstHalfItems += 1
else:
secondHalfHist[key] = secondHalfHist.get(key, 0) + 1
secondHalfItems += 1
self.generateData(FIRST_HALF, firstHalfHist, firstHalfItems)
self.generateData(SECND_HALF, secondHalfHist, secondHalfItems)
def generateData(self, halfHour, histogram, count):
maxFrequency = 0
for key, value in histogram.iteritems():
self.data[halfHour].append((int(key), value, 100.0 * value/count))
if value > maxFrequency:
maxFrequency = value
self.offset[halfHour] = int(key)
def reportOffset(self, halfHour, direction):
if self.offset[halfHour] != None:
self.doc.main.add(markup.heading(3, '%s halfuur :%02d' % (ORD_LABEL[halfHour], self.offset[halfHour])))
table = self.doc.add_table('table_%d' % (2 * direction + halfHour), self.tableTitles, self.tableFormat)
table.fill_data(self.data[halfHour])
departure = self.offset[halfHour] + self.departure[direction]
if departure >= 60:
departure -= 60
self.offset[halfHour] -= 60
self.doc.add_paragraph('Vertrek uit %s: %d + %d = :%02d' %
(self.startStation[direction], self.offset[halfHour], self.departure[direction], departure))
if self.foundOffset[direction] == None or self.offset[halfHour] < self.foundOffset[direction]:
self.foundOffset[direction] = self.offset[halfHour]
def proposeChanges(self):
table = markup.HTMLTable('submit_table', ['', 'Offset', 'Afronden'])
form = markup.form('/tools/reoffset', 'post')
form.add(markup.input('hidden', 'series', self.request.get('series')))
form.add(table)
self.doc.main.add(form)
row = table.add_row()
row.add_to_cell(0,'heen')
row.add_to_cell(1, markup.input('text', 'offset_up', str(self.foundOffset[Direction.up]), size=6))
row.add_to_cell(2, markup.input('text', 'round_up', '3', size=6))
row = table.add_row()
row.add_to_cell(0,'terug')
row.add_to_cell(1, markup.input('text', 'offset_down', str(self.foundOffset[Direction.down]), size=6))
row.add_to_cell(2, markup.input('text', 'round_down', '3', size=6))
row = table.add_row()
row.add_to_cell(0, markup.input('submit', value='pas aan'))
def processPoints(self,series):
table = self.doc.add_table('adapted_schedule', ['Station', 'Heen', 'Terug'])
for point in series.points:
# Change arrival and departure times:
oldUp, oldDown = point.times_strings
point.upArrival += self.deltaOffset[Direction.up]
point.upDeparture += self.deltaOffset[Direction.up]
point.downArrival += self.deltaOffset[Direction.down]
point.downDeparture += self.deltaOffset[Direction.down]
newUp, newDown = point.times_strings
# Add point to queue for saveChanges
self.processedPoints[point.id] = point
self.processedObjects.append(point)
# Report the changes:
row = table.add_row()
row.add_to_cell(0, point.stationName)
row.add_to_cell(1, '[%s] %s [%s]' % (oldUp, change_string(self.deltaOffset[Direction.up]), newUp))
row.add_to_cell(2, '[%s] %s [%s]' % (oldDown, change_string(self.deltaOffset[Direction.down]), newDown))
def processMissions(self, missionIDs, direction, table):
if self.deltaOffset[direction]:
for missionID in missionIDs:
# Change mission offset time:
mission = TAMission.get(missionID)
oldOffset = datetime(2002, 2, 2).replace(hour=mission.offset.hour, minute=mission.offset.minute)
newOffset = round_mission_offset(oldOffset - timedelta(minutes=self.deltaOffset[direction]), self.round[direction])
mission.offset = newOffset.time()
# Add mission to queue for saveChanges
self.processedMissions[missionID] = mission
self.processedObjects.append(mission)
# Report the changes:
row = table.add_row()
row.add_to_cell(0, missionID)
row.add_to_cell(1, '%s %s %s' % (oldOffset.strftime('%H:%M'),
change_string(-self.deltaOffset[direction]),
newOffset.strftime('%H:%M')))
def saveChanges(self):
memcache.set_multi(self.processedPoints, namespace='TAScheduledPoint')
memcache.set_multi(self.processedMissions, namespace='TAMission')
db.put(self.processedObjects)
# HTML Document
class ToolsDocument(markup.HTMLDocument):
def __init__(self, title, language='en'):
markup.HTMLDocument.__init__(self, title, language)
#Stylesheet
style_element = markup.link('stylesheet', '/web/style.css')
style_element.set_attribute('type', 'css')
style_element.set_attribute('media', 'screen')
self.head.add(style_element)
#Header
self.header = markup.XMLElement('header')
self.header.add(markup.user_id())
self.header.add(markup.heading(1, title))
self.body.add(self.header)
#Paper with two columns: sidebar and main
paper = markup.div('paper')
self.main = markup.div('main_content')
paper.add(self.main)
self.sidebar = markup.element_with_id('aside', 'sidebar')
self.sidebar.add(markup.main_menu(MENU_LIST))
paper.add(self.sidebar)
paper.add(markup.div('pushbottom'))
self.body.add(paper)
#Footer
self.footer = markup.XMLElement('footer')
self.footer.add(markup.paragraph('First Flamingo Enterprise B.V.'))
self.body.add(self.footer)
def add_paragraph(self, paragraphText):
self.main.add(markup.paragraph(paragraphText))
def add_reference(self, href, content):
paragraph = markup.paragraph('')
paragraph.add(markup.anchor(href, content))
self.main.add(paragraph)
def add_table(self, name, columnTitles, format=None):
table = markup.HTMLTable(name, columnTitles)
if format != None: table.format = format
self.main.add(table)
return table
def add_page_navigator(self, currentPage, lastPage, urlFormat):
self.main.add(markup.page_navigator(currentPage, lastPage, urlFormat))
# Helper functions
def change_string(number):
if number < 0: return '- %d =' % -number
    else: return '+ %d =' % number
def mostCommonItem(histogram):
maxValue = 0
foundKey = None
for key, value in histogram.iteritems():
if value > maxValue:
foundKey = key
maxValue = value
return (foundKey, maxValue)
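# Example: mostCommonItem({10: 3, 12: 5, 15: 1}) returns (12, 5), i.e. the
# histogram key with the highest count together with that count.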
def correctedOffsetUTC(archivedMission):
''' Replaces the offset time as stored in the TAArchivedMission with that from the corresponding TAMission,
while retaining the date.
'''
originalMission = TAMission.get('%s.%d' % (archivedMission.country, archivedMission.baseNumber))
offsetCET = mark_cet(datetime.combine(archivedMission.offset_CET.date(), originalMission.offset))
return utc_from_cet(offsetCET)
# WSGI Application
app = webapp2.WSGIApplication([('/tools/repattern.*', RepatternHandler),
('/tools/reoffset.*', ReoffsetHandler)
], debug=True)
|
firstflamingo/treinenaapje
|
app/TAToolsHandler.py
|
Python
|
apache-2.0
| 17,879
|
#!/usr/bin/env python
from threaded_ssh import ThreadedClients
from ServerConfig import Aim
from ServerConfig import TellStore
from ServerConfig import General
from ServerConfig import Storage
from ServerConfig import Kudu
def hostToIp(host):
return General.infinibandIp[host]
def semicolonReduce(x, y):
return x + ';' + y
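# Illustrative use of the helpers above (hypothetical hosts, not part of the original script):
#   hostToIp('node01') looks up the Infiniband IP configured in General.infinibandIp, and
#   reduce(semicolonReduce, ['10.0.0.1', '10.0.0.2', '10.0.0.3']) -> '10.0.0.1;10.0.0.2;10.0.0.3'
# i.e. semicolonReduce can be folded over a host list to build a semicolon-separated string.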
def startAimServers(observers = []):
Aim.rsyncBuild()
numChunks = (len(Storage.servers) + len(Storage.servers1)) * Aim.numRTAClients * 16
chunkSize = ((TellStore.scanMemory // numChunks) // 8) * 8
serverExec = ""
if Storage.storage == Kudu:
serverExec = "aim_kudu -P {0} -s {1}".format((len(Storage.servers) + len(Storage.servers1)) * 2, Storage.master)
elif Storage.storage == TellStore:
serverExec = 'aim_server -M {0} -m {1} -c "{2}" -s "{3}" --processing-threads {4}'.format(numChunks, chunkSize, TellStore.getCommitManagerAddress(), TellStore.getServerList(), Aim.serverthreads)
cmd = '{0}/watch/aim-benchmark/{3} -f {1} -b {2}'.format(Aim.builddir, Aim.schemaFile, Aim.batchSize, serverExec)
client0 = ThreadedClients(Aim.sepservers0 + Aim.rtaservers0, "numactl -m 0 -N 0 {0}".format(cmd), root=True)
client1 = ThreadedClients(Aim.sepservers1 + Aim.rtaservers1, "numactl -m 1 -N 1 {0} -p 8715 -u 8716".format(cmd), root=True)
client0.start()
client1.start()
return [client0, client1]
if __name__ == '__main__':
clients = startAimServers()
for c in clients:
c.join()
|
tellproject/helper_scripts
|
aim_server.py
|
Python
|
apache-2.0
| 1,508
|
#!/usr/bin/env python
# Copyright 2016 Sam Yaple
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copied and licensed from https://github.com/SamYaple/osdk
import argparse
import os
import sys
from ekko.manifest import structure as manifest_structure
from six.moves import range
from stevedore import driver
def parse_args():
parser = argparse.ArgumentParser(description='Backup Block Device')
parser.add_argument('--backupsize', required=True, type=int,
help='Size of backup for manifest gen (size in GB)')
parser.add_argument('--manifest', required=True,
help='manifest file')
parser.add_argument('--cbt', required=False,
help='change block tracking info')
parser.add_argument('--driver', required=False, default='sqlite',
choices=['osdk', 'sqlite'], help='manifest driver')
return parser.parse_args()
def read_segments(segments, metadata):
for segment in segments:
yield manifest_structure.Segment(
metadata.backupset_id,
metadata.incremental,
segment,
0,
0,
os.urandom(20)
)
def check_manifest(manifest_file):
return os.path.isfile(manifest_file)
def main():
args = parse_args()
if check_manifest(args.manifest):
print('manifest exists; exiting')
return
manifest = driver.DriverManager(
namespace='ekko.manifest.drivers',
name=args.driver,
invoke_on_load=True,
invoke_args=[args.manifest]
).driver
size_of_disk = args.backupsize * 1024**3 # Convert GB to B
incremental = 0
metadata = manifest_structure.Metadata(incremental, size_of_disk)
manifest.initialize()
manifest.put_metadata(metadata)
num_of_segments = int(size_of_disk / metadata.segment_size)
segments = read_segments(range(0, num_of_segments - 1), metadata)
manifest.put_segments(segments, metadata)
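# Worked example of the sizing arithmetic above (illustrative only; the real segment_size comes from
# manifest_structure.Metadata and may differ from the 4 MiB assumed here):
#   --backupsize 1  ->  size_of_disk = 1 * 1024**3 = 1073741824 bytes
#   with segment_size = 4 * 1024**2 = 4194304, num_of_segments = 1073741824 // 4194304 = 256
# read_segments() then yields one placeholder Segment per index in the given range, each carrying
# 20 random bytes from os.urandom.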
if __name__ == '__main__':
sys.exit(main())
|
brk3/ekko
|
tools/generate_manifest.py
|
Python
|
apache-2.0
| 2,544
|
from __future__ import absolute_import
from __future__ import unicode_literals
import glob
import imp
import inspect
import os
import sys
from mb.config.config import get_default_config_file
from mb.lib import logger
from mb.lib import process
_log = logger.get_logger('[Ioc]')
# plugin types
from mb import build_context # BuildContext # NOQA
from mb import command # Command # NOQA
from mb import template_engine # TemplateEngine # NOQA
from mb import version_scheme #VersionScheme # NOQA
from mb.config.config import PluginConfig # NOQA
def rchop(thestring, ending):
if thestring.endswith(ending):
return thestring[:-len(ending)]
return thestring
def _is_plugin_type(object_attr, plugin_type):
try:
if object_attr == plugin_type:
return False
return issubclass(object_attr, plugin_type)
except:
return False
_plugin_modules = [build_context, command, template_engine, version_scheme]
_plugin_types = [build_context.BuildContext, command.Command, template_engine.TemplateEngine, version_scheme.VersionScheme]
_loaded_plugin_definitions = {}
_plugin_instances = {}
_config = get_default_config_file()
if os.path.isdir(_config.plugin_dir):
os.chdir(_config.plugin_dir)
for file in glob.glob("*.py"):
plugin_module_name_template = "silverbp_mb_plugin_" + os.path.splitext(file)[0] + "_%d"
for plugin_name_suffix in range(len(sys.modules)):
plugin_module_name = plugin_module_name_template % plugin_name_suffix
if plugin_module_name not in sys.modules:
break
with open(file, "r") as plugin_file:
_plugin_modules.append(imp.load_module(plugin_module_name, plugin_file, file, ("py", "r", imp.PY_SOURCE)))
for module in _plugin_modules:
for module_attr in (getattr(module, name) for name in dir(module)):
for plugin_type in _plugin_types:
if not _is_plugin_type(module_attr, plugin_type):
continue
_loaded_plugin_definitions[module_attr.__name__] = module_attr
_defined_commands = _config.commands
_defined_commands['_prerun'] = PluginConfig('MBPreRunCommand', {}, _config)
command_plugins = [k for (k, v) in _loaded_plugin_definitions.items() if _is_plugin_type(v, command.Command)]
for (k, v) in _config.commands.items():
if v.name not in command_plugins:
_log.warn('The following Command: {0} was not found and will not be available'.format(k))
del _defined_commands[k]
_log.debug('The following commands will be available: {0}'.format([k for (k, v) in _defined_commands.items() if not k.startswith('_')]))
def _load_plugin(plugin):
if plugin.name in _plugin_instances.keys():
return _plugin_instances[plugin.name]
plugin_definition = _loaded_plugin_definitions[plugin.name]
arguments = []
# if the plugin doesn't have a constructor, there's nothing to inject
if '__init__' in getattr(plugin_definition, '__dict__', None).keys():
for arg in inspect.getargspec(plugin_definition.__init__)[0][1:]:
arguments.append(load_dependency(arg))
instance = plugin_definition(*arguments)
available_properties = [x for x, y in inspect.getmembers(instance.__class__, lambda x: isinstance(x, property))]
for (key, value) in plugin.config.items():
if key in available_properties:
try:
setattr(instance, key, value)
except Exception as err:
_log.warn('There was a problem setting the plugin config: \'{0}\' on \'{1}\' with \'{2}\'.'.format(plugin.name, key, value))
                _log.debug('Exception occurred while trying to set a plugin config value: {0}'.format(err))
else:
_log.warn('The following plugin config: {0}, is not an option to set on {1}'.format(key, plugin.name))
_plugin_instances[plugin.name] = instance
return instance
def load_dependency(name):
if name == 'config':
return _config
if name == 'process':
return process
return _load_plugin(getattr(_config, name))
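# Illustrative resolution flow for load_dependency() (not part of the original module; 'template_engine'
# is a hypothetical config attribute, while 'config' and 'process' are handled explicitly above):
#   load_dependency('config')           -> the shared config object
#   load_dependency('process')          -> the mb.lib.process module
#   load_dependency('template_engine')  -> resolves _config.template_engine and builds it via _load_plugin(),
#       which in turn resolves that plugin's own constructor arguments recursively.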
def get_commands():
return [k for (k, v) in _defined_commands.items() if not k.startswith('_')]
def load_command(name):
if name in _defined_commands.keys():
plugin = _defined_commands[name]
else:
raise StandardError('The following command: {0} is not available'.format(name))
return _load_plugin(plugin)
|
silverbp/master-builder
|
mb/lib/ioc.py
|
Python
|
apache-2.0
| 4,431
|
# Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Runs the main function in detokenize.py."""
from pw_tokenizer import detokenize
detokenize.main()
|
google/pigweed
|
pw_tokenizer/py/pw_tokenizer/__main__.py
|
Python
|
apache-2.0
| 687
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from senlin.common import consts
from senlin.engine.actions import base as ab
from senlin.engine.actions import cluster_action as ca
from senlin.engine import cluster as cm
from senlin.engine import dispatcher
from senlin.objects import action as ao
from senlin.objects import dependency as dobj
from senlin.tests.unit.common import base
from senlin.tests.unit.common import utils
@mock.patch.object(cm.Cluster, 'load')
class ClusterCheckTest(base.SenlinTestCase):
def setUp(self):
super(ClusterCheckTest, self).setUp()
self.ctx = utils.dummy_context()
@mock.patch.object(ao.Action, 'update')
@mock.patch.object(ab.Action, 'create')
@mock.patch.object(dobj.Dependency, 'create')
@mock.patch.object(dispatcher, 'start_action')
@mock.patch.object(ca.ClusterAction, '_wait_for_dependents')
def test_do_check(self, mock_wait, mock_start, mock_dep, mock_action,
mock_update, mock_load):
node1 = mock.Mock(id='NODE_1')
node2 = mock.Mock(id='NODE_2')
cluster = mock.Mock(id='FAKE_ID', status='old status',
status_reason='old reason')
cluster.nodes = [node1, node2]
cluster.do_check.return_value = True
mock_load.return_value = cluster
mock_action.side_effect = ['NODE_ACTION_1', 'NODE_ACTION_2']
action = ca.ClusterAction('FAKE_CLUSTER', 'CLUSTER_CHECK', self.ctx)
action.id = 'CLUSTER_ACTION_ID'
mock_wait.return_value = (action.RES_OK, 'Everything is Okay')
# do it
res_code, res_msg = action.do_check()
# assertions
self.assertEqual(action.RES_OK, res_code)
self.assertEqual('Cluster checking completed.', res_msg)
mock_load.assert_called_once_with(action.context, 'FAKE_CLUSTER')
cluster.do_check.assert_called_once_with(action.context)
mock_action.assert_has_calls([
mock.call(action.context, 'NODE_1', 'NODE_CHECK',
name='node_check_NODE_1',
cause=consts.CAUSE_DERIVED,
inputs={}),
mock.call(action.context, 'NODE_2', 'NODE_CHECK',
name='node_check_NODE_2',
cause=consts.CAUSE_DERIVED,
inputs={})
])
mock_dep.assert_called_once_with(action.context,
['NODE_ACTION_1', 'NODE_ACTION_2'],
'CLUSTER_ACTION_ID')
mock_update.assert_has_calls([
mock.call(action.context, 'NODE_ACTION_1', {'status': 'READY'}),
mock.call(action.context, 'NODE_ACTION_2', {'status': 'READY'}),
])
mock_start.assert_called_once_with()
mock_wait.assert_called_once_with()
cluster.eval_status.assert_called_once_with(
action.context, consts.CLUSTER_CHECK)
@mock.patch.object(ao.Action, 'update')
@mock.patch.object(ab.Action, 'create')
@mock.patch.object(ao.Action, 'delete_by_target')
@mock.patch.object(dobj.Dependency, 'create')
@mock.patch.object(dispatcher, 'start_action')
@mock.patch.object(ca.ClusterAction, '_wait_for_dependents')
def test_do_check_need_delete(self, mock_wait, mock_start, mock_dep,
mock_delete, mock_action, mock_update,
mock_load):
node1 = mock.Mock(id='NODE_1')
node2 = mock.Mock(id='NODE_2')
cluster = mock.Mock(id='FAKE_ID', status='old status',
status_reason='old reason')
cluster.nodes = [node1, node2]
cluster.do_check.return_value = True
mock_load.return_value = cluster
mock_action.side_effect = ['NODE_ACTION_1', 'NODE_ACTION_2']
action = ca.ClusterAction('FAKE_CLUSTER', 'CLUSTER_CHECK', self.ctx,
inputs={'delete_check_action': True})
action.id = 'CLUSTER_ACTION_ID'
mock_wait.return_value = (action.RES_OK, 'Everything is Okay')
# do it
res_code, res_msg = action.do_check()
# assertions
self.assertEqual(action.RES_OK, res_code)
self.assertEqual('Cluster checking completed.', res_msg)
mock_load.assert_called_once_with(action.context, 'FAKE_CLUSTER')
cluster.do_check.assert_called_once_with(action.context)
mock_delete.assert_has_calls([
mock.call(action.context, 'NODE_1', action=['NODE_CHECK'],
status=['SUCCEEDED', 'FAILED']),
mock.call(action.context, 'NODE_2', action=['NODE_CHECK'],
status=['SUCCEEDED', 'FAILED'])
])
mock_action.assert_has_calls([
mock.call(action.context, 'NODE_1', 'NODE_CHECK',
name='node_check_NODE_1',
cause=consts.CAUSE_DERIVED,
inputs={'delete_check_action': True}),
mock.call(action.context, 'NODE_2', 'NODE_CHECK',
name='node_check_NODE_2',
cause=consts.CAUSE_DERIVED,
inputs={'delete_check_action': True})
])
mock_dep.assert_called_once_with(action.context,
['NODE_ACTION_1', 'NODE_ACTION_2'],
'CLUSTER_ACTION_ID')
mock_update.assert_has_calls([
mock.call(action.context, 'NODE_ACTION_1', {'status': 'READY'}),
mock.call(action.context, 'NODE_ACTION_2', {'status': 'READY'}),
])
mock_start.assert_called_once_with()
mock_wait.assert_called_once_with()
cluster.eval_status.assert_called_once_with(
action.context, consts.CLUSTER_CHECK)
def test_do_check_cluster_empty(self, mock_load):
cluster = mock.Mock(id='FAKE_ID', nodes=[], status='old status',
status_reason='old reason')
cluster.do_check.return_value = True
mock_load.return_value = cluster
action = ca.ClusterAction(cluster.id, 'CLUSTER_CHECK', self.ctx)
# do it
res_code, res_msg = action.do_check()
self.assertEqual(action.RES_OK, res_code)
self.assertEqual('Cluster checking completed.', res_msg)
cluster.do_check.assert_called_once_with(self.ctx)
cluster.eval_status.assert_called_once_with(
action.context, consts.CLUSTER_CHECK)
@mock.patch.object(ao.Action, 'update')
@mock.patch.object(ab.Action, 'create')
@mock.patch.object(dobj.Dependency, 'create')
@mock.patch.object(dispatcher, 'start_action')
@mock.patch.object(ca.ClusterAction, '_wait_for_dependents')
def test_do_check_failed_waiting(self, mock_wait, mock_start, mock_dep,
mock_action, mock_update, mock_load):
node = mock.Mock(id='NODE_1')
cluster = mock.Mock(id='CLUSTER_ID', status='old status',
status_reason='old reason')
cluster.do_recover.return_value = True
cluster.nodes = [node]
mock_load.return_value = cluster
mock_action.return_value = 'NODE_ACTION_ID'
action = ca.ClusterAction('FAKE_CLUSTER', 'CLUSTER_CHECK', self.ctx)
action.id = 'CLUSTER_ACTION_ID'
mock_wait.return_value = (action.RES_TIMEOUT, 'Timeout!')
res_code, res_msg = action.do_check()
self.assertEqual(action.RES_TIMEOUT, res_code)
self.assertEqual('Timeout!', res_msg)
mock_load.assert_called_once_with(self.ctx, 'FAKE_CLUSTER')
cluster.do_check.assert_called_once_with(action.context)
mock_action.assert_called_once_with(
action.context, 'NODE_1', 'NODE_CHECK',
name='node_check_NODE_1',
inputs={},
cause=consts.CAUSE_DERIVED,
)
mock_dep.assert_called_once_with(action.context, ['NODE_ACTION_ID'],
'CLUSTER_ACTION_ID')
mock_update.assert_called_once_with(action.context, 'NODE_ACTION_ID',
{'status': 'READY'})
mock_start.assert_called_once_with()
mock_wait.assert_called_once_with()
cluster.eval_status.assert_called_once_with(
action.context, consts.CLUSTER_CHECK)
|
openstack/senlin
|
senlin/tests/unit/engine/actions/test_check.py
|
Python
|
apache-2.0
| 8,927
|
#!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@license:
'''
SPELL=u'xiàjùxū'
CN=u'下巨虚'
NAME=u'xiajuxu441'
CHANNEL='stomach'
CHANNEL_FULLNAME='StomachChannelofFoot-Yangming'
SEQ='ST39'
if __name__ == '__main__':
pass
|
sinotradition/meridian
|
meridian/acupoints/xiajuxu441.py
|
Python
|
apache-2.0
| 241
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
import webob
from nova.compute import flavors
from nova import test
from nova.tests.unit.api.openstack import fakes
FAKE_FLAVORS = {
'flavor 1': {
"flavorid": '1',
"name": 'flavor 1',
"memory_mb": '256',
"root_gb": '10',
"swap": '5',
"disabled": False,
"ephemeral_gb": '20',
"rxtx_factor": '1.0',
"vcpus": 1,
},
'flavor 2': {
"flavorid": '2',
"name": 'flavor 2',
"memory_mb": '512',
"root_gb": '10',
"swap": '10',
"ephemeral_gb": '25',
"rxtx_factor": None,
"disabled": False,
"vcpus": 1,
},
}
def fake_flavor_get_by_flavor_id(flavorid, ctxt=None):
return FAKE_FLAVORS['flavor %s' % flavorid]
def fake_get_all_flavors_sorted_list(context=None, inactive=False,
filters=None, sort_key='flavorid',
sort_dir='asc', limit=None, marker=None):
return [
fake_flavor_get_by_flavor_id(1),
fake_flavor_get_by_flavor_id(2)
]
class FlavorRxtxTestV21(test.NoDBTestCase):
content_type = 'application/json'
_prefix = "/v2/fake"
def setUp(self):
super(FlavorRxtxTestV21, self).setUp()
ext = ('nova.api.openstack.compute.contrib'
'.flavor_rxtx.Flavor_rxtx')
self.flags(osapi_compute_extension=[ext])
fakes.stub_out_nw_api(self)
self.stubs.Set(flavors, "get_all_flavors_sorted_list",
fake_get_all_flavors_sorted_list)
self.stubs.Set(flavors,
"get_flavor_by_flavor_id",
fake_flavor_get_by_flavor_id)
def _make_request(self, url):
req = webob.Request.blank(url)
req.headers['Accept'] = self.content_type
res = req.get_response(self._get_app())
return res
def _get_app(self):
return fakes.wsgi_app_v21(init_only=('servers',
'flavors', 'os-flavor-rxtx'))
def _get_flavor(self, body):
return jsonutils.loads(body).get('flavor')
def _get_flavors(self, body):
return jsonutils.loads(body).get('flavors')
def assertFlavorRxtx(self, flavor, rxtx):
self.assertEqual(str(flavor.get('rxtx_factor')), rxtx)
def test_show(self):
url = self._prefix + '/flavors/1'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
self.assertFlavorRxtx(self._get_flavor(res.body), '1.0')
def test_detail(self):
url = self._prefix + '/flavors/detail'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
flavors = self._get_flavors(res.body)
self.assertFlavorRxtx(flavors[0], '1.0')
self.assertFlavorRxtx(flavors[1], '')
|
bigswitch/nova
|
nova/tests/unit/api/openstack/compute/test_flavor_rxtx.py
|
Python
|
apache-2.0
| 3,454
|
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008-2014,2016-2019 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Location formatter."""
from aquilon.aqdb.model import Location, Rack, Building, Room
from aquilon.worker.formats.formatters import ObjectFormatter
class LocationFormatter(ObjectFormatter):
def format_raw(self, location, indent="", embedded=True,
indirect_attrs=True):
details = [indent + "{0:c}: {0.name}".format(location)]
if location.fullname:
details.append(indent + " Fullname: {}".format(location.fullname))
if hasattr(location, 'timezone'):
details.append(indent + " Timezone: {}".format(location.timezone))
# Rack could have been a separate formatter, but since this is
# the only difference...
if isinstance(location, Rack):
details.append(indent + " Row: {}".format(location.rack_row))
details.append(indent + " Column: {}".format(location.rack_column))
elif isinstance(location, Building):
details.append(indent + " Address: {}".format(location.address))
details.append(indent + " Next Rack ID: {}".format(location.next_rackid))
details.append(indent + " Network Devices Require Racks: {}".format(location.netdev_rack))
elif isinstance(location, Room) and location.floor:
details.append(indent + " Floor: {}".format(location.floor))
if location.uri:
details.append(indent + " Location URI: {}".format(location.uri))
if location.comments:
details.append(indent + " Comments: {}".format(location.comments))
if location.parents:
details.append(indent + " Location Parents: [{}]".format(", ".join(format(p) for p in location.parents)))
if location.default_dns_domain:
details.append(indent + " Default DNS Domain: {0.name}".format(location.default_dns_domain))
return "\n".join(details)
def fill_proto(self, loc, skeleton, embedded=True, indirect_attrs=True):
skeleton.name = loc.name
# Backwards compatibility
if loc.location_type == "organization":
skeleton.location_type = "company"
else:
skeleton.location_type = loc.location_type
skeleton.fullname = loc.fullname
if isinstance(loc, Rack) and loc.rack_row and loc.rack_column:
skeleton.row = loc.rack_row
skeleton.col = loc.rack_column
if hasattr(loc, "timezone"):
skeleton.timezone = loc.timezone
if hasattr(loc, "uri") and loc.uri:
skeleton.uri = loc.uri
if indirect_attrs:
for p in loc.parents:
parent = skeleton.parents.add()
parent.name = p.name
# Backwards compatibility
if p.location_type == "organization":
parent.location_type = "company"
else:
parent.location_type = p.location_type
def csv_fields(self, location):
"""Yield a CSV-ready list of selected attribute values for location."""
# Columns 0 and 1
details = [location.location_type, location.name]
# Columns 2 and 3
if location.parent:
details.append(location.parent.location_type)
details.append(location.parent.name)
else:
details.extend([None, None])
# Columns 4 and 5
if isinstance(location, Rack):
details.append(location.rack_row)
details.append(location.rack_column)
else:
details.extend([None, None])
# Column 6
if hasattr(location, 'timezone'):
details.append(location.timezone)
else:
details.append(None)
# Column 7
details.append(location.fullname)
# Column 8
if location.default_dns_domain:
details.append(location.default_dns_domain)
else:
details.append(None)
yield details
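        # Illustrative csv_fields() output for a hypothetical rack "np3" in building "np", row "a",
        # column "3", with no timezone and no default DNS domain (all values made up for illustration):
        #   ['rack', 'np3', 'building', 'np', 'a', '3', None, 'np3', None]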
for location_type, mapper in Location.__mapper__.polymorphic_map.items():
ObjectFormatter.handlers[mapper.class_] = LocationFormatter()
|
quattor/aquilon
|
lib/aquilon/worker/formats/location.py
|
Python
|
apache-2.0
| 4,816
|
from telnetlib import Telnet
class James:
def __init__(self, app):
self.app = app
def ensure_user_exists(self, username, password):
james_config = self.app.config['james']
session = James.Session(james_config['host'], james_config['port'], james_config['username'], james_config['password'])
if session.is_user_registered(username):
session.reset_password(username, password)
else:
session.create_user(username, password)
session.quit()
class Session:
def __init__(self, host, port, username, password):
self.telnet = Telnet(host, port, 10)
self.read_until("login id:")
self.write(username + '\n')
self.read_until("Password:")
self.write(password + '\n')
self.read_until("Welcome root. HELP for a list of commands")
def is_user_registered(self, username):
self.write("verify %s\n" % username)
res = self.telnet.expect([b"exists", b"does not exist"])
return res[0] == 0
def create_user(self, username, password):
self.write("adduser %s %s\n" % (username, password))
self.read_until("User %s added" % username)
def reset_password(self, username, password):
self.write("setpassword %s %s\n" % (username, password))
self.read_until("Password for %s reset" % username)
def quit(self):
self.write("quit\n")
def read_until(self, text):
self.telnet.read_until(text.encode("ascii"), 5)
def write(self, text):
self.telnet.write(text.encode("ascii"))
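# Illustrative usage (hypothetical host/credentials, not part of the original fixture):
#   james = James(app)   # app.config['james'] must provide host, port, username and password
#   james.ensure_user_exists('user1@localhost.localdomain', 'secret')
# ensure_user_exists() opens a telnet session to the James admin console, creates the user if it does
# not exist, resets the password if it does, and then quits the session.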
|
esemin83/python_training_mantis
|
fixture/james.py
|
Python
|
apache-2.0
| 1,687
|
#!/usr/bin/env python3
# Copyright 2015-2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os, subprocess, re
def config_vcs_tag(infile, outfile, fallback, source_dir, replace_string, regex_selector, cmd):
try:
output = subprocess.check_output(cmd, cwd=source_dir)
new_string = re.search(regex_selector, output.decode()).group(1).strip()
except Exception:
new_string = fallback
with open(infile) as f:
new_data = f.read().replace(replace_string, new_string)
if os.path.exists(outfile):
with open(outfile) as f:
needs_update = (f.read() != new_data)
else:
needs_update = True
if needs_update:
with open(outfile, 'w') as f:
f.write(new_data)
def run(args):
infile, outfile, fallback, source_dir, replace_string, regex_selector = args[0:6]
command = args[6:]
config_vcs_tag(infile, outfile, fallback, source_dir, replace_string, regex_selector, command)
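# Illustrative command line (hypothetical file names and VCS command; positional arguments follow run()):
#   vcstagger.py version.h.in version.h 0.0.0-dev . @VCS_TAG@ '(.*)' git describe --always
# This replaces @VCS_TAG@ in version.h.in with the first regex group of `git describe --always` run in
# the source dir, falls back to 0.0.0-dev on failure, and rewrites version.h only when the content changed.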
if __name__ == '__main__':
sys.exit(run(sys.argv[1:]))
|
centricular/meson
|
mesonbuild/scripts/vcstagger.py
|
Python
|
apache-2.0
| 1,568
|
#!/usr/bin/env python
# This file implements all actions performed by the agent to start the execution script on the exec host and to
# collect SAR data from all exec and stat hosts. Each procedure is mapped to a particular daytona command used by
# the scheduler to communicate with the agent. Upon receiving a command from the daytona scheduler, the agent
# executes the procedure mapped to that command.
import subprocess
import threading
import common
import os
import time
import shutil
from shutil import copyfile
import sys
import testobj
import client
import config
import signal
import envelope
import system_metrics_gather
from logger import LOG
lctx = None
cfg = config.CFG("DaytonaHost", lctx)
cfg.readCFG("config.ini")
EXEC_SCRIPT_DIR = cfg.execscript_location
# The agent on a particular host maintains a list of tests it is currently executing and keeps updating their
# test data. It is a key-value map in which each test object is keyed by its test ID.
running_tests = {}
action_lock = threading.Lock()
exec_script_pid = {}
exec_script_lock = threading.Lock()
class activeTest:
"""
    This class defines a test object which captures all the information about a test. The agent saves these test
    objects in a queue to maintain information about all running tests.
"""
def __init__(self, testid, actionID, exec_thread, testobj):
self.testid = testid
self.actionID = actionID
self.exec_thread = exec_thread
self.tobj = testobj
self.stream = None
self.status = ""
self.serverip = ""
self.stathostip = ""
self.serverport = 0
self.stathostport = 0
self.execdir = ""
self.logdir = ""
self.resultsdir = ""
self.statsdir = ""
self.archivedir = ""
self.execscriptfile = ""
self.hostname = ""
def clear(self):
lctx.info("Clearing object contents")
self.cleanup()
def cleanup(self):
lctx.info("Clearing FS, processes")
class commandThread(threading.Thread):
"""
    This class creates a child thread for starting the execution script or executing any other Linux command and
    capturing its output from the system.
"""
def __init__(self, cmdstr, dcmdstr, streamfile, cdir, testid):
self.cmd = cmdstr
self.dcmd = dcmdstr
self.sfile = streamfile
self.cwd = cdir
self.paused = False
        self._stop = threading.Event()
        # Condition variable guarding the paused flag; pause()/resume()/check() below synchronize on it.
        self.state = threading.Condition()
self.stdout = None
self.stderr = None
self.testid = testid
threading.Thread.__init__(self)
def resume(self):
with self.state:
self.paused = False
self.state.notify() # unblock self if waiting
def pause(self):
with self.state:
self.paused = True # make self block and wait
def check(self):
with self.state:
if self.paused:
self.state.wait() # block until notified
if self._stop.isSet():
return False
def stop(self):
self._stop.set()
def __del__(self):
self._stop.set()
def stopped(self):
return self._stop.isSet()
def run(self):
lctx.debug(self.cmd)
ca = self.cmd.split(" ")
lctx.debug(ca)
        # os.setsid is used to create a new PID group for the exec script being executed, so that any subsequent
        # child process or script invocation remains in the same PID group. If the timer expires or something goes
        # wrong, we simply kill this PID group to terminate everything.
p = subprocess.Popen(ca, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=self.cwd,
preexec_fn=os.setsid)
# Saving PID information for keeping track of PID group
exec_script_lock.acquire()
exec_script_pid[self.testid] = p
exec_script_lock.release()
while True:
out = p.stdout.read(1)
if out == '' and p.poll() is not None:
break
if out != '':
sys.stdout.write(out)
sys.stdout.flush()
if self.sfile is not None:
self.sfile.flush()
self.sfile.flush()
def get_test(testid):
"""
    This procedure gets the test object from the agent's running queue. It acquires a lock on the queue for mutual
    exclusion, since multiple threads might be executing actions for a particular test.
    :param testid: test ID used to fetch the test object
:return: test object if found in the queue
"""
found = False
current_test = None
action_lock.acquire()
if testid in running_tests:
current_test = running_tests[testid]
found = True
action_lock.release()
if found:
return current_test
else:
return
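# Illustrative access pattern for the running_tests map (test id 1234 is hypothetical):
#   test = get_test(1234)        # returns the activeTest object, or None if 1234 is not running
#   if test:
#       test.status = "RUNNING"
#       save_test(1234, test)    # writes the updated object back under action_lock
#   delete_test(1234)            # drops the entry once the test completes or is aborted
# save_test() and delete_test() are defined right below and use the same lock.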
def save_test(testid, test):
"""
    This procedure is called to update test information in the agent's running queue.
    :param testid: test ID used as the key in the running test queue
    :param test: updated test object which needs to be saved in the running queue
    :return: True if the update is successful
"""
found = False
action_lock.acquire()
if testid in running_tests:
running_tests[testid] = test
found = True
action_lock.release()
return found
def delete_test(testid):
"""
    This procedure deletes the test information from the running queue. This happens when test execution ends or
    something goes wrong with the test.
:param testid: Test ID to identify test in running queue
:return: NA
"""
action_lock.acquire()
if testid in running_tests:
del running_tests[testid]
action_lock.release()
def exec_cmd(cmd, daytona_cmd, sync, obj, actionid, current_test):
"""
    This procedure does the setup for starting the execution script. It creates the command thread object which
    executes the script.
"""
lctx.debug("Execute cmd : " + cmd)
sfile = None
cl = None
########
if daytona_cmd == "DAYTONA_START_TEST":
cl = client.TCPClient(LOG.getLogger("clientlog", "Agent"))
(current_test.stream, sfile) = cl.stream_start(current_test.serverip, current_test.serverport,
str(current_test.tobj.testobj.TestInputData.exec_log_path))
########
if sfile is not None:
sfile.flush()
cthread = commandThread(cmd, daytona_cmd, sfile, current_test.execdir, current_test.testid)
current_test.exec_thread = cthread
cthread.start()
(t, aid, tst, ts) = (None, None, None, None)
if sync == "T":
lctx.debug("Execute cmd in Sync ctx")
cthread.join()
if sfile is not None:
sfile.flush()
else:
# async action entry in the server table (need this to check self alive below)
for tp in obj.async_actions:
if tp[1] == actionid:
(t, aid, tst, ts) = tp
        lctx.debug("Execute cmd in Async ctx : " + str(actionid))
timer_expire = False
while True:
lctx.debug("waiting for async action to complete : " + str(actionid))
if cthread.stdout is not None:
                lctx.debug("printing output of stream ")
if sfile is not None:
sfile.flush()
if tst.testobj.TestInputData.timeout > 0:
if time.time() - ts > tst.testobj.TestInputData.timeout:
lctx.error("Timer expired for this test, need to end this async action")
timer_expire = True
# Exit from this while loop if timer expires or execution script ends
if t.check() == False or cthread.is_alive() == False or timer_expire:
if daytona_cmd == "DAYTONA_START_TEST":
                    if cthread.is_alive():
                        # Ensure p is always defined, even if the test id is missing from the PID map.
                        p = None
                        exec_script_lock.acquire()
                        if current_test.testid in exec_script_pid:
                            p = exec_script_pid[current_test.testid]
                            del exec_script_pid[current_test.testid]
                        exec_script_lock.release()
                        if p:
                            os.killpg(p.pid, signal.SIGTERM)
lctx.debug("end stream")
cl.stream_end(current_test.serverip, current_test.serverport,
str(current_test.tobj.testobj.TestInputData.exec_log_path), current_test.stream,
sfile)
# callback
# removeactionid
lctx.debug("Callback here removing item")
obj.removeActionItem(actionid)
break
time.sleep(3)
if daytona_cmd == "DAYTONA_START_TEST":
if timer_expire:
current_test.status = "TIMEOUT"
else:
lctx.debug("Setting current test status to TESTEND")
current_test.status = "TESTEND"
lctx.debug(daytona_cmd + " END [" + str(actionid) + "]")
if save_test(current_test.testid, current_test):
return "SUCCESS"
else:
return "ERROR"
def scheduler_handshake(current_test):
"""
    This procedure is part of the 2-way handshake between the scheduler and the agent. If the agent receives a
    handshake message from the scheduler, it also sends a handshake message back to check whether it can reach the
    scheduler on the scheduler port. This is important because log files are later transferred to the scheduler
    over that port.
    :param current_test: test object
    :return: True if the scheduler responds, otherwise False
"""
cl = client.TCPClient(LOG.getLogger("clientlog", "Agent"))
env = envelope.DaytonaEnvelope()
ret = cl.send(current_test.serverip, current_test.serverport, env.construct("DAYTONA_HANDSHAKE", "handshake2"))
if ret == "SUCCESS":
return True
else:
return False
def setupTest(self, *args):
"""
    Test setup is called when the scheduler sends the "DAYTONA_SETUP_TEST" message to the agent. In this procedure
    the agent creates all necessary file system path strings and updates them in the test object. It then creates
    the corresponding directories so that the agent can later save SAR data. On the exec host, it also copies the
    execution script from the Execscript folder to a test-specific directory, in order to keep execution scripts
    separate in case of multiple test executions.
    :param self:
    :param args: tuple of arguments containing obj, command, the parameter sent by the scheduler for this command,
    actionID and a sync flag denoting whether this procedure runs in sync or async mode
    :return: SUCCESS if everything goes well, otherwise ERROR
"""
(obj, command, params, actionID, sync) = (args[0], args[1], args[2], args[3], args[4])
test_serialized = params.split(",")[0]
host_type = params.split(",")[1]
t2 = testobj.testDefn()
t2.deserialize(test_serialized)
current_test = get_test(t2.testobj.TestInputData.testid)
test_logger = None
try:
if current_test:
test_logger = LOG.gettestlogger(current_test, "STAT")
lctx.debug("TEST SETUP | " + str(current_test.testid) + " | START")
test_logger.info("Test setup started")
current_test.tobj = testobj.testDefn()
current_test.tobj = t2
current_test.testid = current_test.tobj.testobj.TestInputData.testid
cfg = config.CFG("DaytonaHost", lctx)
cfg.readCFG("config.ini")
dir = cfg.daytona_agent_root + "/" + current_test.tobj.testobj.TestInputData.frameworkname + "/" + str(
current_test.tobj.testobj.TestInputData.testid)
shutil.rmtree(dir, ignore_errors=True)
prefix = cfg.daytona_agent_root + "/" + current_test.tobj.testobj.TestInputData.frameworkname + "/" + str(
current_test.tobj.testobj.TestInputData.testid) + "/results/"
if host_type == "EXEC":
current_test.execdir = prefix + current_test.tobj.testobj.TestInputData.exechostname
current_test.logdir = prefix + current_test.tobj.testobj.TestInputData.exechostname + "/application"
current_test.statsdir = prefix + current_test.stathostip + "/sar/"
current_test.resultsdir = cfg.daytona_agent_root + "/" + \
current_test.tobj.testobj.TestInputData.frameworkname + "/" + \
str(current_test.tobj.testobj.TestInputData.testid) + "/results"
current_test.archivedir = cfg.daytona_agent_root + "/" + \
current_test.tobj.testobj.TestInputData.frameworkname + "/" + \
str(current_test.tobj.testobj.TestInputData.testid) + "/"
if host_type == "EXEC":
common.createdir(current_test.execdir, self.lctx)
common.createdir(current_test.logdir, self.lctx)
common.createdir(current_test.resultsdir, self.lctx)
common.createdir(current_test.statsdir, self.lctx)
test_logger.info("Test directory created")
if host_type == "EXEC":
execscript = current_test.tobj.testobj.TestInputData.execution_script_location
lctx.debug("TEST SETUP : " + str(execscript))
current_test.execscriptfile = current_test.execdir + "/" + execscript
lctx.debug(current_test.execscriptfile)
                # Check whether the execution script is present in EXEC_SCRIPT_DIR - execute the script only if it
                # is present at this location
execscript_location = EXEC_SCRIPT_DIR + execscript
execscript_location = os.path.realpath(execscript_location)
valid_path = os.path.commonprefix([execscript_location, EXEC_SCRIPT_DIR]) == EXEC_SCRIPT_DIR
if valid_path:
if os.path.isfile(execscript_location):
ret = shutil.copytree(os.path.dirname(execscript_location),
os.path.dirname(current_test.execscriptfile))
else:
raise Exception(
"Execution script not found at Daytona Execution Script Location : " + EXEC_SCRIPT_DIR)
else:
raise Exception(
"Access Denied : Use Daytona Execution Script Location '" + EXEC_SCRIPT_DIR + "' for executing "
"exec scripts")
os.chmod(current_test.execscriptfile, 0744)
test_logger.info("Execution script copied successfully")
save_test(current_test.testid, current_test)
test_logger.info("Test setup complete")
lctx.debug("TEST SETUP | " + str(current_test.testid) + " | COMPLETE")
return "SUCCESS"
else:
raise Exception("Invalid Test ID")
except Exception as e:
lctx.error(e)
if test_logger:
test_logger.error(e)
return "ERROR"
# create dirs
# get exec script name
# cp the exec script
# set exec perm
# update cur test obj with exec script
# exec any custom setup script
def startTest(self, *args):
"""
    This procedure is invoked for STRACE and PERF profiler setup on the exec/stat host. On the exec host, after
    setting up the profilers, it starts execution of the exec script.
"""
(obj, command, params, actionID, sync) = (args[0], args[1], args[2], args[3], args[4])
testid = int(params.split(",")[0])
host_type = params.split(",")[1]
current_test = get_test(testid)
strace_config = None
perf_config = dict()
test_logger = None
try:
if current_test:
test_logger = LOG.gettestlogger(current_test, "STAT")
lctx.debug("TESTSTART | " + str(current_test.testid) + " | START")
test_logger.info("Starting test")
current_test.status = "RUNNING"
current_test.actionID = actionID
save_test(current_test.testid, current_test)
if current_test.tobj.testobj.TestInputData.strace:
strace_config = dict()
strace_config["delay"] = str(current_test.tobj.testobj.TestInputData.strace_delay)
strace_config["duration"] = str(current_test.tobj.testobj.TestInputData.strace_duration)
strace_config["process"] = current_test.tobj.testobj.TestInputData.strace_process
perf_config["delay"] = str(current_test.tobj.testobj.TestInputData.perf_delay)
perf_config["duration"] = str(current_test.tobj.testobj.TestInputData.perf_duration)
if current_test.tobj.testobj.TestInputData.perf_process:
perf_config["process"] = current_test.tobj.testobj.TestInputData.perf_process
test_logger.info("Configuring perf profiler - " + str(perf_config))
if strace_config is not None:
test_logger.info("Configuring strace profiler - " + str(strace_config))
# Setting up STRACE and PERF configuration
system_metrics_gather.perf_strace_gather(current_test.testid, perf_config, strace_config)
test_logger.info("Profiler started")
if host_type == "EXEC":
# Copied execscript
execscript = current_test.execscriptfile
args = ""
for a in current_test.tobj.testobj.TestInputData.execScriptArgs:
args = args + " \"" + a[3] + "\""
execline = execscript + args
lctx.debug("Execution line:" + execline)
test_logger.info("Execution script started")
# execute the exec script here
exec_cmd(execline, command, sync, obj, actionID, current_test)
lctx.debug("TESTSTART | " + str(current_test.testid) + " | COMPLETE")
return "SUCCESS"
else:
raise Exception("Invalid Test ID")
except Exception as e:
lctx.error(e)
if test_logger:
test_logger.error(e)
return "ERROR"
def stopTest(self, *args):
"""
    This procedure is invoked by the agent when it receives the DAYTONA_STOP_TEST message from the scheduler. In the
    current test life cycle, the stat host agent receives this message. In this procedure, the agent changes the
    state of the test from RUNNING to TESTEND and updates the running queue.
"""
(obj, command, params, actionID, sync) = (args[0], args[1], args[2], args[3], args[4])
testid = int(params)
current_test = get_test(testid)
test_logger = None
try:
if current_test:
test_logger = LOG.gettestlogger(current_test, "STAT")
current_test.status = "TESTEND"
save_test(current_test.testid, current_test)
test_logger.info("Test stop")
return "SUCCESS"
else:
raise Exception("Test not running : " + str(current_test.testid))
except Exception as e:
lctx.error(e)
if test_logger:
test_logger.error(e)
return "ERROR"
def cleanup(self, *args):
"""
    This procedure is called on test completion or timer expiry, or if something goes wrong with the test. It
    performs the tasks below:
    * Download the agent-side test execution life cycle logs
    * Remove the logger object for this particular test
    * Delete the test logs from the file system
    * Delete the test from the agent's running queue
"""
(obj, command, params, actionID, sync) = (args[0], args[1], args[2], args[3], args[4])
testid = int(params)
current_test = get_test(testid)
test_logger = None
try:
if current_test:
test_logger = LOG.gettestlogger(current_test, "STAT")
lctx.debug("CLEANUP | " + str(current_test.testid) + " | START")
test_logger.info("Test cleanup")
downloadTestLogs(testid)
LOG.removeLogger(current_test.tobj)
shutil.rmtree(current_test.resultsdir, ignore_errors=True)
delete_test(testid)
lctx.debug("CLEANUP | " + str(current_test.testid) + " | COMPLETE")
return "SUCCESS"
else:
raise Exception("Invalid Test ID")
except Exception as e:
lctx.error(e)
if test_logger:
test_logger.error(e)
return "ERROR"
def abortTest(self, *args):
"""
    This procedure is invoked by the agent whenever the scheduler sends the DAYTONA_ABORT_TEST message. This happens
    when something goes wrong on the exec host, or when the user cancels test execution and the scheduler wants to
    terminate the test on all hosts on which it was started. It stops the execution script thread to halt execution
    and calls the cleanup procedure for downloading logs and other cleanups.
"""
(obj, command, params, actionID, sync) = (args[0], args[1], args[2], args[3], args[4])
testid = int(params)
current_test = get_test(testid)
t2 = current_test.tobj
(t, aid, tst, ts) = (None, None, None, None)
lctx.debug(args)
abort_action = False
for tp in obj.async_actions:
(t, aid, tst, ts) = tp
if tst.testobj.TestInputData.testid == t2.testobj.TestInputData.testid:
lctx.debug("Found ASYNC action pending for this test, Aborting it")
abort_action = True
break
if abort_action:
t.stop()
t.join()
lctx.debug("Stopped ASYNC action pending for this test : " + str(tst.testobj.TestInputData.testid))
else:
lctx.debug("No ASYNC action pending for this test : " + str(t2.testobj.TestInputData.testid))
cleanup(self, self, command, params, actionID, sync)
lctx.debug(command + "[" + str(actionID) + "]")
return "SUCCESS"
def heartbeat(self, *args):
"""
    It sends an "ALIVE" message in response to any heartbeat message received.
"""
return "ALIVE"
def setFinish(self, *args):
"""
    The agent invokes this procedure when the scheduler sends the DAYTONA_FINISH_TEST message for gracefully ending
    the test on all hosts. It just calls the cleanup procedure for test cleanup and test life cycle log download.
"""
(obj, command, params, actionID, sync) = (args[0], args[1], args[2], args[3], args[4])
testid = int(params)
cleanup(self, self, command, params, actionID, sync)
return "SUCCESS"
def getStatus(self, *args):
"""
    The agent executes this procedure whenever the scheduler wants to check the state of a test by sending the
    DAYTONA_GET_STATUS message. In this procedure we fetch the test from the running queue and return the saved
    test state information.
"""
(obj, command, params, actionID, sync) = (args[0], args[1], args[2], args[3], args[4])
testid = int(params)
current_test = get_test(testid)
if current_test:
test_logger = LOG.gettestlogger(current_test, "STAT")
lctx.debug(str(current_test.testid) + ":" + current_test.status)
test_logger.info("Test Status : " + current_test.status)
return current_test.status
else:
return "TESTNA"
def fileDownload(self, *args):
"""
    On test completion, the agent executes this procedure when it receives the DAYTONA_FILE_DOWNLOAD message from
    the scheduler. We create a TAR file called results.tgz at the test location, then send this file to the
    scheduler, which saves it in its own file system.
"""
cl = client.TCPClient(LOG.getLogger("clientlog", "Agent"))
testid = int(args[2])
current_test = get_test(testid)
test_logger = None
try:
if current_test:
test_logger = LOG.gettestlogger(current_test, "STAT")
lctx.debug("FILE DOWNLOAD | " + str(current_test.testid) + " | START")
lctx.debug("Preparing TAR file of system metric folder")
test_logger.info("Preparing TAR file of system metric folder")
common.make_tarfile(current_test.archivedir + "results.tgz", current_test.resultsdir + "/")
dest = current_test.tobj.testobj.TestInputData.stats_results_path[current_test.stathostip]
download_file = current_test.archivedir + "results.tgz"
test_logger.info("Sending TAR file to daytona host")
cl.sendFile(current_test.serverip, current_test.serverport, download_file, dest.strip())
lctx.debug("FILE DOWNLOAD | " + str(current_test.testid) + " | COMPLETE")
return "SUCCESS"
else:
raise Exception("Invalid Test ID")
except Exception as e:
lctx.error(e)
if test_logger:
test_logger.error(e)
return "ERROR"
def downloadTestLogs(testid):
"""
    This procedure just sends the test life cycle log file to the scheduler upon test cleanup. This file gives the
    user information about the test execution sequence on the agent.
"""
cl = client.TCPClient(LOG.getLogger("clientlog", "Agent"))
current_test = get_test(testid)
test_logger = None
try:
if current_test:
test_logger = LOG.gettestlogger(current_test, "STAT")
test_logger.info("Sending test log to daytona host")
dest = current_test.tobj.testobj.TestInputData.stats_results_path[current_test.stathostip]
download_file = current_test.agent_log_file
cl.sendFile(current_test.serverip, current_test.serverport, download_file, dest.strip())
test_logger.info("Test log file transfer complete")
return "SUCCESS"
else:
raise Exception("Invalid Test ID")
except Exception as e:
lctx.error(e)
if test_logger:
test_logger.error(e)
return "ERROR"
|
deepeshmittal/daytona
|
Scheduler+Agent/action.py
|
Python
|
apache-2.0
| 25,730
|
#import RPi.GPIO as GPIO
import time
def ToString(List):  # Converts List to String
    return ''.join(List)

def Setup():
    # Placeholder: the GPIO setup (see the commented-out RPi.GPIO import above) is not wired up yet.
    pass

# Module-level state used by Wait() to detect newly appended lines between calls.
oldLinesGood = []
OldGood = None

def Wait():
    global oldLinesGood, OldGood
    reading_file = open('DataStore.txt', 'r')
    lines = reading_file.readlines()
    #print lines
    GoodLine = lines[len(lines) - 1]  # GoodLine is the last line of the file!
    # If there are more lines than last time, one was added, so that line should be read.
    line_added = len(lines) > len(oldLinesGood)
    OldGood = GoodLine  # Resets vars for comparison
    oldLinesGood = lines
    return line_added
|
BostonA/SpudnikPi
|
Server.py
|
Python
|
apache-2.0
| 564
|
from unittest import TestCase
from chess import get_potential_moves
class ChessTestCase(TestCase):
    def setUp(self):
        pass
    def tearDown(self):
        pass
def test_knight(self):
response = get_potential_moves('knight', 'd2')
response = [each.strip() for each in response.split(',')]
possible_moves = ['b1', 'f1', 'b3', 'f3', 'c4', 'e4']
self.assertEqual(len(response), len(possible_moves))
for each in possible_moves:
self.assertTrue(each in response)
def test_rook(self):
response = get_potential_moves('rook', 'd5')
response = [each.strip() for each in response.split(',')]
possible_moves = ['a5', 'b5', 'c5', 'e5', 'f5', 'g5', 'h5',
'd1', 'd2', 'd3', 'd4', 'd6', 'd7', 'd8']
self.assertEqual(len(response), len(possible_moves))
for each in possible_moves:
self.assertTrue(each in response)
def test_queen(self):
response = get_potential_moves('queen', 'd4')
response = [each.strip() for each in response.split(',')]
possible_moves = ['a4', 'b4', 'c4', 'e4', 'f4', 'g4', 'h4',
'd1', 'd2', 'd3', 'd5', 'd6', 'd7', 'd8',
'a7', 'b6', 'c5', 'e3', 'f2', 'g1',
'a1', 'b2', 'c3', 'e5', 'f6', 'g7', 'h8']
for each in possible_moves:
self.assertTrue(each in response)
|
jgoodell/chess-algebra
|
tests.py
|
Python
|
apache-2.0
| 1,483
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting creation and use of content using XBlocks.
Dependencies:
1. XBlock (https://github.com/edx/XBlock)
2. App Engine XBlock runtime
(https://github.com/google/appengine_xblock_runtime)
The appropriate versions of both of these libraries must be installed in the
lib/ folder. See README.rst for more details.
"""
__author__ = 'John Orr (jorr@google.com)'
import cgi
from cStringIO import StringIO
import logging
import mimetypes
import os
import re
import tarfile
import urllib
import uuid
from xml.etree import cElementTree
import appengine_config
from appengine_xblock_runtime import store
import appengine_xblock_runtime.runtime
from common import jinja_utils
from common import safe_dom
from common import schema_fields
from common import tags
from controllers import sites
from controllers import utils
import dbmodels
import django.conf
import django.template.loader
from lxml import etree
import messages
from models import courses
from models import custom_modules
from models import jobs
from models import transforms
import models.models as m_models
from modules.dashboard import filer
from modules.dashboard import unit_lesson_editor
import modules.dashboard.dashboard as dashboard
from modules.oeditor import oeditor
import webapp2
import workbench.runtime
import xblock.core
import xblock.exceptions
import xblock.field_data
import xblock.fields
import xblock.fragment
import xblock.plugin
import xblock.runtime
from google.appengine.ext import blobstore
from google.appengine.ext import db
from google.appengine.ext import ndb
# URI routing for resources belonging to this module
RESOURCES_URI = '/modules/xblock_module/resources'
# Base URI routing used by Course Builder for XBlock static resources
XBLOCK_RESOURCES_URI = '/modules/xblock_module/xblock_resources'
# Base URI routing used by Course Builder for XBlock local static resources
XBLOCK_LOCAL_RESOURCES_URI = '/modules/xblock_module/xblock_local_resources'
# URI routing used by Course Builder for call-backs to server-side XBlock code
HANDLER_URI = '/modules/xblock_module/handler'
# URI routing for the MathJax package
MATHJAX_URI = '/modules/xblock_module/MathJax'
# Allow images of up to 5Mb
MAX_ASSET_UPLOAD_SIZE_K = 5 * 1024
# The location of the static workbench files used by the XBlocks
WORKBENCH_STATIC_PATH = os.path.normpath('lib/XBlock/workbench/static')
# The location of the Django templates used by XBlocks
XBLOCK_TEMPLATES_PATH = 'lib/XBlock/xblock/templates'
# XSRF protection token for handler callbacks
XBLOCK_XSRF_TOKEN_NAME = 'xblock_handler'
XBLOCK_EVENT_SOURCE_NAME = 'xblock-event'
XBLOCK_TAG_EVENT_SOURCE_NAME = 'tag-xblock-event'
XBLOCK_WHITELIST = [
'sequential = cb_xblocks_core.cb_xblocks_core:SequenceBlock',
'video = cb_xblocks_core.cb_xblocks_core:VideoBlock',
'cbquestion = cb_xblocks_core.cb_xblocks_core:QuestionBlock',
'html = cb_xblocks_core.cb_xblocks_core:HtmlBlock',
'vertical = cb_xblocks_core.cb_xblocks_core:VerticalBlock',
'problem = cb_xblocks_core.problem:ProblemBlock'
]
# XBlock runtime section
class StudentFieldData(xblock.field_data.SplitFieldData):
"""A field data manager for use in student (i.e., non-admin) context.
This field data manager prevents students from modifying a field which is
stored as UserScope.NONE, even if an XBlock includes code which sets it.
Thus it defends against poorly-written XBlocks which grant students too
wide permissions.
"""
def __init__(self, db_data):
authored_data = xblock.field_data.ReadOnlyFieldData(db_data)
student_data = db_data
super(StudentFieldData, self).__init__({
xblock.fields.Scope.content: authored_data,
xblock.fields.Scope.settings: authored_data,
xblock.fields.Scope.parent: authored_data,
xblock.fields.Scope.children: authored_data,
xblock.fields.Scope.user_state_summary: student_data,
xblock.fields.Scope.user_state: student_data,
xblock.fields.Scope.user_info: student_data,
xblock.fields.Scope.preferences: student_data})
class ForbiddenXBlockError(Exception):
"""Raised when a non-whitelisted XBlock is requested."""
def select_xblock(identifier, entry_points):
"""Hook called when loading XBlock classes, which enforces whitelist."""
entry_point = xblock.plugin.default_select(identifier, entry_points)
if str(entry_point) not in XBLOCK_WHITELIST:
raise ForbiddenXBlockError(
'Attempted to load forbidden XBlock: %s' % str(entry_point))
return entry_point
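# Illustrative behaviour of the whitelist hook above: loading the entry point
# 'html = cb_xblocks_core.cb_xblocks_core:HtmlBlock' succeeds because it is listed in XBLOCK_WHITELIST,
# whereas a hypothetical entry point such as 'evil = some_pkg.blocks:EvilBlock' raises
# ForbiddenXBlockError even if the corresponding package is installed.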
class MemoryIdManager(xblock.runtime.MemoryIdManager):
def create_usage(self, def_id, usage_id=None):
"""Extend the method definition to allow a specified usage_id."""
usage_id = usage_id or appengine_xblock_runtime.runtime.generate_id()
self._usages[usage_id] = def_id
return usage_id
def create_definition(self, block_type, def_id=None):
"""Extend the method definition to allow a specified def_id."""
def_id = def_id or appengine_xblock_runtime.runtime.generate_id()
self._definitions[def_id] = block_type
return def_id
class Runtime(appengine_xblock_runtime.runtime.Runtime):
"""A XBlock runtime which uses the App Engine datastore."""
def __init__(
self, handler, id_reader=None, field_data=None, student_id=None,
is_admin=False):
field_data = field_data or xblock.runtime.KvsFieldData(
store.KeyValueStore())
if is_admin:
pass
elif student_id:
field_data = StudentFieldData(field_data)
else:
field_data = xblock.field_data.ReadOnlyFieldData(field_data)
def get_jinja_template(template_name, dirs):
locale = handler.app_context.get_environ()['course']['locale']
return jinja_utils.get_template(template_name, dirs, locale=locale)
services = {'jinja': get_jinja_template}
super(Runtime, self).__init__(
id_reader=id_reader, field_data=field_data, student_id=student_id,
services=services, select=select_xblock)
self.handler = handler
def render_template(self, template_name, **kwargs):
"""Loads the django template for `template_name."""
template = django.template.loader.get_template(template_name)
return template.render(django.template.Context(kwargs))
def wrap_child(self, block, unused_view, frag, unused_context):
wrapped = xblock.fragment.Fragment()
wrapped.add_javascript_url(
self.resource_url('js/vendor/jquery.min.js'))
wrapped.add_javascript_url(
self.resource_url('js/vendor/jquery.cookie.js'))
data = {}
if frag.js_init_fn:
            # Patch to accommodate jqueryui tabs (used by the sequence XBlock) in a
# page with <base> tag set. See:
# http://stackoverflow.com/questions/13837304/jquery-ui-non-ajax-tab-loading-whole-website-into-itself
wrapped.add_javascript("""
$(function() {
$(".xblock .tabs ul li a").each(function() {
var href = $(this).attr("href");
if (href && href.charAt(0) == "#") {
$(this).attr("href", location.href.toString() + href);
}
});
});
""")
wrapped.add_javascript_url(
self.resource_url('js/runtime/%s.js' % frag.js_init_version))
wrapped.add_javascript_url(RESOURCES_URI + '/runtime.js')
data = {
'data-init': frag.js_init_fn,
'data-runtime-version': str(frag.js_init_version),
'data-usage': block.scope_ids.usage_id,
'data-block-type': block.scope_ids.block_type,
'data-xsrf-token': utils.XsrfTokenManager.create_xsrf_token(
XBLOCK_XSRF_TOKEN_NAME)}
if block.name:
data['data-name'] = block.name
class FragmentText(safe_dom.Text):
"""Class to insert the fragment content into the safe_dom node."""
def __init__(self, value):
self._value = unicode(value)
@property
def sanitized(self):
return self._value
div = safe_dom.Element('div', className='xblock', **data)
div.add_child(FragmentText(frag.body_html()))
wrapped.add_content(unicode(div))
wrapped.add_frag_resources(frag)
return wrapped
def _usage_id_from_node(self, node, parent_id, _id_generator):
"""Override import method from XBlock runtime."""
block_type = node.tag
usage_id = node.get('usage_id')
if usage_id is None:
# In Course Builder the usages and defs are in 1-1
            # correspondence so, for definiteness, make ids the same
def_id = _id_generator.create_definition(block_type)
usage_id = _id_generator.create_usage(def_id, usage_id=def_id)
else:
# Test whether or not the usage is already in the datastore. If it
# is not present, there will be a NoSuchUsage exception.
try:
def_id = self.id_reader.get_definition_id(usage_id)
except xblock.exceptions.NoSuchUsage:
# In Course Builder the usages and defs are in 1-1
                # correspondence so, for definiteness, make ids the same
def_id = usage_id
def_id = _id_generator.create_definition(
block_type, def_id=def_id)
_id_generator.create_usage(def_id, usage_id=usage_id)
keys = xblock.fields.ScopeIds(
xblock.fields.UserScope.NONE, block_type, def_id, usage_id)
block_class = self.mixologist.mix(self.load_block_type(block_type))
# Load the block's fields and clear out any existing children
block = self.construct_xblock_from_class(block_class, keys)
if hasattr(block, 'children'):
            # We need to force an explicit save of the 'children' field
# and so first we have to make it dirty
block.children = ['dirt']
block.save()
block.children = []
block.save()
# Reload the block and attach new children
block = block_class.parse_xml(node, self, keys, _id_generator)
block.parent = parent_id
block.save()
return usage_id
def export_to_xml(self, block, xmlfile):
"""Override export method from XBlock runtime."""
root = etree.Element('unknown_root', usage_id=block.scope_ids.usage_id)
tree = etree.ElementTree(root)
block.export_xml(root)
tree.write(
xmlfile, xml_declaration=True, encoding='utf8', pretty_print=True)
def add_block_as_child_node(self, block, node):
"""Override export method from XBlock runtime."""
child = etree.SubElement(
node, 'unknown', usage_id=block.scope_ids.usage_id)
block.export_xml(child)
def query(self, block):
# pylint: disable=protected-access
return workbench.runtime._BlockSet(self, [block])
# pylint: enable=protected-access
def handler_url(self, block, handler_name, suffix='', query=''):
return self.handler.canonicalize_url('%s?%s' % (
HANDLER_URI, urllib.urlencode({
'usage': block.scope_ids.usage_id,
'handler': handler_name,
'xsrf_token': utils.XsrfTokenManager.create_xsrf_token(
XBLOCK_XSRF_TOKEN_NAME)})))
def resource_url(self, resource):
return '%s/%s' % (XBLOCK_RESOURCES_URI, resource)
def local_resource_url(self, block, uri):
return '%s/%s/%s' % (
XBLOCK_LOCAL_RESOURCES_URI, block.scope_ids.block_type, uri)
def publish(self, block, event):
"""Log an XBlock event to the event stream.
Args:
block: XBlock. The XBlock which emitted the event.
event: dict. A JSON serializable dict containing the event data.
"""
if self.user_id is None:
return
wrapper = {
'usage': block.scope_ids.usage_id,
'type': block.scope_ids.block_type,
'event': event}
if utils.CAN_PERSIST_TAG_EVENTS.value:
m_models.EventEntity(
source=XBLOCK_EVENT_SOURCE_NAME,
user_id=self.user_id,
data=transforms.dumps(wrapper)).put()
def parse_xml_string(
self, xml_str, unused_id_generator, orig_xml_str=None,
dry_run=False, log=None):
"""Override parse_xml_string to make it asynchronous.
Calls to this method will execute using NDB's asynchronous API. In order
        to ensure all the Datastore RPCs terminate successfully, it is
        essential that some method higher up the call stack (e.g., the request
        handler) be decorated with @ndb.toplevel.
Args:
xml_str: str. The string of XML which will be parsed as XBlocks.
unused_id_generator: IdGenerator. The XBlock API allows the runtime
to use different usage- and definition-generators, but in this
implementation, the only write target is the App Engine
Datastore.
orig_xml_str: str. The XML representation of the existing block in
the datastore, if it exists.
dry_run: bool. If set True, then parse the XML but do not do any
datastore writes.
log: file-like. A buffer to write back the XML representation of the
XBlock tree which has been assembled.
Returns:
str. The usage id of the root block of the XML tree.
"""
if orig_xml_str is None:
orig_xml_str = ''
if log is None:
log = StringIO()
id_manager = MemoryIdManager()
dict_key_value_store = xblock.runtime.DictKeyValueStore()
old_id_reader = self.id_reader
self.id_reader = id_manager
old_field_data = self.field_data
self.field_data = xblock.runtime.KvsFieldData(dict_key_value_store)
try:
root_usage_id = super(Runtime, self).parse_xml_string(
xml_str, id_manager)
block = self.get_block(root_usage_id)
self.export_to_xml(block, log)
finally:
self.id_reader = old_id_reader
self.field_data = old_field_data
if dry_run or log.getvalue() == orig_xml_str:
return root_usage_id
entities = []
for key, value in dict_key_value_store.db_dict.iteritems():
ndb_key = ndb.Key(store.KeyValueEntity, store.key_string(key))
kv_entity = store.KeyValueEntity(key=ndb_key)
kv_entity.value = value
entities.append(kv_entity)
for def_id, block_type in id_manager._definitions.iteritems():
ndb_key = ndb.Key(store.DefinitionEntity, def_id)
def_entity = store.DefinitionEntity(key=ndb_key)
def_entity.block_type = block_type
entities.append(def_entity)
for usage_id, def_id in id_manager._usages.iteritems():
ndb_key = ndb.Key(store.UsageEntity, usage_id)
usage_entity = store.UsageEntity(key=ndb_key)
usage_entity.definition_id = def_id
entities.append(usage_entity)
ndb.put_multi_async(entities)
return root_usage_id
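# Hedged usage sketch added for illustration; the function below is not part of
# the original module and its handler/XML arguments are assumptions. It shows
# the calling pattern required by Runtime.parse_xml_string: some frame above
# the call (here the sketch itself) must carry @ndb.toplevel so that the
# asynchronous Datastore writes are flushed before the request completes.
@ndb.toplevel
def _example_parse_xml_string(handler, xml_str):
    """Parse an XBlock XML string and return the usage id of its root block."""
    rt = Runtime(handler, is_admin=True)
    # The id generator argument is ignored by this Runtime implementation.
    return rt.parse_xml_string(unicode(xml_str).encode('utf_8'), None)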
class XBlockActionHandler(utils.BaseHandler):
def _handle_request(self):
def fix_ajax_request_body(body):
# The XBlock ajax clients send JSON strings in the POST body, but if
# the content-type is not explicitly set to application/json then
# the handler receives name=value pairs in url-encoded
# strings.
return urllib.unquote(
body[:-1]) if body and body[-1] == '=' else body
student_id = get_enrolled_user_id_or_guest_user_id(self)
token = self.request.get('xsrf_token')
if not utils.XsrfTokenManager.is_xsrf_token_valid(
token, XBLOCK_XSRF_TOKEN_NAME):
self.error(400)
return
usage_id = self.request.get('usage')
handler_name = self.request.get('handler')
rt = Runtime(self, student_id=student_id)
block = rt.get_block(usage_id)
self.request.body = fix_ajax_request_body(self.request.body)
response = block.runtime.handle(block, handler_name, self.request)
self.response.body = response.body
self.response.headers.update(response.headers)
def get(self):
self._handle_request()
def post(self):
self._handle_request()
# Data model section
class RootUsageEntity(m_models.BaseEntity):
"""Datastore entiry for root usage objects.
Application code should not access this object direct. Use RootUsageDto
and RootUsageDao instead.
"""
data = db.TextProperty(indexed=False)
class RootUsageDto(object):
"""A root usage identifies the root of a tree of XBlocks.
Application code should use this data transfer object (DTO) class and the
associated DAO to interact with the datastore.
"""
def __init__(self, the_id, the_dict):
self.id = the_id
self.dict = the_dict
@property
def description(self):
return self.dict.get('description', '')
@property
def usage_id(self):
return self.dict.get('usage_id', '')
@property
def is_imported(self):
"""Whether the usage was created as an import of an archive file.
Imported root usage entities are wiped and re-inserted when a new
archive is merged in; non-imported entities are left alone.
Returns:
bool. Whether the usage was created as part of an import.
"""
return self.dict.get('is_imported', False)
class RootUsageDao(m_models.BaseJsonDao):
"""DAO for CRUD operations on root usage objects."""
DTO = RootUsageDto
ENTITY = RootUsageEntity
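# Illustrative note (added; the description and usage_id values are made up).
# Application code reads and writes root usages through the DTO/DAO pair
# rather than touching RootUsageEntity directly, e.g.:
#
#     dto = RootUsageDto(None, {'description': 'Intro', 'usage_id': 'abc123'})
#     key = RootUsageDao.save(dto)
#     loaded = RootUsageDao.load(key)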
# XBlock editor section
EDITOR_HANDLERS = ['add_xblock', 'edit_xblock', 'import_xblock']
_orig_get_template = dashboard.DashboardHandler.get_template
def _get_template(the_dashboard, template_name, dirs):
return _orig_get_template(
the_dashboard, template_name, dirs + [os.path.join(
appengine_config.BUNDLE_ROOT, 'modules', 'xblock_module')])
def _add_editor_to_dashboard():
for handler in EDITOR_HANDLERS:
dashboard.DashboardHandler.get_actions.append(handler)
setattr(
dashboard.DashboardHandler, 'get_%s' % handler,
globals()['_get_%s' % handler])
setattr(dashboard.DashboardHandler, 'get_template', _get_template)
dashboard.DashboardHandler.contrib_asset_listers.append(list_xblocks)
dashboard.DashboardHandler.child_routes.append(
[XBlockEditorRESTHandler.URI, XBlockEditorRESTHandler])
dashboard.DashboardHandler.child_routes.append(
[XBlockArchiveRESTHandler.URI, XBlockArchiveRESTHandler])
dashboard.DashboardHandler.child_routes.append(
[XBlockArchiveProgressQueryHandler.URI, XBlockArchiveProgressQueryHandler])
def _remove_editor_from_dashboard():
for handler in EDITOR_HANDLERS:
dashboard.DashboardHandler.get_actions.remove(handler)
delattr(dashboard.DashboardHandler, 'get_%s' % handler)
setattr(dashboard.DashboardHandler, 'get_template', _orig_get_template)
dashboard.DashboardHandler.contrib_asset_listers.remove(list_xblocks)
dashboard.DashboardHandler.child_routes.remove(
[XBlockEditorRESTHandler.URI, XBlockEditorRESTHandler])
dashboard.DashboardHandler.child_routes.remove(
[XBlockArchiveRESTHandler.URI, XBlockArchiveRESTHandler])
dashboard.DashboardHandler.child_routes.remove(
[XBlockArchiveProgressQueryHandler.URI, XBlockArchiveProgressQueryHandler])
def list_xblocks(the_dashboard):
"""Prepare a list of the root XBlock usages installed."""
if not filer.is_editable_fs(the_dashboard.app_context):
return safe_dom.NodeList()
output = safe_dom.NodeList()
import_button_text = 'Import'
if courses.Course(the_dashboard).get_units():
import_button_text = 'Merge'
output.append(
safe_dom.Element(
'a', className='gcb-button gcb-pull-right',
href='dashboard?action=import_xblock'
).add_text(import_button_text)
)
output.append(
safe_dom.Element(
'a', className='gcb-button gcb-pull-right',
href='dashboard?action=add_xblock'
).add_text('Add XBlock')
).append(
safe_dom.Element('div', style='clear: both; padding-top: 2px;')
).append(safe_dom.Element('h3').add_text('XBlocks'))
root_usages = sorted(
RootUsageDao.get_all(), key=lambda x: x.description.lower())
if root_usages:
ol = safe_dom.Element('ol')
for root_usage in root_usages:
edit_url = 'dashboard?action=edit_xblock&key=%s' % root_usage.id
li = safe_dom.Element('li')
li.add_text(root_usage.description).add_child(
safe_dom.Entity(' ')
).add_child(
safe_dom.Element('a', href=edit_url).add_text('[Edit]'))
ol.add_child(li)
output.append(ol)
else:
output.append(safe_dom.Element('blockquote').add_text('< none >'))
return output
def _render_editor(the_dashboard, key=None, title=None, description=None):
key = key or ''
rest_url = the_dashboard.canonicalize_url(XBlockEditorRESTHandler.URI)
exit_url = the_dashboard.canonicalize_url('/dashboard?action=assets')
delete_url = None
if key:
delete_url = '%s?%s' % (
the_dashboard.canonicalize_url(XBlockEditorRESTHandler.URI),
urllib.urlencode({
'key': key,
'xsrf_token': cgi.escape(the_dashboard.create_xsrf_token(
XBlockEditorRESTHandler.XSRF_TOKEN))}))
main_content = oeditor.ObjectEditor.get_html_for(
the_dashboard,
XBlockEditorRESTHandler.SCHEMA.get_json_schema(),
XBlockEditorRESTHandler.SCHEMA.get_schema_dict(),
key, rest_url, exit_url,
delete_url=delete_url, delete_method='delete',
required_modules=XBlockEditorRESTHandler.REQUIRED_MODULES)
template_values = {
'page_title': the_dashboard.format_title(title),
'page_title_linked': the_dashboard.format_title(title, as_link=True),
'page_description': description,
'main_content': main_content}
the_dashboard.render_page(template_values)
def _get_add_xblock(the_dashboard):
_render_editor(
the_dashboard, title=messages.ADD_XBLOCK_TITLE,
description=messages.ADD_XBLOCK_DESCRIPTION)
def _get_edit_xblock(the_dashboard):
_render_editor(
the_dashboard, key=the_dashboard.request.get('key'),
title=messages.EDIT_XBLOCK_TITLE,
description=messages.EDIT_XBLOCK_DESCRIPTION)
def _get_import_xblock(the_dashboard):
"""Render the screen for uploading an XBlock course tar.gx file."""
rest_url = the_dashboard.canonicalize_url(XBlockArchiveRESTHandler.URI)
exit_url = the_dashboard.canonicalize_url('/dashboard?action=assets')
extra_js_files = []
extra_js_files.append('resources/import.js')
if courses.Course(the_dashboard).get_units():
extra_js_files.append('resources/merge.js')
main_content = oeditor.ObjectEditor.get_html_for(
the_dashboard,
XBlockArchiveRESTHandler.SCHEMA.get_json_schema(),
XBlockArchiveRESTHandler.SCHEMA.get_schema_dict(),
None, rest_url, exit_url,
delete_url=None,
auto_return=False,
save_method='upload',
save_button_caption='Import',
required_modules=XBlockArchiveRESTHandler.REQUIRED_MODULES,
extra_css_files=['resources/import.css'],
extra_js_files=extra_js_files)
template_values = {
'page_title': messages.IMPORT_COURSE_PAGE_TITLE,
'page_description': messages.IMPORT_COURSE_PAGE_DESCRIPTION,
'main_content': main_content}
the_dashboard.render_page(template_values)
class XBlockEditorRESTHandler(utils.BaseRESTHandler):
URI = '/rest/xblock'
SCHEMA = schema_fields.FieldRegistry('XBlock', description='XBlock XML')
SCHEMA.add_property(
schema_fields.SchemaField('xml', 'XML', 'text', optional=True))
SCHEMA.add_property(
schema_fields.SchemaField(
'description', 'Description', 'string', optional=True,
description=messages.XBLOCK_DESCRIPTION_FIELD))
REQUIRED_MODULES = []
XSRF_TOKEN = 'xblock-edit'
def get(self):
key = self.request.get('key')
if not unit_lesson_editor.CourseOutlineRights.can_view(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
payload_dict = {'xml': '', 'description': ''}
if key:
root_usage = RootUsageDao.load(key)
rt = Runtime(self, is_admin=True)
block = rt.get_block(root_usage.usage_id)
xml_buffer = StringIO()
rt.export_to_xml(block, xml_buffer)
payload_dict = {
'xml': xml_buffer.getvalue(),
'description': root_usage.description}
transforms.send_json_response(
self, 200, 'Success',
payload_dict=payload_dict,
xsrf_token=utils.XsrfTokenManager.create_xsrf_token(
self.XSRF_TOKEN))
def import_and_validate(self, key, unvalidated_dict):
errors = []
try:
validated_dict = transforms.json_to_dict(
unvalidated_dict, self.SCHEMA.get_json_schema_dict())
except ValueError as err:
errors.append(str(err))
return (None, errors)
if not validated_dict.get('description'):
errors.append('Missing description field')
descriptions = {
root.description for root in RootUsageDao.get_all()
if not key or root.id != long(key)}
if validated_dict['description'] in descriptions:
errors.append(
'The description must be different from existing XBlocks.')
if not validated_dict.get('xml'):
errors.append('Missing XML data')
return validated_dict, errors
@ndb.toplevel
def put(self):
request = transforms.loads(self.request.get('request'))
key = request.get('key') or None
if not self.assert_xsrf_token_or_fail(
request, self.XSRF_TOKEN, {'key': key}):
return
if not unit_lesson_editor.CourseOutlineRights.can_edit(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
payload, errors = self.import_and_validate(
key, transforms.loads(request.get('payload')))
if errors:
self.validation_error('\n'.join(errors), key=key)
return
try:
rt = Runtime(self, is_admin=True)
usage_id = rt.parse_xml_string(
unicode(payload['xml']).encode('utf_8'), None)
except Exception as e: # pylint: disable=broad-except
transforms.send_json_response(self, 412, str(e))
return
root_usage = RootUsageDto(
key, {'description': payload['description'], 'usage_id': usage_id})
key = RootUsageDao.save(root_usage)
transforms.send_json_response(
self, 200, 'Saved.', payload_dict={'key': key})
def delete(self):
key = self.request.get('key')
if not self.assert_xsrf_token_or_fail(
self.request, self.XSRF_TOKEN, {'key': key}):
return
if not unit_lesson_editor.CourseOutlineRights.can_edit(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
# TODO(jorr): Remove the tree from the UsageStore?
RootUsageDao.delete(RootUsageDto(key, {}))
transforms.send_json_response(self, 200, 'Deleted.')
class XBlockArchiveRESTHandler(utils.BaseRESTHandler):
"""Provide the REST API for importing XBlock archives."""
URI = '/rest/xblock_archive'
SCHEMA = schema_fields.FieldRegistry('XBlock', description='XBlock XML')
SCHEMA.add_property(
schema_fields.SchemaField(
'file', 'File', 'string', optional=True,
description=messages.XBLOCK_ARCHIVE_FIELD,
extra_schema_dict_values={'_type': 'file'}))
SCHEMA.add_property(
schema_fields.SchemaField(
'dry_run', 'Dry Run', 'boolean', optional=True,
description=messages.XBLOCK_ARCHIVE_DRY_RUN))
REQUIRED_MODULES = ['inputex-file', 'io-upload-iframe', 'inputex-checkbox']
XSRF_TOKEN = 'xblock-import'
def get(self):
"""Provide empty inital content for import editor."""
transforms.send_json_response(
self, 200, 'Success',
payload_dict={
'file': '',
'upload_url': blobstore.create_upload_url(
self.canonicalize_url(self.URI)),
'poller_url': self.canonicalize_url(
XBlockArchiveProgressQueryHandler.URI)
},
xsrf_token=utils.XsrfTokenManager.create_xsrf_token(
self.XSRF_TOKEN))
def post(self):
assert courses.is_editable_fs(self.app_context)
request = transforms.loads(self.request.get('request'))
if not self.assert_xsrf_token_or_fail(
request, self.XSRF_TOKEN, {'key': ''}):
return
if (not unit_lesson_editor.CourseOutlineRights.can_edit(self) or
not filer.FilesRights.can_add(self)):
transforms.send_file_upload_response(
self, 401, 'Access denied.')
return
try:
payload = transforms.json_to_dict(
transforms.loads(request.get('payload')),
self.SCHEMA.get_json_schema_dict())
except ValueError as err:
transforms.send_file_upload_response(self, 412, str(err))
return
dry_run = payload.get('dry_run', False)
upload = self.request.POST['file']
if not isinstance(upload, cgi.FieldStorage):
transforms.send_file_upload_response(
self, 403, 'No file specified.')
return
blob_key = blobstore.parse_blob_info(upload).key()
XBlockArchiveJob(
self.app_context, blob_key=blob_key, dry_run=dry_run).submit()
# Pass a new upload url back to the page for future uploads
new_upload_url = blobstore.create_upload_url(
self.canonicalize_url(self.URI))
transforms.send_file_upload_response(
self, 200, 'Processing upload...',
payload_dict={'new_upload_url': new_upload_url})
class XBlockArchiveJob(jobs.DurableJob):
"""The offline job which handles installing an uploaded archive file."""
def __init__(self, app_context, blob_key=None, dry_run=True):
super(XBlockArchiveJob, self).__init__(app_context)
self.app_context = app_context
self.blob_key = blob_key
self.dry_run = dry_run
@ndb.toplevel
def run(self):
def status(success_flag, message):
return {
'success': success_flag,
'message': message}
blob_info = blobstore.BlobInfo.get(self.blob_key)
try:
fileobj = blobstore.BlobReader(
self.blob_key, buffer_size=1024 * 1024)
archive = tarfile.open(fileobj=fileobj, mode='r:gz')
except Exception as e: # pylint: disable=broad-except
return status(False, 'Unable to read the archive file: %s' % e)
try:
course = courses.Course(None, app_context=self.app_context)
rt = Runtime(self, is_admin=True)
journal = []
importer = Importer(
archive=archive, course=course, fs=self.app_context.fs.impl,
rt=rt, dry_run=self.dry_run, journal=journal)
importer.parse()
validation_errors = importer.validate()
if validation_errors:
return status(
False, 'Import failed: %s' % '\n'.join(validation_errors))
importer.do_import()
if self.dry_run:
return status(
True,
'Upload successfully validated:\n%s' % '\n'.join(journal))
course.save()
except Exception as e: # pylint: disable=broad-except
logging.exception('Import failed')
return status(False, 'Import failed: %s' % e)
finally:
archive.close()
return status(
True, 'Upload successfully imported:\n%s' % '\n'.join(journal))
class XBlockArchiveProgressQueryHandler(utils.BaseRESTHandler):
"""A handler to respond to Ajax polling on the progress of the import."""
URI = '/rest/xblock_archive_progress'
def get(self):
job = XBlockArchiveJob(self.app_context)
if job.is_active():
payload_dict = {'complete': False}
else:
payload_dict = {
'complete': True,
'output': job.load().output}
transforms.send_json_response(
self, 200, 'Polling', payload_dict=payload_dict)
class BadImportException(Exception):
"""Exception raised when in Importer."""
pass
class Differ(object):
"""Base class for tracking the difference between two lists of objects.
The types of object in the two lists need not be the same, and so subclasses
must implements methods which extract an 'id' from members of the 'old list'
and the 'new list'. The result will be three classes:
unbound: the set of objects in the old list whioch have no ids.
bindings: a dict of mappings of ids from the new list to objects with the
same id in the old list.
orphans: the set of objects in the old list which have ids but are do not
correspond to the ids of any elements in the new list.
"""
def __init__(self, new_objects, old_objects):
self.unbound = set()
self._new_ids = set()
self.bindings = {}
self.orphans = set()
for new in new_objects:
_id = self.get_new_id(new)
assert _id
self._new_ids.add(_id)
for old in old_objects:
_id = self.get_old_id(old)
if not _id:
self.unbound.add(old)
elif _id in self._new_ids:
self.bindings[_id] = old
else:
self.orphans.add(old)
def get_new_id(self, new):
raise NotImplementedError()
def get_old_id(self, old):
raise NotImplementedError()
def bind(self, new, old):
raise NotImplementedError()
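# Illustrative note (added; the ids and object names are made up). For a Differ
# subclass whose new objects carry ids {'a', 'b'} and whose old objects are
# old1 (id 'a'), old2 (id 'c') and old3 (no id), the constructor produces:
#     bindings == {'a': old1}   # old objects matched by id to new objects
#     orphans  == set([old2])   # old objects whose ids no longer appear
#     unbound  == set([old3])   # old objects that never had an id
# Chapter2UnitMapper and Sequential2LessonMapper below are the concrete uses.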
class Sequential2LessonMapper(Differ):
"""A class that handles mapping sequentials to lessons."""
def __init__(self, importer, chapter, unit):
super(Sequential2LessonMapper, self).__init__(
chapter, importer.course.get_lessons(unit.unit_id))
def get_new_id(self, sequential):
return sequential.attrib['usage_id']
def get_old_id(self, lesson):
return lesson.properties.get('xblock.usage_id')
def bind(self, sequential, lesson):
lesson.properties['xblock.usage_id'] = sequential.attrib['usage_id']
class Chapter2UnitMapper(Differ):
"""A class that handles mapping chapters to units."""
def __init__(self, importer):
super(Chapter2UnitMapper, self).__init__(
importer.course_root, importer.course.get_units())
def get_new_id(self, chapter):
return chapter.attrib['usage_id']
def get_old_id(self, unit):
return unit.properties.get('xblock.usage_id')
def bind(self, chapter, unit):
unit.properties['xblock.usage_id'] = chapter.attrib['usage_id']
class Importer(object):
"""Manages the import of an XBlock archive file."""
def __init__(
self, archive=None, course=None, fs=None, rt=None, dry_run=False,
journal=None):
self.archive = archive
self.course = course
self.fs = fs
self.rt = rt
self.dry_run = dry_run
self.base = self._get_base_folder_name()
self.course_root = None
self.journal = journal if journal is not None else []
def parse(self):
"""Assemble the XML files in the archive into a single DOM."""
course_file = self.archive.extractfile('%s/course.xml' % self.base)
self.course_root = self._walk_tree(
cElementTree.parse(course_file).getroot())
def validate(self):
"""Check that the course structure is compatible with CB."""
errors = []
# the root must be a course
if self.course_root.tag != 'course':
errors.append('There is no root course tag.')
# The immediate children must be chapters
for child in self.course_root:
if child.tag != 'chapter':
errors.append('All content must be in chapters.')
break
# The grandchildren must be sequentials
for grandchild in child:
if grandchild.tag != 'sequential':
errors.append('Chapters may only contain sequentials.')
break
return errors
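    # Illustrative note (added; the display_name values are made up). validate()
    # only accepts archives whose course.xml expands to the shape:
    #
    #     <course>
    #       <chapter display_name="Unit 1" usage_id="...">
    #         <sequential display_name="Lesson 1.1" usage_id="...">
    #           ... arbitrary XBlock content ...
    #         </sequential>
    #       </chapter>
    #     </course>
    #
    # so chapters become CB units and sequentials become CB lessons.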
def _update_unit(self, chapter, unit):
new_title = chapter.attrib['display_name']
old_title = unit.title
unit.title = new_title
self.journal.append('Update unit title from \'%s\' to \'%s\'' % (
old_title, new_title))
def _create_unit(self, chapter):
assert chapter.tag == 'chapter'
unit = self.course.add_unit()
unit.title = chapter.attrib['display_name']
self.journal.append('Create unit \'%s\'' % unit.title)
return unit
def _update_lesson(self, sequential, lesson):
new_title = sequential.attrib['display_name']
old_title = lesson.title
lesson.title = new_title
self.journal.append('Update lesson title from \'%s\' to \'%s\'' % (
old_title, new_title))
def _create_lesson(self, sequential, unit):
assert sequential.tag == 'sequential'
lesson = self.course.add_lesson(unit)
lesson.title = sequential.attrib['display_name']
self.journal.append('Create lesson \'%s\'' % lesson.title)
return lesson
def _update_lesson_xblock_content(self, sequential, unit, lesson):
xml_buffer = StringIO()
cElementTree.ElementTree(element=sequential).write(xml_buffer)
orig_xml_buff = StringIO()
new_xml_buff = StringIO()
# Get the original XML repr of this sequential for comparison
usage_id = sequential.attrib['usage_id']
try:
orig_xml = self.rt.get_block(usage_id)
self.rt.export_to_xml(orig_xml, orig_xml_buff)
except xblock.exceptions.NoSuchUsage:
pass # Buffer will be empty
usage_id = self.rt.parse_xml_string(
xml_buffer.getvalue(), None, orig_xml_str=orig_xml_buff.getvalue(),
dry_run=self.dry_run, log=new_xml_buff)
# Journal the effect of the update
if orig_xml_buff.getvalue() == new_xml_buff.getvalue():
action = 'unchanged'
elif not orig_xml_buff.getvalue():
action = 'inserted'
else:
action = 'updated'
self.journal.append(
'XBlock content %(action)s in \'%(title)s\' (%(id)s)' % {
'action': action, 'title': lesson.title, 'id': usage_id})
# Insert a RootUsageEntity to link the lesson to the XBlock
description = 'Unit %s, Lesson %s: %s' % (
unit.index, lesson.index, lesson.title)
root_usage = RootUsageDto(
None, {
'description': description,
'usage_id': usage_id,
'is_imported': True})
root_id = RootUsageDao.save(root_usage) if not self.dry_run else 'xxx'
# insert the xblock asset into lesson content
lesson.objectives = '<xblock root_id="%s"></xblock>' % root_id
def _delete_all_imported_root_usage_dtos(self):
dao = RootUsageDao()
for dto in RootUsageDao.get_all():
if dto.is_imported:
dao.delete(dto)
def do_import(self):
"""Perform the import and create resources in CB."""
finalize_writes_callback = self._import_static_files()
if not self.dry_run:
self._delete_all_imported_root_usage_dtos()
cu_mapper = Chapter2UnitMapper(self)
for chapter in self.course_root:
chapter_usage_id = chapter.attrib['usage_id']
unit = cu_mapper.bindings.get(chapter_usage_id)
if unit:
self._update_unit(chapter, unit)
else:
unit = self._create_unit(chapter)
cu_mapper.bind(chapter, unit)
sl_mapper = Sequential2LessonMapper(self, chapter, unit)
for sequential in chapter:
sequential_usage_id = sequential.attrib['usage_id']
lesson = sl_mapper.bindings.get(sequential_usage_id)
if lesson:
self._update_lesson(sequential, lesson)
else:
lesson = self._create_lesson(sequential, unit)
sl_mapper.bind(sequential, lesson)
self._update_lesson_xblock_content(sequential, unit, lesson)
for lesson in sl_mapper.orphans:
self.journal.append('Delete lesson \'%s\'' % lesson.title)
self.course.delete_lesson(lesson)
for unit in cu_mapper.orphans:
self.journal.append('Delete unit \'%s\'' % unit.title)
self.course.delete_unit(unit)
# Wait for async db operations to complete
finalize_writes_callback()
def _get_base_folder_name(self):
for member in self.archive.getmembers():
if member.isdir() and '/' not in member.name:
return member.name
return None
def _walk_tree(self, node):
if 'url_name' in node.attrib:
            # If the node refers to another file, open it and merge it in
target_path = '%s/%s/%s.xml' % (
self.base, node.tag, node.attrib['url_name'])
target_file = self.archive.extractfile(target_path)
sub_tree = self._walk_tree(
cElementTree.parse(target_file).getroot())
sub_tree.attrib['usage_id'] = node.attrib['url_name']
return sub_tree
elif node.tag == 'html':
if 'filename' in node.attrib:
# If the node is an <html/> block with externalized content,
# read it in.
target_path = '%s/html/%s.html' % (
self.base, node.attrib['filename'])
target_file = self.archive.extractfile(target_path)
node.append(tags.html_string_to_element_tree(
target_file.read().decode('utf8')))
del node.attrib['filename']
self._rebase_html_refs(node)
return node
else:
for index, child in enumerate(node):
new_child = self._walk_tree(child)
node.remove(child)
node.insert(index, new_child)
return node
def _rebase_html_refs(self, node):
"""Rebase HTML references based on /static to use CB namespace."""
for attr in ['href', 'src']:
if node.attrib.get(attr, '').startswith('/static/'):
node.attrib[attr] = 'assets/img%s' % node.attrib[attr]
for child in node:
self._rebase_html_refs(child)
def _import_static_files(self):
filedata_list = []
for member in self.archive.getmembers():
if member.isfile() and member.name.startswith(
'%s/static/' % self.base):
self._insert_filedata(filedata_list, member)
return self.fs.put_multi_async(filedata_list)
def _insert_filedata(self, filedata_list, member):
"""Extract the tarfile member into /assets/img/static."""
ph_path = '/assets/img/%s' % member.name[len(self.base) + 1:]
path = self.fs.physical_to_logical(ph_path)
if self.fs.isfile(path):
self.journal.append('Updating file \'%s\'' % ph_path)
else:
self.journal.append('Inserting file \'%s\'' % ph_path)
if member.size > MAX_ASSET_UPLOAD_SIZE_K * 1024:
raise BadImportException(
'Cannot upload files bigger than %s K' %
MAX_ASSET_UPLOAD_SIZE_K)
if self.dry_run:
return
filedata_list.append((path, self.archive.extractfile(member)))
# XBlock component tag section
GUEST_USER_SESSION_COOKIE = 'cb-guest-session'
GUEST_USER_SESSION_COOKIE_MAX_AGE_SEC = 48 * 60 * 60 # 48 hours
def get_session_id_for_guest_user(handler):
session_cookie = handler.request.cookies.get(
GUEST_USER_SESSION_COOKIE, '')
# If the session cookie is missing or invalid, generate a new one
if not re.match('^[0-9a-f]{32}$', session_cookie):
session_cookie = uuid.uuid4().hex
handler.response.set_cookie(
GUEST_USER_SESSION_COOKIE, session_cookie,
max_age=GUEST_USER_SESSION_COOKIE_MAX_AGE_SEC)
return 'guest-%s' % session_cookie
def get_enrolled_user_id_or_guest_user_id(handler):
"""Return a workable user id in every case.
If there is a user in session who has registered for the course, then return
their user id. Otherwise return a guest user id.
Args:
handler: BaseHandler. The request handler for the user session.
Returns:
string. A user ID.
"""
user = handler.get_user()
if user is None:
return get_session_id_for_guest_user(handler)
elif m_models.Student.get_enrolled_student_by_email(user.email()) is None:
return get_session_id_for_guest_user(handler)
else:
return str(user.user_id())
class XBlockTag(tags.ContextAwareTag):
binding_name = 'xblock'
@classmethod
def name(cls):
return 'Embedded XBlocks'
@classmethod
def vendor(cls):
return 'google'
def get_icon_url(self):
return RESOURCES_URI + '/xblock.png'
def get_schema(self, unused_handler):
"""Get the schema for specifying the question."""
root_list = [
(unicode(root.id), root.description)
for root in RootUsageDao.get_all()]
root_list.sort(key=lambda x: x[1].lower())
if not root_list:
return self.unavailable_schema('No XBlocks available')
reg = schema_fields.FieldRegistry('XBlocks')
reg.add_property(schema_fields.SchemaField(
'root_id', messages.XBLOCK_INSTANCE, 'string', optional=True,
select_data=root_list))
return reg
def render(self, node, context):
root_id = node.attrib.get('root_id')
usage_id = RootUsageDao.load(root_id).usage_id
student_id = get_enrolled_user_id_or_guest_user_id(context.handler)
runtime = Runtime(context.handler, student_id=student_id)
block = runtime.get_block(usage_id)
fragment = runtime.render(block, 'student_view')
fragment_list = context.env.get('fragment_list')
if fragment_list is None:
fragment_list = []
context.env['fragment_list'] = fragment_list
fragment_list.append(fragment)
return tags.html_string_to_element_tree(
'<div>%s</div>' % fragment.body_html())
def rollup_header_footer(self, context):
wrapper = xblock.fragment.Fragment()
for frag in context.env.get('fragment_list', []):
wrapper.add_frag_resources(frag)
return (
tags.html_string_to_element_tree(
'<div>%s</div>' % wrapper.head_html()),
tags.html_string_to_element_tree(
'<div>%s</div>' % wrapper.foot_html()))
class XBlockResourcesHandler(tags.ResourcesHandler):
"""Resource handler to serve static files from XBlock workbench."""
def rebase_path(self, path):
assert path.startswith(XBLOCK_RESOURCES_URI)
return os.path.join(
WORKBENCH_STATIC_PATH,
os.path.normpath(path[len(XBLOCK_RESOURCES_URI) + 1:]))
class XBlockLocalResourceHandler(webapp2.RequestHandler):
"""Router for requests for a block's local resources."""
def get(self, block_type, resource):
xblock_class = xblock.core.XBlock.load_class(block_type)
mimetype = mimetypes.guess_type(resource)[0]
if mimetype is None:
mimetype = 'application/octet-stream'
self.response.status = 200
self.response.headers['Content-Type'] = mimetype
self.response.cache_control.no_cache = None
self.response.cache_control.public = 'public'
self.response.cache_control.max_age = 600
self.response.write(xblock_class.open_local_resource(resource).read())
# Data sanitization section
XBLOCK_EVENT_EXPORT_WHITELIST = {
'sequential', 'video', 'cbquestion', 'html', 'vertical'}
_orig_event_entity_for_export = None
def _set_new_event_entity_for_export_method():
"""Register the new for_export method on EventEntity."""
global _orig_event_entity_for_export
_orig_event_entity_for_export = m_models.EventEntity.for_export
m_models.EventEntity.for_export = _event_entity_for_export
def _set_orig_event_entity_for_export_method():
"""Restore the original for_export method on EventEntity."""
global _orig_event_entity_for_export
m_models.EventEntity.for_export = _orig_event_entity_for_export
_orig_event_entity_for_export = None
def _event_entity_for_export(model, transform_fn):
global _orig_event_entity_for_export
model = _orig_event_entity_for_export(model, transform_fn)
if model.source == XBLOCK_EVENT_SOURCE_NAME:
wrapper = transforms.loads(model.data)
if wrapper.get('type') not in XBLOCK_EVENT_EXPORT_WHITELIST:
model.data = transforms.dumps({
'usage': wrapper.get('usage'),
'type': wrapper.get('type'),
'event': transform_fn(transforms.dumps(wrapper.get('event')))
})
elif model.source == XBLOCK_TAG_EVENT_SOURCE_NAME:
wrapper = transforms.loads(model.data)
model.data = transforms.dumps({
'event': wrapper.get('event'),
'message': transform_fn(wrapper.get('message')),
'location': wrapper.get('location'),
'data': transform_fn(transforms.dumps(wrapper.get('data')))})
return model
# Module registration section
custom_module = None
def register_module():
"""Registers this module for use."""
def on_module_disabled():
_remove_editor_from_dashboard()
tags.Registry.remove_tag_binding(XBlockTag.binding_name)
for entity in [
dbmodels.DefinitionEntity, dbmodels.UsageEntity,
dbmodels.KeyValueEntity, RootUsageEntity]:
courses.COURSE_CONTENT_ENTITIES.remove(entity)
_set_orig_event_entity_for_export_method()
def on_module_enabled():
_add_editor_to_dashboard()
tags.Registry.add_tag_binding(
XBlockTag.binding_name, XBlockTag)
if not django.conf.settings.configured:
django.conf.settings.configure(
TEMPLATE_DIRS=[XBLOCK_TEMPLATES_PATH])
courses.COURSE_CONTENT_ENTITIES += [
dbmodels.DefinitionEntity, dbmodels.UsageEntity,
dbmodels.KeyValueEntity, RootUsageEntity]
_set_new_event_entity_for_export_method()
global_routes = [
(RESOURCES_URI + '/.*', tags.ResourcesHandler),
(XBLOCK_RESOURCES_URI + '/.*', XBlockResourcesHandler),
(
XBLOCK_LOCAL_RESOURCES_URI + r'/([^/]*)/(.*)',
XBlockLocalResourceHandler),
(MATHJAX_URI + '/(fonts/.*)', sites.make_zip_handler(os.path.join(
appengine_config.BUNDLE_ROOT, 'lib', 'MathJax-fonts.zip'))),
(MATHJAX_URI + '/(.*)', sites.make_zip_handler(os.path.join(
appengine_config.BUNDLE_ROOT, 'lib', 'MathJax.zip')))]
namespaced_routes = [(HANDLER_URI, XBlockActionHandler)]
global custom_module
custom_module = custom_modules.Module(
'Support for XBlocks within Course Builder',
'Adds the ability to use XBlock content within Course Builder.',
global_routes, namespaced_routes,
notify_module_disabled=on_module_disabled,
notify_module_enabled=on_module_enabled,
)
return custom_module
|
google/coursebuilder_xblock_module
|
src/modules/xblock_module/xblock_module.py
|
Python
|
apache-2.0
| 54,112
|
import poplib
import email
import time
class MailHelper:
def __init__(self, app):
self.app = app
def get_mail(self, username, password, subject):
        for i in range(5):
pop = poplib.POP3(self.app.config['james']['host'])
pop.user(username)
pop.pass_(password)
num = pop.stat()[0]
            if num > 0:
for n in range(num):
msglines = pop.retr(n+1)[1]
msgtext = "\n".join(map(lambda x: x.decode('utf-8'), msglines))
msg = email.message_from_string(msgtext)
if msg.get('Subject') == subject:
pop.dele(n+1)
pop.close()
return msg.get_payload()
pop.close()
time.sleep(3)
return None
|
Droriel/python_training_mantis
|
fixture/mail.py
|
Python
|
apache-2.0
| 858
|
from .main import main, zdkb
|
fprimex/zdkb
|
zdkb/__init__.py
|
Python
|
apache-2.0
| 29
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Appier Framework
# Copyright (c) 2008-2021 Hive Solutions Lda.
#
# This file is part of Hive Appier Framework.
#
# Hive Appier Framework is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Appier Framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Appier Framework. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2021 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import os
import imp
import sys
import inspect
import functools
import itertools
import contextlib
import collections
import urllib #@UnusedImport
ArgSpec = collections.namedtuple(
"ArgSpec",
["args", "varargs", "keywords", "defaults"]
)
@contextlib.contextmanager
def ctx_absolute():
root = sys.path.pop(0)
try: yield
finally: sys.path.insert(0, root)
with ctx_absolute():
try: import urllib2
except ImportError: urllib2 = None
with ctx_absolute():
try: import httplib
except ImportError: httplib = None
with ctx_absolute():
try: import http
except ImportError: http = None
with ctx_absolute():
try: import urllib.error
except ImportError: pass
with ctx_absolute():
try: import urllib.request
except ImportError: pass
with ctx_absolute():
try: import http.client
except ImportError: pass
try: import HTMLParser
except ImportError: import html.parser; HTMLParser = html.parser
try: import cPickle
except ImportError: import pickle; cPickle = pickle
try: import cStringIO
except ImportError: import io; cStringIO = io
try: import StringIO as _StringIO
except ImportError: import io; _StringIO = io
try: import urlparse as _urlparse
except ImportError: import urllib.parse; _urlparse = urllib.parse
PYTHON_3 = sys.version_info[0] >= 3
""" Global variable that defines if the current Python
interpreter is at least Python 3 compliant; this is used
to make some of the conversion decisions at runtime """
PYTHON_35 = sys.version_info[0] >= 3 and sys.version_info[1] >= 5
""" Global variable that defines if the current Python
interpreter is at least Python 3.5 compliant """
PYTHON_36 = sys.version_info[0] >= 3 and sys.version_info[1] >= 6
""" Global variable that defines if the current Python
interpreter is at least Python 3.6 compliant """
PYTHON_39 = sys.version_info[0] >= 3 and sys.version_info[1] >= 9
""" Global variable that defines if the current Python
interpreter is at least Python 3.9 compliant """
PYTHON_ASYNC = PYTHON_35
""" Global variable that defines if the current Python
interpreter supports the async/await syntax responsible
for the easy-to-use async methods """
PYTHON_ASYNC_GEN = PYTHON_36
""" Global variable that defines if the current Python
interpreter supports the async/await generator syntax
responsible for the async generator methods """
PYTHON_V = int("".join([str(v) for v in sys.version_info[:3]]))
""" The Python version integer describing the version of
the interpreter as a set of three integer digits """
if PYTHON_3: LONG = int
else: LONG = long #@UndefinedVariable
if PYTHON_3: BYTES = bytes
else: BYTES = str #@UndefinedVariable
if PYTHON_3: UNICODE = str
else: UNICODE = unicode #@UndefinedVariable
if PYTHON_3: OLD_UNICODE = None
else: OLD_UNICODE = unicode #@UndefinedVariable
if PYTHON_3: STRINGS = (str,)
else: STRINGS = (str, unicode) #@UndefinedVariable
if PYTHON_3: ALL_STRINGS = (bytes, str)
else: ALL_STRINGS = (bytes, str, unicode) #@UndefinedVariable
if PYTHON_3: INTEGERS = (int,)
else: INTEGERS = (int, long) #@UndefinedVariable
# saves a series of global symbols that are going to be
# used later for some of the legacy operations
_ord = ord
_chr = chr
_str = str
_bytes = bytes
_range = range
try: _xrange = xrange #@UndefinedVariable
except Exception: _xrange = None
if PYTHON_3: Request = urllib.request.Request
else: Request = urllib2.Request
if PYTHON_3: HTTPHandler = urllib.request.HTTPHandler
else: HTTPHandler = urllib2.HTTPHandler
if PYTHON_3: HTTPError = urllib.error.HTTPError
else: HTTPError = urllib2.HTTPError
if PYTHON_3: HTTPConnection = http.client.HTTPConnection #@UndefinedVariable
else: HTTPConnection = httplib.HTTPConnection
if PYTHON_3: HTTPSConnection = http.client.HTTPSConnection #@UndefinedVariable
else: HTTPSConnection = httplib.HTTPSConnection
try: _execfile = execfile #@UndefinedVariable
except Exception: _execfile = None
try: _reduce = reduce #@UndefinedVariable
except Exception: _reduce = None
try: _reload = reload #@UndefinedVariable
except Exception: _reload = None
try: _unichr = unichr #@UndefinedVariable
except Exception: _unichr = None
def with_meta(meta, *bases):
return meta("Class", bases, {})
def eager(iterable):
if PYTHON_3: return list(iterable)
return iterable
def iteritems(associative):
if PYTHON_3: return associative.items()
return associative.iteritems()
def iterkeys(associative):
if PYTHON_3: return associative.keys()
return associative.iterkeys()
def itervalues(associative):
if PYTHON_3: return associative.values()
return associative.itervalues()
def items(associative):
if PYTHON_3: return eager(associative.items())
return associative.items()
def keys(associative):
if PYTHON_3: return eager(associative.keys())
return associative.keys()
def values(associative):
if PYTHON_3: return eager(associative.values())
return associative.values()
def xrange(start, stop = None, step = 1):
if PYTHON_3: return _range(start, stop, step) if stop else _range(start)
return _xrange(start, stop, step) if stop else _range(start)
def range(start, stop = None, step = None):
if PYTHON_3: return eager(_range(start, stop, step)) if stop else eager(_range(start))
return _range(start, stop, step) if stop else _range(start)
def ord(value):
if PYTHON_3 and type(value) == int: return value
return _ord(value)
def chr(value):
if PYTHON_3: return _bytes([value])
if type(value) in INTEGERS: return _chr(value)
return value
def chri(value):
if PYTHON_3: return value
if type(value) in INTEGERS: return _chr(value)
return value
def bytes(value, encoding = "latin-1", errors = "strict", force = False):
if not PYTHON_3 and not force: return value
if value == None: return value
if type(value) == _bytes: return value
return value.encode(encoding, errors)
def str(value, encoding = "latin-1", errors = "strict", force = False):
if not PYTHON_3 and not force: return value
if value == None: return value
if type(value) in STRINGS: return value
return value.decode(encoding, errors)
def u(value, encoding = "utf-8", errors = "strict", force = False):
if PYTHON_3 and not force: return value
if value == None: return value
if type(value) == UNICODE: return value
return value.decode(encoding, errors)
def ascii(value, encoding = "utf-8", errors = "replace"):
if is_bytes(value): value = value.decode(encoding, errors)
else: value = UNICODE(value)
value = value.encode("ascii", errors)
value = str(value)
return value
def orderable(value):
if not PYTHON_3: return value
return Orderable(value)
def is_str(value):
return type(value) == _str
def is_unicode(value):
if PYTHON_3: return type(value) == _str
else: return type(value) == unicode #@UndefinedVariable
def is_bytes(value):
if PYTHON_3: return type(value) == _bytes
else: return type(value) == _str #@UndefinedVariable
def is_string(value, all = False):
target = ALL_STRINGS if all else STRINGS
return type(value) in target
def is_generator(value):
if inspect.isgenerator(value): return True
if type(value) in (itertools.chain,): return True
if hasattr(value, "_is_generator"): return True
return False
def is_async_generator(value):
if not hasattr(inspect, "isasyncgen"): return False
return inspect.isasyncgen(value)
def is_unittest(name = "unittest"):
current_stack = inspect.stack()
for stack_frame in current_stack:
for program_line in stack_frame[4]:
is_unittest = not name in program_line
if is_unittest: continue
return True
return False
def execfile(path, global_vars, local_vars = None, encoding = "utf-8"):
if local_vars == None: local_vars = global_vars
if not PYTHON_3: return _execfile(path, global_vars, local_vars)
file = open(path, "rb")
try: data = file.read()
finally: file.close()
data = data.decode(encoding)
code = compile(data, path, "exec")
exec(code, global_vars, local_vars) #@UndefinedVariable
def walk(path, visit, arg):
for root, dirs, _files in os.walk(path):
names = os.listdir(root)
visit(arg, root, names)
for dir in list(dirs):
exists = dir in names
not exists and dirs.remove(dir)
def getargspec(func):
has_full = hasattr(inspect, "getfullargspec")
if has_full: return ArgSpec(*inspect.getfullargspec(func)[:4])
else: return inspect.getargspec(func)
def reduce(*args, **kwargs):
if PYTHON_3: return functools.reduce(*args, **kwargs)
return _reduce(*args, **kwargs)
def reload(*args, **kwargs):
if PYTHON_3: return imp.reload(*args, **kwargs)
return _reload(*args, **kwargs)
def unichr(*args, **kwargs):
if PYTHON_3: return _chr(*args, **kwargs)
return _unichr(*args, **kwargs)
def urlopen(*args, **kwargs):
if PYTHON_3: return urllib.request.urlopen(*args, **kwargs)
else: return urllib2.urlopen(*args, **kwargs) #@UndefinedVariable
def build_opener(*args, **kwargs):
if PYTHON_3: return urllib.request.build_opener(*args, **kwargs)
else: return urllib2.build_opener(*args, **kwargs) #@UndefinedVariable
def urlparse(*args, **kwargs):
return _urlparse.urlparse(*args, **kwargs)
def urlunparse(*args, **kwargs):
return _urlparse.urlunparse(*args, **kwargs)
def parse_qs(*args, **kwargs):
return _urlparse.parse_qs(*args, **kwargs)
def urlencode(*args, **kwargs):
if PYTHON_3: return urllib.parse.urlencode(*args, **kwargs)
else: return urllib.urlencode(*args, **kwargs) #@UndefinedVariable
def quote(*args, **kwargs):
if PYTHON_3: return urllib.parse.quote(*args, **kwargs)
else: return urllib.quote(*args, **kwargs) #@UndefinedVariable
def quote_plus(*args, **kwargs):
if PYTHON_3: return urllib.parse.quote_plus(*args, **kwargs)
else: return urllib.quote_plus(*args, **kwargs) #@UndefinedVariable
def unquote(*args, **kwargs):
if PYTHON_3: return urllib.parse.unquote(*args, **kwargs)
else: return urllib.unquote(*args, **kwargs) #@UndefinedVariable
def unquote_plus(*args, **kwargs):
if PYTHON_3: return urllib.parse.unquote_plus(*args, **kwargs)
else: return urllib.unquote_plus(*args, **kwargs) #@UndefinedVariable
def cmp_to_key(*args, **kwargs):
if PYTHON_3: return dict(key = functools.cmp_to_key(*args, **kwargs)) #@UndefinedVariable
else: return dict(cmp = args[0])
def tobytes(self, *args, **kwargs):
if PYTHON_3: return self.tobytes(*args, **kwargs)
else: return self.tostring(*args, **kwargs)
def tostring(self, *args, **kwargs):
if PYTHON_3: return self.tobytes(*args, **kwargs)
else: return self.tostring(*args, **kwargs)
def StringIO(*args, **kwargs):
if PYTHON_3: return cStringIO.StringIO(*args, **kwargs)
else: return _StringIO.StringIO(*args, **kwargs)
def BytesIO(*args, **kwargs):
if PYTHON_3: return cStringIO.BytesIO(*args, **kwargs)
else: return cStringIO.StringIO(*args, **kwargs)
class Orderable(tuple):
"""
Simple tuple type wrapper that provides a simple
first element ordering, that is compatible with
both the Python 2 and Python 3+ infra-structures.
"""
def __cmp__(self, value):
return self[0].__cmp__(value[0])
def __lt__(self, value):
return self[0].__lt__(value[0])
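# Hedged usage sketch (added for illustration; `compare_length` and the sample
# list are made-up names). cmp_to_key returns keyword arguments for sorted() so
# the same call works on Python 2 (cmp=...) and Python 3 (key=...):
#
#     def compare_length(a, b):
#         return len(a) - len(b)
#
#     ordered = sorted(["bbb", "a", "cc"], **cmp_to_key(compare_length))
#
# Similarly, orderable((priority, payload)) wraps a tuple so that comparisons
# only consider the first element, which is useful on Python 3 when the
# payloads themselves are not comparable.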
|
hivesolutions/appier
|
src/appier/legacy.py
|
Python
|
apache-2.0
| 13,083
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Yue-Wen FANG'
__maintainer__ = "Yue-Wen FANG"
__email__ = 'fyuewen@gmail.com'
__license__ = 'Apache License 2.0'
__creation_date__= 'Dec. 28, 2018'
"""
9-3. Users: Make a class called User . Create two attributes called first_name and last_name, and then create
several other attributes that are typically stored in a user profile . Make a method called describe_user()
that prints a summary of the user’s information . Make another method called greet_user() that prints a
personalized greeting to the user .
Create several instances representing different users, and call both methods for each user .t mv dog.py
"""
class User:
"""
a class for User
"""
def __init__(self, first_name, last_name, gender, age, email='f@cn'):
        self.name = first_name + ' ' + last_name
self.gender = gender
self.age = age
self.email = email # if no email is specified, the default will be used
    def describe_user(self):
print('The profile of ' + self.name + ":")
print('Gender: ', self.gender)
print('Age: ', self.age)
print('Email: ', self.email)
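    def greet_user(self):
        # Added sketch: the exercise text above also asks for a greet_user()
        # method, which the original file did not define; this is one
        # possible minimal version.
        print('Hello, ' + self.name + '!')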
Tiantian_Li = User('Tiantian', 'Li', 'Male', '20', email='Li@cn')
Tiantian_Li.describe_user()
|
yw-fang/readingnotes
|
machine-learning/Matthes-crash-course/chapt09/scripts/user_03.py
|
Python
|
apache-2.0
| 1,263
|
from givabit.backend.errors import AlreadyExistsException
from givabit.backend.user import User
from givabit.backend.user_repository import UserRepository
from givabit.webapp.base_page import BasePage
from givabit.webapp.url import Url
class SignupPage(BasePage):
def __init__(self, request, response, user_repository=None):
BasePage.__init__(self, request, response)
self.user_repo = user_repository if user_repository is not None else UserRepository()
def get(self):
self.write_template('signup', {'title': 'Givabit - Sign up'})
def post(self):
POST = self.request.POST
email = POST['email']
user = User(email=email)
try:
self.user_repo.create_unconfirmed_user(user=user, send_email=True)
response = self.redirect(Url().for_page('signedup'))
return response
except AlreadyExistsException:
self.write_template('signup', {'title': 'Givabit - Sign up', 'success': False, 'error': 'User already exists'})
|
illicitonion/givabit
|
src/givabit/webapp/signup_page.py
|
Python
|
apache-2.0
| 1,031
|
# coding: utf-8
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubevirt
from kubevirt.rest import ApiException
from kubevirt.models.v1_i6300_esb_watchdog import V1I6300ESBWatchdog
class TestV1I6300ESBWatchdog(unittest.TestCase):
""" V1I6300ESBWatchdog unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1I6300ESBWatchdog(self):
"""
Test V1I6300ESBWatchdog
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubevirt.models.v1_i6300_esb_watchdog.V1I6300ESBWatchdog()
pass
if __name__ == '__main__':
unittest.main()
|
kubevirt/client-python
|
test/test_v1_i6300_esb_watchdog.py
|
Python
|
apache-2.0
| 927
|
import os, sys
sys.path = [os.path.join(os.getcwd(), "..") ] + sys.path
sys.path = [os.path.join(os.getcwd(), "..", "..") ] + sys.path
from flTile.amConfig import CreateAMConfig
def run():
tileConfig = CreateAMConfig()
#hostname = gethostname()
#machineDesc = tileConfig.getMachineDescByHostname(hostname)
print "Machine, local rects, absolute rects"
for machineDesc in tileConfig.machines:
localRects = []
absoluteRects = []
for tile in machineDesc.tiles:
localRects.append(tileConfig.getLocalDrawRect(tile.uid))
absoluteRects.append(tileConfig.getAbsoluteFullDisplayRect(tile.uid))
print machineDesc.hostname, localRects, absoluteRects
fullRect = tileConfig.getMainDisplayRect()
print "FULL DISPLAY:", fullRect.width, fullRect.height
if __name__ == "__main__":
run()
|
rpwagner/tiled-display
|
flTile/test/printTileConfigCoords.py
|
Python
|
apache-2.0
| 868
|
# pylint: skip-file
# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2019-09-15 20:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('passive_data_kit', '0068_remove_deviceissue_platform_version'),
]
operations = [
migrations.AlterField(
model_name='deviceissue',
name='state',
field=models.CharField(choices=[('opened', 'Opened'), ('in-progress', 'In Progress'), ('resolved', 'Resolved'), ('wont-fix', "Won't Fix")], default='opened', max_length=1024),
),
]
|
audaciouscode/PassiveDataKit-Django
|
migrations/0069_auto_20190915_1605.py
|
Python
|
apache-2.0
| 600
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import six
from gbpservice.nfp.core import log as nfp_logging
LOG = nfp_logging.getLogger(__name__)
deque = collections.deque
class SequencerEmpty(Exception):
pass
class SequencerBusy(Exception):
pass
"""Sequences the events. """
class EventSequencer(object):
class Sequencer(object):
def __init__(self):
# Events not scheduled are queued
self._waitq = deque()
# Currently scheduled event
self._scheduled = None
def _is_busy(self):
if self._scheduled:
raise SequencerBusy
def _is_empty(self):
if not len(self._waitq):
raise SequencerEmpty
def sequence(self, event):
self._waitq.append(event)
def run(self):
"""Run to get event to be scheduled.
If sequencer is busy - i.e, an event is already
scheduled and in progress raises busy except.
If sequencer is empty - i.e, no event in sequencer
raises empty except.
"""
self._is_busy()
self._is_empty()
# Pop the first element in the queue - FIFO
self._scheduled = self._waitq.popleft()
return self._scheduled
def is_scheduled(self, event):
if self._scheduled:
return self._scheduled.desc.uuid == event.desc.uuid and (
self._scheduled.id == event.id)
return True
def release(self):
self._scheduled = None
def pop(self):
self.release()
events = list(self._waitq)
self._waitq.clear()
return events
def __init__(self):
# Sequence of related events
# {key: sequencer()}
self._sequencer = {}
def sequence(self, key, event):
try:
self._sequencer[key].sequence(event)
except KeyError:
self._sequencer[key] = self.Sequencer()
self._sequencer[key].sequence(event)
message = "Sequenced event - %s" % (event.identify())
LOG.debug(message)
def run(self):
events = []
# Loop over copy and delete from original
sequencers = dict(self._sequencer)
for key, sequencer in six.iteritems(sequencers):
try:
event = sequencer.run()
if event:
message = "Desequenced event - %s" % (
event.identify())
LOG.debug(message)
event.sequence = False
events.append(event)
except SequencerBusy as exc:
pass
except SequencerEmpty as exc:
exc = exc
message = "Sequencer empty"
LOG.debug(message)
del self._sequencer[key]
return events
def pop(self):
events = []
sequencers = dict(self._sequencer)
for key, sequencer in six.iteritems(sequencers):
events += sequencer.pop()
return events
def release(self, key, event):
try:
message = "(event - %s) checking to release" % (event.identify())
LOG.debug(message)
if self._sequencer[key].is_scheduled(event):
message = "(event - %s) Releasing sequencer" % (
event.identify())
LOG.debug(message)
self._sequencer[key].release()
except KeyError:
return
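# Hedged usage sketch (added for illustration; the key string and dispatch()
# are assumptions). The intended protocol for EventSequencer is roughly:
#
#     sequencer = EventSequencer()
#     sequencer.sequence('network-1', event)    # queue an event under a key
#     for ev in sequencer.run():                # pop one runnable event per key
#         dispatch(ev)
#     sequencer.release('network-1', event)     # let the next queued event run
#
# run() skips keys whose current event is still in progress (SequencerBusy)
# and removes keys whose queues have drained (SequencerEmpty).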
|
noironetworks/group-based-policy
|
gbpservice/nfp/core/sequencer.py
|
Python
|
apache-2.0
| 4,161
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Portions Copyright © 2013 Hortonworks, Inc.
import logging
import os
from django.contrib.auth.models import User
from django.core import management
from django.core.management.base import NoArgsCommand
from django.utils.translation import ugettext as _
from hadoop import cluster
from hadoop.fs.hadoopfs import Hdfs
from liboozie.conf import REMOTE_DEPLOYMENT_DIR
from oozie.conf import LOCAL_SAMPLE_DATA_DIR, LOCAL_SAMPLE_DIR, REMOTE_SAMPLE_DIR
LOG = logging.getLogger(__name__)
class Command(NoArgsCommand):
def handle_noargs(self, **options):
fs = cluster.get_hdfs()
remote_dir = create_directories(fs)
# Copy examples binaries
for name in os.listdir(LOCAL_SAMPLE_DIR.get()):
local_dir = fs.join(LOCAL_SAMPLE_DIR.get(), name)
remote_data_dir = fs.join(remote_dir, name)
LOG.info(_('Copying examples %(local_dir)s to %(remote_data_dir)s\n') % {
'local_dir': local_dir, 'remote_data_dir': remote_data_dir})
fs.do_as_user(fs.DEFAULT_USER, fs.copyFromLocal, local_dir, remote_data_dir)
# Copy sample data
local_dir = LOCAL_SAMPLE_DATA_DIR.get()
remote_data_dir = fs.join(remote_dir, 'data')
LOG.info(_('Copying data %(local_dir)s to %(remote_data_dir)s\n') % {
'local_dir': local_dir, 'remote_data_dir': remote_data_dir})
fs.do_as_user(fs.DEFAULT_USER, fs.copyFromLocal, local_dir, remote_data_dir)
# Load jobs
sample, created = User.objects.get_or_create(username='sample')
management.call_command('loaddata', 'initial_oozie_examples.json', verbosity=2)
from oozie.models import Job
Job.objects.filter(owner__id=1100713).update(owner=sample) # 11OOZIE
def create_directories(fs):
# If needed, create the remote home, deployment and data directories
directories = (REMOTE_DEPLOYMENT_DIR.get(), REMOTE_SAMPLE_DIR.get())
for directory in directories:
if not fs.do_as_user("hdfs", fs.exists, directory):
remote_home_dir = Hdfs.join('/user', "hdfs")
if directory.startswith(remote_home_dir):
# Home is 755
fs.do_as_user("hdfs", fs.create_home_dir, remote_home_dir)
# Shared by all the users
fs.do_as_user("hdfs", fs.mkdir, directory, 511)
fs.do_as_user("hdfs", fs.chmod, directory, 511) # To remove after https://issues.apache.org/jira/browse/HDFS-3491
return REMOTE_SAMPLE_DIR.get()
|
hortonworks/hortonworks-sandbox
|
apps/oozie/src/oozie/management/commands/oozie_setup.py
|
Python
|
apache-2.0
| 3,188
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
LOGIN_EMAIL = 'your@email.here'
LOGIN_PASSWORD = 'your_password_here'
SUB_DOMAIN = 'www'
DOMAIN = 'your-domain.here'
|
leeyiw/dnspod_ddns
|
config.py
|
Python
|
apache-2.0
| 163
|
import unittest
from rx import Observable
from rx.testing import TestScheduler, ReactiveTest, is_prime, MockDisposable
from rx.disposables import Disposable, SerialDisposable
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class RxException(Exception):
pass
# Helper function for raising exceptions within lambdas
def _raise(ex):
raise RxException(ex)
class TestWithLatestFrom(unittest.TestCase):
def test_with_latest_from_never_never(self):
scheduler = TestScheduler()
e1 = Observable.never()
e2 = Observable.never()
def create():
return e1.with_latest_from(e2, lambda x, y: x + y)
results = scheduler.start(create)
results.messages.assert_equal()
def test_with_latest_from_never_empty(self):
scheduler = TestScheduler()
msgs = [on_next(150, 1), on_completed(210)]
e1 = Observable.never()
e2 = scheduler.create_hot_observable(msgs)
def create():
return e1.with_latest_from(e2, lambda x, y: x + y)
results = scheduler.start(create)
results.messages.assert_equal()
def test_with_latest_from_empty_never(self):
scheduler = TestScheduler()
msgs = [on_next(150, 1), on_completed(210)]
e1 = Observable.never()
e2 = scheduler.create_hot_observable(msgs)
def create():
return e2.with_latest_from(e1, lambda x, y: x + y)
results = scheduler.start(create)
results.messages.assert_equal(on_completed(210))
def test_with_latest_from_empty_empty(self):
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_completed(210)]
msgs2 = [on_next(150, 1), on_completed(210)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e2.with_latest_from(e1, lambda x, y: x + y)
results = scheduler.start(create)
results.messages.assert_equal(on_completed(210))
def test_with_latest_from_empty_return(self):
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_completed(210)]
msgs2 = [on_next(150, 1), on_next(215, 2), on_completed(220)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e1.with_latest_from(e2, lambda x, y: x + y)
results = scheduler.start(create)
results.messages.assert_equal(on_completed(210))
def test_with_latest_from_return_empty(self):
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_completed(210)]
msgs2 = [on_next(150, 1), on_next(215, 2), on_completed(220)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e2.with_latest_from(e1, lambda x, y: x + y)
results = scheduler.start(create)
results.messages.assert_equal(on_completed(220))
def test_with_latest_from_never_return(self):
scheduler = TestScheduler()
msgs = [on_next(150, 1), on_next(215, 2), on_completed(220)]
e1 = scheduler.create_hot_observable(msgs)
e2 = Observable.never()
def create():
return e1.with_latest_from(e2, lambda x, y: x + y)
results = scheduler.start(create)
results.messages.assert_equal(on_completed(220))
def test_with_latest_from_return_never(self):
scheduler = TestScheduler()
msgs = [on_next(150, 1), on_next(215, 2), on_completed(210)]
e1 = scheduler.create_hot_observable(msgs)
e2 = Observable.never()
def create():
return e2.with_latest_from(e1, lambda x, y: x + y)
results = scheduler.start(create)
results.messages.assert_equal()
def test_with_latest_from_return_return(self):
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_next(215, 2), on_completed(230)]
msgs2 = [on_next(150, 1), on_next(220, 3), on_completed(240)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e1.with_latest_from(e2, lambda x, y: x + y)
results = scheduler.start(create)
results.messages.assert_equal(on_completed(230))
def test_with_latest_from_empty_error(self):
ex = 'ex'
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_completed(230)]
msgs2 = [on_next(150, 1), on_error(220, ex)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e1.with_latest_from(e2, lambda x, y: x + y)
results = scheduler.start(create)
results.messages.assert_equal(on_error(220, ex))
def test_with_latest_from_error_empty(self):
ex = 'ex'
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_completed(230)]
msgs2 = [on_next(150, 1), on_error(220, ex)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e2.with_latest_from(e1, lambda x, y: x + y)
results = scheduler.start(create)
results.messages.assert_equal(on_error(220, ex))
def test_with_latest_from_return_throw(self):
ex = 'ex'
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_next(210, 2), on_completed(230)]
msgs2 = [on_next(150, 1), on_error(220, ex)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e1.with_latest_from(e2, lambda x, y: x + y)
results = scheduler.start(create)
results.messages.assert_equal(on_error(220, ex))
def test_with_latest_from_throw_return(self):
ex = 'ex'
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_next(210, 2), on_completed(230)]
msgs2 = [on_next(150, 1), on_error(220, ex)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e2.with_latest_from(e1, lambda x, y: x + y)
results = scheduler.start(create)
results.messages.assert_equal(on_error(220, ex))
def test_with_latest_from_throw_throw(self):
ex1 = 'ex1'
ex2 = 'ex2'
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_error(220, ex1)]
msgs2 = [on_next(150, 1), on_error(230, ex2)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e1.with_latest_from(e2, lambda x, y: x + y)
results = scheduler.start(create)
results.messages.assert_equal(on_error(220, ex1))
def test_with_latest_from_error_throw(self):
ex1 = 'ex1'
ex2 = 'ex2'
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_next(210, 2), on_error(220, ex1)]
msgs2 = [on_next(150, 1), on_error(230, ex2)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e1.with_latest_from(e2, lambda x, y: x + y)
results = scheduler.start(create)
results.messages.assert_equal(on_error(220, ex1))
def test_with_latest_from_throw_error(self):
ex1 = 'ex1'
ex2 = 'ex2'
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_next(210, 2), on_error(220, ex1)]
msgs2 = [on_next(150, 1), on_error(230, ex2)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e2.with_latest_from(e1, lambda x, y: x + y)
results = scheduler.start(create)
results.messages.assert_equal(on_error(220, ex1))
def test_with_latest_from_never_throw(self):
ex = 'ex'
scheduler = TestScheduler()
msgs = [on_next(150, 1), on_error(220, ex)]
e1 = Observable.never()
e2 = scheduler.create_hot_observable(msgs)
def create():
return e1.with_latest_from(e2, lambda x, y: x + y)
results = scheduler.start(create)
results.messages.assert_equal(on_error(220, ex))
def test_with_latest_from_throw_never(self):
ex = 'ex'
scheduler = TestScheduler()
msgs = [on_next(150, 1), on_error(220, ex)]
e1 = Observable.never()
e2 = scheduler.create_hot_observable(msgs)
def create():
return e2.with_latest_from(e1, lambda x, y: x + y)
results = scheduler.start(create)
results.messages.assert_equal(on_error(220, ex))
def test_with_latest_from_some_throw(self):
ex = 'ex'
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_next(215, 2), on_completed(230)]
msgs2 = [on_next(150, 1), on_error(220, ex)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e1.with_latest_from(e2, lambda x, y: x + y)
results = scheduler.start(create)
results.messages.assert_equal(on_error(220, ex))
def test_with_latest_from_throw_some(self):
ex = 'ex'
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_next(215, 2), on_completed(230)]
msgs2 = [on_next(150, 1), on_error(220, ex)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e2.with_latest_from(e1, lambda x, y: x + y)
results = scheduler.start(create)
results.messages.assert_equal(on_error(220, ex))
def test_with_latest_from_no_throw_after_complete_left(self):
ex = 'ex'
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_next(215, 2), on_completed(220)]
msgs2 = [on_next(150, 1), on_error(230, ex)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e1.with_latest_from(e2, lambda x, y: x + y)
results = scheduler.start(create)
results.messages.assert_equal(on_completed(220))
def test_with_latest_from_throw_after_complete_right(self):
ex = 'ex'
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_next(215, 2), on_completed(220)]
msgs2 = [on_next(150, 1), on_error(230, ex)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e2.with_latest_from(e1, lambda x, y: x + y)
results = scheduler.start(create)
results.messages.assert_equal(on_error(230, ex))
def test_with_latest_from_interleaved_with_tail(self):
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_next(215, 2), on_next(225, 4), on_completed(230)]
msgs2 = [on_next(150, 1), on_next(220, 3), on_next(230, 5), on_next(235, 6), on_next(240, 7), on_completed(250)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e1.with_latest_from(e2, lambda x, y: x + y)
results = scheduler.start(create)
results.messages.assert_equal(on_next(225, 3 + 4), on_completed(230))
def test_with_latest_from_consecutive(self):
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_next(215, 2), on_next(225, 4), on_completed(230)]
msgs2 = [on_next(150, 1), on_next(235, 6), on_next(240, 7), on_completed(250)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e1.with_latest_from(e2, lambda x, y: x + y)
results = scheduler.start(create)
results.messages.assert_equal(on_completed(230))
def test_with_latest_from_consecutive_end_with_error_left(self):
ex = 'ex'
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_next(215, 2), on_next(225, 4), on_error(230, ex)]
msgs2 = [on_next(150, 1), on_next(235, 6), on_next(240, 7), on_completed(250)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e1.with_latest_from(e2, lambda x, y: x + y)
results = scheduler.start(create)
results.messages.assert_equal(on_error(230, ex))
def test_with_latest_from_consecutive_end_with_error_right(self):
ex = 'ex'
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_next(215, 2), on_next(225, 4), on_completed(230)]
msgs2 = [on_next(150, 1), on_next(235, 6), on_next(240, 7), on_error(245, ex)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e2.with_latest_from(e1, lambda x, y: x + y)
results = scheduler.start(create)
results.messages.assert_equal(on_next(235, 4 + 6), on_next(240, 4 + 7), on_error(245, ex))
def test_with_latest_from_selector_throws(self):
ex = 'ex'
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_next(225, 2), on_completed(230)]
msgs2 = [on_next(150, 1), on_next(220, 3), on_completed(240)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e1.with_latest_from(e2, lambda x, y: _raise(ex))
results = scheduler.start(create)
results.messages.assert_equal(on_error(225, ex))
def test_with_latest_from_repeat_last_left_value(self):
scheduler = TestScheduler()
msgs1 = [on_next(150, 1), on_next(215, 2), on_next(225, 4), on_next(230, 5), on_completed(235)]
msgs2 = [on_next(150, 1), on_next(220, 3), on_completed(250)]
e1 = scheduler.create_hot_observable(msgs1)
e2 = scheduler.create_hot_observable(msgs2)
def create():
return e1.with_latest_from(e2, lambda x, y: x + y)
results = scheduler.start(create)
results.messages.assert_equal(on_next(225, 3 + 4), on_next(230, 3 + 5), on_completed(235))
if __name__ == '__main__':
unittest.main()
|
dbrattli/RxPY
|
tests/test_observable/test_withlatestfrom.py
|
Python
|
apache-2.0
| 14,723
|
#!/usr/bin/env python
from __future__ import print_function
from collections import Counter
from operator import itemgetter
import os
_path = os.path.abspath(os.path.dirname(__file__))
SOURCE = os.path.join(_path, 'poems_for_wordcount.txt')
DESTINATION = os.path.join(_path, 'poem_words_out.txt')
def sort_word_counts(word_dict):
    # First sort alphabetically by word (key) so ties are broken consistently,
    sorted_by_key = sorted(word_dict.items(), key=itemgetter(0))
    # then stably re-sort by count (value) in descending order.
    return sorted(sorted_by_key, key=itemgetter(1), reverse=True)
def main():
with open(SOURCE, 'rb') as source, open(DESTINATION, 'wb') as destination:
word_counts = Counter(source.read().lower().split())
for item in sort_word_counts(word_counts):
print("{} {}".format(*item), file=destination)
def test_sort_word_counts():
word_list = 'you watch the brown fox jumped over the fence'.split()
word_counts = Counter(word_list)
sorted_list = sort_word_counts(word_counts)
assert sorted_list[0][0] == 'the'
assert sorted_list[1][0] == 'brown'
assert sorted_list[-1][0] == 'you'
def test_output():
main()
output = open(DESTINATION, 'rb').readlines()
word, count = output[0].split()
assert len(output) == 3518
assert word == 'the'
assert int(count) == 1085
if __name__ == '__main__':
main()
|
clarkkarenl/brautbot
|
wordcount.py
|
Python
|
artistic-2.0
| 1,390
|
from rethinkengine.fields import *
import unittest2 as unittest
class PrimaryKeyFieldTestCase(unittest.TestCase):
def test_default(self):
f = ObjectIdField()
self.assertEqual(f._default, None)
with self.assertRaises(TypeError):
ObjectIdField(default='')
def test_required(self):
with self.assertRaises(TypeError):
ObjectIdField(required=False)
def test_is_valid(self):
f = ObjectIdField()
self.assertTrue(f.is_valid('cdc14784-3327-492b-a1db-ad8a3b8abcef'))
def test_too_short(self):
f = ObjectIdField()
self.assertFalse(f.is_valid('cdc14784-3327-492b-a1db-ad8a3b8abce'))
def test_too_long(self):
f = ObjectIdField()
self.assertFalse(f.is_valid('cdc14784-3327-492b-a1db-ad8a3b8abcefa'))
def test_wrong_chars(self):
f = ObjectIdField()
self.assertFalse(f.is_valid('zzzzzzzz-3327-492b-a1db-ad8a3b8abcef'))
def test_wrong_type(self):
f = ObjectIdField()
self.assertFalse(f.is_valid(123))
class StringFieldTestCase(unittest.TestCase):
def test_default(self):
f = StringField()
self.assertEqual(f._default, None)
f = StringField(default='foo')
self.assertEqual(f._default, 'foo')
def test_none(self):
f = StringField(required=False)
self.assertTrue(f.is_valid(None))
f = StringField(required=True)
self.assertFalse(f.is_valid(None))
def test_is_valid(self):
f = StringField()
self.assertTrue(f.is_valid('foo'))
self.assertTrue(f.is_valid(''))
def test_wrong_type(self):
f = StringField()
self.assertFalse(f.is_valid(123))
class IntegerFieldTestCase(unittest.TestCase):
def test_default(self):
f = IntegerField()
self.assertEqual(f._default, None)
f = IntegerField(default=42)
self.assertEqual(f._default, 42)
def test_none(self):
f = IntegerField(required=False)
self.assertTrue(f.is_valid(None))
f = IntegerField(required=True)
self.assertFalse(f.is_valid(None))
def test_is_valid(self):
f = IntegerField()
self.assertTrue(f.is_valid(123))
def test_wrong_type(self):
f = IntegerField()
self.assertFalse(f.is_valid('foo'))
class FloatFieldTestCase(unittest.TestCase):
def test_default(self):
f = FloatField()
self.assertEqual(f._default, None)
f = FloatField(default=4.2)
self.assertEqual(f._default, 4.2)
def test_none(self):
f = FloatField(required=False)
self.assertTrue(f.is_valid(None))
f = FloatField(required=True)
self.assertFalse(f.is_valid(None))
def test_is_valid(self):
f = FloatField()
self.assertTrue(f.is_valid(123.456))
def test_wrong_type(self):
f = FloatField()
self.assertFalse(f.is_valid('foo'))
self.assertFalse(f.is_valid(0))
class ListFieldTestCase(unittest.TestCase):
def test_default(self):
f = ListField()
self.assertEqual(f._default, None)
f = ListField(default=[1, 2, 3])
self.assertEqual(f._default, [1, 2, 3])
def test_none(self):
f = ListField(required=False)
self.assertTrue(f.is_valid(None))
f = ListField(required=True)
self.assertFalse(f.is_valid(None))
def test_is_valid(self):
f = ListField()
self.assertTrue(f.is_valid([1, 2, 3]))
def test_is_valid_tuple(self):
f = ListField()
self.assertTrue(f.is_valid((1, 2, 3)))
def test_wrong_type(self):
f = ListField()
self.assertFalse(f.is_valid('foo'))
def test_element_type_string(self):
f = ListField(StringField)
self.assertEqual(f._element_type, StringField)
def test_element_type_invalid(self):
with self.assertRaises(TypeError):
f = ListField(str)
def test_element_type_is_valid(self):
f = ListField(StringField)
self.assertTrue(f.is_valid(['foo']))
def test_element_type_is_invalid(self):
f = ListField(StringField)
self.assertFalse(f.is_valid([42]))
class DictFieldTestCase(unittest.TestCase):
def test_default(self):
f = DictField()
self.assertEqual(f._default, None)
f = DictField(default={'foo': 'bar'})
self.assertEqual(f._default, {'foo': 'bar'})
def test_none(self):
f = DictField(required=False)
self.assertTrue(f.is_valid(None))
f = DictField(required=True)
self.assertFalse(f.is_valid(None))
def test_is_valid(self):
f = DictField()
self.assertTrue(f.is_valid({}))
self.assertTrue(f.is_valid({'foo': 1, 'bar': 2}))
def test_wrong_type(self):
f = DictField()
self.assertFalse(f.is_valid('foo'))
class BooleanFieldTestCase(unittest.TestCase):
def test_default(self):
f = BooleanField()
self.assertEqual(f._default, None)
f = BooleanField(default=True)
self.assertEqual(f._default, True)
def test_none(self):
f = BooleanField(required=False)
self.assertTrue(f.is_valid(None))
f = BooleanField(required=True)
self.assertFalse(f.is_valid(None))
def test_is_valid(self):
f = BooleanField()
self.assertTrue(f.is_valid(False))
self.assertTrue(f.is_valid(True))
def test_wrong_type(self):
f = BooleanField()
self.assertFalse(f.is_valid('foo'))
|
OpenSystemsLab/rethinkengine
|
tests/test_fields.py
|
Python
|
bsd-2-clause
| 5,550
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'SurveyQuestion.statistic'
db.delete_column(u'survey_surveyquestion', 'statistic_id')
def backwards(self, orm):
# Adding field 'SurveyQuestion.statistic'
db.add_column(u'survey_surveyquestion', 'statistic',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['statistics.Statistic'], null=True, blank=True),
keep_default=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'clinics.clinic': {
'Meta': {'object_name': 'Clinic'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'code': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_renovated': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'lga_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'pbf_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'town': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'ward': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'year_opened': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'})
},
u'clinics.clinicstaff': {
'Meta': {'object_name': 'ClinicStaff'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']"}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'staff_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'year_started': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'})
},
u'clinics.patient': {
'Meta': {'unique_together': "[('clinic', 'serial')]", 'object_name': 'Patient'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']", 'null': 'True', 'blank': 'True'}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'serial': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'clinics.service': {
'Meta': {'object_name': 'Service'},
'code': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
u'clinics.visit': {
'Meta': {'object_name': 'Visit'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Patient']"}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Service']", 'null': 'True', 'blank': 'True'}),
'staff': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.ClinicStaff']", 'null': 'True', 'blank': 'True'}),
'survey_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'visit_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'welcome_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'rapidsms.contact': {
'Meta': {'object_name': 'Contact'},
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'survey.survey': {
'Meta': {'object_name': 'Survey'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'flow_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'max_length': '32'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'survey.surveyquestion': {
'Meta': {'ordering': "['order', 'id']", 'unique_together': "[('survey', 'label')]", 'object_name': 'SurveyQuestion'},
'categories': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'designation': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '8'}),
'for_display': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'question_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'question_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"})
},
u'survey.surveyquestionresponse': {
'Meta': {'unique_together': "[('visit', 'question')]", 'object_name': 'SurveyQuestionResponse'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.SurveyQuestion']"}),
'response': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Service']", 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'visit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Visit']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['survey']
|
myvoice-nigeria/myvoice
|
myvoice/survey/migrations/0005_auto__del_field_surveyquestion_statistic.py
|
Python
|
bsd-2-clause
| 12,627
|
#!/usr/bin/env python
"""Process that loads the datastore"""
__author__ = 'Michael Meisinger, Thomas Lennan'
"""
Possible Features
- load objects into different datastores
- load from a directory of YML files in ion-definitions
- load from a ZIP of YMLs
- load an additional directory (not under GIT control)
- change timestamp for resources
- load a subset of objects by type, etc
"""
from pyon.public import CFG, log, ImmediateProcess, iex
from pyon.datastore import datastore_admin
from pyon.core import bootstrap
from pyon.core.bootstrap import get_sys_name
class DatastoreAdmin(ImmediateProcess):
"""
bin/pycc -x ion.process.bootstrap.datastore_loader.DatastoreLoader op=clear prefix=ion
bin/pycc -x ion.process.bootstrap.datastore_loader.DatastoreLoader op=dump path=res/preload/local/my_dump
bin/pycc -fc -x ion.process.bootstrap.datastore_loader.DatastoreLoader op=load path=res/preload/local/my_dump
bin/pycc -x ion.process.bootstrap.datastore_loader.DatastoreLoader op=dumpres
"""
def on_init(self):
pass
def on_start(self):
# print env temporarily to debug cei
import os
log.info('ENV vars: %s' % str(os.environ))
op = self.CFG.get("op", None)
datastore = self.CFG.get("datastore", None)
path = self.CFG.get("path", None)
prefix = self.CFG.get("prefix", get_sys_name()).lower()
log.info("DatastoreLoader: {op=%s, datastore=%s, path=%s, prefix=%s}" % (op, datastore, path, prefix))
self.da = datastore_admin.DatastoreAdmin()
if op:
if op == "load":
self.da.load_datastore(path, datastore, ignore_errors=False)
elif op == "dump":
self.da.dump_datastore(path, datastore)
elif op == "dumpres":
from ion.util.datastore.resources import ResourceRegistryHelper
rrh = ResourceRegistryHelper()
rrh.dump_resources_as_xlsx(path)
elif op == "blame":
# TODO make generic
self.da.get_blame_objects()
elif op == "clear":
self.da.clear_datastore(datastore, prefix)
else:
raise iex.BadRequest("Operation unknown")
else:
raise iex.BadRequest("No operation specified")
def on_quit(self):
pass
DatastoreLoader = DatastoreAdmin
|
scionrep/scioncc
|
src/ion/process/bootstrap/datastore_loader.py
|
Python
|
bsd-2-clause
| 2,396
|
#!/usr/bin/env python
from __future__ import print_function
import time
import pyupm_grove as grove
import pyupm_i2clcd as lcd
import pyupm_th02 as th02
import pyupm_guvas12d as upmUV
import pyupm_grovemoisture as upmMoisture
from phant import Phant
import requests
from iot_utils import *
__author__ = 'KT Kirk'
# Initialize Jhd1313m1 at 0x3E (LCD_ADDRESS) and 0x62 (RGB_ADDRESS)
myLcd = lcd.Jhd1313m1(0, 0x3E, 0x62)
myLcd.setColor(53, 249, 39 ) # Green
myLcd.setCursor(0,0)
myLcd.write('IoT')
# Instantiate a Grove Moisture sensor on analog pin A1
moisture = upmMoisture.GroveMoisture(1)
# Create the light sensor object using AIO pin 2
light = grove.GroveLight(2)
# Instantiate a UV sensor on analog pin A3
uv = upmUV.GUVAS12D(3)
# Analog reference voltage, usually 3.3 or 5.0
GUVAS12D_AREF = 5.0
SAMPLES_PER_QUERY = 1024
# Create the TH02 temperature/humidity sensor object on the I2C bus
i2c_th = th02.TH02()
#
p = Phant(keys["publicKey"],
'device', 'temp', 'humidity', 'light', "uv", "moisture",
private_key=keys["privateKey"])
device = open("/factory/serial_number").read().strip('\n')
while(True):
temp = i2c_th.getTemperature()
humid = i2c_th.getHumidity()
lux_val = light.value()
uv_val = uv.value(GUVAS12D_AREF, SAMPLES_PER_QUERY)
moisture_val = moisture.value()
myLcd.setCursor(1, 0)
try:
p.log(device, temp, humid, lux_val, uv_val, moisture_val)
except requests.exceptions.ConnectionError as e:
print("Connection error with data.sparkfun.com")
myLcd.setColor(255, 0, 0) # Red
myLcd.write("Error")
else:
        myLcd.setColor(53, 39, 249) # Blue
myLcd.write("Sent Bytes: {}".format(p.remaining_bytes))
#data = p.get()
#print(data['temp'])
time.sleep(60 * 5)
|
ktkirk/HSSI
|
IoT/iot_lcd.py
|
Python
|
bsd-2-clause
| 1,768
|
# -*- coding: utf-8 -*-
"""This module contains the parser/generators (or coders/encoders if you
prefer) for the classes/datatypes that are used in iCalendar:
###########################################################################
# This module defines these property value data types and property parameters
4.2 Defined property parameters are:
ALTREP, CN, CUTYPE, DELEGATED-FROM, DELEGATED-TO, DIR, ENCODING, FMTTYPE,
FBTYPE, LANGUAGE, MEMBER, PARTSTAT, RANGE, RELATED, RELTYPE, ROLE, RSVP,
SENT-BY, TZID, VALUE
4.3 Defined value data types are:
BINARY, BOOLEAN, CAL-ADDRESS, DATE, DATE-TIME, DURATION, FLOAT, INTEGER,
PERIOD, RECUR, TEXT, TIME, URI, UTC-OFFSET
###########################################################################
iCalendar properties have values. The values are strongly typed. This module
defines these types; calling val.to_ical() on them will render them as defined
in RFC 2445.
If you pass any of these classes a Python primitive, you will get an object
that can render itself as an iCalendar-formatted value.
Property value data types start with a 'v'. They all have a to_ical() and a
from_ical() method. The to_ical() method generates a text string in the
iCalendar format. The from_ical() method can parse this format and return a
primitive Python datatype. So it should always be true that:
x == vDataType.from_ical(vDataType(x).to_ical())
These types are mainly used for parsing and file generation. But you can set
them directly.
"""
from datetime import date
from datetime import datetime
from datetime import time
from datetime import timedelta
from datetime import tzinfo
try:
from dateutil.tz import tzutc
except ImportError:
tzutc = None
from icalendar import compat
from icalendar.caselessdict import CaselessDict
from icalendar.parser import Parameters
from icalendar.parser import escape_char
from icalendar.parser import tzid_from_dt
from icalendar.parser import unescape_char
from icalendar.parser_tools import DEFAULT_ENCODING
from icalendar.parser_tools import SEQUENCE_TYPES
from icalendar.parser_tools import to_unicode
import base64
import binascii
import pytz
import re
import time as _time
DATE_PART = r'(\d+)D'
TIME_PART = r'T(?:(\d+)H)?(?:(\d+)M)?(?:(\d+)S)?'
DATETIME_PART = '(?:%s)?(?:%s)?' % (DATE_PART, TIME_PART)
WEEKS_PART = r'(\d+)W'
DURATION_REGEX = re.compile(r'([-+]?)P(?:%s|%s)$'
% (WEEKS_PART, DATETIME_PART))
WEEKDAY_RULE = re.compile(r'(?P<signal>[+-]?)(?P<relative>[\d]?)'
                          r'(?P<weekday>[\w]{2})$')
####################################################
# handy tzinfo classes you can use.
#
ZERO = timedelta(0)
HOUR = timedelta(hours=1)
STDOFFSET = timedelta(seconds=-_time.timezone)
if _time.daylight:
DSTOFFSET = timedelta(seconds=-_time.altzone)
else:
DSTOFFSET = STDOFFSET
DSTDIFF = DSTOFFSET - STDOFFSET
class FixedOffset(tzinfo):
"""Fixed offset in minutes east from UTC.
"""
def __init__(self, offset, name):
self.__offset = timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return ZERO
class LocalTimezone(tzinfo):
"""Timezone of the machine where the code is running.
"""
def utcoffset(self, dt):
if self._isdst(dt):
return DSTOFFSET
else:
return STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return DSTDIFF
else:
return ZERO
def tzname(self, dt):
return _time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, -1)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
class vBinary(object):
"""Binary property values are base 64 encoded.
"""
def __init__(self, obj):
self.obj = to_unicode(obj)
self.params = Parameters(encoding='BASE64', value="BINARY")
def __repr__(self):
return "vBinary('%s')" % self.to_ical()
def to_ical(self):
return binascii.b2a_base64(self.obj.encode('utf-8'))[:-1]
@staticmethod
def from_ical(ical):
try:
return base64.b64decode(ical)
except UnicodeError:
raise ValueError('Not valid base 64 encoding.')
class vBoolean(int):
"""Returns specific string according to state.
"""
BOOL_MAP = CaselessDict({'true': True, 'false': False})
def __new__(cls, *args, **kwargs):
self = super(vBoolean, cls).__new__(cls, *args, **kwargs)
self.params = Parameters()
return self
def to_ical(self):
if self:
return b'TRUE'
return b'FALSE'
@classmethod
def from_ical(cls, ical):
try:
return cls.BOOL_MAP[ical]
except:
raise ValueError("Expected 'TRUE' or 'FALSE'. Got %s" % ical)
class vCalAddress(compat.unicode_type):
"""This just returns an unquoted string.
"""
def __new__(cls, value, encoding=DEFAULT_ENCODING):
value = to_unicode(value, encoding=encoding)
self = super(vCalAddress, cls).__new__(cls, value)
self.params = Parameters()
return self
def __repr__(self):
return "vCalAddress('%s')" % self.to_ical()
def to_ical(self):
return self.encode(DEFAULT_ENCODING)
@classmethod
def from_ical(cls, ical):
return cls(ical)
class vFloat(float):
"""Just a float.
"""
def __new__(cls, *args, **kwargs):
self = super(vFloat, cls).__new__(cls, *args, **kwargs)
self.params = Parameters()
return self
def to_ical(self):
return compat.unicode_type(self).encode('utf-8')
@classmethod
def from_ical(cls, ical):
try:
return cls(ical)
except:
raise ValueError('Expected float value, got: %s' % ical)
class vInt(int):
"""Just an int.
"""
def __new__(cls, *args, **kwargs):
self = super(vInt, cls).__new__(cls, *args, **kwargs)
self.params = Parameters()
return self
def to_ical(self):
return compat.unicode_type(self).encode('utf-8')
@classmethod
def from_ical(cls, ical):
try:
return cls(ical)
except:
raise ValueError('Expected int, got: %s' % ical)
class vDDDLists(object):
"""A list of vDDDTypes values.
"""
def __init__(self, dt_list):
if not hasattr(dt_list, '__iter__'):
dt_list = [dt_list]
vDDD = []
tzid = None
for dt in dt_list:
dt = vDDDTypes(dt)
vDDD.append(dt)
if 'TZID' in dt.params:
tzid = dt.params['TZID']
if tzid:
# NOTE: no support for multiple timezones here!
self.params = Parameters({'TZID': tzid})
self.dts = vDDD
def to_ical(self):
dts_ical = (dt.to_ical() for dt in self.dts)
return b",".join(dts_ical)
@staticmethod
def from_ical(ical, timezone=None):
out = []
ical_dates = ical.split(",")
for ical_dt in ical_dates:
out.append(vDDDTypes.from_ical(ical_dt, timezone=timezone))
return out
class vDDDTypes(object):
"""A combined Datetime, Date or Duration parser/generator. Their format
cannot be confused, and often values can be of either types.
So this is practical.
"""
def __init__(self, dt):
if not isinstance(dt, (datetime, date, timedelta, time)):
raise ValueError('You must use datetime, date, timedelta or time')
if isinstance(dt, datetime):
self.params = Parameters(dict(value='DATE-TIME'))
elif isinstance(dt, date):
self.params = Parameters(dict(value='DATE'))
elif isinstance(dt, time):
self.params = Parameters(dict(value='TIME'))
if (isinstance(dt, datetime) or isinstance(dt, time))\
and getattr(dt, 'tzinfo', False):
tzinfo = dt.tzinfo
if tzinfo is not pytz.utc and\
(tzutc is None or not isinstance(tzinfo, tzutc)):
# set the timezone as a parameter to the property
tzid = tzid_from_dt(dt)
if tzid:
self.params.update({'TZID': tzid})
self.dt = dt
def to_ical(self):
dt = self.dt
if isinstance(dt, datetime):
return vDatetime(dt).to_ical()
elif isinstance(dt, date):
return vDate(dt).to_ical()
elif isinstance(dt, timedelta):
return vDuration(dt).to_ical()
elif isinstance(dt, time):
return vTime(dt).to_ical()
else:
raise ValueError('Unknown date type')
@classmethod
def from_ical(cls, ical, timezone=None):
if isinstance(ical, cls):
return ical.dt
u = ical.upper()
if u.startswith('-P') or u.startswith('P'):
return vDuration.from_ical(ical)
try:
return vDatetime.from_ical(ical, timezone=timezone)
except ValueError:
try:
return vDate.from_ical(ical)
except ValueError:
return vTime.from_ical(ical)
class vDate(object):
"""Render and generates iCalendar date format.
"""
def __init__(self, dt):
if not isinstance(dt, date):
raise ValueError('Value MUST be a date instance')
self.dt = dt
self.params = Parameters(dict(value='DATE'))
def to_ical(self):
s = "%04d%02d%02d" % (self.dt.year, self.dt.month, self.dt.day)
return s.encode('utf-8')
@staticmethod
def from_ical(ical):
try:
timetuple = (
int(ical[:4]), # year
int(ical[4:6]), # month
int(ical[6:8]), # day
)
return date(*timetuple)
except:
raise ValueError('Wrong date format %s' % ical)
class vDatetime(object):
"""Render and generates icalendar datetime format.
vDatetime is timezone aware and uses the pytz library, an implementation of
the Olson database in Python. When a vDatetime object is created from an
ical string, you can pass a valid pytz timezone identifier. When a
vDatetime object is created from a python datetime object, it uses the
tzinfo component, if present. Otherwise an timezone-naive object is
created. Be aware that there are certain limitations with timezone naive
DATE-TIME components in the icalendar standard.
"""
def __init__(self, dt):
self.dt = dt
self.params = Parameters()
def to_ical(self):
dt = self.dt
tzid = tzid_from_dt(dt)
s = "%04d%02d%02dT%02d%02d%02d" % (
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second
)
if tzid == 'UTC':
s += "Z"
elif tzid:
self.params.update({'TZID': tzid})
return s.encode('utf-8')
@staticmethod
def from_ical(ical, timezone=None):
tzinfo = None
if timezone:
try:
tzinfo = pytz.timezone(timezone)
except pytz.UnknownTimeZoneError:
pass
try:
timetuple = (
int(ical[:4]), # year
int(ical[4:6]), # month
int(ical[6:8]), # day
int(ical[9:11]), # hour
int(ical[11:13]), # minute
int(ical[13:15]), # second
)
if tzinfo:
return tzinfo.localize(datetime(*timetuple))
elif not ical[15:]:
return datetime(*timetuple)
elif ical[15:16] == 'Z':
return pytz.utc.localize(datetime(*timetuple))
else:
raise ValueError(ical)
except:
raise ValueError('Wrong datetime format: %s' % ical)
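# Illustrative round-trip sketch for vDatetime (not part of the original
# module): demonstrates the to_ical()/from_ical() contract described in the
# module docstring. The sample date and timezone name are arbitrary choices.
def _example_vdatetime_roundtrip():
    naive = datetime(2021, 3, 14, 15, 9, 26)
    # A naive datetime renders without a trailing 'Z' and without a TZID.
    assert vDatetime(naive).to_ical() == b'20210314T150926'
    # A trailing 'Z' parses to a pytz UTC-aware datetime.
    assert vDatetime.from_ical('20210314T150926Z') == pytz.utc.localize(naive)
    # Without a suffix the result is naive, unless a timezone id is passed.
    assert vDatetime.from_ical('20210314T150926') == naive
    assert vDatetime.from_ical('20210314T150926',
                               timezone='Europe/Vienna').tzinfo is not None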
class vDuration(object):
"""Subclass of timedelta that renders itself in the iCalendar DURATION
format.
"""
def __init__(self, td):
if not isinstance(td, timedelta):
raise ValueError('Value MUST be a timedelta instance')
self.td = td
self.params = Parameters()
def to_ical(self):
sign = ""
if self.td.days < 0:
sign = "-"
self.td = -self.td
timepart = ""
if self.td.seconds:
timepart = "T"
hours = self.td.seconds // 3600
minutes = self.td.seconds % 3600 // 60
seconds = self.td.seconds % 60
if hours:
timepart += "%dH" % hours
if minutes or (hours and seconds):
timepart += "%dM" % minutes
if seconds:
timepart += "%dS" % seconds
if self.td.days == 0 and timepart:
return (compat.unicode_type(sign).encode('utf-8') + b'P' +
compat.unicode_type(timepart).encode('utf-8'))
else:
return (compat.unicode_type(sign).encode('utf-8') + b'P' +
compat.unicode_type(abs(self.td.days)).encode('utf-8') +
b'D' + compat.unicode_type(timepart).encode('utf-8'))
@staticmethod
def from_ical(ical):
try:
match = DURATION_REGEX.match(ical)
sign, weeks, days, hours, minutes, seconds = match.groups()
if weeks:
value = timedelta(weeks=int(weeks))
else:
value = timedelta(days=int(days or 0),
hours=int(hours or 0),
minutes=int(minutes or 0),
seconds=int(seconds or 0))
if sign == '-':
value = -value
return value
except:
raise ValueError('Invalid iCalendar duration: %s' % ical)
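# Illustrative sketch for vDuration (not part of the original module): shows
# how timedelta values map to the RFC 5545 DURATION syntax and back. The
# sample durations are arbitrary.
def _example_vduration_roundtrip():
    assert vDuration(timedelta(days=1, hours=2)).to_ical() == b'P1DT2H'
    assert vDuration(timedelta(hours=1)).to_ical() == b'PT1H'
    assert vDuration.from_ical('-P1DT2H') == -timedelta(days=1, hours=2)
    assert vDuration.from_ical('P2W') == timedelta(weeks=2)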
class vPeriod(object):
"""A precise period of time.
"""
def __init__(self, per):
start, end_or_duration = per
if not (isinstance(start, datetime) or isinstance(start, date)):
raise ValueError('Start value MUST be a datetime or date instance')
if not (isinstance(end_or_duration, datetime) or
isinstance(end_or_duration, date) or
isinstance(end_or_duration, timedelta)):
raise ValueError('end_or_duration MUST be a datetime, '
'date or timedelta instance')
by_duration = 0
if isinstance(end_or_duration, timedelta):
by_duration = 1
duration = end_or_duration
end = start + duration
else:
end = end_or_duration
duration = end - start
if start > end:
raise ValueError("Start time is greater than end time")
self.params = Parameters()
# set the timezone identifier
# does not support different timezones for start and end
tzid = tzid_from_dt(start)
if tzid:
self.params['TZID'] = tzid
self.start = start
self.end = end
self.by_duration = by_duration
self.duration = duration
def __cmp__(self, other):
if not isinstance(other, vPeriod):
raise NotImplementedError('Cannot compare vPeriod with %r' % other)
return cmp((self.start, self.end), (other.start, other.end))
def overlaps(self, other):
if self.start > other.start:
return other.overlaps(self)
if self.start <= other.start < self.end:
return True
return False
def to_ical(self):
if self.by_duration:
return (vDatetime(self.start).to_ical() + b'/' +
vDuration(self.duration).to_ical())
return (vDatetime(self.start).to_ical() + b'/' +
vDatetime(self.end).to_ical())
@staticmethod
def from_ical(ical):
try:
start, end_or_duration = ical.split('/')
start = vDDDTypes.from_ical(start)
end_or_duration = vDDDTypes.from_ical(end_or_duration)
return (start, end_or_duration)
except:
raise ValueError('Expected period format, got: %s' % ical)
def __repr__(self):
if self.by_duration:
p = (self.start, self.duration)
else:
p = (self.start, self.end)
return 'vPeriod(%r)' % p
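# Illustrative sketch for vPeriod (not part of the original module): a period
# can be built from (start, end) or (start, duration); both serialise to the
# "start/end-or-duration" form. The timestamps are arbitrary.
def _example_vperiod():
    start = datetime(2021, 3, 14, 9, 0, 0)
    by_end = vPeriod((start, datetime(2021, 3, 14, 10, 0, 0)))
    by_duration = vPeriod((start, timedelta(hours=1)))
    assert by_end.to_ical() == b'20210314T090000/20210314T100000'
    assert by_duration.to_ical() == b'20210314T090000/PT1H'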
class vWeekday(compat.unicode_type):
"""This returns an unquoted weekday abbrevation.
"""
week_days = CaselessDict({
"SU": 0, "MO": 1, "TU": 2, "WE": 3, "TH": 4, "FR": 5, "SA": 6,
})
def __new__(cls, value, encoding=DEFAULT_ENCODING):
value = to_unicode(value, encoding=encoding)
self = super(vWeekday, cls).__new__(cls, value)
match = WEEKDAY_RULE.match(self)
if match is None:
            raise ValueError('Expected weekday abbreviation, got: %s' % self)
match = match.groupdict()
sign = match['signal']
weekday = match['weekday']
relative = match['relative']
        if weekday not in vWeekday.week_days or sign not in '+-':
            raise ValueError('Expected weekday abbreviation, got: %s' % self)
self.relative = relative and int(relative) or None
self.params = Parameters()
return self
def to_ical(self):
return self.encode(DEFAULT_ENCODING).upper()
@classmethod
def from_ical(cls, ical):
try:
return cls(ical.upper())
except:
            raise ValueError('Expected weekday abbreviation, got: %s' % ical)
class vFrequency(compat.unicode_type):
"""A simple class that catches illegal values.
"""
frequencies = CaselessDict({
"SECONDLY": "SECONDLY",
"MINUTELY": "MINUTELY",
"HOURLY": "HOURLY",
"DAILY": "DAILY",
"WEEKLY": "WEEKLY",
"MONTHLY": "MONTHLY",
"YEARLY": "YEARLY",
})
def __new__(cls, value, encoding=DEFAULT_ENCODING):
value = to_unicode(value, encoding=encoding)
self = super(vFrequency, cls).__new__(cls, value)
        if self not in vFrequency.frequencies:
raise ValueError('Expected frequency, got: %s' % self)
self.params = Parameters()
return self
def to_ical(self):
return self.encode(DEFAULT_ENCODING).upper()
@classmethod
def from_ical(cls, ical):
try:
return cls(ical.upper())
except:
raise ValueError('Expected frequency, got: %s' % ical)
class vRecur(CaselessDict):
"""Recurrence definition.
"""
frequencies = ["SECONDLY", "MINUTELY", "HOURLY", "DAILY", "WEEKLY",
"MONTHLY", "YEARLY"]
# Mac iCal ignores RRULEs where FREQ is not the first rule part.
# Sorts parts according to the order listed in RFC 5545, section 3.3.10.
canonical_order = ("FREQ", "UNTIL", "COUNT", "INTERVAL",
"BYSECOND", "BYMINUTE", "BYHOUR", "BYDAY",
"BYMONTHDAY", "BYYEARDAY", "BYWEEKNO", "BYMONTH",
"BYSETPOS", "WKST")
types = CaselessDict({
'COUNT': vInt,
'INTERVAL': vInt,
'BYSECOND': vInt,
'BYMINUTE': vInt,
'BYHOUR': vInt,
'BYMONTHDAY': vInt,
'BYYEARDAY': vInt,
'BYMONTH': vInt,
'UNTIL': vDDDTypes,
'BYSETPOS': vInt,
'WKST': vWeekday,
'BYDAY': vWeekday,
'FREQ': vFrequency,
})
def __init__(self, *args, **kwargs):
super(vRecur, self).__init__(*args, **kwargs)
self.params = Parameters()
def to_ical(self):
result = []
for key, vals in self.sorted_items():
typ = self.types[key]
if not isinstance(vals, SEQUENCE_TYPES):
vals = [vals]
vals = b','.join(typ(val).to_ical() for val in vals)
# CaselessDict keys are always unicode
key = key.encode(DEFAULT_ENCODING)
result.append(key + b'=' + vals)
return b';'.join(result)
@classmethod
def parse_type(cls, key, values):
# integers
parser = cls.types.get(key, vText)
return [parser.from_ical(v) for v in values.split(',')]
@classmethod
def from_ical(cls, ical):
if isinstance(ical, cls):
return ical
try:
recur = cls()
for pairs in ical.split(';'):
key, vals = pairs.split('=')
recur[key] = cls.parse_type(key, vals)
return dict(recur)
except:
raise ValueError('Error in recurrence rule: %s' % ical)
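# Illustrative sketch for vRecur (not part of the original module): whatever
# the insertion order of the rule parts, FREQ is emitted first so that rules
# stay digestible for clients such as Mac iCal (see canonical_order above).
# The rule parts chosen here are arbitrary.
def _example_vrecur_order():
    rule = vRecur({'INTERVAL': 2, 'FREQ': 'WEEKLY', 'BYDAY': 'MO'})
    assert rule.to_ical().startswith(b'FREQ=WEEKLY')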
class vText(compat.unicode_type):
"""Simple text.
"""
def __new__(cls, value, encoding=DEFAULT_ENCODING):
value = to_unicode(value, encoding=encoding)
self = super(vText, cls).__new__(cls, value)
self.encoding = encoding
self.params = Parameters()
return self
def __repr__(self):
return "vText('%s')" % self.to_ical()
def to_ical(self):
return escape_char(self).encode(self.encoding)
@classmethod
def from_ical(cls, ical):
ical_unesc = unescape_char(ical)
return cls(ical_unesc)
class vTime(object):
"""Render and generates iCalendar time format.
"""
def __init__(self, *args):
if len(args) == 1:
if not isinstance(args[0], (time, datetime)):
raise ValueError('Expected a datetime.time, got: %s' % args[0])
self.dt = args[0]
else:
self.dt = time(*args)
self.params = Parameters(dict(value='TIME'))
def to_ical(self):
return self.dt.strftime("%H%M%S")
@staticmethod
def from_ical(ical):
# TODO: timezone support
try:
timetuple = (int(ical[:2]), int(ical[2:4]), int(ical[4:6]))
return time(*timetuple)
except:
raise ValueError('Expected time, got: %s' % ical)
class vUri(compat.unicode_type):
"""Uniform resource identifier is basically just an unquoted string.
"""
def __new__(cls, value, encoding=DEFAULT_ENCODING):
value = to_unicode(value, encoding=encoding)
self = super(vUri, cls).__new__(cls, value)
self.params = Parameters()
return self
def to_ical(self):
return self.encode(DEFAULT_ENCODING)
@classmethod
def from_ical(cls, ical):
try:
return cls(ical)
except:
            raise ValueError('Expected a URI, got: %s' % ical)
class vGeo(object):
"""A special type that is only indirectly defined in the rfc.
"""
def __init__(self, geo):
try:
latitude, longitude = (geo[0], geo[1])
latitude = float(latitude)
longitude = float(longitude)
except:
raise ValueError('Input must be (float, float) for '
'latitude and longitude')
self.latitude = latitude
self.longitude = longitude
self.params = Parameters()
def to_ical(self):
return '%s;%s' % (self.latitude, self.longitude)
@staticmethod
def from_ical(ical):
try:
latitude, longitude = ical.split(';')
return (float(latitude), float(longitude))
except:
raise ValueError("Expected 'float;float' , got: %s" % ical)
class vUTCOffset(object):
"""Renders itself as a utc offset.
"""
def __init__(self, td):
if not isinstance(td, timedelta):
raise ValueError('Offset value MUST be a timedelta instance')
self.td = td
self.params = Parameters()
def to_ical(self):
if self.td < timedelta(0):
sign = '-%s'
td = timedelta(0) - self.td # get timedelta relative to 0
else:
# Google Calendar rejects '0000' but accepts '+0000'
sign = '+%s'
td = self.td
days, seconds = td.days, td.seconds
hours = abs(days * 24 + seconds // 3600)
minutes = abs((seconds % 3600) // 60)
seconds = abs(seconds % 60)
if seconds:
duration = '%02i%02i%02i' % (hours, minutes, seconds)
else:
duration = '%02i%02i' % (hours, minutes)
return sign % duration
@classmethod
def from_ical(cls, ical):
if isinstance(ical, cls):
return ical.td
try:
sign, hours, minutes, seconds = (ical[0:1],
int(ical[1:3]),
int(ical[3:5]),
int(ical[5:7] or 0))
offset = timedelta(hours=hours, minutes=minutes, seconds=seconds)
except:
raise ValueError('Expected utc offset, got: %s' % ical)
if offset >= timedelta(hours=24):
raise ValueError(
'Offset must be less than 24 hours, was %s' % ical)
if sign == '-':
return -offset
return offset
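# Illustrative sketch for vUTCOffset (not part of the original module): UTC
# offsets render with an explicit sign and parse back to signed timedeltas.
# The offsets used here are arbitrary.
def _example_vutcoffset_roundtrip():
    assert vUTCOffset(timedelta(hours=1)).to_ical() == '+0100'
    assert vUTCOffset(timedelta(hours=-5)).to_ical() == '-0500'
    assert vUTCOffset.from_ical('-0500') == timedelta(hours=-5)
    assert vUTCOffset.from_ical('+0100') == timedelta(hours=1)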
class vInline(compat.unicode_type):
"""This is an especially dumb class that just holds raw unparsed text and
    has parameters. Conversion of inline values is handled by the Component
class, so no further processing is needed.
"""
def __new__(cls, value, encoding=DEFAULT_ENCODING):
value = to_unicode(value, encoding=encoding)
self = super(vInline, cls).__new__(cls, value)
self.params = Parameters()
return self
def to_ical(self):
return self.encode(DEFAULT_ENCODING)
@classmethod
def from_ical(cls, ical):
return cls(ical)
class TypesFactory(CaselessDict):
"""All Value types defined in rfc 2445 are registered in this factory
class.
The value and parameter names don't overlap. So one factory is enough for
both kinds.
"""
def __init__(self, *args, **kwargs):
"Set keys to upper for initial dict"
super(TypesFactory, self).__init__(*args, **kwargs)
self.all_types = (
vBinary,
vBoolean,
vCalAddress,
vDDDLists,
vDDDTypes,
vDate,
vDatetime,
vDuration,
vFloat,
vFrequency,
vGeo,
vInline,
vInt,
vPeriod,
vRecur,
vText,
vTime,
vUTCOffset,
vUri,
vWeekday
)
self['binary'] = vBinary
self['boolean'] = vBoolean
self['cal-address'] = vCalAddress
self['date'] = vDDDTypes
self['date-time'] = vDDDTypes
self['duration'] = vDDDTypes
self['float'] = vFloat
self['integer'] = vInt
self['period'] = vPeriod
self['recur'] = vRecur
self['text'] = vText
self['time'] = vTime
self['uri'] = vUri
self['utc-offset'] = vUTCOffset
self['geo'] = vGeo
self['inline'] = vInline
self['date-time-list'] = vDDDLists
#################################################
# Property types
# These are the default types
types_map = CaselessDict({
####################################
# Property value types
# Calendar Properties
'calscale': 'text',
'method': 'text',
'prodid': 'text',
'version': 'text',
# Descriptive Component Properties
'attach': 'uri',
'categories': 'text',
'class': 'text',
'comment': 'text',
'description': 'text',
'geo': 'geo',
'location': 'text',
'percent-complete': 'integer',
'priority': 'integer',
'resources': 'text',
'status': 'text',
'summary': 'text',
# Date and Time Component Properties
'completed': 'date-time',
'dtend': 'date-time',
'due': 'date-time',
'dtstart': 'date-time',
'duration': 'duration',
'freebusy': 'period',
'transp': 'text',
# Time Zone Component Properties
'tzid': 'text',
'tzname': 'text',
'tzoffsetfrom': 'utc-offset',
'tzoffsetto': 'utc-offset',
'tzurl': 'uri',
# Relationship Component Properties
'attendee': 'cal-address',
'contact': 'text',
'organizer': 'cal-address',
'recurrence-id': 'date-time',
'related-to': 'text',
'url': 'uri',
'uid': 'text',
# Recurrence Component Properties
'exdate': 'date-time-list',
'exrule': 'recur',
'rdate': 'date-time-list',
'rrule': 'recur',
# Alarm Component Properties
'action': 'text',
'repeat': 'integer',
'trigger': 'duration',
# Change Management Component Properties
'created': 'date-time',
'dtstamp': 'date-time',
'last-modified': 'date-time',
'sequence': 'integer',
# Miscellaneous Component Properties
'request-status': 'text',
####################################
# parameter types (luckily there is no name overlap)
'altrep': 'uri',
'cn': 'text',
'cutype': 'text',
'delegated-from': 'cal-address',
'delegated-to': 'cal-address',
'dir': 'uri',
'encoding': 'text',
'fmttype': 'text',
'fbtype': 'text',
'language': 'text',
'member': 'cal-address',
'partstat': 'text',
'range': 'text',
'related': 'text',
'reltype': 'text',
'role': 'text',
'rsvp': 'boolean',
'sent-by': 'cal-address',
'tzid': 'text',
'value': 'text',
})
def for_property(self, name):
"""Returns a the default type for a property or parameter
"""
return self[self.types_map.get(name, 'text')]
def to_ical(self, name, value):
"""Encodes a named value from a primitive python type to an icalendar
encoded string.
"""
type_class = self.for_property(name)
return type_class(value).to_ical()
def from_ical(self, name, value):
"""Decodes a named property or parameter value from an icalendar
encoded string to a primitive python type.
"""
type_class = self.for_property(name)
decoded = type_class.from_ical(value)
return decoded
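# A minimal usage sketch of TypesFactory (illustrative only; the property names
# come from types_map above and exact return values depend on the value classes):
#   factory = TypesFactory()
#   factory.for_property('summary')                # -> vText
#   raw = factory.to_ical('summary', u'Meeting')   # encode via vText
#   factory.from_ical('summary', raw)              # decode back to text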
|
untitaker/icalendar
|
src/icalendar/prop.py
|
Python
|
bsd-2-clause
| 30,687
|
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('webapp',
url(r'^/?$', 'views.home', name='home'),
url(r'^auth_redirect$', 'views.auth_redirect', name='auth_redirect'),
url(r'^nights$', 'views.night_index', name='night_index'),
url(r'^song$', 'views.song_index', name='song_index'),
url(r'^create_song$', 'views.song_create', name='song_create'),
url(r'^song/(?P<key>[\w\d]+)$', 'views.song', name='song'),
url(r'^song/(?P<key>[\w\d]+).mp3$', 'views.song_mp3', name='song_mp3'),
url(r'^song/(?P<key>[\w\d]+)/edit$', 'views.song_edit', name='song_edit'),
url(r'^song/(?P<key>[\w\d]+)/wait$', 'views.song_wait_finished', name='song_wait_finished'),
url(r'^sign_out$', 'views.sign_out', name='sign_out'),
)
|
beddit/sleep-musicalization-web
|
webapp/urls.py
|
Python
|
bsd-2-clause
| 806
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
from morphforgecontrib.simulation.synapse_templates.exponential_form.expsyn.core import PostSynapticMech_ExpSyn_Base as ExpSynTemplateType
from morphforgecontrib.simulation.synapse_templates.exponential_form.exp2syn.core import PostSynapticMech_Exp2Syn_Base as Exp2SynTemplateType
from morphforgecontrib.simulation.synapse_templates.exponential_form.exp2synnmda.core import PostSynapticMech_Exp2SynNMDA_Base as Exp2NMDASynTemplateType
|
mikehulluk/morphforge
|
src/morphforgecontrib/fake_namespaces/postsynaptictypes.py
|
Python
|
bsd-2-clause
| 1,976
|
# coding: utf-8
'''
This script reads data from various sources, processes it and stores it in MongoDB.
'''
import pyexcel
import logging
import models
from transform_date import *
from accent_remover import *
logging.basicConfig(filename='logs/scieloci.info.txt', level=logging.INFO)
logger = logging.getLogger(__name__)
# Add SciELO CI indicators for journals
def scieloci(filename):
sheet = pyexcel.get_sheet(
file_name=filename,
sheet_name='import',
name_columns_by_row=0)
sheet_json = sheet.to_records()
for rec in sheet_json:
# # remove empty keys
# rec = {k: v for k, v in rec.items() if v or v == 0}
query = models.Scielo.objects.filter(issn_list=rec['issn_scielo'])
        if len(query) == 1:
            print(query[0]['issn_scielo'])
            doc = query[0]
            data = {'scieloci': {}}
            if 'scieloci' in doc:
                data['scieloci'] = doc['scieloci']
            data['scieloci'].update(dict(rec))
            # update only the document that was actually matched; doing this
            # outside the if-block reused `doc`/`data` from a previous
            # iteration (and raised NameError on the very first miss)
            if data:
                doc.modify(**data)
        else:
            print('not found: ' + str(rec['issn_scielo']))
def main():
# SciELO docs counts Network xlsx
# scieloci('data/scielo/td_wos_all_downloads.xlsx')
# scieloci('data/wos/td_wos_all.xlsx')
scieloci('data/wos/td_wos_scieloci_2017_2018.xlsx')
if __name__ == "__main__":
main()
|
scieloorg/journals-catalog
|
jcatalog/transform/scielo_wos_scieloci_update.py
|
Python
|
bsd-2-clause
| 1,379
|
# -*- coding: UTF-8 -*-
import datetime
import os
import os.path
import subprocess
from collections import defaultdict
from django.conf import settings as dsettings
from django.core import exceptions
from django.core.cache import cache
from django.db import connection
from django.db import models
from django.db import transaction
from django.db.models.query import QuerySet
from django.db.models.signals import post_save
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext as _
from django_urls import UrlMixin
import tagging
from tagging.fields import TagField
import conference
import conference.gmap
from conference import settings
from conference import signals
from taggit.models import TagBase, GenericTaggedItemBase, ItemBase
from taggit.managers import TaggableManager
import inspect
import traceback
import logging
log = logging.getLogger('conference.tags')
# ConferenceTag and ConferenceTaggedItem create a "namespace" for the tags
# related to conference. This way we don't have to worry about other uses of
# taggit made by other apps.
class ConferenceTagManager(models.Manager):
def get_query_set(self):
return self._QuerySet(self.model)
def __getattr__(self, name):
return getattr(self.all(), name)
class _QuerySet(QuerySet):
def annotate_with_usage(self):
return self\
.annotate(usage=models.Count('conference_conferencetaggeditem_items'))
def order_by_usage(self, asc=False):
key = 'usage' if asc else '-usage'
return self.annotate_with_usage().order_by(key)
class ConferenceTag(TagBase):
objects = ConferenceTagManager()
category = models.CharField(max_length=50, default='', blank=True)
def save(self, **kw):
if not self.pk:
frame = inspect.currentframe()
stack_trace = traceback.format_stack(frame)
log.debug(u'saving new tag {}'.format(self.name))
log.debug(u''.join(stack_trace[:-1]))
            # before saving this tag, make sure another one that differs only
            # in upper/lower case does not already exist
try:
c = ConferenceTag.objects.get(name__iexact=self.name)
except ConferenceTag.DoesNotExist:
pass
else:
self.pk = c.pk
return
return super(ConferenceTag, self).save(**kw)
class ConferenceTaggedItem(GenericTaggedItemBase, ItemBase):
tag = models.ForeignKey(ConferenceTag, related_name="%(app_label)s_%(class)s_items")
class Meta:
verbose_name = _("Tagged Item")
verbose_name_plural = _("Tagged Items")
class ConferenceManager(models.Manager):
def current(self):
key = 'CONFERENCE_CURRENT'
data = cache.get(key)
if data is None:
data = self.get(code=settings.CONFERENCE)
            # keep it cached long enough that the query is no longer
            # a problem
cache.set(key, data, 60*60*24*7)
return data
@classmethod
def clear_cache(cls, sender, **kwargs):
cache.delete('CONFERENCE_CURRENT')
class Conference(models.Model):
code = models.CharField(max_length=10, primary_key=True)
name = models.CharField(max_length=100)
cfp_start = models.DateField(null=True, blank=True)
cfp_end = models.DateField(null=True, blank=True)
conference_start = models.DateField(null=True, blank=True)
conference_end = models.DateField(null=True, blank=True)
voting_start = models.DateField(null=True, blank=True)
voting_end = models.DateField(null=True, blank=True)
objects = ConferenceManager()
def __unicode__(self):
return self.code
def days(self):
output = []
if self.conference_start and self.conference_end:
d = self.conference_start
step = datetime.timedelta(days=1)
while d<= self.conference_end:
output.append(d)
d += step
return output
def clean(self):
if self.conference_start and self.conference_end:
if self.conference_start > self.conference_end:
raise exceptions.ValidationError('Conference end must be > of conference start')
if self.cfp_start and self.cfp_end:
if self.cfp_start > self.cfp_end:
raise exceptions.ValidationError('Cfp end must be > of cfp start')
if self.voting_start and self.voting_end:
if self.voting_start > self.voting_end:
raise exceptions.ValidationError('Voting end must be > of voting start')
def cfp(self):
today = datetime.date.today()
try:
return self.cfp_start <= today <= self.cfp_end
except TypeError:
            # dates not set
return False
def voting(self):
today = datetime.date.today()
try:
return self.voting_start <= today <= self.voting_end
except TypeError:
            # dates not set
return False
def conference(self):
today = datetime.date.today()
try:
return self.conference_start <= today <= self.conference_end
except TypeError:
raise
            # dates not set
return False
post_save.connect(ConferenceManager.clear_cache, sender=Conference)
class DeadlineManager(models.Manager):
def valid_news(self):
today = datetime.date.today()
return self.all().filter(date__gte = today)
class Deadline(models.Model):
"""
    deadline for the pycon
"""
date = models.DateField()
objects = DeadlineManager()
def __unicode__(self):
return "deadline: %s" % (self.date, )
class Meta:
ordering = ['date']
def isExpired(self):
today = datetime.date.today()
return today > self.date
def content(self, lang, fallback=True):
"""
        Return the DeadlineContent in the given language. If that
        DeadlineContent does not exist and fallback is False, an
        ObjectDoesNotExist exception is raised. If fallback is True the
        first available DeadlineContent is returned.
"""
contents = dict((c.language, c) for c in self.deadlinecontent_set.exclude(body=''))
if not contents:
raise DeadlineContent.DoesNotExist()
try:
return contents[lang]
except KeyError:
if not fallback:
raise DeadlineContent.DoesNotExist()
return contents.values()[0]
class DeadlineContent(models.Model):
"""
    Multilingual text of a deadline
"""
deadline = models.ForeignKey(Deadline)
language = models.CharField(max_length=3)
headline = models.CharField(max_length=200)
body = models.TextField()
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
class MultilingualContentManager(models.Manager):
def setContent(self, object, content, language, body):
if language is None:
language = dsettings.LANGUAGE_CODE.split('-', 1)[0]
object_type = ContentType.objects.get_for_model(object)
try:
mc = self.get(content_type=object_type, object_id=object.pk, content=content, language=language)
except MultilingualContent.DoesNotExist:
mc = MultilingualContent(content_object=object)
mc.content = content
mc.language = language
mc.body = body
mc.save()
def getContent(self, object, content, language):
if language is None:
language = dsettings.LANGUAGE_CODE.split('-', 1)[0]
object_type = ContentType.objects.get_for_model(object)
records = dict(
(x.language, x)
for x in self.exclude(body='').filter(content_type=object_type, object_id=object.pk, content=content)
)
try:
return records[language]
except KeyError:
if not records:
return None
else:
return records.get(dsettings.LANGUAGE_CODE, records.values()[0])
class MultilingualContent(models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField(db_index=True)
content_object = generic.GenericForeignKey('content_type', 'object_id')
language = models.CharField(max_length = 3)
content = models.CharField(max_length = 20)
body = models.TextField()
objects = MultilingualContentManager()
def _fs_upload_to(subdir, attr=None, package='conference'):
if attr is None:
attr = lambda i: i.slug
def wrapper(instance, filename):
fpath = os.path.join(package, subdir, '%s%s' % (attr(instance), os.path.splitext(filename)[1].lower()))
ipath = os.path.join(dsettings.MEDIA_ROOT, fpath)
if os.path.exists(ipath):
os.unlink(ipath)
return fpath
return wrapper
def postSaveResizeImageHandler(sender, **kwargs):
tool = os.path.join(os.path.dirname(conference.__file__), 'utils', 'resize_image.py')
null = open('/dev/null')
p = subprocess.Popen(
[tool, settings.STUFF_DIR],
close_fds=True, stdin=null, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
p.communicate()
class AttendeeProfileManager(models.Manager):
def findSlugForUser(self, user):
name = '%s %s' % (user.first_name, user.last_name)
slug = slugify(name)
rows = self.filter(models.Q(slug=slug) | models.Q(slug__startswith=slug + '-'))\
.values_list('slug', flat=True)
last = None
for r in rows:
try:
counter = int(r.rsplit('-', 1)[1])
except (ValueError, IndexError):
if last is None:
                    # the if guards against slugs like "str-str-str"
last = 0
continue
if counter > last:
last = counter
if last is not None:
slug = '%s-%d' % (slug, last+1)
elif not slug:
            # slug can be an empty string only if the user has an empty first
            # and last name and is the first one with this anomaly.
            # Setting the slug to "-1" also handles the following ones, which
            # will find a predecessor and continue the sequence
slug = '-1'
return slug
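    # Illustrative behaviour of findSlugForUser (a sketch; the name below is
    # purely hypothetical): the first user named "Mario Rossi" gets the slug
    # 'mario-rossi', the second one 'mario-rossi-1', the third 'mario-rossi-2',
    # and so on.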
def randomUUID(self, length=6):
import string
import random
return ''.join(random.sample(string.letters + string.digits, length))
    # TODO: use savepoints. Remember that, at least up to django 1.4, the
    # sqlite backend does not support savepoints even though sqlite itself has
    # for a long time, so cursor.execute() must be used; if we ever move to
    # postgres, remember to roll back the savepoint in the except clause (or
    # enable autocommit)
def getOrCreateForUser(self, user):
"""
        Return or create the profile associated with the user.
"""
try:
p = AttendeeProfile.objects.get(user=user)
except AttendeeProfile.DoesNotExist:
p = AttendeeProfile(user=user)
else:
return p
from django.db import IntegrityError
slug = None
uuid = None
while True:
if slug is None:
slug = self.findSlugForUser(user)
if uuid is None:
uuid = self.randomUUID()
p.slug = slug
p.uuid = uuid
try:
p.save()
except IntegrityError, e:
msg = str(e)
if 'uuid' in msg:
uuid = None
elif 'slug' in msg:
slug = None
else:
raise
else:
break
return p
ATTENDEEPROFILE_VISIBILITY = (
('x', 'Private (disabled)'),
('m', 'Participants only'),
('p', 'Public'),
)
class AttendeeProfile(models.Model):
"""
    The profile of a conference attendee (speakers included); the link to
    the person is established through the foreign key to
    auth.User.
"""
user = models.OneToOneField('auth.User', primary_key=True)
slug = models.SlugField(unique=True)
uuid = models.CharField(max_length=6, unique=True)
image = models.ImageField(upload_to=_fs_upload_to('profile'), blank=True)
birthday = models.DateField(_('Birthday'), null=True, blank=True)
phone = models.CharField(
_('Phone'),
max_length=30, blank=True,
help_text=_('Enter a phone number where we can contact you in case of administrative issues.<br />Use the international format, eg: +39-055-123456'),
)
personal_homepage = models.URLField(_('Personal homepage'), blank=True)
company = models.CharField(_('Company'), max_length=50, blank=True)
company_homepage = models.URLField(_('Company homepage'), blank=True)
job_title = models.CharField(_('Job title'), max_length=50, blank=True)
location = models.CharField(_('Location'), max_length=100, blank=True)
bios = generic.GenericRelation(MultilingualContent)
visibility = models.CharField(max_length=1, choices=ATTENDEEPROFILE_VISIBILITY, default='x')
objects = AttendeeProfileManager()
def __unicode__(self):
return self.slug
def clean(self):
from django.core.exceptions import ValidationError
if self.visibility != 'p':
if TalkSpeaker.objects\
.filter(speaker__user=self.user_id, talk__status='accepted')\
.count()>0:
raise ValidationError('This profile must be public')
def setBio(self, body, language=None):
MultilingualContent.objects.setContent(self, 'bios', language, body)
def getBio(self, language=None):
return MultilingualContent.objects.getContent(self, 'bios', language)
post_save.connect(postSaveResizeImageHandler, sender=AttendeeProfile)
class Presence(models.Model):
"""
    The presence of an attendee at a conference.
"""
profile = models.ForeignKey(AttendeeProfile, related_name='presences')
conference = models.CharField(max_length=10)
timestamp = models.DateTimeField(auto_now_add=True)
class Meta:
unique_together = (('profile', 'conference'),)
class AttendeeLinkManager(models.Manager):
def findLinks(self, uid):
return AttendeeLink.objects.filter(
models.Q(attendee1=uid) |
models.Q(attendee2=uid))
def getLink(self, uid1, uid2):
return AttendeeLink.objects.get(
models.Q(attendee1=uid1, attendee2=uid2) |
models.Q(attendee1=uid2, attendee2=uid1))
class AttendeeLink(models.Model):
"""
    A link between two attendees
"""
attendee1 = models.ForeignKey(AttendeeProfile, related_name='link1')
attendee2 = models.ForeignKey(AttendeeProfile, related_name='link2')
message = models.TextField(blank=True)
timestamp = models.DateTimeField(auto_now_add=True)
objects = AttendeeLinkManager()
class SpeakerManager(models.Manager):
def byConference(self, conf, only_accepted=True, talk_type=None):
"""
        Return all the speakers of the conference
"""
qs = TalkSpeaker.objects\
.filter(talk__conference=conf)\
.values('speaker')
if only_accepted:
qs = qs.filter(talk__status='accepted')
if talk_type:
if isinstance(talk_type, (list, tuple)):
qs = qs.filter(talk__type__in=talk_type)
else:
qs = qs.filter(talk__type=talk_type)
return Speaker.objects.filter(user__in=qs)
class Speaker(models.Model, UrlMixin):
user = models.OneToOneField('auth.User', primary_key=True)
objects = SpeakerManager()
def __unicode__(self):
return '%s %s' % (self.user.first_name, self.user.last_name)
def talks(self, conference=None, include_secondary=True, status=None):
"""
        Return the speaker's talks, filtered by conference (if not
        None); if include_secondary is True, talks where they are not the
        main speaker are included as well. If status is not None, only
        talks with the requested status are returned.
"""
qs = TalkSpeaker.objects.filter(speaker=self)
if status in ('proposed', 'accepted', 'canceled'):
qs = qs.filter(talk__status=status)
elif status is not None:
raise ValueError('status unknown')
if not include_secondary:
qs = qs.filter(helper=False)
if conference is not None:
qs = qs.filter(talk__conference=conference)
return Talk.objects.filter(id__in=qs.values('talk'))
TALK_LANGUAGES = dsettings.LANGUAGES
TALK_STATUS = (
('proposed', _('Proposed')),
('accepted', _('Accepted')),
('canceled', _('Canceled')),
)
VIDEO_TYPE = (
('viddler_oembed', 'oEmbed (Youtube, Vimeo, ...)'),
('download', 'Download'),
)
TALK_LEVEL = (
('beginner', _('Beginner')),
('intermediate', _('Intermediate')),
('advanced', _('Advanced')),
)
class TalkManager(models.Manager):
def get_query_set(self):
return self._QuerySet(self.model)
def __getattr__(self, name):
return getattr(self.all(), name)
class _QuerySet(QuerySet):
def proposed(self, conference=None):
qs = self.filter(status='proposed')
if conference:
qs = qs.filter(conference=conference)
return qs
def accepted(self, conference=None):
qs = self.filter(status='accepted')
if conference:
qs = qs.filter(conference=conference)
return qs
def canceled(self, conference=None):
qs = self.filter(status='canceled')
if conference:
qs = qs.filter(conference=conference)
return qs
def createFromTitle(self, title, sub_title, conference, speaker, prerequisites, abstract_short, abstract_extra,
status='proposed', language='en', level='beginner', training_available=False, type='t_30'):
slug = slugify(title)
talk = Talk()
talk.title = title
talk.sub_title = sub_title
talk.prerequisites = prerequisites
talk.abstract_short = abstract_short
talk.conference = conference
talk.status = status
talk.language = language
talk.level = level
talk.abstract_extra = abstract_extra
talk.training_available = training_available
talk.type = type
with transaction.commit_on_success():
count = 0
check = slug
while True:
if self.filter(slug=check).count() == 0:
break
count += 1
check = '%s-%d' % (slug, count)
talk.slug = check
talk.save()
            # associate the speaker here, so that if anything goes wrong (for
            # example the speaker is not valid) everything happens inside a
            # transaction and the db stays clean.
TalkSpeaker(talk=talk, speaker=speaker).save()
return talk
# Previous definition of TALK_TYPE, kept around, since some of the
# code in the system uses the codes to checks.
#
# TALK_TYPE = (
# ('t', 'Talk'),
# ('i', 'Interactive'),
# ('r', 'Training'),
# ('p', 'Poster session'),
# ('n', 'Panel'),
# ('h', 'Help desk'),
# )
# Talk types combined with duration. Note that the system uses the
# first character to identify the generic talk type, so these should
# not be changed from the ones listed above.
TALK_TYPE = (
('t_30', 'Talk (30 mins)'),
('t_45', 'Talk (45 mins)'),
('t_60', 'Talk (60 mins)'),
('i_60', 'Interactive (60 mins)'),
('r_60', 'Training (60 mins)'),
('r_120', 'Training (120 mins)'),
    ('p_180', 'Poster during coffee breaks (30x4 mins)'),
('h_180', 'Help desk'),
)
# Mapping of TALK_TYPE to duration in minutes
TALK_DURATION = {
't_30': 30,
't_45': 45,
't_60': 60,
'i_60': 60,
'r_60': 60,
'r_120': 120,
'p_180': 30,
'h_180': 180,
}
TALK_ADMIN_TYPE = (
('o', 'Opening session'),
('c', 'Closing session'),
('l', 'Lightning talk'),
('k', 'Keynote'),
('r', 'Recruiting session'),
('m', 'EPS session'),
('s', 'Open space'),
('e', 'Social event'),
('x', 'Reserved slot'),
)
class Talk(models.Model, UrlMixin):
title = models.CharField(_('Talk title'), max_length=80)
sub_title = models.CharField(_('Sub title'), max_length=1000, default="", blank=True)
slug = models.SlugField(max_length=100, unique=True)
prerequisites = models.CharField(_('prerequisites'), help_text="What should attendees know already",default="", blank=True, max_length=150)
conference = models.CharField(help_text='name of the conference', max_length=20)
admin_type = models.CharField(max_length=1, choices=TALK_ADMIN_TYPE, blank=True)
speakers = models.ManyToManyField(Speaker, through='TalkSpeaker')
language = models.CharField(_('Language'), max_length=3, choices=TALK_LANGUAGES)
abstracts = generic.GenericRelation(
MultilingualContent,
verbose_name=_('Talk abstract'),
        help_text=_('<p>Please enter a short description of the talk you are submitting. Be sure to include the goals of your talk and any prerequisite required to fully understand it.</p><p>Suggested size: two or three paragraphs.</p>'))
abstract_short = models.TextField(
verbose_name=_('Talk abstract short'),
help_text=_('<p>Please enter a short description of the talk you are submitting.</p>'), default="")
abstract_extra = models.TextField(
verbose_name=_('Talk abstract extra'),
help_text=_('<p>Please enter instructions for attendees.</p>'), default="")
slides = models.FileField(upload_to=_fs_upload_to('slides'), blank=True)
video_type = models.CharField(max_length=30, choices=VIDEO_TYPE, blank=True)
video_url = models.TextField(blank=True)
video_file = models.FileField(upload_to=_fs_upload_to('videos'), blank=True)
teaser_video = models.URLField(
_('Teaser video'),
blank=True,
help_text=_('Insert the url for your teaser video'))
status = models.CharField(max_length=8, choices=TALK_STATUS)
level = models.CharField(
_('Audience level'),
default='beginner',
max_length=12,
choices=TALK_LEVEL)
training_available = models.BooleanField(default=False)
type = models.CharField(max_length=5, choices=TALK_TYPE, default='t_30')
#def _talk_duration(self):
# "Returns talk duration"
# duration = self.type
# return int(duration.split("_")[1])
#duration = property(_talk_duration)
# Old duration code
    # total duration of the talk (including the Q&A session)
duration = models.IntegerField(
_('Duration'),
default=0,
help_text=_('This is the duration of the talk'))
    # duration of the Q&A session
    # These are the tags the speaker suggests for their own talk; they are
    # here for reasons of time (the BSW2011 cfp is looming) but the right
    # thing would be to create a new "Submission" model linked to Talk and
    # put the cfp data there
suggested_tags = models.CharField(max_length=100, blank=True)
created = models.DateTimeField(auto_now_add=True)
tags = TaggableManager(through=ConferenceTaggedItem)
objects = TalkManager()
class Meta:
ordering = ['title']
def save(self, *args, **kwargs):
        # the duration is currently hardcoded to 30 minutes; the intended
        # behaviour is to derive it from the talk type via TALK_DURATION
        self.duration = 30  # TALK_DURATION[self.type]
super(Talk, self).save(*args, **kwargs)
def __unicode__(self):
return '%s [%s][%s][%s]' % (self.title, self.conference, self.language, self.duration)
@models.permalink
def get_absolute_url(self):
return ('conference-talk', (), { 'slug': self.slug })
get_url_path = get_absolute_url
def get_event(self):
try:
return self.event_set.all()[0]
except IndexError:
return None
def get_all_speakers(self):
return self.speakers.all().select_related('speaker')
def setAbstract(self, body, language=None):
MultilingualContent.objects.setContent(self, 'abstracts', language, body)
def getAbstract(self, language=None):
return MultilingualContent.objects.getContent(self, 'abstracts', language)
class TalkSpeaker(models.Model):
talk = models.ForeignKey(Talk)
speaker = models.ForeignKey(Speaker)
helper = models.BooleanField(default=False)
class Meta:
unique_together = (('talk', 'speaker'),)
class FareManager(models.Manager):
def get_query_set(self):
return self._QuerySet(self.model)
def __getattr__(self, name):
return getattr(self.all(), name)
class _QuerySet(QuerySet):
def available(self, conference=None):
today = datetime.date.today()
q1 = models.Q(start_validity=None, end_validity=None)
q2 = models.Q(start_validity__lte=today, end_validity__gte=today)
qs = self.filter(q1 | q2)
if conference:
qs = qs.filter(conference=conference)
return qs
FARE_TICKET_TYPES = (
('conference', 'Conference ticket'),
('partner', 'Partner Program'),
('event', 'Event'),
('other', 'Other'),
)
FARE_PAYMENT_TYPE = (
('p', 'Payment'),
('v', 'Voucher'),
('d', 'Deposit'),
)
FARE_TYPES = (
('c', 'Company'),
('s', 'Student'),
('p', 'Personal'),
)
class Fare(models.Model):
conference = models.CharField(help_text='Conference code', max_length=20)
code = models.CharField(max_length=10)
name = models.CharField(max_length=100)
description = models.TextField()
price = models.DecimalField(max_digits=6, decimal_places=2)
start_validity = models.DateField(null=True)
end_validity = models.DateField(null=True)
recipient_type = models.CharField(max_length=1, choices=FARE_TYPES, default='p')
ticket_type = models.CharField(max_length=10, choices=FARE_TICKET_TYPES, default='conference', db_index=True)
payment_type = models.CharField(max_length=1, choices=FARE_PAYMENT_TYPE, default='p')
blob = models.TextField(blank=True)
objects = FareManager()
def __unicode__(self):
return '%s - %s' % (self.code, self.conference)
class Meta:
unique_together = (('conference', 'code'),)
def valid(self):
#numb = len(list(Ticket.objects.all()))
today = datetime.date.today()
validity = self.start_validity <= today <= self.end_validity
#validity = numb < settings.MAX_TICKETS
return validity
def fare_type(self):
""" Return the fare type based on the .recipient_type
"""
return dict(FARE_TYPES).get(self.recipient_type, 'Regular')
def calculated_price(self, qty=1, **kw):
from conference.listeners import fare_price
params = dict(kw)
params['qty'] = qty
calc = {
'total': self.price * qty,
'params': params,
}
fare_price.send(sender=self, calc=calc)
return calc['total']
def create_tickets(self, user):
""" Creates and returns the tickets associated with this rate.
Normally each fare involves just one ticket, but this
behavior can be modified by a listener attached to the
signal fare_tickets.
The instances returned by this method have an additional
attribute `fare_description` (volatile) and contains a
description of the fare specific for the single ticket.
"""
from conference.listeners import fare_tickets
params = {
'user': user,
'tickets': []
}
fare_tickets.send(sender=self, params=params)
if not params['tickets']:
t = Ticket(user=user, fare=self)
t.fare_description = self.name
t.save()
params['tickets'].append(t)
return params['tickets']
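    # Illustrative usage of create_tickets (a sketch; `user`, the conference
    # code and the fare code below are hypothetical):
    #   fare = Fare.objects.available(conference='ep2013').get(code='TESP')
    #   tickets = fare.create_tickets(user)   # usually a single Ticket
    #   tickets[0].fare_description           # == fare.name unless a listener changed it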
class TicketManager(models.Manager):
def get_query_set(self):
return self._QuerySet(self.model)
def __getattr__(self, name):
return getattr(self.all(), name)
class _QuerySet(QuerySet):
def conference(self, conference):
return self.filter(fare__conference=conference)
TICKET_TYPE = (
('standard', 'standard'),
('staff', 'staff'),
)
class Ticket(models.Model):
user = models.ForeignKey(
'auth.User',
help_text=_('Buyer of the ticket'))
name = models.CharField(
max_length=60,
blank=True,
help_text=_('Attendee name, i.e. the person who will attend the conference.'))
fare = models.ForeignKey(Fare)
frozen = models.BooleanField(default=False)
ticket_type = models.CharField(max_length=8, choices=TICKET_TYPE, default='standard')
objects = TicketManager()
def __unicode__(self):
return 'Ticket "%s" (%s)' % (self.fare.name, self.fare.code)
class Sponsor(models.Model):
"""
    Through the list of SponsorIncome entries, a Sponsor instance is linked
    to the information about all the sponsorships made.
    In SponsorIncome the conference is identified, as elsewhere,
    by an alphanumeric key not linked to any table.
"""
sponsor = models.CharField(max_length=100, help_text='nome dello sponsor')
slug = models.SlugField()
url = models.URLField(blank=True)
logo = models.ImageField(
upload_to=_fs_upload_to('sponsor'), blank=True,
help_text='Inserire un immagine raster sufficientemente grande da poter essere scalata al bisogno'
)
alt_text = models.CharField(max_length=150, blank=True)
title_text = models.CharField(max_length=150, blank=True)
class Meta:
ordering = ['sponsor']
def __unicode__(self):
return self.sponsor
post_save.connect(postSaveResizeImageHandler, sender=Sponsor)
class SponsorIncome(models.Model):
sponsor = models.ForeignKey(Sponsor)
conference = models.CharField(max_length=20)
income = models.PositiveIntegerField()
tags = TagField()
class Meta:
ordering = ['conference']
class MediaPartner(models.Model):
"""
    Media partners are sponsors that do not pay but offer visibility
    of some kind.
"""
partner = models.CharField(max_length=100, help_text='nome del media partner')
slug = models.SlugField()
url = models.URLField(blank=True)
logo = models.ImageField(
upload_to=_fs_upload_to('media-partner'), blank = True,
help_text='Inserire un immagine raster sufficientemente grande da poter essere scalata al bisogno'
)
class Meta:
ordering = ['partner']
def __unicode__(self):
return self.partner
post_save.connect(postSaveResizeImageHandler, sender=MediaPartner)
class MediaPartnerConference(models.Model):
partner = models.ForeignKey(MediaPartner)
conference = models.CharField(max_length = 20)
tags = TagField()
class Meta:
ordering = ['conference']
class ScheduleManager(models.Manager):
def attendees(self, conference, forecast=False):
"""
restituisce il numero di partecipanti per ogni schedule della conferenza.
"""
return settings.SCHEDULE_ATTENDEES(conference, forecast)
def events_score_by_attendance(self, conference):
"""
        Using the EventInterest records, return an "attendance score" for
        each event; the score is proportional to the number of people who
        expressed interest in that event.
"""
        # Treat an expression of interest, interest > 0, as the intention to
        # attend an event and add the user to the attendees. If the user has
        # "voted" for several simultaneous events, count their presence
        # proportionally (so events may end up with a fractional "score")
events = defaultdict(set)
for x in EventInterest.objects\
.filter(event__schedule__conference=conference, interest__gt=0)\
.select_related('event__schedule'):
events[x.event].add(x.user_id)
        # Besides the EventInterest records, also take EventBooking into
        # account; confidence in those cases is even higher
for x in EventBooking.objects\
.filter(event__schedule__conference=conference)\
.select_related('event__schedule'):
events[x.event].add(x.user_id)
        # associate each event with the number of votes it received;
        # the operation is complicated by the fact that not all votes have
        # the same weight; if a user marked two events happening in parallel
        # as +1 they obviously cannot attend both, so their vote has to be
        # scaled down
scores = defaultdict(lambda: 0.0)
for evt, users in events.items():
group = list(Event.objects.group_events_by_times(events, event=evt))[0]
while users:
u = users.pop()
                # How much is the presence of `u` worth for event `evt`? If
                # `u` does not attend any other event in the same group it
                # counts as 1, otherwise as a value proportional to the
                # number of events they are interested in.
found = [ evt ]
for other in group:
if other != evt:
try:
events[other].remove(u)
except KeyError:
pass
else:
found.append(other)
score = 1.0 / len(found)
for f in found:
scores[f.id] += score
return scores
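        # Worked example of the scoring above (illustrative): a user who
        # expressed interest in two events that overlap in time can attend
        # only one of them, so each of those events receives 1.0 / 2 = 0.5
        # from that user, while a user interested in a single event in that
        # time slot contributes a full 1.0 to it.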
def expected_attendance(self, conference, factor=0.85):
"""
        For each event, return the expected attendance based on
        the EventInterest records.
"""
seats_available = defaultdict(lambda: 0)
for row in EventTrack.objects\
.filter(event__schedule__conference=conference)\
.values('event', 'track__seats'):
seats_available[row['event']] += row['track__seats']
scores = self.events_score_by_attendance(conference)
events = Event.objects\
.filter(schedule__conference=conference)\
.select_related('schedule')
output = {}
        # now forecast the attendees for each event; to do so, divide an
        # event's score by the score of the voters who expressed a vote for
        # an event *in the same time slot*; the resulting number is a factor
        # k which, multiplied by the forecast of daily attendance, gives an
        # indication of how many people are expected for the event.
forecasts = self.attendees(conference, forecast=True)
        # computing the score for a time slot requires a double for-loop over
        # the events; to limit the number of inner iterations, group the
        # events by day
event_by_day = defaultdict(set)
for e in events:
event_by_day[e.schedule_id].add(e)
for event in events:
score = scores[event.id]
group = list(Event.objects\
.group_events_by_times(event_by_day[event.schedule_id], event=event))[0]
group_score = sum([ scores[e.id] for e in group ])
if group_score:
k = score / group_score
else:
k = 0
expected = k * forecasts[event.schedule_id] * factor
seats = seats_available.get(event.id, 0)
output[event.id] = {
'score': score,
'seats': seats,
'expected': expected,
'overbook': seats and expected > seats,
}
return output
class Schedule(models.Model):
"""
    The schedule itself carries the conference it refers to (a free
    alphanumeric field) and the day it covers.
    Through ForeignKeys the schedule is linked to the tracks and to the
    events.
    The latter can be talks or "custom" events, such as the
    pyBirra, and are linked to the tracks in a "weak" way, through a
    tagfield.
"""
conference = models.CharField(help_text = 'nome della conferenza', max_length = 20)
slug = models.SlugField()
date = models.DateField()
description = models.TextField(blank=True)
objects = ScheduleManager()
class Meta:
ordering = ['date']
def __unicode__(self):
return '{0}: {1}'.format(self.conference, self.date)
def speakers(self):
qs = Event.objects\
.filter(schedule=self, talk__id__isnull=False)\
.values('talk__talkspeaker__speaker')
return Speaker.objects.filter(user__in=qs)
class Track(models.Model):
schedule = models.ForeignKey(Schedule)
track = models.CharField('nome track', max_length=20)
title = models.TextField('titolo della track', help_text='HTML supportato')
seats = models.PositiveIntegerField(default=0)
order = models.PositiveIntegerField('ordine', default=0)
translate = models.BooleanField(default=False)
outdoor = models.BooleanField(default=False)
def __unicode__(self):
return self.track
class EventManager(models.Manager):
def group_events_by_times(self, events, event=None):
"""
        Group the events (obviously belonging to different tracks) that
        overlap in time.
        Return a generator that yields a group (list) of events at each
        iteration.
"""
def overlap(range1, range2):
# http://stackoverflow.com/questions/9044084/efficient-data-range-overlap-calculation-in-python
latest_start = max(range1[0], range2[0])
earliest_end = min(range1[1], range2[1])
_overlap = (earliest_end - latest_start)
return _overlap.days == 0 and _overlap.seconds > 0
def extract_group(event, events):
group = []
r0 = event.get_time_range()
for ix in reversed(range(len(events))):
r1 = events[ix].get_time_range()
if r0[0].date() == r1[0].date() and overlap(r0, r1):
group.append(events.pop(ix))
return group
if event:
group = extract_group(event, list(events))
yield group
else:
sorted_events = sorted(
filter(lambda x: x.get_duration() > 0, events),
key=lambda x: x.get_duration())
while sorted_events:
evt0 = sorted_events.pop()
group = [evt0] + extract_group(evt0, sorted_events)
yield group
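    # Illustrative grouping (a sketch): given three events on the same day,
    # A 10:00-11:00, B 10:30-11:30 and C 12:00-13:00, the generator yields
    # one group containing A and B (their time ranges overlap) and another
    # group containing only C.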
class Event(models.Model):
schedule = models.ForeignKey(Schedule)
start_time = models.TimeField()
talk = models.ForeignKey(Talk, blank=True, null=True)
custom = models.TextField(
blank=True,
help_text="title for a custom event (an event without a talk)")
abstract = models.TextField(
blank=True,
help_text="description for a custom event")
duration = models.PositiveIntegerField(
default=0,
help_text='duration of the event (in minutes). Override the talk duration if present')
tags = models.CharField(
max_length=200, blank=True,
help_text='comma separated list of tags. Something like: special, break, keynote')
tracks = models.ManyToManyField(Track, through='EventTrack')
sponsor = models.ForeignKey(Sponsor, blank=True, null=True)
video = models.CharField(max_length=1000, blank=True)
bookable = models.BooleanField(default=False)
seats = models.PositiveIntegerField(
default=0,
help_text='seats available. Override the track default if set')
objects = EventManager()
class Meta:
ordering = ['start_time']
def __unicode__(self):
if self.talk:
return '%s - %smin' % (self.talk.title, self.talk.duration)
else:
return self.custom
def get_duration(self):
if self.duration:
return self.duration
elif self.talk:
return self.talk.duration
else:
return 0
def get_time_range(self):
n = datetime.datetime.combine(self.schedule.date, self.start_time)
return (
n, (n + datetime.timedelta(seconds=self.get_duration() * 60))
)
def get_description(self):
if self.talk:
return self.talk.title
else:
return self.custom
def get_all_tracks_names(self):
from tagging.utils import parse_tag_input
return parse_tag_input(self.track)
def get_track(self):
"""
        Return the first track instance among those specified, or None if
        the event is of a special type
        """
        # XXX: use the get_event_track template tag, which caches the query
dbtracks = dict( (t.track, t) for t in self.schedule.track_set.all())
for t in tagging.models.Tag.objects.get_for_object(self):
if t.name in dbtracks:
return dbtracks[t.name]
def split(self, time):
"""
        Split the event into several events lasting at most `time` minutes each.
"""
if self.talk_id and self.duration == 0:
original = self.talk.duration
else:
original = self.duration
if time >= original:
return 0
myid = self.id
tracks = self.tracks.all()
self.duration = time
original -= time
self.save()
count = 1
while original > 0:
self.id = None
dt = datetime.datetime.combine(datetime.date.today(), self.start_time)
dt += datetime.timedelta(minutes=time)
self.start_time = dt.time()
self.save()
for t in tracks:
EventTrack.objects.create(track=t, event=self)
original -= time
count += 1
self.id = myid
return count
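    # Illustrative behaviour of split() (a sketch): calling split(time=30) on
    # a 90-minute event shortens it to 30 minutes, creates two more 30-minute
    # events starting 30 and 60 minutes later on the same tracks, and
    # returns 3, the total number of resulting slots.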
class EventTrack(models.Model):
track = models.ForeignKey(Track)
event = models.ForeignKey(Event)
class Meta:
unique_together = (('track', 'event',),)
class EventInterest(models.Model):
event = models.ForeignKey(Event)
user = models.ForeignKey('auth.User')
interest = models.IntegerField()
class Meta:
unique_together = (('user', 'event'),)
class EventBookingManager(models.Manager):
def booking_status(self, eid):
seats = Event.objects.values('seats').get(id=eid)['seats']
if not seats:
seats = sum(EventTrack.objects\
.filter(event=eid)\
.values_list('track__seats', flat=True))
booked = list(EventBooking.objects\
.filter(event=eid)\
.values_list('user', flat=True))
return {
'seats': seats,
'booked': booked,
'available': seats - len(booked),
}
def booking_available(self, eid, uid):
st = self.booking_status(eid)
return (uid in st['booked']) or (st['available'] > 0)
def book_event(self, eid, uid):
try:
e = EventBooking.objects.get(event=eid, user=uid)
except EventBooking.DoesNotExist:
e = EventBooking(event_id=eid, user_id=uid)
e.save()
signals.event_booked.send(sender=Event, booked=True, event_id=eid, user_id=uid)
return e
def cancel_reservation(self, eid, uid):
try:
e = EventBooking.objects.get(event=eid, user=uid)
except EventBooking.DoesNotExist:
return
e.delete()
signals.event_booked.send(sender=Event, booked=False, event_id=eid, user_id=uid)
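    # Illustrative flow (a sketch): booking_status(eid) returns a dict like
    # {'seats': 20, 'booked': [<user ids>], 'available': 18}; book_event(eid,
    # uid) creates the booking if missing and fires the event_booked signal
    # with booked=True, while cancel_reservation(eid, uid) deletes it and
    # fires the same signal with booked=False.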
class EventBooking(models.Model):
event = models.ForeignKey(Event)
user = models.ForeignKey('auth.User')
objects = EventBookingManager()
class Meta:
unique_together = (('user', 'event'),)
class Hotel(models.Model):
"""
    Hotels keep track of the places, affiliated or not, where
    accommodation can be found during the conference.
"""
name = models.CharField('nome dell\'hotel', max_length = 100)
telephone = models.CharField('contatti telefonici', max_length = 50, blank = True)
url = models.URLField(blank = True)
email = models.EmailField('email', blank = True)
availability = models.CharField('Disponibilità', max_length = 50, blank = True)
price = models.CharField('Prezzo', max_length = 50, blank = True)
note = models.TextField('note', blank = True)
affiliated = models.BooleanField('convenzionato', default = False)
visible = models.BooleanField('visibile', default = True)
address = models.CharField('indirizzo', max_length = 200, default = '', blank = True)
lng = models.FloatField('longitudine', default = 0.0, blank = True)
lat = models.FloatField('latitudine', default = 0.0, blank = True)
modified = models.DateField(auto_now = True)
class Meta:
ordering = [ 'name' ]
def __unicode__(self):
return self.name
SPECIAL_PLACE_TYPES = (
('conf-hq', 'Conference Site'),
('pyevents', 'PyEvents'),
)
class SpecialPlace(models.Model):
name = models.CharField('nome', max_length = 100)
address = models.CharField('indirizzo', max_length = 200, default = '', blank = True)
type = models.CharField(max_length = 10, choices=SPECIAL_PLACE_TYPES)
url = models.URLField(blank = True)
email = models.EmailField('email', blank = True)
telephone = models.CharField('contatti telefonici', max_length = 50, blank = True)
note = models.TextField('note', blank = True)
visible = models.BooleanField('visibile', default = True)
lng = models.FloatField('longitudine', default = 0.0, blank = True)
lat = models.FloatField('latitudine', default = 0.0, blank = True)
class Meta:
ordering = [ 'name' ]
def __unicode__(self):
return self.name
try:
assert settings.GOOGLE_MAPS['key']
except (KeyError, TypeError, AssertionError):
pass
else:
def postSaveHotelHandler(sender, **kwargs):
query = sender.objects.exclude(address = '').filter(lng = 0.0).filter(lat = 0.0)
for obj in query:
data = conference.gmap.geocode(
obj.address,
settings.GOOGLE_MAPS['key'],
settings.GOOGLE_MAPS.get('country')
)
if data['Status']['code'] == 200:
point = data['Placemark'][0]['Point']['coordinates']
lng, lat = point[0:2]
obj.lng = lng
obj.lat = lat
obj.save()
post_save.connect(postSaveHotelHandler, sender=Hotel)
post_save.connect(postSaveHotelHandler, sender=SpecialPlace)
class DidYouKnow(models.Model):
"""
    Did you know?
"""
visible = models.BooleanField('visible', default = True)
messages = generic.GenericRelation(MultilingualContent)
class Quote(models.Model):
who = models.CharField(max_length=100)
conference = models.CharField(max_length=20)
text = models.TextField()
activity = models.CharField(max_length=50, blank=True)
image = models.ImageField(upload_to=_fs_upload_to('quote', attr=lambda i: slugify(i.who)), blank=True)
class Meta:
ordering = ['conference', 'who']
class VotoTalk(models.Model):
user = models.ForeignKey('auth.User')
talk = models.ForeignKey(Talk)
vote = models.DecimalField(max_digits=5, decimal_places=2)
class Meta:
unique_together = (('user', 'talk'),)
#
#def _clear_track_cache(sender, **kwargs):
# if hasattr(sender, 'schedule_id'):
# Track.objects.clear_cache(sender.schedule_id)
#post_save.connect(_clear_track_cache, sender=Track)
#
#def _clear_talkspeaker_cache(sender, **kwargs):
# o = kwargs['instance']
# if isinstance(o, Talk):
# conference = o.conference
# else:
# conference = None
# TalkSpeaker.objects.clear_cache(conference)
#post_save.connect(_clear_talkspeaker_cache, sender=Talk)
#post_save.connect(_clear_talkspeaker_cache, sender=Speaker)
#
#def _clear_schedule_cache(sender, **kwargs):
# o = kwargs['instance']
# if isinstance(o, Event):
# conference = o.schedule.conference
# else:
# conference = o.event.schedule.conference
# Schedule.objects.clear_cache(conference)
#post_save.connect(_clear_schedule_cache, sender=Event)
#post_save.connect(_clear_schedule_cache, sender=EventInterest)
from conference import listeners
|
PythonSanSebastian/epcon
|
conference/models.py
|
Python
|
bsd-2-clause
| 49,377
|
# Copyright (c) Peter Parente
# Distributed under the terms of the BSD 2-Clause License.
import os
from flask import Flask
from flask_sslify import SSLify
from .model import db
from .auth import oauth
from .ui import ui_bp
from .api import api_bp
app = Flask(__name__)
app.secret_key = os.getenv('SECRET_KEY', os.urandom(24))
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = os.getenv('SQLALCHEMY_DATABASE_URI',
'sqlite:////tmp/bof.db')
app.config['APP_TITLE'] = os.getenv('APP_TITLE', 'Birds of a Feather')
app.config['GITHUB_CONSUMER_KEY'] = os.getenv('GITHUB_CONSUMER_KEY')
app.config['GITHUB_CONSUMER_SECRET'] = os.getenv('GITHUB_CONSUMER_SECRET')
app.register_blueprint(api_bp)
app.register_blueprint(ui_bp)
db.init_app(app)
oauth.init_app(app)
if 'VCAP_SERVICES' in os.environ:
SSLify(app)
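# Example environment for running the app (illustrative values only; the
# variable names are exactly the ones read via os.getenv above):
#   export SECRET_KEY=<random string>
#   export SQLALCHEMY_DATABASE_URI=sqlite:////tmp/bof.db
#   export GITHUB_CONSUMER_KEY=<github oauth app key>
#   export GITHUB_CONSUMER_SECRET=<github oauth app secret>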
|
parente/bof
|
bof/__init__.py
|
Python
|
bsd-2-clause
| 897
|
#!/usr/bin/env python
import os
import sys
PROJECT_DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.append(PROJECT_DIR)
sys.path.append(os.path.abspath(PROJECT_DIR + '/../'))
sys.path.append(os.path.abspath(PROJECT_DIR + '/../realestate/'))
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testproject.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
wm3ndez/realestate
|
testproject/manage.py
|
Python
|
bsd-2-clause
| 464
|
# What should be exported from module
from download_music import run_midi_load
|
MikhailMS/Final_Project
|
download_music/__init__.py
|
Python
|
bsd-2-clause
| 79
|
"""
Module `lino_xl.lib.properties`
-------------------------------
Imagine that we are doing a study about alimentary habits. We observe a
defined series of properties on the people who participate in our study.
Here are the properties that we are going to observe::
>>> weight = properties.INT.create_property(name='weight')
>>> weight.save()
>>> married = properties.BOOL.create_property(name='married')
>>> married.save()
>>> favdish = properties.CHAR.create_property(name='favdish',label='favorite dish')
>>> favdish.save()
>>> favdish.create_value("Cookies").save()
>>> v = favdish.create_value("Fish").save()
>>> favdish.create_value("Meat").save()
>>> favdish.create_value("Vegetables").save()
Now we have setup the properties. Let's have a look at this metadata::
>>> print favdish.choices_list()
[u'Cookies', u'Fish', u'Meat', u'Vegetables']
>>> qs = properties.Property.objects.all()
>>> ["%s (%s)" % (p.name,','.join(map(str,p.choices_list()))) for p in qs]
[u'weight ()', u'married (True,False)', u'favdish (Cookies,Fish,Meat,Vegetables)']
PropValuesByOwner is a report that cannot be rendered into a normal grid because the 'value' column has a variable data type, but its render_to_dict() method is used to fill an `Ext.grid.PropertyGrid`:
>>> properties.PropValuesByOwner().request(master=Person).render_to_dict()
{'count': 3, 'rows': [{'name': u'favdish', 'value': ''}, {'name': u'married', 'value': None}, {'name': u'weight', 'value': None}], 'title': u'Properties for persons'}
Here are the people we are going to analyze::
>>> chris = Person(name='Chris')
>>> chris.save()
>>> fred = Person(name='Fred')
>>> fred.save()
>>> vera = Person(name='Vera')
>>> vera.save()
>>> mary = Person(name='Mary')
>>> mary.save()
Now we are ready to fill in some real data. Chris, Fred and Vera
answered each question together. First we asked them "What's
your weight?", and they answered:
>>> weight.set_value_for(chris,70)
>>> weight.set_value_for(fred,110)
>>> weight.set_value_for(vera,60)
When asked whether they were married, they answered:
>>> married.set_value_for(chris,True)
>>> married.set_value_for(fred,False)
>>> married.set_value_for(vera,True)
And about their favourite dish they answered:
>>> favdish.set_value_for(chris,'Cookies')
>>> favdish.set_value_for(fred,'Fish')
>>> favdish.set_value_for(vera,'Vegetables')
Mary came later. She answered all questions at once, which we can enter
in one line of code:
>>> properties.set_value_for(mary,married=True,favdish='Meat')
Note that Mary didn't know her weight.
To see the property values of a person, we can use a manual query...
>>> qs = properties.PropValue.objects.filter(owner_id=fred.pk).order_by('prop__name')
>>> [v.by_owner() for v in qs]
[u'favdish: Fish', u'married: False', u'weight: 110']
... or use the `PropValuesByOwner` report:
>>> properties.PropValuesByOwner().request(master_instance=fred).render_to_dict()
{'count': 3, 'rows': [{'name': u'favdish', 'value': u'Fish'}, {'name': u'married', 'value': False}, {'name': u'weight', 'value': 110}], 'title': u'Properties for Fred'}
Note how properties.PropValuesByOwner also returns 3 rows for Mary although we don't know her weight:
>>> properties.PropValuesByOwner().request(master_instance=mary).render_to_dict()
{'count': 3, 'rows': [{'name': u'favdish', 'value': u'Meat'}, {'name': u'married', 'value': True}, {'name': u'weight', 'value': None}], 'title': u'Properties for Mary'}
Query by property:
>>> qs = properties.PropValue.objects.filter(prop=weight)
>>> [v.by_property() for v in qs]
[u'Chris: 70', u'Fred: 110', u'Vera: 60']
>>> qs = weight.values_query().order_by('value')
>>> [v.by_property() for v in qs]
[u'Vera: 60', u'Chris: 70', u'Fred: 110']
`Report.as_text()` is currently broken:
>>> #properties.PropValuesByOwner().as_text(fred)
"""
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from lino_xl.lib.properties import models as properties
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Person(models.Model):
name = models.CharField(max_length=20)
properties = generic.GenericRelation(properties.Property)
def __str__(self):
return self.name
|
khchine5/book
|
lino_book/projects/properties/models.py
|
Python
|
bsd-2-clause
| 4,443
|
from os.path import join
from tempfile import mkdtemp
import unittest
from shutil import rmtree
import pandas as pd
from feagen.data_wrappers.pandas_hdf import get_shape_from_pandas_hdf_storer
class Test(unittest.TestCase):
def setUp(self):
self.test_output_dir = mkdtemp(prefix="feagen_test_output_")
pandas_hdf_path = join(self.test_output_dir, "pandas.h5")
self.hdf_store = pd.HDFStore(pandas_hdf_path)
def tearDown(self):
self.hdf_store.close()
rmtree(self.test_output_dir)
def test_get_shape_from_pandas_hdf_storer_df(self):
idx = [1, 2, 3, 5, 4]
col = [10, 9, 6, 7]
df = pd.DataFrame(0, index=idx, columns=col)
self.hdf_store['test'] = df
shape = get_shape_from_pandas_hdf_storer(
self.hdf_store.get_storer('test'))
assert shape == (5, 4)
def test_get_shape_from_pandas_hdf_storer_df_table(self):
idx = [1, 2, 3, 5, 4]
col = [10, 9, 6, 7]
df = pd.DataFrame(0, index=idx, columns=col)
self.hdf_store.put('test', df, format='table')
shape = get_shape_from_pandas_hdf_storer(
self.hdf_store.get_storer('test'))
assert shape == (5, 4)
def test_get_shape_from_pandas_hdf_storer_df_m_idx(self):
idx = pd.MultiIndex.from_product([[0, 1], [0, 1, 2]])
col = [10, 9, 6, 7]
df = pd.DataFrame(0, index=idx, columns=col)
self.hdf_store['test'] = df
shape = get_shape_from_pandas_hdf_storer(
self.hdf_store.get_storer('test'))
assert shape == (6, 4)
def test_get_shape_from_pandas_hdf_storer_df_m_idx_table(self):
idx = pd.MultiIndex.from_product([[0, 1], [0, 1, 2]])
col = [10, 9, 6, 7]
df = pd.DataFrame(0, index=idx, columns=col)
self.hdf_store.put('test', df, format='table')
shape = get_shape_from_pandas_hdf_storer(
self.hdf_store.get_storer('test'))
assert shape == (6, 4)
def test_get_shape_from_pandas_hdf_storer_df_m_col(self):
idx = [10, 9, 6, 7]
col = pd.MultiIndex.from_product([[0, 1], [0, 1, 2]])
df = pd.DataFrame(0, index=idx, columns=col)
self.hdf_store['test'] = df
shape = get_shape_from_pandas_hdf_storer(
self.hdf_store.get_storer('test'))
# TODO: change to (4, 6)
assert shape is None
def test_get_shape_from_pandas_hdf_storer_df_m_col_table(self):
idx = [10, 9, 6, 7]
col = pd.MultiIndex.from_product([[0, 1], [0, 1, 2]])
df = pd.DataFrame(0, index=idx, columns=col)
self.hdf_store.put('test', df, format='table')
shape = get_shape_from_pandas_hdf_storer(
self.hdf_store.get_storer('test'))
assert shape == (4, 6)
def test_get_shape_from_pandas_hdf_storer_df_m_idx_m_col(self):
idx = pd.MultiIndex.from_product([[0, 1], [0, 1, 2]])
col = pd.MultiIndex.from_product([[0, 1], [0, 1]])
df = pd.DataFrame(0, index=idx, columns=col)
self.hdf_store['test'] = df
shape = get_shape_from_pandas_hdf_storer(
self.hdf_store.get_storer('test'))
# TODO: change to (6, 4)
assert shape is None
def test_get_shape_from_pandas_hdf_storer_s(self):
idx = [0, 2, 1, 4, 3]
s = pd.Series(0, index=idx)
self.hdf_store['test'] = s
shape = get_shape_from_pandas_hdf_storer(
self.hdf_store.get_storer('test'))
assert shape == (5,)
def test_get_shape_from_pandas_hdf_storer_s_table(self):
idx = [0, 2, 1, 4, 3]
s = pd.Series(0, index=idx)
self.hdf_store.put('test', s, format='table')
shape = get_shape_from_pandas_hdf_storer(
self.hdf_store.get_storer('test'))
assert shape == (5,)
def test_get_shape_from_pandas_hdf_storer_s_m_idx(self):
idx = pd.MultiIndex.from_product([[0, 1], [0, 1, 2]])
s = pd.Series(0, index=idx)
self.hdf_store['test'] = s
shape = get_shape_from_pandas_hdf_storer(
self.hdf_store.get_storer('test'))
assert shape == (6,)
def test_get_shape_from_pandas_hdf_storer_s_m_idx_table(self):
idx = pd.MultiIndex.from_product([[0, 1], [0, 1, 2]])
s = pd.Series(0, index=idx)
self.hdf_store.put('test', s, format='table')
shape = get_shape_from_pandas_hdf_storer(
self.hdf_store.get_storer('test'))
assert shape == (6,)
|
ianlini/feagen
|
feagen/data_wrappers/tests/test_pandas_hdf.py
|
Python
|
bsd-2-clause
| 4,476
|
import mongoengine as db
class BaseObject(db.Document):
meta = {'allow_inheritance': True}
name = db.StringField(required=True)
tags = db.ListField(db.StringField())
revision = db.IntField(default=1)
|
ameily/pubfs
|
pubfs/core/models.py
|
Python
|
bsd-2-clause
| 225
|
import sys
import time
from entrypoint2 import entrypoint
import pyscreenshot
from pyscreenshot.plugins.gnome_dbus import GnomeDBusWrapper
from pyscreenshot.plugins.gnome_screenshot import GnomeScreenshotWrapper
from pyscreenshot.plugins.kwin_dbus import KwinDBusWrapper
from pyscreenshot.util import run_mod_as_subproc
def run(force_backend, n, childprocess, bbox=None):
sys.stdout.write("%-20s\t" % force_backend)
sys.stdout.flush() # before any crash
if force_backend == "default":
force_backend = None
try:
start = time.time()
for _ in range(n):
pyscreenshot.grab(
backend=force_backend, childprocess=childprocess, bbox=bbox
)
end = time.time()
dt = end - start
s = "%-4.2g sec\t" % dt
s += "(%5d ms per call)" % (1000.0 * dt / n)
sys.stdout.write(s)
finally:
print("")
novirt = [GnomeDBusWrapper.name, KwinDBusWrapper.name, GnomeScreenshotWrapper.name]
def run_all(n, childprocess_param, virtual_only=True, bbox=None):
debug = True
print("")
print("n=%s" % n)
print("------------------------------------------------------")
if bbox:
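        # re-serialize the bbox tuple back into the "x1:y1:x2:y2" CLI form so it
        # can be forwarded to the speedtest subprocess below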
x1, y1, x2, y2 = map(str, bbox)
bbox = ":".join(map(str, (x1, y1, x2, y2)))
bboxpar = ["--bbox", bbox]
else:
bboxpar = []
if debug:
debugpar = ["--debug"]
else:
debugpar = []
for x in ["default"] + pyscreenshot.backends():
backendpar = ["--backend", x]
# skip non X backends
if virtual_only and x in novirt:
continue
p = run_mod_as_subproc(
"pyscreenshot.check.speedtest",
["--childprocess", childprocess_param] + bboxpar + debugpar + backendpar,
)
print(p.stdout)
@entrypoint
def speedtest(virtual_display=False, backend="", childprocess="", bbox="", number=10):
"""Performance test of all back-ends.
:param virtual_display: run with Xvfb
:param bbox: bounding box coordinates x1:y1:x2:y2
:param backend: back-end can be forced if set (example:default, scrot, wx,..),
otherwise all back-ends are tested
:param childprocess: pyscreenshot parameter childprocess (0/1)
:param number: number of screenshots for each backend (default:10)
"""
childprocess_param = childprocess
if childprocess == "":
childprocess = True # default
elif childprocess == "0":
childprocess = False
elif childprocess == "1":
childprocess = True
else:
raise ValueError("invalid childprocess value")
if bbox:
x1, y1, x2, y2 = map(int, bbox.split(":"))
bbox = x1, y1, x2, y2
else:
bbox = None
def f(virtual_only):
if backend:
try:
run(backend, number, childprocess, bbox=bbox)
except pyscreenshot.FailedBackendError:
pass
else:
run_all(number, childprocess_param, virtual_only=virtual_only, bbox=bbox)
if virtual_display:
from pyvirtualdisplay import Display
with Display(visible=0):
f(virtual_only=True)
else:
f(virtual_only=False)
|
ponty/pyscreenshot
|
pyscreenshot/check/speedtest.py
|
Python
|
bsd-2-clause
| 3,228
|
"""
pygments.lexers._postgres_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Self-updating data files for PostgreSQL lexer.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Autogenerated: please edit them if you like wasting your time.
KEYWORDS = (
'ABORT',
'ABSOLUTE',
'ACCESS',
'ACTION',
'ADD',
'ADMIN',
'AFTER',
'AGGREGATE',
'ALL',
'ALSO',
'ALTER',
'ALWAYS',
'ANALYSE',
'ANALYZE',
'AND',
'ANY',
'ARRAY',
'AS',
'ASC',
'ASSERTION',
'ASSIGNMENT',
'ASYMMETRIC',
'AT',
'ATTACH',
'ATTRIBUTE',
'AUTHORIZATION',
'BACKWARD',
'BEFORE',
'BEGIN',
'BETWEEN',
'BIGINT',
'BINARY',
'BIT',
'BOOLEAN',
'BOTH',
'BY',
'CACHE',
'CALL',
'CALLED',
'CASCADE',
'CASCADED',
'CASE',
'CAST',
'CATALOG',
'CHAIN',
'CHAR',
'CHARACTER',
'CHARACTERISTICS',
'CHECK',
'CHECKPOINT',
'CLASS',
'CLOSE',
'CLUSTER',
'COALESCE',
'COLLATE',
'COLLATION',
'COLUMN',
'COLUMNS',
'COMMENT',
'COMMENTS',
'COMMIT',
'COMMITTED',
'CONCURRENTLY',
'CONFIGURATION',
'CONFLICT',
'CONNECTION',
'CONSTRAINT',
'CONSTRAINTS',
'CONTENT',
'CONTINUE',
'CONVERSION',
'COPY',
'COST',
'CREATE',
'CROSS',
'CSV',
'CUBE',
'CURRENT',
'CURRENT_CATALOG',
'CURRENT_DATE',
'CURRENT_ROLE',
'CURRENT_SCHEMA',
'CURRENT_TIME',
'CURRENT_TIMESTAMP',
'CURRENT_USER',
'CURSOR',
'CYCLE',
'DATA',
'DATABASE',
'DAY',
'DEALLOCATE',
'DEC',
'DECIMAL',
'DECLARE',
'DEFAULT',
'DEFAULTS',
'DEFERRABLE',
'DEFERRED',
'DEFINER',
'DELETE',
'DELIMITER',
'DELIMITERS',
'DEPENDS',
'DESC',
'DETACH',
'DICTIONARY',
'DISABLE',
'DISCARD',
'DISTINCT',
'DO',
'DOCUMENT',
'DOMAIN',
'DOUBLE',
'DROP',
'EACH',
'ELSE',
'ENABLE',
'ENCODING',
'ENCRYPTED',
'END',
'ENUM',
'ESCAPE',
'EVENT',
'EXCEPT',
'EXCLUDE',
'EXCLUDING',
'EXCLUSIVE',
'EXECUTE',
'EXISTS',
'EXPLAIN',
'EXPRESSION',
'EXTENSION',
'EXTERNAL',
'EXTRACT',
'FALSE',
'FAMILY',
'FETCH',
'FILTER',
'FIRST',
'FLOAT',
'FOLLOWING',
'FOR',
'FORCE',
'FOREIGN',
'FORWARD',
'FREEZE',
'FROM',
'FULL',
'FUNCTION',
'FUNCTIONS',
'GENERATED',
'GLOBAL',
'GRANT',
'GRANTED',
'GREATEST',
'GROUP',
'GROUPING',
'GROUPS',
'HANDLER',
'HAVING',
'HEADER',
'HOLD',
'HOUR',
'IDENTITY',
'IF',
'ILIKE',
'IMMEDIATE',
'IMMUTABLE',
'IMPLICIT',
'IMPORT',
'IN',
'INCLUDE',
'INCLUDING',
'INCREMENT',
'INDEX',
'INDEXES',
'INHERIT',
'INHERITS',
'INITIALLY',
'INLINE',
'INNER',
'INOUT',
'INPUT',
'INSENSITIVE',
'INSERT',
'INSTEAD',
'INT',
'INTEGER',
'INTERSECT',
'INTERVAL',
'INTO',
'INVOKER',
'IS',
'ISNULL',
'ISOLATION',
'JOIN',
'KEY',
'LABEL',
'LANGUAGE',
'LARGE',
'LAST',
'LATERAL',
'LEADING',
'LEAKPROOF',
'LEAST',
'LEFT',
'LEVEL',
'LIKE',
'LIMIT',
'LISTEN',
'LOAD',
'LOCAL',
'LOCALTIME',
'LOCALTIMESTAMP',
'LOCATION',
'LOCK',
'LOCKED',
'LOGGED',
'MAPPING',
'MATCH',
'MATERIALIZED',
'MAXVALUE',
'METHOD',
'MINUTE',
'MINVALUE',
'MODE',
'MONTH',
'MOVE',
'NAME',
'NAMES',
'NATIONAL',
'NATURAL',
'NCHAR',
'NEW',
'NEXT',
'NFC',
'NFD',
'NFKC',
'NFKD',
'NO',
'NONE',
'NORMALIZE',
'NORMALIZED',
'NOT',
'NOTHING',
'NOTIFY',
'NOTNULL',
'NOWAIT',
'NULL',
'NULLIF',
'NULLS',
'NUMERIC',
'OBJECT',
'OF',
'OFF',
'OFFSET',
'OIDS',
'OLD',
'ON',
'ONLY',
'OPERATOR',
'OPTION',
'OPTIONS',
'OR',
'ORDER',
'ORDINALITY',
'OTHERS',
'OUT',
'OUTER',
'OVER',
'OVERLAPS',
'OVERLAY',
'OVERRIDING',
'OWNED',
'OWNER',
'PARALLEL',
'PARSER',
'PARTIAL',
'PARTITION',
'PASSING',
'PASSWORD',
'PLACING',
'PLANS',
'POLICY',
'POSITION',
'PRECEDING',
'PRECISION',
'PREPARE',
'PREPARED',
'PRESERVE',
'PRIMARY',
'PRIOR',
'PRIVILEGES',
'PROCEDURAL',
'PROCEDURE',
'PROCEDURES',
'PROGRAM',
'PUBLICATION',
'QUOTE',
'RANGE',
'READ',
'REAL',
'REASSIGN',
'RECHECK',
'RECURSIVE',
'REF',
'REFERENCES',
'REFERENCING',
'REFRESH',
'REINDEX',
'RELATIVE',
'RELEASE',
'RENAME',
'REPEATABLE',
'REPLACE',
'REPLICA',
'RESET',
'RESTART',
'RESTRICT',
'RETURNING',
'RETURNS',
'REVOKE',
'RIGHT',
'ROLE',
'ROLLBACK',
'ROLLUP',
'ROUTINE',
'ROUTINES',
'ROW',
'ROWS',
'RULE',
'SAVEPOINT',
'SCHEMA',
'SCHEMAS',
'SCROLL',
'SEARCH',
'SECOND',
'SECURITY',
'SELECT',
'SEQUENCE',
'SEQUENCES',
'SERIALIZABLE',
'SERVER',
'SESSION',
'SESSION_USER',
'SET',
'SETOF',
'SETS',
'SHARE',
'SHOW',
'SIMILAR',
'SIMPLE',
'SKIP',
'SMALLINT',
'SNAPSHOT',
'SOME',
'SQL',
'STABLE',
'STANDALONE',
'START',
'STATEMENT',
'STATISTICS',
'STDIN',
'STDOUT',
'STORAGE',
'STORED',
'STRICT',
'STRIP',
'SUBSCRIPTION',
'SUBSTRING',
'SUPPORT',
'SYMMETRIC',
'SYSID',
'SYSTEM',
'TABLE',
'TABLES',
'TABLESAMPLE',
'TABLESPACE',
'TEMP',
'TEMPLATE',
'TEMPORARY',
'TEXT',
'THEN',
'TIES',
'TIME',
'TIMESTAMP',
'TO',
'TRAILING',
'TRANSACTION',
'TRANSFORM',
'TREAT',
'TRIGGER',
'TRIM',
'TRUE',
'TRUNCATE',
'TRUSTED',
'TYPE',
'TYPES',
'UESCAPE',
'UNBOUNDED',
'UNCOMMITTED',
'UNENCRYPTED',
'UNION',
'UNIQUE',
'UNKNOWN',
'UNLISTEN',
'UNLOGGED',
'UNTIL',
'UPDATE',
'USER',
'USING',
'VACUUM',
'VALID',
'VALIDATE',
'VALIDATOR',
'VALUE',
'VALUES',
'VARCHAR',
'VARIADIC',
'VARYING',
'VERBOSE',
'VERSION',
'VIEW',
'VIEWS',
'VOLATILE',
'WHEN',
'WHERE',
'WHITESPACE',
'WINDOW',
'WITH',
'WITHIN',
'WITHOUT',
'WORK',
'WRAPPER',
'WRITE',
'XML',
'XMLATTRIBUTES',
'XMLCONCAT',
'XMLELEMENT',
'XMLEXISTS',
'XMLFOREST',
'XMLNAMESPACES',
'XMLPARSE',
'XMLPI',
'XMLROOT',
'XMLSERIALIZE',
'XMLTABLE',
'YEAR',
'YES',
'ZONE',
)
DATATYPES = (
'bigint',
'bigserial',
'bit',
'bit varying',
'bool',
'boolean',
'box',
'bytea',
'char',
'character',
'character varying',
'cidr',
'circle',
'date',
'decimal',
'double precision',
'float4',
'float8',
'inet',
'int',
'int2',
'int4',
'int8',
'integer',
'interval',
'json',
'jsonb',
'line',
'lseg',
'macaddr',
'macaddr8',
'money',
'numeric',
'path',
'pg_lsn',
'pg_snapshot',
'point',
'polygon',
'real',
'serial',
'serial2',
'serial4',
'serial8',
'smallint',
'smallserial',
'text',
'time',
'timestamp',
'timestamptz',
'timetz',
'tsquery',
'tsvector',
'txid_snapshot',
'uuid',
'varbit',
'varchar',
'with time zone',
'without time zone',
'xml',
)
PSEUDO_TYPES = (
'any',
'anyarray',
'anycompatible',
'anycompatiblearray',
'anycompatiblenonarray',
'anycompatiblerange',
'anyelement',
'anyenum',
'anynonarray',
'anyrange',
'cstring',
'event_trigger',
'fdw_handler',
'index_am_handler',
'internal',
'language_handler',
'pg_ddl_command',
'record',
'table_am_handler',
'trigger',
'tsm_handler',
'unknown',
'void',
)
# Remove pseudo types that are also plain keywords (e.g. 'trigger')
PSEUDO_TYPES = tuple(sorted(set(PSEUDO_TYPES) - set(map(str.lower, KEYWORDS))))
PLPGSQL_KEYWORDS = (
'ALIAS', 'CONSTANT', 'DIAGNOSTICS', 'ELSIF', 'EXCEPTION', 'EXIT',
'FOREACH', 'GET', 'LOOP', 'NOTICE', 'OPEN', 'PERFORM', 'QUERY', 'RAISE',
'RETURN', 'REVERSE', 'SQLSTATE', 'WHILE',
)
if __name__ == '__main__': # pragma: no cover
import re
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
from pygments.util import format_lines
# One man's constant is another man's variable.
SOURCE_URL = 'https://github.com/postgres/postgres/raw/master'
KEYWORDS_URL = SOURCE_URL + '/src/include/parser/kwlist.h'
DATATYPES_URL = SOURCE_URL + '/doc/src/sgml/datatype.sgml'
def update_myself():
content = urlopen(DATATYPES_URL).read().decode('utf-8', errors='ignore')
data_file = list(content.splitlines())
datatypes = parse_datatypes(data_file)
pseudos = parse_pseudos(data_file)
content = urlopen(KEYWORDS_URL).read().decode('utf-8', errors='ignore')
keywords = parse_keywords(content)
update_consts(__file__, 'DATATYPES', datatypes)
update_consts(__file__, 'PSEUDO_TYPES', pseudos)
update_consts(__file__, 'KEYWORDS', keywords)
def parse_keywords(f):
kw = []
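        # kwlist.h declares each keyword as PG_KEYWORD("name", ...); capture the
        # quoted name (upper-cased below).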
for m in re.finditer(r'PG_KEYWORD\("(.+?)"', f):
kw.append(m.group(1).upper())
if not kw:
raise ValueError('no keyword found')
kw.sort()
return kw
def parse_datatypes(f):
dt = set()
for line in f:
if '<sect1' in line:
break
if '<entry><type>' not in line:
continue
# Parse a string such as
# time [ (<replaceable>p</replaceable>) ] [ without time zone ]
# into types "time" and "without time zone"
# remove all the tags
line = re.sub("<replaceable>[^<]+</replaceable>", "", line)
line = re.sub("<[^>]+>", "", line)
# Drop the parts containing braces
for tmp in [t for tmp in line.split('[')
for t in tmp.split(']') if "(" not in t]:
for t in tmp.split(','):
t = t.strip()
if not t: continue
dt.add(" ".join(t.split()))
dt = list(dt)
dt.sort()
return dt
def parse_pseudos(f):
dt = []
re_start = re.compile(r'\s*<table id="datatype-pseudotypes-table">')
re_entry = re.compile(r'\s*<entry><type>(.+?)</type></entry>')
re_end = re.compile(r'\s*</table>')
f = iter(f)
for line in f:
if re_start.match(line) is not None:
break
else:
raise ValueError('pseudo datatypes table not found')
for line in f:
m = re_entry.match(line)
if m is not None:
dt.append(m.group(1))
if re_end.match(line) is not None:
break
else:
raise ValueError('end of pseudo datatypes table not found')
if not dt:
raise ValueError('pseudo datatypes not found')
dt.sort()
return dt
def update_consts(filename, constname, content):
with open(filename) as f:
data = f.read()
# Line to start/end inserting
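        # i.e. the whole multi-line block "CONSTNAME = ( ... )", matched across
        # lines thanks to re.M | re.S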
re_match = re.compile(r'^%s\s*=\s*\($.*?^\s*\)$' % constname, re.M | re.S)
m = re_match.search(data)
if not m:
raise ValueError('Could not find existing definition for %s' %
(constname,))
new_block = format_lines(constname, content)
data = data[:m.start()] + new_block + data[m.end():]
with open(filename, 'w', newline='\n') as f:
f.write(data)
update_myself()
|
dscorbett/pygments
|
pygments/lexers/_postgres_builtins.py
|
Python
|
bsd-2-clause
| 12,184
|
#!/usr/bin/env python
# -*-coding:utf-8-*-
# @Time : 2017/11/1 ~ 2019/9/1
# @Author : Allen Woo
from flask import request
from apps.core.flask.login_manager import osr_login_required
from apps.configs.sys_config import METHOD_WARNING
from apps.core.blueprint import api
from apps.core.flask.permission import permission_required
from apps.core.flask.response import response_format
from apps.modules.category.process.theme_setting_category import categorys, category_add, category_edit, \
category_delete, get_category_type
@api.route('/admin/content/theme-category', methods=['GET', 'POST', 'PUT', 'DELETE'])
@osr_login_required
@permission_required(use_default=False)
def api_theme_category():
"""
    GET:
        action:<str>, either get_category or get_category_type; defaults to get_category
        1. Get every category of the given type for the current user:
            action:<str>, get_category
            type:<str>, one of the category types you configured (category in
                config.py), which can also be set in the site admin
            theme_name:<str>
        2. Get all types, i.e. every CATEGORY TYPE of category in config.py:
            action:<str>, get_category_type
            theme_name:<str>
        Note:
            Categories are grouped into several types;
            e.g. the type "post" can have several categories.
    POST:
        Add a collection (category):
        name:<str>
        type:<str>, must be one of the category types you configured (category
            in config.py or in the site admin)
        theme_name:<str>
    PUT:
        Edit a collection (category):
        id:<str>, post category id
        name:<str>
    DELETE:
        Delete collections (categories):
        ids:<array>, post category ids
"""
if request.c_method == "GET":
if not request.argget.all("action") == "get_category_type":
data = categorys(user_id=0)
else:
data = get_category_type()
elif request.c_method == "POST":
data = category_add(user_id=0)
elif request.c_method == "PUT":
data = category_edit(user_id=0)
elif request.c_method == "DELETE":
data = category_delete(user_id=0)
else:
data = {"msg_type": "w", "msg": METHOD_WARNING, "custom_status": 405}
return response_format(data)
|
osroom/osroom
|
apps/modules/category/apis/theme_category.py
|
Python
|
bsd-2-clause
| 2,262
|
"""Leetcode 796. Rotate String
Easy
URL: https://leetcode.com/problems/rotate-string/
We are given two strings, A and B.
A shift on A consists of taking string A and moving the leftmost character to
the rightmost position. For example, if A = 'abcde', then it will be 'bcdea'
after one shift on A. Return True if and only if A can become B after some
number of shifts on A.
Example 1:
Input: A = 'abcde', B = 'cdeab'
Output: true
Example 2:
Input: A = 'abcde', B = 'abced'
Output: false
Note:
A and B will have length at most 100.
"""
class SolutionStringConcatSubstring(object):
def rotateString(self, A, B):
"""
:type A: str
:type B: str
:rtype: bool
Time complexity: O(2n+2n*n)=O(n^2).
Space complexity:O(n).
"""
# Check if lengths are not equal.
if len(A) != len(B):
return False
        # If B is a rotation of A, then B must be a substring of the concatenation A + A.
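        # For example, A = 'abcde' gives A + A = 'abcdeabcde', which contains the rotation B = 'cdeab'.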
AA = A + A
if B in AA:
return True
else:
return False
def main():
# Input: A = 'abcde', B = 'cdeab'
# Output: true
A = 'abcde'
B = 'cdeab'
print SolutionStringConcatSubstring().rotateString(A, B)
# Input: A = 'abcde', B = 'abced'
# Output: false
A = 'abcde'
B = 'abced'
print SolutionStringConcatSubstring().rotateString(A, B)
if __name__ == '__main__':
main()
|
bowen0701/algorithms_data_structures
|
lc0796_rotate_string.py
|
Python
|
bsd-2-clause
| 1,408
|
# test_patternchain.py -- Tests for Pattern Chains
"""Tests for Pattern Chain objects"""
from morph import (
pattern,
patternchain
)
from morph.pattern import (
LiteralPattern,
NumericCounterPattern,
)
from morph.patternchain import (
generateFullReplaceChain,
PatternChain,
FilePatternChain,
)
from morph.errors import (
PatternModeError
)
from morph.tests import TestCase
class PatternChainTestCase(TestCase):
def testGenFullReplace(self):
chain = patternchain.generateFullReplaceChain([
'abc_',
'###'])
litpat = LiteralPattern('abc_', mode = pattern.MODE_REPLACE)
numcountpat = NumericCounterPattern(1, 3)
self.assertEqual(PatternChain([litpat, numcountpat]), chain)
def testStr(self):
chain = patternchain.generateFullReplaceChain([
'abc_',
'###'])
self.assertEqual("\tLiteral (replace, abc_)\n"
"\tNumericCounter (append, 1, 1, 3)\n",
str(chain))
def testAppendApply(self):
appendPat0 = LiteralPattern('abc')
appendPat1 = LiteralPattern('123')
chain = PatternChain([appendPat0, appendPat1])
self.assertEqual(['fileabc123'],
chain.apply_to_strings(['file']))
self.assertEqual(['file0abc123', 'file1abc123', 'file2abc123'],
chain.apply_to_strings(['file0', 'file1', 'file2']))
def testReplaceApply(self):
appendPat0 = LiteralPattern('abc_', mode = pattern.MODE_REPLACE)
appendPat1 = NumericCounterPattern(1, 2)
chain = PatternChain([appendPat0, appendPat1])
self.assertEqual(['abc_01'],
chain.apply_to_strings(['file']))
chain.reset()
self.assertEqual(['abc_01', 'abc_02', 'abc_03'],
chain.apply_to_strings(['file0', 'file1', 'file2']))
class FilePatternChainTestCase(TestCase):
def testApply(self):
chain = FilePatternChain()
chain.insert_file('file5', 5)
chain.insert_file('file1.5', 2)
chain.delete_file(0)
chain.move_file(0, 2)
chain.delete_file(2)
self.assertEqual(
['file1.5', 'file2', 'file3', 'file4', 'file5'],
chain.apply_to_strings(
['file0', 'file1', 'file2', 'file3', 'file4'])
)
def testMap(self):
chain = FilePatternChain()
chain.insert_file('file5', 5)
chain.insert_file('file1.5', 2)
chain.delete_file(0)
chain.move_file(0, 2)
chain.delete_file(2)
self.assertEqual(
[(None, 'file1.5'),
('file2', 'file2'),
('file3', 'file3'),
('file4', 'file4'),
(None, 'file5'),
('file0', None),
('file1', None)],
chain.map_to_strings(
['file0', 'file1', 'file2', 'file3', 'file4'])
)
def testStr(self):
chain = FilePatternChain()
chain.insert_file('file5', 4)
chain.insert_file('file1.5', 2)
chain.delete_file(0)
chain.move_file(0, 2)
chain.delete_file(2)
self.assertEqual("\t('insert', 'file5', 4)\n"
"\t('insert', 'file1.5', 2)\n"
"\t('delete', 0)\n"
"\t('move', 0, 2)\n"
"\t('delete', 2)\n",
str(chain))
|
milki/morph
|
morph/tests/test_patternchain.py
|
Python
|
bsd-2-clause
| 3,569
|
# -*- coding: UTF-8 -*-
import logging
from model_utils import Choices
from simptools.wrappers.http import HttpClient, HttpRequest
from requests.exceptions import ConnectionError
from payway.merchants.models import Merchant
__author__ = 'Razzhivin Alexander'
__email__ = 'admin@httpbots.com'
RESPONSE_STATUS = Choices(
('OK', 'OK'),
)
class MerchantHttpRequest(HttpRequest):
def __init__(self, merchant, order):
self.merchant = merchant
self.order = order
if self.merchant.result_url_method == Merchant.URL_METHODS.GET:
self.__set_GET()
else:
self.__set_POST()
def __set_POST(self, *args, **kwargs):
self.POST = self.__request()
def __set_GET(self, *args, **kwargs):
self.GET = self.__request()
def __request(self):
return {
'url': self.merchant.result_url,
'data': {
'uid': self.order.uid,
'is_paid': self.order.is_paid,
'sum': self.order.sum.amount,
'sum_currency': self.order.sum_currency,
'description': self.order.description,
}
}
class MerchantHttpClient(HttpClient):
@classmethod
def notify(cls, merchant, order):
result = ''
try:
request = MerchantHttpRequest(merchant, order)
response = cls.execute(request)
result = response.text
except ConnectionError:
logging.warn('Problems when connecting to merchant {0}'.format(merchant.result_url))
return result
|
RANUX/django-payway
|
payway/merchants/http.py
|
Python
|
bsd-2-clause
| 1,585
|
###
### This file was automatically generated
###
from archinfo.arch import register_arch, Endness, Register
from .common import ArchPcode
class ArchPcode_dsPIC33E_LE_24_default(ArchPcode):
name = 'dsPIC33E:LE:24:default'
pcode_arch = 'dsPIC33E:LE:24:default'
description = 'dsPIC33E'
bits = 24
ip_offset = 0x2e
sp_offset = 0x1e
bp_offset = sp_offset
instruction_endness = Endness.LE
register_list = [
Register('w1w0', 4, 0x0),
Register('w0', 2, 0x0),
Register('w0byte', 1, 0x0),
Register('w1', 2, 0x2),
Register('w1byte', 1, 0x2),
Register('w3w2', 4, 0x4),
Register('w2', 2, 0x4),
Register('w2byte', 1, 0x4),
Register('w3', 2, 0x6),
Register('w3byte', 1, 0x6),
Register('w5w4', 4, 0x8),
Register('w4', 2, 0x8),
Register('w4byte', 1, 0x8),
Register('w5', 2, 0xa),
Register('w5byte', 1, 0xa),
Register('w7w6', 4, 0xc),
Register('w6', 2, 0xc),
Register('w6byte', 1, 0xc),
Register('w7', 2, 0xe),
Register('w7byte', 1, 0xe),
Register('w9w8', 4, 0x10),
Register('w8', 2, 0x10),
Register('w8byte', 1, 0x10),
Register('w9', 2, 0x12),
Register('w9byte', 1, 0x12),
Register('w11w10', 4, 0x14),
Register('w10', 2, 0x14),
Register('w10byte', 1, 0x14),
Register('w11', 2, 0x16),
Register('w11byte', 1, 0x16),
Register('w13w12', 4, 0x18),
Register('w12', 2, 0x18),
Register('w12byte', 1, 0x18),
Register('w13', 2, 0x1a),
Register('w13byte', 1, 0x1a),
Register('w15w14', 4, 0x1c),
Register('w14', 2, 0x1c),
Register('w14byte', 1, 0x1c),
Register('w15', 2, 0x1e),
Register('w15byte', 1, 0x1e),
Register('splim', 2, 0x20),
Register('acca', 6, 0x22),
Register('accal', 2, 0x22),
Register('accah', 2, 0x24),
Register('accau', 2, 0x26),
Register('accb', 6, 0x28),
Register('accbl', 2, 0x28),
Register('accbh', 2, 0x2a),
Register('accbu', 2, 0x2c),
Register('pc', 3, 0x2e, alias_names=('ip',)),
Register('dsrpag', 2, 0x32),
Register('dswpag', 2, 0x34),
Register('rcount', 2, 0x36),
Register('corcon', 2, 0x44),
Register('modcon', 2, 0x46),
Register('xmodsrt', 2, 0x48),
Register('xmodend', 2, 0x4a),
Register('ymodsrt', 2, 0x4c),
Register('ymodend', 2, 0x4e),
Register('xbrev', 2, 0x50),
Register('disicnt', 2, 0x52),
Register('tblpag', 1, 0x54),
Register('shadow_w0', 2, 0x0),
Register('shadow_w1', 2, 0x2),
Register('shadow_w2', 2, 0x4),
Register('shadow_w3', 2, 0x6),
Register('srl', 1, 0x400),
Register('srh', 1, 0x401),
Register('srh_oa', 1, 0x600),
Register('srh_ob', 1, 0x601),
Register('srh_sa', 1, 0x602),
Register('srh_sb', 1, 0x603),
Register('srh_oab', 1, 0x604),
Register('srh_sab', 1, 0x605),
Register('srh_da', 1, 0x606),
Register('srh_dc', 1, 0x607),
Register('srl_ipl2', 1, 0x608),
Register('srl_ipl1', 1, 0x609),
Register('srl_ipl0', 1, 0x60a),
Register('srl_ra', 1, 0x60b),
Register('srl_n', 1, 0x60c),
Register('srl_ov', 1, 0x60d),
Register('srl_z', 1, 0x60e),
Register('srl_c', 1, 0x60f),
Register('disi', 1, 0x610),
Register('shadow_srh_dc', 1, 0x611),
Register('shadow_srl_n', 1, 0x612),
Register('shadow_srl_ov', 1, 0x613),
Register('shadow_srl_z', 1, 0x614),
Register('shadow_srl_c', 1, 0x615),
Register('dostart', 3, 0x800),
Register('dostart1', 3, 0x803),
Register('dostart2', 3, 0x806),
Register('dostart3', 3, 0x809),
Register('doend', 3, 0x80c),
Register('doend1', 3, 0x80f),
Register('doend2', 3, 0x812),
Register('doend3', 3, 0x815),
Register('dostart_shadow', 3, 0x818),
Register('doend_shadow', 3, 0x81b),
Register('wdtcount', 2, 0xa00),
Register('wdtprescalara', 2, 0xa02),
Register('wdtprescalarb', 2, 0xa04),
Register('corcon_var', 1, 0xc00),
Register('corcon_ipl3', 1, 0xc01),
Register('corcon_psv', 1, 0xc02),
Register('corcon_sfa', 1, 0xc03),
Register('corcon_dl', 1, 0xc04),
Register('dcount', 2, 0x1000),
Register('dcount1', 2, 0x1002),
Register('dcount2', 2, 0x1004),
Register('dcount3', 2, 0x1006),
Register('skipnextflag', 1, 0x1200),
Register('contextreg', 4, 0x1400)
]
register_arch(['dspic33e:le:24:default'], 24, Endness.LE, ArchPcode_dsPIC33E_LE_24_default)
|
angr/angr
|
angr/engines/pcode/arch/ArchPcode_dsPIC33E_LE_24_default.py
|
Python
|
bsd-2-clause
| 4,846
|
import math
def get_direction(src, target):
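    # Unit vector pointing from src toward target; [0, 0] if the points coincide.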
diff = map(lambda a, b: a - b, target, src)
mag = math.sqrt(sum(map(lambda a: a ** 2, diff)))
if mag == 0:
return [0, 0]
return map(lambda a: a / mag, diff)
def distance(pos1, pos2):
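    # Euclidean distance between two points of equal dimension.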
return math.sqrt(sum(map(lambda a: a ** 2, map(lambda a, b: a - b, pos1, pos2))))
def magnitude(vector):
return math.sqrt(sum(map(lambda a: a ** 2, vector)))
class Drawable(object):
def draw(self, surface, camera=(0, 0)):
coordinates = (self.rect.left - camera[0], self.rect.top - camera[1])
surface.blit(self.image, coordinates)
|
Wopple/fimbulvetr
|
src/client/util.py
|
Python
|
bsd-3-clause
| 613
|
def extractNotsogoodtranslatorWordpressCom(item):
'''
Parser for 'notsogoodtranslator.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractNotsogoodtranslatorWordpressCom.py
|
Python
|
bsd-3-clause
| 578
|
import httpbenchmark
from unuk.benchmarks.base import runtests
|
pombredanne/unuk
|
src/unuk/benchmarks/__init__.py
|
Python
|
bsd-3-clause
| 63
|
from datetime import datetime, timedelta
import csv
from cStringIO import StringIO
from django.test import TestCase
from django.contrib.auth.models import User
from django.test import Client
from django.http import HttpRequest, QueryDict, response
from mock import patch, Mock
from .models import Variable, Session, Visitor, SESSION_TIMEOUT, VISITOR_FIELDS
from .views import AppLaunch
import utils
import urllib
class ViewTests(TestCase):
def setUp(self):
self.user = User.objects.create(username='testuser',
email='testuser@example.com')
self.user.set_password('password')
self.user.save()
profile = self.user.userprofile
profile_data = {
'country': 'USA',
}
for field in profile_data:
setattr(profile, field, profile_data[field])
profile.save()
self.visitor = Visitor.objects.create()
self.session = Session.objects.create(visitor=self.visitor)
def createRequest(self, user=None):
self.request = Mock()
if user is not None:
self.request.user = user
# sample request with mocked ip address
self.request.META = {
'HTTP_X_FORWARDED_FOR': '192.168.255.182, 10.0.0.0,' +
'127.0.0.1, 198.84.193.157, '
'177.139.233.139',
'HTTP_X_REAL_IP': '177.139.233.132',
'REMOTE_ADDR': '177.139.233.133',
}
self.request.method = 'GET'
self.request.session = {}
return self.request
def test_get(self):
# check that there are no logs for app_launch
app_lauch_cnt = Variable.objects.filter(name='app_launch').count()
self.assertEqual(app_lauch_cnt, 0)
# create a mock request object
r = self.createRequest(self.user)
# build request 'GET'
res_id = 'D7a7de92941a044049a7b8ad09f4c75bb'
res_type = 'GenericResource'
app_name = 'test'
request_url = 'https://apps.hydroshare.org/apps/hydroshare-gis/' \
'?res_id=%s&res_type=%s' % (res_id, res_type)
app_url = urllib.quote(request_url)
href = 'url=%s;name=%s' % (app_url, app_name)
r.GET = QueryDict(href)
# invoke the app logging endpoint
app_logging = AppLaunch()
url_redirect = app_logging.get(r)
# validate response
self.assertTrue(type(url_redirect) == response.HttpResponseRedirect)
self.assertTrue(url_redirect.url == request_url)
# validate logged data
app_lauch_cnt = Variable.objects.filter(name='app_launch').count()
self.assertEqual(app_lauch_cnt, 1)
data = list(Variable.objects.filter(name='app_launch'))
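        # Logged values are stored as pipe-delimited key=value pairs.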
values = dict(tuple(pair.split('=')) for pair in data[0].value.split('|'))
self.assertTrue('res_type' in values.keys())
self.assertTrue('name' in values.keys())
self.assertTrue('user_email_domain' in values.keys())
self.assertTrue('user_type' in values.keys())
self.assertTrue('user_ip' in values.keys())
self.assertTrue('res_id' in values.keys())
self.assertTrue(values['res_type'] == res_type)
self.assertTrue(values['name'] == app_name)
self.assertTrue(values['user_email_domain'] == self.user.email[-3:])
self.assertTrue(values['user_type'] == 'Unspecified')
self.assertTrue(values['user_ip'] == '198.84.193.157')
self.assertTrue(values['res_id'] == res_id)
class TrackingTests(TestCase):
def setUp(self):
self.user = User.objects.create(username='testuser',
email='testuser@example.com')
self.user.set_password('password')
self.user.save()
profile = self.user.userprofile
profile_data = {
'country': 'USA',
}
for field in profile_data:
setattr(profile, field, profile_data[field])
profile.save()
self.visitor = Visitor.objects.create()
self.session = Session.objects.create(visitor=self.visitor)
def createRequest(self, user=None):
request = Mock()
if user is not None:
request.user = user
# sample request with mocked ip address
request.META = {
'HTTP_X_FORWARDED_FOR': '192.168.255.182, 10.0.0.0, ' +
'127.0.0.1, 198.84.193.157, '
'177.139.233.139',
'HTTP_X_REAL_IP': '177.139.233.132',
'REMOTE_ADDR': '177.139.233.133',
}
return request
def test_record_variable(self):
self.session.record('int', 42)
self.session.record('float', 3.14)
self.session.record('true', True)
self.session.record('false', False)
self.session.record('text', "Hello, World")
self.assertEqual("42", self.session.variable_set.get(name='int').value)
self.assertEqual("3.14", self.session.variable_set.get(name='float').value)
self.assertEqual("true", self.session.variable_set.get(name='true').value)
self.assertEqual("false", self.session.variable_set.get(name='false').value)
self.assertEqual('Hello, World', self.session.variable_set.get(name='text').value)
def test_record_bad_value(self):
self.assertRaises(TypeError, self.session.record, 'bad', ['oh no i cannot handle arrays'])
def test_get(self):
self.assertEqual(42, Variable(name='var', value='42', type=0).get_value())
self.assertEqual(3.14, Variable(name='var', value='3.14', type=1).get_value())
self.assertEqual(True, Variable(name='var', value='true', type=3).get_value())
self.assertEqual(False, Variable(name='var', value='false', type=3).get_value())
self.assertEqual("X", Variable(name='var', value='X', type=2).get_value())
self.assertEqual(None, Variable(name='var', value='', type=4).get_value())
def test_for_request_new(self):
request = self.createRequest(user=self.user)
request.session = {}
session = Session.objects.for_request(request)
self.assertIn('hs_tracking_id', request.session)
self.assertEqual(session.visitor.user.id, self.user.id)
def test_for_request_existing(self):
request = self.createRequest(user=self.user)
request.session = {}
session1 = Session.objects.for_request(request)
session2 = Session.objects.for_request(request)
self.assertEqual(session1.id, session2.id)
def test_for_request_expired(self):
request = self.createRequest(user=self.user)
request.session = {}
session1 = Session.objects.for_request(request)
with patch('hs_tracking.models.datetime') as dt_mock:
dt_mock.now.return_value = datetime.now() + timedelta(seconds=SESSION_TIMEOUT)
session2 = Session.objects.for_request(request)
self.assertNotEqual(session1.id, session2.id)
self.assertEqual(session1.visitor.id, session2.visitor.id)
def test_for_other_user(self):
request = self.createRequest(user=self.user)
request.session = {}
session1 = Session.objects.for_request(request)
user2 = User.objects.create(username='testuser2', email='testuser2@example.com')
request = self.createRequest(user=user2)
request.session = {}
session2 = Session.objects.for_request(request)
self.assertNotEqual(session1.id, session2.id)
self.assertNotEqual(session1.visitor.id, session2.visitor.id)
def test_export_visitor_info(self):
request = self.createRequest(user=self.user)
request.session = {}
session1 = Session.objects.for_request(request)
info = session1.visitor.export_visitor_information()
self.assertEqual(info['country'], 'USA')
self.assertEqual(info['username'], 'testuser')
def test_tracking_view(self):
self.user.is_staff = True
self.user.save()
client = Client()
client.login(username=self.user.username, password='password')
response = client.get('/hydroshare/tracking/reports/profiles/')
reader = csv.reader(StringIO(response.content))
rows = list(reader)
self.assertEqual(response.status_code, 200)
self.assertEqual(rows[0], VISITOR_FIELDS)
i = VISITOR_FIELDS.index('username')
# Row 1 is the original unauthenticated session created in setUp()
self.assertEqual(rows[1][i], '')
# Row 2 is the user we just authenticated
self.assertEqual(rows[2][i], self.user.username)
def test_history_empty(self):
self.user.is_staff = True
self.user.save()
client = Client()
response = client.get('/hydroshare/tracking/reports/history/')
self.assertEqual(response.status_code, 200)
reader = csv.reader(StringIO(response.content))
rows = list(reader)
count = Variable.objects.all().count()
self.assertEqual(len(rows), count + 1) # +1 to account for the session header
def test_history_variables(self):
self.user.is_staff = True
self.user.save()
client = Client()
variable = self.session.record('testvar', "abcdef")
self.assertEqual(variable.session.id, self.session.id)
response = client.get('/hydroshare/tracking/reports/history/')
self.assertEqual(response.status_code, 200)
reader = csv.DictReader(StringIO(response.content))
rows = list(reader)
data = rows[-1]
self.assertEqual(int(data['session']), self.session.id)
self.assertEqual(int(data['visitor']), self.visitor.id)
self.assertEqual(data['variable'], "testvar")
self.assertEqual(data['value'], "abcdef")
def test_capture_logins_and_logouts(self):
self.assertEqual(Variable.objects.count(), 0)
client = Client()
client.login(username=self.user.username, password='password')
self.assertEqual(Variable.objects.count(), 2)
var1, var2 = Variable.objects.all()
kvp = dict(tuple(pair.split('=')) for pair in var1.value.split('|'))
self.assertEqual(var1.name, 'begin_session')
self.assertEqual(len(kvp.keys()), 3)
kvp = dict(tuple(pair.split('=')) for pair in var2.value.split('|'))
self.assertEqual(var2.name, 'login')
self.assertEqual(len(kvp.keys()), 3)
client.logout()
self.assertEqual(Variable.objects.count(), 3)
var = Variable.objects.latest('timestamp')
kvp = dict(tuple(pair.split('=')) for pair in var.value.split('|'))
self.assertEqual(var.name, 'logout')
self.assertEqual(len(kvp.keys()), 3)
def test_activity_parsing(self):
client = Client()
client.login(username=self.user.username, password='password')
self.assertEqual(Variable.objects.count(), 2)
var1, var2 = Variable.objects.all()
kvp = dict(tuple(pair.split('=')) for pair in var1.value.split('|'))
self.assertEqual(var1.name, 'begin_session')
self.assertEqual(len(kvp.keys()), 3)
client.logout()
class UtilsTests(TestCase):
def setUp(self):
self.user = User.objects.create(username='testuser', email='testuser@example.com')
self.user.set_password('password')
self.user.save()
self.visitor = Visitor.objects.create()
self.session = Session.objects.create(visitor=self.visitor)
# sample request with mocked ip address
self.request = HttpRequest()
self.request.META = {
'HTTP_X_FORWARDED_FOR': '192.168.255.182, 10.0.0.0, 127.0.0.1, 198.84.193.157, '
'177.139.233.139',
'HTTP_X_REAL_IP': '177.139.233.132',
'REMOTE_ADDR': '177.139.233.133',
}
def tearDown(self):
        self.user.delete()
def test_std_log_fields(self):
log_fields = utils.get_std_log_fields(self.request, self.session)
self.assertTrue(len(log_fields.keys()) == 3)
self.assertTrue('user_ip' in log_fields)
self.assertTrue('user_type' in log_fields)
self.assertTrue('user_email_domain' in log_fields)
def test_ishuman(self):
useragents = [
('Mozilla/5.0 (compatible; bingbot/2.0; '
'+http://www.bing.com/bingbot.htm)', False),
('Googlebot/2.1 (+http://www.googlebot.com/bot.html)', False),
('Mozilla/5.0 (compatible; Yahoo! Slurp; '
'http://help.yahoo.com/help/us/ysearch/slurp)', False),
('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) '
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36', True),
('Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; en-US) '
'AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27', True),
('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:21.0) Gecko/20100101 '
'Firefox/21.0', True),
('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2486.0 Safari/537.36 '
'Edge/13.10586', True)
]
for useragent, ishuman in useragents:
self.assertEqual(ishuman, utils.is_human(useragent))
def test_get_user_email(self):
# list of common and unusual valid email address formats
valid_emails = [
('email@example.com', 'com'),
('firstname.lastname@example.com', 'com'),
('firstname+lastname@example.com', 'com'),
('"email"@example.com', 'com'),
('1234567890@example.com', 'com'),
('email@example-one.com', 'com'),
('_______@example.com', 'com'),
('email@example.co.uk', 'co.uk'),
('firstname-lastname@example.com', 'com'),
('much."more\ unusual"@example.com', 'com'),
('very.unusual."@".unusual.com@example.com', 'com'),
('very."(),:;<>[]".VERY."very@\\ "very".unusual@strange.example.com', 'example.com')
]
# create session for each email and test email domain parsing
for email, dom in valid_emails:
user = User.objects.create(username='testuser1', email=email)
visitor = Visitor.objects.create()
visitor.user = user
session = Session.objects.create(visitor=visitor)
emaildom = utils.get_user_email_domain(session)
self.assertTrue(emaildom == dom)
user.delete()
def test_client_ip(self):
ip = utils.get_ip(self.request)
self.assertEqual(ip, "198.84.193.157")
def test_get_user_type(self):
user_types = ['Faculty', 'Researcher', 'Test', None]
for user_type in user_types:
self.user.userprofile.user_type = user_type
visitor = Visitor.objects.create()
visitor.user = self.user
session = Session.objects.create(visitor=visitor)
usrtype = utils.get_user_type(session)
self.assertTrue(user_type == usrtype)
del self.user.userprofile.user_type
visitor = Visitor.objects.create()
visitor.user = self.user
session = Session.objects.create(visitor=visitor)
usrtype = utils.get_user_type(session)
self.assertTrue(usrtype is None)
|
ResearchSoftwareInstitute/MyHPOM
|
hs_tracking/tests.py
|
Python
|
bsd-3-clause
| 15,515
|
# -*- coding: utf8 -*-
from flask_login import AnonymousUserMixin
from .interface import BUIhandler, BUIuser, BUIloader
from ...utils import __
import ssl
try:
from ldap3 import (
Server,
Connection,
Tls,
ALL,
RESTARTABLE,
AUTO_BIND_TLS_BEFORE_BIND,
AUTO_BIND_NONE,
SIMPLE,
)
except ImportError:
raise ImportError("Unable to load 'ldap3' module")
class LdapLoader(BUIloader):
"""The :class:`burpui.misc.auth.ldap.LdapLoader` handles searching for and
binding as a :class:`burpui.misc.auth.ldap.LdapUser` user.
"""
section = name = "LDAP:AUTH"
def __init__(self, app=None, handler=None):
""":func:`burpui.misc.auth.ldap.LdapLoader.__init__` establishes a
connection to the LDAP server.
:param app: Instance of the app we are running in
:type app: :class:`burpui.engines.server.BUIServer`
"""
self.app = app
conf = self.app.conf
handler.name = self.name
defaults = {
"LDAP:AUTH": {
"host": "localhost",
"port": None,
"encryption": None,
"binddn": None,
"bindpw": None,
"filter": None,
"base": None,
"searchattr": "uid",
"validate": "none",
"cafile": None,
}
}
mapping = {
"host": "host",
"port": "port",
"encryption": "encryption",
"filt": "filter",
"base": "base",
"attr": "searchattr",
"binddn": "binddn",
"bindpw": "bindpw",
"validate": "validate",
"cafile": "cafile",
}
conf.update_defaults(defaults)
# Maybe the handler argument is None, maybe the 'priority'
# option is missing. We don't care.
try:
handler.priority = (
conf.safe_get("priority", "integer", section=self.section)
or handler.priority
)
except:
pass
for (opt, key) in mapping.items():
setattr(self, opt, conf.safe_get(key, "force_string", section=self.section))
if self.validate and self.validate.lower() in ["none", "optional", "required"]:
self.validate = getattr(ssl, "CERT_{}".format(self.validate.upper()))
else:
self.validate = None
self.version = ssl.OP_NO_SSLv3
self.users = []
self.tls = None
self.ssl = False
self.auto_bind = AUTO_BIND_NONE
if self.encryption == "ssl":
self.ssl = True
elif self.encryption == "tls":
self.tls = Tls(
local_certificate_file=self.cafile,
validate=self.validate,
version=self.version,
)
self.auto_bind = AUTO_BIND_TLS_BEFORE_BIND
if self.port:
try:
self.port = int(self.port)
except ValueError:
self.logger.error("LDAP port must be a valid integer")
self.port = None
self.logger.info("LDAP host: {0}".format(self.host))
self.logger.info("LDAP port: {0}".format(self.port))
self.logger.info("LDAP encryption: {0}".format(self.encryption))
self.logger.info("LDAP filter: {0}".format(self.filt))
self.logger.info("LDAP base: {0}".format(self.base))
self.logger.info("LDAP search attr: {0}".format(self.attr))
self.logger.info("LDAP binddn: {0}".format(self.binddn))
self.logger.info("LDAP bindpw: {0}".format("*****" if self.bindpw else "None"))
self.logger.info("TLS object: {0}".format(self.tls))
try:
self.server = Server(
host=self.host,
port=self.port,
use_ssl=self.ssl,
get_info=ALL,
tls=self.tls,
)
self.logger.debug("LDAP Server = {0}".format(str(self.server)))
if self.binddn:
self.ldap = Connection(
self.server,
user=self.binddn,
password=self.bindpw,
raise_exceptions=True,
client_strategy=RESTARTABLE,
auto_bind=self.auto_bind,
authentication=SIMPLE,
)
else:
self.ldap = Connection(
self.server,
raise_exceptions=True,
client_strategy=RESTARTABLE,
auto_bind=self.auto_bind,
)
okay = False
with self.ldap:
self.logger.debug("LDAP Connection = {0}".format(str(self.ldap)))
self.logger.info("OK, connected to LDAP")
okay = True
if not okay:
raise Exception("Not connected")
self._prefetch()
except Exception as e:
self.logger.error("Could not connect to LDAP: {0}".format(str(e)))
self.server = None
self.ldap = None
def __exit__(self, exc_type, exc_value, traceback):
""":func:`burpui.misc.auth.ldap.LdapLoader.__exit__` closes the
connection to the LDAP server.
"""
if self.ldap and self.ldap.bound:
self.ldap.unbind()
def fetch(self, searchval=None, uniq=True):
""":func:`burpui.misc.auth.ldap.LdapLoader.fetch` searches for a user
object in the LDAP server.
:param searchval: attribute value to search for
:type searchval: str
:param uniq: only return one result
:type uniq: bool
:returns: dictionary of `distinguishedName` and `commonName` attributes for the
user if found, otherwise None.
"""
try:
if self.filt:
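                # a configured filter is expected to carry two placeholders:
                # {0} for the search attribute and {1} for the search value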
query = self.filt.format(self.attr, searchval)
else:
query = "({0}={1})".format(self.attr, searchval)
self.logger.info("filter: {0} | base: {1}".format(query, self.base))
r = None
with self.ldap:
self.logger.debug("LDAP Connection = {0}".format(str(self.ldap)))
self.ldap.search(self.base, query, attributes=["cn", self.attr])
r = self.ldap.response
if not r:
raise ValueError("no results")
except Exception as e:
self.logger.error("Ooops, LDAP lookup failed: {0}".format(str(e)))
return None
if not uniq:
return r
for record in r:
attrs = record["attributes"]
if self.attr in attrs and searchval in attrs[self.attr]:
self.logger.info("Found DN: {0}".format(record["dn"]))
return {"dn": record["dn"], "cn": attrs["cn"][0]}
def _prefetch(self):
"""Prefetch all users that match the filter/base"""
self.users = []
results = self.fetch("*", False) or []
for record in results:
attrs = record["attributes"]
if self.attr in attrs:
self.users.append(attrs[self.attr][0])
self.logger.debug(self.users)
def check(self, dn=None, passwd=None):
""":func:`burpui.misc.auth.ldap.LdapLoader.check` authenticates a user
against the LDAP server.
:param dn: canonical `dn` of the user to authenticate as
:type dn: str
:param passwd: password of the user to authenticate as
:type passwd: str
:returns: True if bind was successful, otherwise False
"""
try:
with Connection(
self.server,
user="{0}".format(dn),
password=passwd,
raise_exceptions=True,
auto_bind=self.auto_bind,
authentication=SIMPLE,
) as con:
self.logger.debug("LDAP Connection = {0}".format(str(con)))
self.logger.info("Bound as user: {0}".format(dn))
return con.bind()
except Exception as e:
self.logger.error(
"Failed to authenticate user: {0}, {1}".format(dn, str(e))
)
self.logger.error("Bind as '{0}' failed".format(dn))
return False
class UserHandler(BUIhandler):
__doc__ = __(
"Connects to a LDAP database to authenticate users. Handles "
"searching for and binding as."
)
priority = 50
preload_users = False
"""The :class:`burpui.misc.auth.ldap.UserHandler` class maintains a list of
``Burp-UI`` users.
"""
def __init__(self, app=None):
""":func:`burpui.misc.auth.ldap.UserHandler.__init__` creates the
handler instance
:param app: Instance of the app we are running in
:type app: :class:`burpui.engines.server.BUIServer`
"""
self.ldap = LdapLoader(app, self)
self.users = {}
def user(self, name=None):
"""See :func:`burpui.misc.auth.interface.BUIhandler.user`"""
if name not in self.users:
self.users[name] = LdapUser(self.ldap, name)
ret = self.users[name]
if not ret.active:
return AnonymousUserMixin()
return ret
@property
def loader(self):
return self.ldap
class LdapUser(BUIuser):
"""The :class:`burpui.misc.auth.ldap.LdapUser` class generates a ``Burp-UI``
user from a user object found in the LDAP server.
"""
def __init__(self, ldap=None, name=None):
""":func:`burpui.misc.auth.ldap.LdapUser.__init__` function finds a user
in the LDAP server and stores the DN of the user if found.
:param ldap: an ``LdapLoader`` instance
:type ldap: :class:`burpui.misc.auth.ldap.LdapLoader`
:param name: login name of the user to find in the LDAP server
:param type: str
"""
self.active = False
self.authenticated = False
self.ldap = ldap
self.name = name
self.backend = self.ldap.name
found = self.ldap.fetch(name)
if found:
self.id = found["dn"]
self.active = True
def login(self, passwd=None):
""":func:`burpui.misc.auth.ldap.LdapUser.login` function finds a user in
the LDAP server and authenticates that user using an LDAP bind.
:param passwd: password to bind to the LDAP server with
:type passwd: str
:returns: True if found and bind was successful;
False if found but bind failed;
otherwise de-activates the user and returns False
"""
if self.ldap.fetch(self.name):
self.authenticated = self.ldap.check(self.id, passwd)
return self.authenticated
else:
self.authenticated = False
self.active = False
return False
def get_id(self):
""":func:`burpui.misc.auth.ldap.LdapUser.get_id` function
:returns: login name of the user
"""
return self.name
|
ziirish/burp-ui
|
burpui/misc/auth/ldap.py
|
Python
|
bsd-3-clause
| 11,183
|
# -*- coding: utf-8 -*-
from cms.exceptions import NoPermissionsException
from cms.models import Page, PagePermission, GlobalPagePermission
from cms.plugin_pool import plugin_pool
from cms.utils import get_cms_setting
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.contrib.sites.models import Site
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
try:
from threading import local
except ImportError:
from django.utils._threading_local import local
# thread local support
_thread_locals = local()
def set_current_user(user):
"""
Assigns current user from request to thread_locals, used by
CurrentUserMiddleware.
"""
_thread_locals.user = user
def get_current_user():
"""
Returns current user, or None
"""
return getattr(_thread_locals, 'user', None)
def has_page_add_permission(request):
"""
    Return true if the current user has permission to add a new page. This is
    just used for general add buttons - only a superuser, or a user with can_add
    in a global page permission, can add a page.
    A special case occurs when a page is going to be added via the add page
    button in the change list - then we have target and position, so a check
    whether the user can add a page under the target page is performed.
"""
opts = Page._meta
if request.user.is_superuser:
return True
# if add under page
target = request.GET.get('target', None)
position = request.GET.get('position', None)
if target is not None:
try:
page = Page.objects.get(pk=target)
except Page.DoesNotExist:
return False
if (request.user.has_perm(opts.app_label + '.' + opts.get_add_permission()) and
has_global_page_permission(request, page.site_id, can_add=True)):
return True
if position in ("first-child", "last-child"):
return page.has_add_permission(request)
elif position in ("left", "right"):
if page.parent_id:
return has_generic_permission(page.parent_id, request.user, "add", page.site)
else:
from cms.utils.plugins import current_site
site = current_site(request)
if (request.user.has_perm(opts.app_label + '.' + opts.get_add_permission()) and
has_global_page_permission(request, site, can_add=True)):
return True
return False
def has_any_page_change_permissions(request):
from cms.utils.plugins import current_site
if not request.user.is_authenticated():
return False
return request.user.is_superuser or PagePermission.objects.filter(
page__site=current_site(request)
).filter((
Q(user=request.user) |
Q(group__in=request.user.groups.all())
)).exists()
def has_page_change_permission(request):
"""
Return true if the current user has permission to change this page.
To be granted this permission, you need the cms.change_page permission.
In addition, if CMS_PERMISSION is enabled you also need to either have
global can_change permission or just on this page.
"""
from cms.utils.plugins import current_site
opts = Page._meta
return request.user.is_superuser or (
request.user.has_perm(opts.app_label + '.' + opts.get_change_permission())
and (
not get_cms_setting('PERMISSION') or
has_global_page_permission(request, current_site(request),
can_change=True) or
has_any_page_change_permissions(request)))
def has_global_page_permission(request, site=None, **filters):
"""
A helper function to check for global page permissions for the current user
and site. Caches the result on a request basis, so multiple calls to this
function inside of one request/response cycle only generate one query.
:param request: the Request object
:param site: the Site object or ID
:param filters: queryset filters, e.g. ``can_add = True``
:return: ``True`` or ``False``
"""
if request.user.is_superuser:
return True
if not hasattr(request, '_cms_global_perms'):
request._cms_global_perms = {}
key = tuple((k, v) for k, v in filters.iteritems())
if site:
key = (('site', site.pk if hasattr(site, 'pk') else int(site)),) + key
if key not in request._cms_global_perms:
qs = GlobalPagePermission.objects.with_user(request.user).filter(**filters)
if site:
qs = qs.filter(Q(sites__in=[site]) | Q(sites__isnull=True))
request._cms_global_perms[key] = qs.exists()
return request._cms_global_perms[key]
def get_any_page_view_permissions(request, page):
"""
Used by the admin template tag is_restricted
"""
return PagePermission.objects.for_page(page=page).filter(can_view=True)
def get_user_permission_level(user):
"""
    Returns the highest user level from the page/permission hierarchy on which
    the user has can_change_permission. User groups are taken into account as
    well. A higher level corresponds to a lower number; users at the top of the
    hierarchy have level 0. The level matches the page.level attribute.
Example:
A,W level 0
/ \
user B,GroupE level 1
/ \
C,X D,Y,W level 2
    Users A and W have user level 0. GroupE and all of its users have user level 1.
    If user D is a member of GroupE, his user level will be 1, otherwise it is 2.
"""
if (user.is_superuser or
GlobalPagePermission.objects.with_can_change_permissions(user).exists()):
        # users at the top of the hierarchy
return 0
try:
permission = PagePermission.objects.with_can_change_permissions(user).order_by('page__level')[0]
except IndexError:
# user isn't assigned to any node
raise NoPermissionsException
return permission.page.level
def get_subordinate_users(user):
"""
    Returns a users queryset containing all users subordinate to the given user,
    including users created by the given user that are not assigned to any page.
    Unassigned users must be returned because they shouldn't get lost, and the
    user should still be able to see them.
    Only users created by the given user which are on the same or a lower level
    are returned.
    If the user has global permissions or is a superuser, he can see all users.
    This function is currently used in PagePermissionInlineAdminForm to limit
    the users offered in the permission combobox.
Example:
A,W level 0
/ \
user B,GroupE level 1
Z / \
C,X D,Y,W level 2
    Rules: W was created by the user; Z was created by the user but is not
    assigned to any page.
    Will return [user, C, X, D, Y, Z]. W was created by the user, but is also
    assigned to a higher level.
"""
# TODO: try to merge with PagePermissionManager.subordinate_to_user()
if user.is_superuser or \
GlobalPagePermission.objects.with_can_change_permissions(user):
return get_user_model().objects.all()
site = Site.objects.get_current()
page_id_allow_list = Page.permissions.get_change_permissions_id_list(user, site)
try:
user_level = get_user_permission_level(user)
except NoPermissionsException:
# no permission so only staff and no page permissions
qs = get_user_model().objects.distinct().filter(
Q(is_staff=True) &
Q(pageuser__created_by=user) &
Q(pagepermission__page=None)
)
qs = qs.exclude(pk=user.id).exclude(groups__user__pk=user.id)
return qs
# normal query
qs = get_user_model().objects.distinct().filter(
Q(is_staff=True) &
(Q(pagepermission__page__id__in=page_id_allow_list) & Q(pagepermission__page__level__gte=user_level))
| (Q(pageuser__created_by=user) & Q(pagepermission__page=None))
)
qs = qs.exclude(pk=user.id).exclude(groups__user__pk=user.id)
return qs
def get_subordinate_groups(user):
"""
Similar to get_subordinate_users, but returns queryset of Groups instead
of Users.
"""
if (user.is_superuser or
GlobalPagePermission.objects.with_can_change_permissions(user)):
return Group.objects.all()
site = Site.objects.get_current()
page_id_allow_list = Page.permissions.get_change_permissions_id_list(user, site)
try:
user_level = get_user_permission_level(user)
except NoPermissionsException:
# no permission no records
# page_id_allow_list is empty
qs = Group.objects.distinct().filter(
Q(pageusergroup__created_by=user) &
Q(pagepermission__page=None)
)
return qs
qs = Group.objects.distinct().filter(
(Q(pagepermission__page__id__in=page_id_allow_list) & Q(pagepermission__page__level__gte=user_level))
| (Q(pageusergroup__created_by=user) & Q(pagepermission__page=None))
)
return qs
def has_global_change_permissions_permission(request):
opts = GlobalPagePermission._meta
user = request.user
if user.is_superuser or (
user.has_perm(opts.app_label + '.' + opts.get_change_permission()) and
has_global_page_permission(request, can_change=True)):
return True
return False
def has_generic_permission(page_id, user, attr, site):
"""
Permission getter for single page with given id.
"""
func = getattr(Page.permissions, "get_%s_id_list" % attr)
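    # e.g. attr='add' resolves to Page.permissions.get_add_id_list(user, site)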
permission = func(user, site)
return permission == Page.permissions.GRANT_ALL or page_id in permission
def get_user_sites_queryset(user):
"""
    Returns a queryset of all sites available to the given user.
    1. For a superuser, always returns all sites.
    2. For a user with global permissions, returns all sites listed in his global
    page permissions together with any sites he is assigned to through a page.
    3. For a standard user, returns just the sites he is assigned to through pages.
"""
qs = Site.objects.all()
if user.is_superuser:
return qs
global_ids = GlobalPagePermission.objects.with_user(user).filter(
Q(can_add=True) | Q(can_change=True)
).values_list('id', flat=True)
query = Q()
if global_ids:
query = Q(globalpagepermission__id__in=global_ids)
        # has some global permissions assigned
if not qs.filter(query).exists():
            # has global permissions, but no sites are specified,
            # so he has access to all sites
return qs
# add some pages if he has permission to add / change them
query |= Q(Q(page__pagepermission__user=user) | Q(page__pagepermission__group__user=user)) & \
(Q(Q(page__pagepermission__can_add=True) | Q(page__pagepermission__can_change=True)))
return qs.filter(query).distinct()
def has_plugin_permission(user, plugin_type, permission_type):
"""
    Checks that a user has permission on the given plugin type to perform the
    action defined in permission_type.
    permission_type should be 'add', 'change' or 'delete'.
"""
plugin_class = plugin_pool.get_plugin(plugin_type)
plugin_model = plugin_class.model
plugin_opts = plugin_model._meta
return user.has_perm('%s.%s_%s' % (plugin_opts.app_label, permission_type,
plugin_opts.object_name.lower()))
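# Illustrative usage (not part of the original module): the permission string
# built above follows Django's "<app_label>.<action>_<modelname>" convention.
# For a hypothetical plugin whose model 'Teaser' lives in an app 'myplugins':
#
#     has_plugin_permission(request.user, 'TeaserPlugin', 'change')
#     # roughly equivalent to:
#     request.user.has_perm('myplugins.change_teaser')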
|
pixbuffer/django-cms
|
cms/utils/permissions.py
|
Python
|
bsd-3-clause
| 11,836
|
from django.conf.urls import include, url
from rest_framework.routers import SimpleRouter
from rest_framework_nested.routers import NestedSimpleRouter
from olympia.bandwagon.views import CollectionViewSet, CollectionAddonViewSet
from . import views
accounts = SimpleRouter()
accounts.register(r'account', views.AccountViewSet, base_name='account')
collections = NestedSimpleRouter(accounts, r'account', lookup='user')
collections.register(r'collections', CollectionViewSet,
base_name='collection')
sub_collections = NestedSimpleRouter(collections, r'collections',
lookup='collection')
sub_collections.register('addons', CollectionAddonViewSet,
base_name='collection-addon')
notifications = NestedSimpleRouter(accounts, r'account', lookup='user')
notifications.register(r'notifications', views.AccountNotificationViewSet,
base_name='notification')
urlpatterns = [
url(r'^authenticate/$', views.AuthenticateView.as_view(),
name='accounts.authenticate'),
url(r'^login/start/$',
views.LoginStartView.as_view(),
name='accounts.login_start'),
url(r'^session/$', views.SessionView.as_view(),
name='accounts.session'),
url(r'', include(accounts.urls)),
url(r'^profile/$', views.ProfileView.as_view(), name='account-profile'),
url(r'^super-create/$', views.AccountSuperCreate.as_view(),
name='accounts.super-create'),
url(r'', include(collections.urls)),
url(r'', include(sub_collections.urls)),
url(r'', include(notifications.urls)),
]
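# Rough sketch (not part of the original file) of the route shapes the routers
# above produce; any URL prefix this module is mounted under is omitted:
#
#   account/                                              -> AccountViewSet
#   account/{user_pk}/collections/                        -> CollectionViewSet
#   account/{user_pk}/collections/{collection_pk}/addons/ -> CollectionAddonViewSet
#   account/{user_pk}/notifications/                      -> AccountNotificationViewSet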
|
tsl143/addons-server
|
src/olympia/accounts/urls.py
|
Python
|
bsd-3-clause
| 1,619
|
cases = [
('pmt.py -s 1 -n 20 populations, first without state filter',
'pmt.py -s 1 -n 20 populations'),
('pmt.py -s 2 -n 20 populations filter3, state filter limits population to 3',
'pmt.py -s 2 -n 20 populations filter3')
]
|
nfredrik/pyModelStuff
|
samples/populations/test/test_filter.py
|
Python
|
bsd-3-clause
| 247
|
from PLC.Faults import *
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.Filter import Filter
from PLC.SliceTags import SliceTag, SliceTags
from PLC.Persons import Person, Persons
from PLC.Sites import Site, Sites
from PLC.Nodes import Nodes
from PLC.Slices import Slice, Slices
from PLC.Auth import Auth
class GetSliceTags(Method):
"""
Returns an array of structs containing details about slice and
sliver attributes. An attribute is a sliver attribute if the
node_id field is set. If slice_tag_filter is specified and
is an array of slice attribute identifiers, or a struct of slice
attribute attributes, only slice attributes matching the filter
will be returned. If return_fields is specified, only the
specified details will be returned.
Users may only query attributes of slices or slivers of which they
are members. PIs may only query attributes of slices or slivers at
their sites, or of which they are members. Admins may query
attributes of any slice or sliver.
"""
roles = ['admin', 'pi', 'user', 'node']
accepts = [
Auth(),
Mixed([SliceTag.fields['slice_tag_id']],
Filter(SliceTag.fields)),
Parameter([str], "List of fields to return", nullok = True)
]
returns = [SliceTag.fields]
def call(self, auth, slice_tag_filter = None, return_fields = None):
# If we are not admin, make sure to only return our own slice
# and sliver attributes.
# if isinstance(self.caller, Person) and \
# 'admin' not in self.caller['roles']:
# # Get slices that we are able to view
# valid_slice_ids = self.caller['slice_ids']
# if 'pi' in self.caller['roles'] and self.caller['site_ids']:
# sites = Sites(self.api, self.caller['site_ids'])
# for site in sites:
# valid_slice_ids += site['slice_ids']
# # techs can view all slices on the nodes at their site
# if 'tech' in self.caller['roles'] and self.caller['site_ids']:
# nodes = Nodes(self.api, {'site_id': self.caller['site_ids']}, ['site_id', 'slice_ids'])
# for node in nodes:
# valid_slice_ids.extend(node['slice_ids'])
#
# if not valid_slice_ids:
# return []
#
# # Get slice attributes that we are able to view
# valid_slice_tag_ids = []
# slices = Slices(self.api, valid_slice_ids)
# for slice in slices:
# valid_slice_tag_ids += slice['slice_tag_ids']
#
# if not valid_slice_tag_ids:
# return []
#
# if slice_tag_filter is None:
# slice_tag_filter = valid_slice_tag_ids
# Must query at least slice_tag_id (see below)
if return_fields is not None and 'slice_tag_id' not in return_fields:
return_fields.append('slice_tag_id')
added_fields = True
else:
added_fields = False
slice_tags = SliceTags(self.api, slice_tag_filter, return_fields)
# Filter out slice attributes that are not viewable
# if isinstance(self.caller, Person) and \
# 'admin' not in self.caller['roles']:
# slice_tags = [slice_tag for slice_tag in slice_tags if slice_tag['slice_tag_id'] in valid_slice_tag_ids]
# Remove slice_tag_id if not specified
if added_fields:
for slice_tag in slice_tags:
if 'slice_tag_id' in slice_tag:
del slice_tag['slice_tag_id']
return slice_tags
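# Illustrative XML-RPC calls (not part of the original file); auth is a
# placeholder dict and only fields referenced above (slice_tag_id, node_id)
# are used, so treat the filters as indicative rather than exhaustive:
#
#   GetSliceTags(auth)                         # every visible slice tag
#   GetSliceTags(auth, [1, 2, 3])              # by slice_tag_id list
#   GetSliceTags(auth, {'node_id': 42},        # sliver attributes on one node
#                ['slice_tag_id'])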
|
dreibh/planetlab-lxc-plcapi
|
PLC/Methods/GetSliceTags.py
|
Python
|
bsd-3-clause
| 3,652
|
""" Specify the NetworkNode with its action, context-menus """
# Copyright (C) 2009-2010, Ecole Polytechnique Federale de Lausanne (EPFL) and
# University Hospital Center and University of Lausanne (UNIL-CHUV)
#
# Modified BSD License
# Standard library imports
import os
# Enthought library imports
from traits.api import Instance, Str, Any
from traitsui.api import TreeNode
from traitsui.menu import Menu, Action, Separator
# ConnectomeViewer imports
from cviewer.plugins.cff2.cnetwork import CNetwork
# Logging import
import logging
logger = logging.getLogger('root.'+__name__)
class CNetworkTreeNode(TreeNode):
# The object that contains the container ;^)
parent = Any
# the network associated with this node
    node_for = [CNetwork]
    # default icons
    # Name of group item icon
    icon_group = Str('home.png')
    # Name of leaf item icon
    icon_item = Str('home.png')
    # Name of opened group item icon
    icon_open = Str('home.png')
    # labels
    label = 'dname'
###
# Private Traits
# activate / deactivate logic
# if the node is activated, this means that there exists a
# corresponding RenderManager instance
_ShowName = Instance(Action,
kw={'name': 'Show name',
'action': 'object.show_name',
'tooltip': 'Shows the network name'}, )
_ChangeParameters = Instance(Action,
kw={'name': 'Edge Parameters',
'action': 'object._edge_parameters',
'tooltip': 'Thresholding and Change Attributes',
'enabled_when' : 'object.loaded == True'}, )
_RenderMatrixAction = Instance(Action,
kw={'name': 'Connectome Matrix Viewer',
'action': 'object.invoke_matrix_viewer',
'tooltip':'View the connectivity matrices',
'enabled_when':'object.loaded == True'}, )
# the menu shown after right-click
menu = Instance(Menu, transient=True)
def get_children(self, object):
""" Get the object's children. """
pass
# Collate the window's views into categories.
#return object.surfaces + object.volumes + object.tracks
######################################################################
# Non-public interface
######################################################################
def _menu_default(self):
""" Standard menus for network nodes """
menu_actions = []
return Menu( *menu_actions)
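    # Sketch (not part of the original class): the Action traits defined above
    # are never added to the menu in this snippet; a fuller _menu_default would
    # presumably collect them, e.g.
    #
    #     menu_actions = [Separator(),
    #                     self._ShowName,
    #                     self._ChangeParameters,
    #                     self._RenderMatrixAction]
    #     return Menu(*menu_actions)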
|
LTS5/connectomeviewer
|
cviewer/plugins/cff2/ui/cnetwork_tree_node.py
|
Python
|
bsd-3-clause
| 2,792
|