repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
barbuza/django | refs/heads/master | django/db/models/signals.py | 399 | from django.apps import apps
from django.dispatch import Signal
from django.utils import six
class_prepared = Signal(providing_args=["class"])
class ModelSignal(Signal):
    """
    Signal subclass that allows the sender to be lazily specified as a string
    of the `app_label.ModelName` form.
    """
    def __init__(self, *args, **kwargs):
        super(ModelSignal, self).__init__(*args, **kwargs)
        # Maps (app_label, object_name) -> list of (receiver, weak,
        # dispatch_uid) tuples waiting for that model class to be prepared.
        self.unresolved_references = {}
        class_prepared.connect(self._resolve_references)

    def _resolve_references(self, sender, **kwargs):
        # Once a model class is prepared, attach any receivers that were
        # queued for it under its (app_label, object_name) key.
        meta = sender._meta
        pending = self.unresolved_references.pop(
            (meta.app_label, meta.object_name), None)
        if pending is not None:
            for receiver, weak, dispatch_uid in pending:
                super(ModelSignal, self).connect(
                    receiver, sender=sender, weak=weak, dispatch_uid=dispatch_uid
                )

    def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
        if isinstance(sender, six.string_types):
            pieces = sender.split('.')
            if len(pieces) != 2:
                raise ValueError(
                    "Specified sender must either be a model or a "
                    "model name of the 'app_label.ModelName' form."
                )
            app_label, model_name = pieces
            try:
                sender = apps.get_registered_model(app_label, model_name)
            except LookupError:
                # Model not registered yet: queue the connection and let
                # _resolve_references() finish it when the class is prepared.
                self.unresolved_references.setdefault(
                    (app_label, model_name), []
                ).append((receiver, weak, dispatch_uid))
                return
        super(ModelSignal, self).connect(
            receiver, sender=sender, weak=weak, dispatch_uid=dispatch_uid
        )
# Model lifecycle signals. These use ModelSignal so receivers may specify the
# sender lazily as an 'app_label.ModelName' string; use_caching=True enables
# per-sender receiver caching in the dispatcher.
pre_init = ModelSignal(providing_args=["instance", "args", "kwargs"], use_caching=True)
post_init = ModelSignal(providing_args=["instance"], use_caching=True)
pre_save = ModelSignal(providing_args=["instance", "raw", "using", "update_fields"],
    use_caching=True)
post_save = ModelSignal(providing_args=["instance", "raw", "created", "using", "update_fields"], use_caching=True)
pre_delete = ModelSignal(providing_args=["instance", "using"], use_caching=True)
post_delete = ModelSignal(providing_args=["instance", "using"], use_caching=True)
# Sent when a ManyToManyField on an instance is changed (add/remove/clear).
m2m_changed = ModelSignal(
    providing_args=["action", "instance", "reverse", "model", "pk_set", "using"],
    use_caching=True,
)
# Migration signals are plain Signals: their sender is not a model class.
pre_migrate = Signal(providing_args=["app_config", "verbosity", "interactive", "using"])
post_migrate = Signal(providing_args=["app_config", "verbosity", "interactive", "using"])
|
tima/ansible | refs/heads/devel | lib/ansible/module_utils/network/dellos6/dellos6.py | 10 | #
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
#
# Copyright (c) 2016 Dell Inc.
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import re
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.connection import exec_command
from ansible.module_utils.network.common.config import NetworkConfig, ConfigLine, ignore_line, DEFAULT_COMMENT_TOKENS
# Cache of device running-config output, keyed by the exact command string
# issued (see get_config()); avoids re-fetching within one module run.
_DEVICE_CONFIGS = {}
# Confirmation prompts the device may emit mid-command; load_config() sends
# these as expected prompts with a canned 'yes' answer.
WARNING_PROMPTS_RE = [
    r"[\r\n]?\[confirm yes/no\]:\s?$",
    r"[\r\n]?\[y/n\]:\s?$",
    r"[\r\n]?\[yes/no\]:\s?$"
]
# Connection options accepted inside the 'provider' dict argument.
dellos6_provider_spec = {
    'host': dict(),
    'port': dict(type='int'),
    'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
    'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
    'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
    'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
    'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']), no_log=True),
    'timeout': dict(type='int'),
}
# Shared argument-spec fragment that dellos6 modules merge into their own spec.
dellos6_argument_spec = {
    'provider': dict(type='dict', options=dellos6_provider_spec),
}
# Top-level copies of the provider options, kept for backwards compatibility
# and flagged for removal in Ansible 2.9.
dellos6_top_spec = {
    'host': dict(removed_in_version=2.9),
    'port': dict(removed_in_version=2.9, type='int'),
    'username': dict(removed_in_version=2.9),
    'password': dict(removed_in_version=2.9, no_log=True),
    'ssh_keyfile': dict(removed_in_version=2.9, type='path'),
    'authorize': dict(removed_in_version=2.9, type='bool'),
    'auth_pass': dict(removed_in_version=2.9, no_log=True),
    'timeout': dict(removed_in_version=2.9, type='int'),
}
dellos6_argument_spec.update(dellos6_top_spec)
def check_args(module, warnings):
    """Validate provider arguments; dellos6 needs no extra checks, so no-op."""
    pass
def get_config(module, flags=None):
    """Return the device running-config, fetching it once per unique command
    string and caching the result in _DEVICE_CONFIGS."""
    flag_list = flags if flags is not None else []
    cmd = ('show running-config ' + ' '.join(flag_list)).strip()
    try:
        return _DEVICE_CONFIGS[cmd]
    except KeyError:
        rc, out, err = exec_command(module, cmd)
        if rc != 0:
            module.fail_json(msg='unable to retrieve current config', stderr=to_text(err, errors='surrogate_or_strict'))
        config_text = to_text(out, errors='surrogate_or_strict').strip()
        _DEVICE_CONFIGS[cmd] = config_text
        return config_text
def to_commands(module, commands):
    """Normalize commands into command/prompt/answer dicts via ComplexList."""
    command_spec = {
        'command': dict(key=True),
        'prompt': dict(),
        'answer': dict()
    }
    return ComplexList(command_spec, module)(commands)
def run_commands(module, commands, check_rc=True):
    """Execute each command on the device and return the list of outputs.

    Fails the module when check_rc is True and a command returns non-zero.
    """
    output = []
    for item in to_commands(module, to_list(commands)):
        rc, out, err = exec_command(module, module.jsonify(item))
        if check_rc and rc != 0:
            module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), rc=rc)
        output.append(to_text(out, errors='surrogate_or_strict'))
    return output
def load_config(module, commands):
    """Enter configuration mode, push each command (answering confirmation
    prompts with 'yes'), then leave configuration mode."""
    rc, out, err = exec_command(module, 'configure terminal')
    if rc != 0:
        module.fail_json(msg='unable to enter configuration mode', err=to_text(err, errors='surrogate_or_strict'))
    for line in to_list(commands):
        # 'end' is issued explicitly below; skip it if present in the input.
        if line == 'end':
            continue
        rc, out, err = exec_command(module, module.jsonify(
            {'command': line, 'prompt': WARNING_PROMPTS_RE, 'answer': 'yes'}))
        if rc != 0:
            module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), command=line, rc=rc)
    exec_command(module, 'end')
def get_sublevel_config(running_config, module):
    """Build a Dellos6NetworkConfig holding the raw child lines found under
    module.params['parents'] in the given running config."""
    sublevel_config = Dellos6NetworkConfig(indent=0)
    obj = running_config.get_object(module.params['parents'])
    children = obj._children if obj else []
    raw_lines = [child.raw for child in children if isinstance(child, ConfigLine)]
    sublevel_config.add(raw_lines, module.params['parents'])
    return sublevel_config
def os6_parse(lines, indent=None, comment_tokens=None):
    """Parse Dell OS6 configuration text into a flat list of ConfigLine
    objects with parent/child links.

    OS6 does not indent sub-mode commands; instead a sub-mode is opened by
    one of the commands matched below and closed by a literal 'exit'.  The
    parser therefore tracks nesting with explicit parent/children stacks
    rather than by indentation (the `indent` parameter is unused here).
    """
    # Commands that open a configuration sub-mode (a "parent" context).
    sublevel_cmds = [
        re.compile(r'^vlan.*$'),
        re.compile(r'^stack.*$'),
        re.compile(r'^interface.*$'),
        re.compile(r'datacenter-bridging.*$'),
        re.compile(r'line (console|telnet|ssh).*$'),
        re.compile(r'ip ssh !(server).*$'),
        re.compile(r'ip (dhcp|vrf).*$'),
        re.compile(r'(ip|mac|management|arp) access-list.*$'),
        re.compile(r'ipv6 (dhcp|router).*$'),
        re.compile(r'mail-server.*$'),
        re.compile(r'vpc domain.*$'),
        re.compile(r'router.*$'),
        re.compile(r'route-map.*$'),
        re.compile(r'policy-map.*$'),
        re.compile(r'class-map match-all.*$'),
        re.compile(r'captive-portal.*$'),
        re.compile(r'admin-profile.*$'),
        re.compile(r'link-dependency group.*$'),
        re.compile(r'banner motd.*$'),
        re.compile(r'openflow.*$'),
        re.compile(r'support-assist.*$'),
        re.compile(r'template.*$'),
        re.compile(r'address-family.*$'),
        re.compile(r'spanning-tree mst configuration.*$'),
        re.compile(r'logging.*$'),
        re.compile(r'(radius-server|tacacs-server) host.*$')]
    # A bare 'exit' closes the innermost open sub-mode.
    childline = re.compile(r'^exit$')
    config = list()     # all ConfigLine objects, in input order
    parent = list()     # stack of currently open parent ConfigLines
    children = []       # per-nesting-level lists of child ConfigLines
    parent_match = False
    for line in str(lines).split('\n'):
        # Strip {};-style noise before matching; keep the raw line for output.
        text = str(re.sub(r'([{};])', '', line)).strip()
        cfg = ConfigLine(text)
        cfg.raw = line
        if not text or ignore_line(text, comment_tokens):
            # Blank/comment lines reset the open context.
            parent = list()
            children = []
            continue
        else:
            parent_match = False
            # handle sublevel parent
            for pr in sublevel_cmds:
                if pr.match(line):
                    if len(parent) != 0:
                        # Nested parent: record the enclosing chain.
                        cfg._parents.extend(parent)
                    parent.append(cfg)
                    config.append(cfg)
                    if children:
                        # Open a fresh child bucket for the new level and
                        # register this parent as a child of the level above.
                        children.insert(len(parent) - 1, [])
                        children[len(parent) - 2].append(cfg)
                    parent_match = True
                    # NOTE(review): 'continue' only skips to the next regex;
                    # a line matching several patterns is processed once per
                    # match — presumably harmless in practice, but confirm.
                    continue
            # handle exit
            if childline.match(line):
                if children:
                    # Attach the collected children to the parent being
                    # closed, then fold them into the enclosing level.
                    parent[len(children) - 1]._children.extend(children[len(children) - 1])
                    if len(children) > 1:
                        parent[len(children) - 2]._children.extend(parent[len(children) - 1]._children)
                    cfg._parents.extend(parent)
                    children.pop()
                    parent.pop()
                if not children:
                    # Innermost level closed: reset the context entirely.
                    children = list()
                    if parent:
                        cfg._parents.extend(parent)
                    parent = list()
                config.append(cfg)
            # handle sublevel children
            elif parent_match is False and len(parent) > 0:
                if not children:
                    cfglist = [cfg]
                    children.append(cfglist)
                else:
                    children[len(parent) - 1].append(cfg)
                cfg._parents.extend(parent)
                config.append(cfg)
            # handle global commands
            elif not parent:
                config.append(cfg)
    return config
class Dellos6NetworkConfig(NetworkConfig):
    """NetworkConfig specialized for Dell OS6, whose sub-modes are delimited
    by explicit 'exit' lines instead of indentation."""

    def load(self, contents):
        # Use the OS6-specific parser instead of the base class's
        # indentation-driven one.
        self._items = os6_parse(contents, self._indent, DEFAULT_COMMENT_TOKENS)

    def _diff_line(self, other, path=None):
        """Return the lines of self that are missing from other, carrying
        along 'exit' lines that close an already-diffed block."""
        diff = list()
        for item in self.items:
            if str(item) == "exit":
                # Keep this 'exit' only if some already-collected line shares
                # its parent chain (or is a top-level line) — i.e. the block
                # it closes is itself part of the diff.
                for diff_item in diff:
                    if diff_item._parents:
                        if item._parents == diff_item._parents:
                            diff.append(item)
                            break
                    else:
                        diff.append(item)
                        break
            elif item not in other:
                diff.append(item)
        return diff
|
uberamd/NGECore2 | refs/heads/master | scripts/object/draft_schematic/armor/component/armor_core_battle_basic.py | 85615 | import sys
def setup(core, object):
return |
ProjectSWGCore/NGECore2 | refs/heads/master | scripts/object/mobile/dressed_assassin_mission_giver_reb_01.py | 85615 | import sys
def setup(core, object):
return |
Hakuba/youtube-dl | refs/heads/master | youtube_dl/extractor/collegerama.py | 17 | from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import (
float_or_none,
int_or_none,
sanitized_Request,
)
class CollegeRamaIE(InfoExtractor):
    """Extractor for collegerama.tudelft.nl Mediasite presentations."""

    _VALID_URL = r'https?://collegerama\.tudelft\.nl/Mediasite/Play/(?P<id>[\da-f]+)'
    _TESTS = [
        {
            'url': 'https://collegerama.tudelft.nl/Mediasite/Play/585a43626e544bdd97aeb71a0ec907a01d',
            'md5': '481fda1c11f67588c0d9d8fbdced4e39',
            'info_dict': {
                'id': '585a43626e544bdd97aeb71a0ec907a01d',
                'ext': 'mp4',
                'title': 'Een nieuwe wereld: waarden, bewustzijn en techniek van de mensheid 2.0.',
                'description': '',
                'thumbnail': 're:^https?://.*\.jpg$',
                'duration': 7713.088,
                'timestamp': 1413309600,
                'upload_date': '20141014',
            },
        },
        {
            'url': 'https://collegerama.tudelft.nl/Mediasite/Play/86a9ea9f53e149079fbdb4202b521ed21d?catalog=fd32fd35-6c99-466c-89d4-cd3c431bc8a4',
            'md5': 'ef1fdded95bdf19b12c5999949419c92',
            'info_dict': {
                'id': '86a9ea9f53e149079fbdb4202b521ed21d',
                'ext': 'wmv',
                'title': '64ste Vakantiecursus: Afvalwater',
                'description': 'md5:7fd774865cc69d972f542b157c328305',
                'duration': 10853,
                'timestamp': 1326446400,
                'upload_date': '20120113',
            },
        },
    ]

    def _real_extract(self, url):
        """Query the Mediasite player-options JSON service and assemble the
        info dict for the requested presentation."""
        video_id = self._match_id(url)

        payload = {
            "getPlayerOptionsRequest": {
                "ResourceId": video_id,
                "QueryString": "",
            }
        }
        request = sanitized_Request(
            'http://collegerama.tudelft.nl/Mediasite/PlayerService/PlayerService.svc/json/GetPlayerOptions',
            json.dumps(payload))
        request.add_header('Content-Type', 'application/json')
        options = self._download_json(request, video_id)

        presentation = options['d']['Presentation']
        title = presentation['Title']
        description = presentation.get('Description')
        thumbnail = None
        # Duration/UnixTime are scaled down by 1000 by the helpers.
        duration = float_or_none(presentation.get('Duration'), 1000)
        timestamp = int_or_none(presentation.get('UnixTime'), 1000)

        formats = []
        for stream in presentation['Streams']:
            for video in stream['VideoUrls']:
                # The last stream with a thumbnail wins.
                thumb = stream.get('ThumbnailUrl')
                if thumb:
                    thumbnail = 'http://collegerama.tudelft.nl' + thumb
                media_type = video['MediaType']
                # Skip 'SS' entries — presumably slide streams; confirm.
                if media_type == 'SS':
                    continue
                formats.append({
                    'url': video['Location'],
                    'format_id': media_type,
                })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'timestamp': timestamp,
            'formats': formats,
        }
|
cchamberlain/gyp | refs/heads/master | test/compiler-override/my_cxx.py | 980 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys

# Echo the arguments this fake compiler was invoked with, so the gyp
# compiler-override test can assert on them.  The parenthesized print is a
# no-op change under Python 2 (parentheses around a single expression) and
# makes the script valid under Python 3 as well.
print(sys.argv)
|
TobiasFredersdorf/RIOT | refs/heads/master | tests/lwip_sock_ip/tests/01-run.py | 21 | #!/usr/bin/env python3
# Copyright (C) 2016 Kaspar Schleiser <kaspar@schleiser.de>
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import os
import sys
from datetime import datetime
sys.path.append(os.path.join(os.environ['RIOTBASE'], 'dist/tools/testrunner'))
import testrunner
class InvalidTimeout(Exception):
    """Raised when a measured receive timeout deviates from the expected one."""
def _ipv6_tests(code):
return code & (1 << 6)
def _ipv4_tests(code):
return code & (1 << 4)
def _check_timeout(child):
    """Measure the duration of the ETIMEDOUT sub-test and verify it lies
    within 5% of the timeout value the firmware reports.

    Raises InvalidTimeout on deviation; prints a confirmation otherwise.
    """
    child.match  # get to ensure program reached that point
    start = datetime.now()
    child.expect_exact(u" * Calling sock_ip_recv()")
    child.expect(u" \\* \\(timed out with timeout (\\d+)\\)")
    exp_diff = int(child.match.group(1))
    stop = datetime.now()
    diff = (stop - start)
    diff = (diff.seconds * 1000000) + diff.microseconds
    # fail within 5% of expected
    if diff > (exp_diff + (exp_diff * 0.05)) or \
            diff < (exp_diff - (exp_diff * 0.05)):
        raise InvalidTimeout("Invalid timeout %d (expected %d)" % (diff, exp_diff))
    else:
        print("Timed out correctly: %d (expected %d)" % (diff, exp_diff))


def _expect_family_tests(child, fam):
    """Expect the full create/recv/send test sequence for one address family.

    fam is "4" or "6"; the two sequences printed by the firmware are
    identical except for that digit, so one expectation list serves both.
    """
    for name in ("EAFNOSUPPORT", "EINVAL_addr", "EINVAL_netif",
                 "no_endpoints", "only_local", "only_local_reuse_ep",
                 "only_remote", "full"):
        child.expect_exact(u"Calling test_sock_ip_create%s__%s()" % (fam, name))
    for name in ("EADDRNOTAVAIL", "EAGAIN", "ENOBUFS", "ETIMEDOUT"):
        child.expect_exact(u"Calling test_sock_ip_recv%s__%s()" % (fam, name))
    # The ETIMEDOUT test's timing is verified on the host side.
    _check_timeout(child)
    for name in ("socketed", "socketed_with_remote", "unsocketed",
                 "unsocketed_with_remote", "with_timeout", "non_blocking"):
        child.expect_exact(u"Calling test_sock_ip_recv%s__%s()" % (fam, name))
    for name in ("EAFNOSUPPORT", "EINVAL_addr", "EINVAL_netif", "ENOTCONN",
                 "socketed_no_local_no_netif", "socketed_no_netif",
                 "socketed_no_local", "socketed", "socketed_other_remote",
                 "unsocketed_no_local_no_netif", "unsocketed_no_netif",
                 "unsocketed_no_local", "unsocketed", "no_sock_no_netif",
                 "no_sock"):
        child.expect_exact(u"Calling test_sock_ip_send%s__%s()" % (fam, name))


def testfunc(child):
    """Drive the lwip_sock_ip firmware: read its capability code word, then
    expect the IPv4 and/or IPv6 test sequences it advertises."""
    child.expect(u"code (0x[0-9a-f]{2})")
    code = int(child.match.group(1), base=16)
    if _ipv4_tests(code):
        _expect_family_tests(child, "4")
    if _ipv6_tests(code):
        _expect_family_tests(child, "6")
    child.expect_exact(u"ALL TESTS SUCCESSFUL")
# Run under the RIOT testrunner when invoked directly; its exit status is
# propagated to the shell.
if __name__ == "__main__":
    sys.exit(testrunner.run(testfunc))
|
2014c2g9/c2g9 | refs/heads/master | wsgi/static/reeborg/src/libraries/brython/Lib/importlib/_bootstrap.py | 101 | """Core implementation of import.
This module is NOT meant to be directly imported! It has been designed such
that it can be bootstrapped into Python as the implementation of import. As
such it requires the injection of specific modules and attributes in order to
work. One should use importlib as the public-facing version of this module.
"""
#
# IMPORTANT: Whenever making changes to this module, be sure to run
# a top-level make in order to get the frozen version of the module
# update. Not doing so, will result in the Makefile to fail for
# all others who don't have a ./python around to freeze the module
# in the early stages of compilation.
#
# See importlib._setup() for what is injected into the global namespace.
# When editing this code be aware that code executed at import time CANNOT
# reference any injected objects! This includes not only global code but also
# anything specified at the class level.
# XXX Make sure all public names have no single leading underscore and all
# others do.
# Bootstrap-related code ######################################################
_CASE_INSENSITIVE_PLATFORMS = 'win', 'cygwin', 'darwin'
def _make_relax_case():
if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS):
def _relax_case():
"""True if filenames must be checked case-insensitively."""
return b'PYTHONCASEOK' in _os.environ
else:
def _relax_case():
"""True if filenames must be checked case-insensitively."""
return False
return _relax_case
# TODO: Expose from marshal
def _w_long(x):
"""Convert a 32-bit integer to little-endian.
XXX Temporary until marshal's long functions are exposed.
"""
x = int(x)
int_bytes = []
int_bytes.append(x & 0xFF)
int_bytes.append((x >> 8) & 0xFF)
int_bytes.append((x >> 16) & 0xFF)
int_bytes.append((x >> 24) & 0xFF)
return bytearray(int_bytes)
# TODO: Expose from marshal
def _r_long(int_bytes):
"""Convert 4 bytes in little-endian to an integer.
XXX Temporary until marshal's long function are exposed.
"""
x = int_bytes[0]
x |= int_bytes[1] << 8
x |= int_bytes[2] << 16
x |= int_bytes[3] << 24
return x
def _path_join(*path_parts):
    """Replacement for os.path.join()."""
    pieces = []
    for part in path_parts:
        # Empty components contribute nothing.
        if not part:
            continue
        pieces.append(part)
        # Append a separator unless the part already ends with one.
        if part[-1] not in path_separators:
            pieces.append(path_sep)
    # The final element is a trailing separator (or, NOTE(review): the last
    # part itself when it ends in a separator — callers never pass such
    # parts); drop it before joining.
    return ''.join(pieces[:-1])
def _path_split(path):
    """Replacement for os.path.split()."""
    # Find the last separator actually used in the path; fall back to the
    # platform default when none is present.
    sep = path_sep
    for ch in reversed(path):
        if ch in path_separators:
            sep = ch
            break
    front, _, tail = path.rpartition(sep)
    return front, tail
def _path_is_mode_type(path, mode):
    """Test whether the path is the specified mode type."""
    try:
        st = _os.stat(path)
    except OSError:
        # Nonexistent or inaccessible paths are simply "not that type".
        return False
    # Compare only the file-type bits (the S_IFMT mask) of st_mode.
    return (st.st_mode & 0o170000) == mode
# XXX Could also expose Modules/getpath.c:isfile()
def _path_isfile(path):
    """Replacement for os.path.isfile."""
    # 0o100000 is S_IFREG (regular file).
    return _path_is_mode_type(path, 0o100000)
# XXX Could also expose Modules/getpath.c:isdir()
def _path_isdir(path):
    """Replacement for os.path.isdir."""
    # An empty path means the current working directory.
    target = path if path else _os.getcwd()
    # 0o040000 is S_IFDIR (directory).
    return _path_is_mode_type(target, 0o040000)
def _write_atomic(path, data, mode=0o666):
    """Best-effort function to write data to a path atomically.
    Be prepared to handle a FileExistsError if concurrent writing of the
    temporary file is attempted."""
    # id() is used to generate a pseudo-random filename.
    path_tmp = '{}.{}'.format(path, id(path))
    # O_EXCL makes the open fail rather than reuse a concurrent writer's
    # temporary file; mode is clamped to the permission bits.
    fd = _os.open(path_tmp,
                  _os.O_EXCL | _os.O_CREAT | _os.O_WRONLY, mode & 0o666)
    try:
        # We first write data to a temporary file, and then use os.replace() to
        # perform an atomic rename.
        with _io.FileIO(fd, 'wb') as file:
            file.write(data)
        _os.replace(path_tmp, path)
    except OSError:
        # On any failure, best-effort removal of the temporary file before
        # re-raising the original error.
        try:
            _os.unlink(path_tmp)
        except OSError:
            pass
        raise
def _wrap(new, old):
"""Simple substitute for functools.update_wrapper."""
for replace in ['__module__', '__name__', '__qualname__', '__doc__']:
if hasattr(old, replace):
setattr(new, replace, getattr(old, replace))
new.__dict__.update(old.__dict__)
# The code-object type, captured from a known function's __code__.
_code_type = type(_wrap.__code__)
def new_module(name):
    """Create a new module.

    The module is not entered into sys.modules.

    """
    # _io is an injected module object, so type(_io) is the module type;
    # calling it produces a fresh, empty module with the given name.
    return type(_io)(name)
# Module-level locking ########################################################
# A dict mapping module names to weakrefs of _ModuleLock instances
_module_locks = {}
# A dict mapping thread ids to _ModuleLock instances
_blocking_on = {}
class _DeadlockError(RuntimeError):
pass
class _ModuleLock:
    """A recursive lock implementation which is able to detect deadlocks
    (e.g. thread 1 trying to take locks A then B, and thread 2 trying to
    take locks B then A).
    """

    def __init__(self, name):
        # 'lock' guards the mutable state below; 'wakeup' is what waiting
        # threads block on until release() lets one through.
        self.lock = _thread.allocate_lock()
        self.wakeup = _thread.allocate_lock()
        self.name = name      # module name this lock protects
        self.owner = None     # thread id of the current owner, or None
        self.count = 0        # recursive acquisition count
        self.waiters = 0      # number of threads blocked in acquire()

    def has_deadlock(self):
        # Deadlock avoidance for concurrent circular imports.
        # Walk the owner chain through _blocking_on: if following
        # "who is the owner waiting on" leads back to this thread,
        # acquiring would deadlock.
        me = _thread.get_ident()
        tid = self.owner
        while True:
            lock = _blocking_on.get(tid)
            if lock is None:
                return False
            tid = lock.owner
            if tid == me:
                return True

    def acquire(self):
        """
        Acquire the module lock.  If a potential deadlock is detected,
        a _DeadlockError is raised.
        Otherwise, the lock is always acquired and True is returned.
        """
        tid = _thread.get_ident()
        # Advertise what this thread is blocked on, for has_deadlock().
        _blocking_on[tid] = self
        try:
            while True:
                with self.lock:
                    # Free, or re-entrant acquisition by the owner.
                    if self.count == 0 or self.owner == tid:
                        self.owner = tid
                        self.count += 1
                        return True
                    if self.has_deadlock():
                        raise _DeadlockError("deadlock detected by %r" % self)
                    if self.wakeup.acquire(False):
                        self.waiters += 1
                # Wait for a release() call
                self.wakeup.acquire()
                self.wakeup.release()
        finally:
            del _blocking_on[tid]

    def release(self):
        tid = _thread.get_ident()
        with self.lock:
            # Only the owning thread may release, and only a held lock.
            if self.owner != tid:
                raise RuntimeError("cannot release un-acquired lock")
            assert self.count > 0
            self.count -= 1
            if self.count == 0:
                self.owner = None
                if self.waiters:
                    # Hand the wakeup lock to one blocked acquirer.
                    self.waiters -= 1
                    self.wakeup.release()

    def __repr__(self):
        return "_ModuleLock(%r) at %d" % (self.name, id(self))
class _DummyModuleLock:
"""A simple _ModuleLock equivalent for Python builds without
multi-threading support."""
def __init__(self, name):
self.name = name
self.count = 0
def acquire(self):
self.count += 1
return True
def release(self):
if self.count == 0:
raise RuntimeError("cannot release un-acquired lock")
self.count -= 1
def __repr__(self):
return "_DummyModuleLock(%r) at %d" % (self.name, id(self))
# The following two functions are for consumption by Python/import.c.
def _get_module_lock(name):
    """Get or create the module lock for a given module name.

    Should only be called with the import lock taken."""
    lock = None
    try:
        # Entries are weakrefs; calling one yields the lock, or None if it
        # has already been garbage-collected.
        lock = _module_locks[name]()
    except KeyError:
        pass
    if lock is None:
        if _thread is None:
            # No threading support: a simple counting lock suffices.
            lock = _DummyModuleLock(name)
        else:
            lock = _ModuleLock(name)
        def cb(_):
            # Drop the registry entry once the lock itself is collected.
            del _module_locks[name]
        _module_locks[name] = _weakref.ref(lock, cb)
    return lock
def _lock_unlock_module(name):
    """Release the global import lock, and acquire then release the
    module lock for a given module name.

    This is used to ensure a module is completely initialized, in the
    event it is being imported by another thread.

    Should only be called with the import lock taken."""
    lock = _get_module_lock(name)
    # Drop the global import lock so the other importing thread can finish.
    _imp.release_lock()
    try:
        # Blocks until the thread currently importing the module releases it.
        lock.acquire()
    except _DeadlockError:
        # Concurrent circular import, we'll accept a partially initialized
        # module object.
        pass
    else:
        lock.release()
# Frame stripping magic ###############################################
def _call_with_frames_removed(f, *args, **kwds):
"""remove_importlib_frames in import.c will always remove sequences
of importlib frames that end with a call to this function
Use it instead of a normal call in places where including the importlib
frames introduces unwanted noise into the traceback (e.g. when executing
module code)
"""
return f(*args, **kwds)
# Finder/loader utility code ###############################################
"""Magic word to reject .pyc files generated by other Python versions.
It should change for each incompatible change to the bytecode.
The value of CR and LF is incorporated so if you ever read or write
a .pyc file in text mode the magic number will be wrong; also, the
Apple MPW compiler swaps their values, botching string constants.
The magic numbers must be spaced apart at least 2 values, as the
-U interpeter flag will cause MAGIC+1 being used. They have been
odd numbers for some time now.
There were a variety of old schemes for setting the magic number.
The current working scheme is to increment the previous value by
10.
Starting with the adoption of PEP 3147 in Python 3.2, every bump in magic
number also includes a new "magic tag", i.e. a human readable string used
to represent the magic number in __pycache__ directories. When you change
the magic number, you must also set a new unique magic tag. Generally this
can be named after the Python major version of the magic number bump, but
it can really be anything, as long as it's different than anything else
that's come before. The tags are included in the following table, starting
with Python 3.2a0.
Known values:
Python 1.5: 20121
Python 1.5.1: 20121
Python 1.5.2: 20121
Python 1.6: 50428
Python 2.0: 50823
Python 2.0.1: 50823
Python 2.1: 60202
Python 2.1.1: 60202
Python 2.1.2: 60202
Python 2.2: 60717
Python 2.3a0: 62011
Python 2.3a0: 62021
Python 2.3a0: 62011 (!)
Python 2.4a0: 62041
Python 2.4a3: 62051
Python 2.4b1: 62061
Python 2.5a0: 62071
Python 2.5a0: 62081 (ast-branch)
Python 2.5a0: 62091 (with)
Python 2.5a0: 62092 (changed WITH_CLEANUP opcode)
Python 2.5b3: 62101 (fix wrong code: for x, in ...)
Python 2.5b3: 62111 (fix wrong code: x += yield)
Python 2.5c1: 62121 (fix wrong lnotab with for loops and
storing constants that should have been removed)
Python 2.5c2: 62131 (fix wrong code: for x, in ... in listcomp/genexp)
Python 2.6a0: 62151 (peephole optimizations and STORE_MAP opcode)
Python 2.6a1: 62161 (WITH_CLEANUP optimization)
Python 3000: 3000
3010 (removed UNARY_CONVERT)
3020 (added BUILD_SET)
3030 (added keyword-only parameters)
3040 (added signature annotations)
3050 (print becomes a function)
3060 (PEP 3115 metaclass syntax)
3061 (string literals become unicode)
3071 (PEP 3109 raise changes)
3081 (PEP 3137 make __file__ and __name__ unicode)
3091 (kill str8 interning)
3101 (merge from 2.6a0, see 62151)
3103 (__file__ points to source file)
Python 3.0a4: 3111 (WITH_CLEANUP optimization).
Python 3.0a5: 3131 (lexical exception stacking, including POP_EXCEPT)
Python 3.1a0: 3141 (optimize list, set and dict comprehensions:
change LIST_APPEND and SET_ADD, add MAP_ADD)
Python 3.1a0: 3151 (optimize conditional branches:
introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE)
Python 3.2a0: 3160 (add SETUP_WITH)
tag: cpython-32
Python 3.2a1: 3170 (add DUP_TOP_TWO, remove DUP_TOPX and ROT_FOUR)
tag: cpython-32
Python 3.2a2 3180 (add DELETE_DEREF)
Python 3.3a0 3190 __class__ super closure changed
Python 3.3a0 3200 (__qualname__ added)
3210 (added size modulo 2**32 to the pyc header)
Python 3.3a1 3220 (changed PEP 380 implementation)
Python 3.3a4 3230 (revert changes to implicit __class__ closure)
MAGIC must change whenever the bytecode emitted by the compiler may no
longer be understood by older implementations of the eval loop (usually
due to the addition of new opcodes).
"""
# Bytecode magic number: 3230 with '\r' and '\n' folded into the high bytes
# so a .pyc read or written in text mode is detected as corrupt (see the
# commentary above).
_RAW_MAGIC_NUMBER = 3230 | ord('\r') << 16 | ord('\n') << 24
# The same number serialized as 4 little-endian bytes, as stored at the head
# of every .pyc/.pyo file.
_MAGIC_BYTES = bytes(_RAW_MAGIC_NUMBER >> n & 0xff for n in range(0, 25, 8))
# Directory name for PEP 3147 bytecode caches.
_PYCACHE = '__pycache__'
SOURCE_SUFFIXES = ['.py']  # _setup() adds .pyw as needed.
DEBUG_BYTECODE_SUFFIXES = ['.pyc']
OPTIMIZED_BYTECODE_SUFFIXES = ['.pyo']
def cache_from_source(path, debug_override=None):
    """Given the path to a .py file, return the path to its .pyc/.pyo file.

    The .py file does not need to exist; this simply returns the path to the
    .pyc/.pyo file calculated as if the .py file were imported.  The extension
    will be .pyc unless sys.flags.optimize is non-zero, then it will be .pyo.
    If debug_override is not None, then it must be a boolean and is used in
    place of sys.flags.optimize.
    If sys.implementation.cache_tag is None then NotImplementedError is raised.
    """
    if debug_override is None:
        debug = not sys.flags.optimize
    else:
        debug = debug_override
    suffixes = DEBUG_BYTECODE_SUFFIXES if debug else OPTIMIZED_BYTECODE_SUFFIXES
    directory, source_name = _path_split(path)
    stem, dot, _ = source_name.partition('.')
    cache_tag = sys.implementation.cache_tag
    if cache_tag is None:
        raise NotImplementedError('sys.implementation.cache_tag is None')
    # PEP 3147 layout: dir/__pycache__/stem.<tag>.pyc
    cached_name = ''.join([stem, dot, cache_tag, suffixes[0]])
    return _path_join(directory, _PYCACHE, cached_name)
def source_from_cache(path):
    """Given the path to a .pyc./.pyo file, return the path to its .py file.

    The .pyc/.pyo file does not need to exist; this simply returns the path to
    the .py file calculated to correspond to the .pyc/.pyo file.  If path does
    not conform to PEP 3147 format, ValueError will be raised.  If
    sys.implementation.cache_tag is None then NotImplementedError is raised.
    """
    if sys.implementation.cache_tag is None:
        raise NotImplementedError('sys.implementation.cache_tag is None')
    outer, cache_name = _path_split(path)
    outer, cache_dir = _path_split(outer)
    # The file must live directly inside a __pycache__ directory.
    if cache_dir != _PYCACHE:
        raise ValueError('{} not bottom-level directory in '
                         '{!r}'.format(_PYCACHE, path))
    # PEP 3147 names look like 'stem.<tag>.pyc' -> exactly two dots.
    if cache_name.count('.') != 2:
        raise ValueError('expected only 2 dots in '
                         '{!r}'.format(cache_name))
    stem = cache_name.partition('.')[0]
    return _path_join(outer, stem + SOURCE_SUFFIXES[0])
def _get_sourcefile(bytecode_path):
    """Convert a bytecode file path to a source path (if possible).

    This function exists purely for backwards-compatibility for
    PyImport_ExecCodeModuleWithFilenames() in the C API.
    """
    if not bytecode_path:
        return None
    stem, _, extension = bytecode_path.rpartition('.')
    # Only paths whose extension looks like .py? are treated as bytecode.
    if not stem or extension.lower()[-3:-1] != 'py':
        return bytecode_path
    try:
        source_path = source_from_cache(bytecode_path)
    except (NotImplementedError, ValueError):
        # Legacy (non-PEP 3147) layout: drop the trailing 'c'/'o'.
        source_path = bytecode_path[:-1]
    return source_path if _path_isfile(source_path) else bytecode_path
def _verbose_message(message, *args, verbosity=1):
"""Print the message to stderr if -v/PYTHONVERBOSE is turned on."""
if sys.flags.verbose >= verbosity:
if not message.startswith(('#', 'import ')):
message = '# ' + message
print(message.format(*args), file=sys.stderr)
def set_package(fxn):
    """Set __package__ on the returned module."""
    def set_package_wrapper(*args, **kwargs):
        module = fxn(*args, **kwargs)
        if getattr(module, '__package__', None) is None:
            # Packages (modules with __path__) are their own package;
            # plain modules belong to their parent package.
            package = module.__name__
            if not hasattr(module, '__path__'):
                package = package.rpartition('.')[0]
            module.__package__ = package
        return module
    _wrap(set_package_wrapper, fxn)
    return set_package_wrapper
def set_loader(fxn):
    """Set __loader__ on the returned module."""
    def set_loader_wrapper(self, *args, **kwargs):
        module = fxn(self, *args, **kwargs)
        # Only fill in __loader__ when the loader did not set it itself.
        try:
            module.__loader__
        except AttributeError:
            module.__loader__ = self
        return module
    _wrap(set_loader_wrapper, fxn)
    return set_loader_wrapper
def module_for_loader(fxn):
    """Decorator to handle selecting the proper module for loaders.

    The decorated function is passed the module to use instead of the module
    name. The module passed in to the function is either from sys.modules if
    it already exists or is a new module. If the module is new, then __name__
    is set the first argument to the method, __loader__ is set to self, and
    __package__ is set accordingly (if self.is_package() is defined) will be set
    before it is passed to the decorated function (if self.is_package() does
    not work for the module it will be set post-load).

    If an exception is raised and the decorator created the module it is
    subsequently removed from sys.modules.

    The decorator assumes that the decorated function takes the module name as
    the second argument.
    """
    def module_for_loader_wrapper(self, fullname, *args, **kwargs):
        # Reuse an existing module for reloads; otherwise create a fresh one.
        module = sys.modules.get(fullname)
        is_reload = module is not None
        if not is_reload:
            # This must be done before open() is called as the 'io' module
            # implicitly imports 'locale' and would otherwise trigger an
            # infinite loop.
            module = new_module(fullname)
            # This must be done before putting the module in sys.modules
            # (otherwise an optimization shortcut in import.c becomes wrong)
            module.__initializing__ = True
            sys.modules[fullname] = module
            module.__loader__ = self
            # Best-effort __package__; loaders without a working is_package()
            # get it filled in after the load instead.
            try:
                is_package = self.is_package(fullname)
            except (ImportError, AttributeError):
                pass
            else:
                if is_package:
                    module.__package__ = fullname
                else:
                    module.__package__ = fullname.rpartition('.')[0]
        else:
            module.__initializing__ = True
        try:
            # If __package__ was not set above, __import__() will do it later.
            return fxn(self, module, *args, **kwargs)
        except:
            # Only remove modules this wrapper created; a failed reload must
            # leave the previously imported module in sys.modules.
            if not is_reload:
                del sys.modules[fullname]
            raise
        finally:
            module.__initializing__ = False
    _wrap(module_for_loader_wrapper, fxn)
    return module_for_loader_wrapper
def _check_name(method):
    """Decorator to verify that the module being requested matches the one the
    loader can handle.

    The first argument (self) must define _name which the second argument is
    compared against. If the comparison fails then ImportError is raised.
    """
    def _check_name_wrapper(self, name=None, *args, **kwargs):
        # Default to the loader's own module name when none is given.
        if name is None:
            name = self.name
        if self.name != name:
            raise ImportError("loader cannot handle %s" % name, name=name)
        return method(self, name, *args, **kwargs)
    _wrap(_check_name_wrapper, method)
    return _check_name_wrapper
def _requires_builtin(fxn):
    """Decorator to verify the named module is built-in."""
    def _requires_builtin_wrapper(self, fullname):
        if fullname in sys.builtin_module_names:
            return fxn(self, fullname)
        raise ImportError("{} is not a built-in module".format(fullname),
                          name=fullname)
    _wrap(_requires_builtin_wrapper, fxn)
    return _requires_builtin_wrapper
def _requires_frozen(fxn):
    """Decorator to verify the named module is frozen."""
    def _requires_frozen_wrapper(self, fullname):
        if _imp.is_frozen(fullname):
            return fxn(self, fullname)
        raise ImportError("{} is not a frozen module".format(fullname),
                          name=fullname)
    _wrap(_requires_frozen_wrapper, fxn)
    return _requires_frozen_wrapper
def _find_module_shim(self, fullname):
"""Try to find a loader for the specified module by delegating to
self.find_loader()."""
# Call find_loader(). If it returns a string (indicating this
# is a namespace package portion), generate a warning and
# return None.
loader, portions = self.find_loader(fullname)
if loader is None and len(portions):
msg = "Not importing directory {}: missing __init__"
_warnings.warn(msg.format(portions[0]), ImportWarning)
return loader
# Loaders #####################################################################
class BuiltinImporter:
    """Meta path import for built-in modules.

    All methods are either class or static methods to avoid the need to
    instantiate the class.
    """
    @classmethod
    def module_repr(cls, module):
        # Repr hook used for modules loaded by this importer.
        return "<module '{}' (built-in)>".format(module.__name__)
    @classmethod
    def find_module(cls, fullname, path=None):
        """Find the built-in module.

        If 'path' is ever specified then the search is considered a failure.
        """
        if path is not None:
            return None
        return cls if _imp.is_builtin(fullname) else None
    @classmethod
    @set_package
    @set_loader
    @_requires_builtin
    def load_module(cls, fullname):
        """Load a built-in module."""
        # Remember reload state so a failed fresh load can be cleaned out of
        # sys.modules without discarding a previously imported module.
        is_reload = fullname in sys.modules
        try:
            return _call_with_frames_removed(_imp.init_builtin, fullname)
        except:
            if not is_reload and fullname in sys.modules:
                del sys.modules[fullname]
            raise
    @classmethod
    @_requires_builtin
    def get_code(cls, fullname):
        """Return None as built-in modules do not have code objects."""
        return None
    @classmethod
    @_requires_builtin
    def get_source(cls, fullname):
        """Return None as built-in modules do not have source code."""
        return None
    @classmethod
    @_requires_builtin
    def is_package(cls, fullname):
        """Return False as built-in modules are never packages."""
        return False
class FrozenImporter:
    """Meta path import for frozen modules.

    All methods are either class or static methods to avoid the need to
    instantiate the class.
    """
    @classmethod
    def module_repr(cls, m):
        # Repr hook used for modules loaded by this importer.
        return "<module '{}' (frozen)>".format(m.__name__)
    @classmethod
    def find_module(cls, fullname, path=None):
        """Find a frozen module."""
        return cls if _imp.is_frozen(fullname) else None
    @classmethod
    @set_package
    @set_loader
    @_requires_frozen
    def load_module(cls, fullname):
        """Load a frozen module."""
        # Remember reload state so a failed fresh load can be cleaned out of
        # sys.modules without discarding a previously imported module.
        is_reload = fullname in sys.modules
        try:
            m = _call_with_frames_removed(_imp.init_frozen, fullname)
            # Let our own module_repr() method produce a suitable repr.
            del m.__file__
            return m
        except:
            if not is_reload and fullname in sys.modules:
                del sys.modules[fullname]
            raise
    @classmethod
    @_requires_frozen
    def get_code(cls, fullname):
        """Return the code object for the frozen module."""
        return _imp.get_frozen_object(fullname)
    @classmethod
    @_requires_frozen
    def get_source(cls, fullname):
        """Return None as frozen modules do not have source code."""
        return None
    @classmethod
    @_requires_frozen
    def is_package(cls, fullname):
        """Return True if the frozen module is a package."""
        return _imp.is_frozen_package(fullname)
class WindowsRegistryFinder:
    """Meta path finder for modules declared in the Windows registry.
    """
    # Registry key templates; {sys_version} and {fullname} are filled in by
    # _search_registry().
    REGISTRY_KEY = (
        "Software\\Python\\PythonCore\\{sys_version}"
        "\\Modules\\{fullname}")
    REGISTRY_KEY_DEBUG = (
        "Software\\Python\\PythonCore\\{sys_version}"
        "\\Modules\\{fullname}\\Debug")
    DEBUG_BUILD = False # Changed in _setup()
    @classmethod
    def _open_registry(cls, key):
        # Prefer the per-user hive; fall back to the machine-wide hive.
        try:
            return _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, key)
        except WindowsError:
            return _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, key)
    @classmethod
    def _search_registry(cls, fullname):
        # Return the registered file path for 'fullname', or None.
        if cls.DEBUG_BUILD:
            registry_key = cls.REGISTRY_KEY_DEBUG
        else:
            registry_key = cls.REGISTRY_KEY
        key = registry_key.format(fullname=fullname,
                                  sys_version=sys.version[:3])
        try:
            with cls._open_registry(key) as hkey:
                filepath = _winreg.QueryValue(hkey, "")
        except WindowsError:
            return None
        return filepath
    @classmethod
    def find_module(cls, fullname, path=None):
        """Find module named in the registry."""
        filepath = cls._search_registry(fullname)
        if filepath is None:
            return None
        # Confirm the registered file still exists before trusting it.
        try:
            _os.stat(filepath)
        except OSError:
            return None
        for loader, suffixes in _get_supported_file_loaders():
            if filepath.endswith(tuple(suffixes)):
                return loader(fullname, filepath)
class _LoaderBasics:
    """Base class of common code needed by both SourceLoader and
    SourcelessFileLoader."""
    def is_package(self, fullname):
        """Concrete implementation of InspectLoader.is_package by checking if
        the path returned by get_filename has a filename of '__init__.py'."""
        filename = _path_split(self.get_filename(fullname))[1]
        filename_base = filename.rsplit('.', 1)[0]
        tail_name = fullname.rpartition('.')[2]
        # A package's file is named __init__ but the module itself must not
        # literally be called '__init__'.
        return filename_base == '__init__' and tail_name != '__init__'
    def _bytes_from_bytecode(self, fullname, data, bytecode_path, source_stats):
        """Return the marshalled bytes from bytecode, verifying the magic
        number, timestamp and source size along the way.

        If source_stats is None then skip the timestamp check.
        """
        # Bytecode header layout: 4-byte magic, 4-byte mtime, 4-byte size.
        magic = data[:4]
        raw_timestamp = data[4:8]
        raw_size = data[8:12]
        if magic != _MAGIC_BYTES:
            msg = 'bad magic number in {!r}: {!r}'.format(fullname, magic)
            _verbose_message(msg)
            raise ImportError(msg, name=fullname, path=bytecode_path)
        elif len(raw_timestamp) != 4:
            # Truncated header raises EOFError rather than ImportError.
            message = 'bad timestamp in {}'.format(fullname)
            _verbose_message(message)
            raise EOFError(message)
        elif len(raw_size) != 4:
            message = 'bad size in {}'.format(fullname)
            _verbose_message(message)
            raise EOFError(message)
        if source_stats is not None:
            # Staleness checks against the source file's mtime and size;
            # each is skipped when the corresponding key is absent.
            try:
                source_mtime = int(source_stats['mtime'])
            except KeyError:
                pass
            else:
                if _r_long(raw_timestamp) != source_mtime:
                    message = 'bytecode is stale for {}'.format(fullname)
                    _verbose_message(message)
                    raise ImportError(message, name=fullname,
                                      path=bytecode_path)
            try:
                # Header stores only the low 32 bits of the source size.
                source_size = source_stats['size'] & 0xFFFFFFFF
            except KeyError:
                pass
            else:
                if _r_long(raw_size) != source_size:
                    raise ImportError(
                        "bytecode is stale for {}".format(fullname),
                        name=fullname, path=bytecode_path)
        # Can't return the code object as errors from marshal loading need to
        # propagate even when source is available.
        return data[12:]
    @module_for_loader
    def _load_module(self, module, *, sourceless=False):
        """Helper for load_module able to handle either source or sourceless
        loading."""
        name = module.__name__
        code_object = self.get_code(name)
        module.__file__ = self.get_filename(name)
        if not sourceless:
            try:
                module.__cached__ = cache_from_source(module.__file__)
            except NotImplementedError:
                module.__cached__ = module.__file__
        else:
            # Sourceless modules are their own cache.
            module.__cached__ = module.__file__
        module.__package__ = name
        if self.is_package(name):
            module.__path__ = [_path_split(module.__file__)[0]]
        else:
            # Non-packages belong to their parent package.
            module.__package__ = module.__package__.rpartition('.')[0]
        module.__loader__ = self
        # Execute the module body in its own namespace.
        _call_with_frames_removed(exec, code_object, module.__dict__)
        return module
class SourceLoader(_LoaderBasics):
    # Abstract-ish loader for source files.  Subclasses provide
    # get_filename()/get_data(); implementing the optional methods below
    # additionally enables reading and writing of cached bytecode.
    def path_mtime(self, path):
        """Optional method that returns the modification time (an int) for the
        specified path, where path is a str.
        """
        raise NotImplementedError
    def path_stats(self, path):
        """Optional method returning a metadata dict for the specified path
        to by the path (str).
        Possible keys:
        - 'mtime' (mandatory) is the numeric timestamp of last source
          code modification;
        - 'size' (optional) is the size in bytes of the source code.

        Implementing this method allows the loader to read bytecode files.
        """
        # Default implementation delegates to the older path_mtime() API.
        return {'mtime': self.path_mtime(path)}
    def _cache_bytecode(self, source_path, cache_path, data):
        """Optional method which writes data (bytes) to a file path (a str).

        Implementing this method allows for the writing of bytecode files.

        The source path is needed in order to correctly transfer permissions
        """
        # For backwards compatibility, we delegate to set_data()
        return self.set_data(cache_path, data)
    def set_data(self, path, data):
        """Optional method which writes data (bytes) to a file path (a str).

        Implementing this method allows for the writing of bytecode files.
        """
        raise NotImplementedError
    def get_source(self, fullname):
        """Concrete implementation of InspectLoader.get_source."""
        import tokenize
        path = self.get_filename(fullname)
        try:
            source_bytes = self.get_data(path)
        except IOError as exc:
            raise ImportError("source not available through get_data()",
                              name=fullname) from exc
        readsource = _io.BytesIO(source_bytes).readline
        try:
            # detect_encoding() honors PEP 263 coding cookies and BOMs.
            encoding = tokenize.detect_encoding(readsource)
        except SyntaxError as exc:
            raise ImportError("Failed to detect encoding",
                              name=fullname) from exc
        # Normalize newlines to '\n' like universal-newline text mode does.
        newline_decoder = _io.IncrementalNewlineDecoder(None, True)
        try:
            return newline_decoder.decode(source_bytes.decode(encoding[0]))
        except UnicodeDecodeError as exc:
            raise ImportError("Failed to decode source file",
                              name=fullname) from exc
    def get_code(self, fullname):
        """Concrete implementation of InspectLoader.get_code.

        Reading of bytecode requires path_stats to be implemented. To write
        bytecode, set_data must also be implemented.
        """
        source_path = self.get_filename(fullname)
        source_mtime = None
        try:
            bytecode_path = cache_from_source(source_path)
        except NotImplementedError:
            bytecode_path = None
        else:
            try:
                st = self.path_stats(source_path)
            except NotImplementedError:
                # No stats available -> cannot validate bytecode; fall
                # through to compiling from source.
                pass
            else:
                source_mtime = int(st['mtime'])
                try:
                    data = self.get_data(bytecode_path)
                except IOError:
                    pass
                else:
                    try:
                        # Validates magic/mtime/size; raises if stale.
                        bytes_data = self._bytes_from_bytecode(fullname, data,
                                                               bytecode_path,
                                                               st)
                    except (ImportError, EOFError):
                        pass
                    else:
                        _verbose_message('{} matches {}', bytecode_path,
                                        source_path)
                        found = marshal.loads(bytes_data)
                        if isinstance(found, _code_type):
                            _imp._fix_co_filename(found, source_path)
                            _verbose_message('code object from {}',
                                            bytecode_path)
                            return found
                        else:
                            msg = "Non-code object in {}"
                            raise ImportError(msg.format(bytecode_path),
                                              name=fullname, path=bytecode_path)
        # No usable bytecode: compile from source.
        source_bytes = self.get_data(source_path)
        code_object = _call_with_frames_removed(compile,
                          source_bytes, source_path, 'exec',
                          dont_inherit=True)
        _verbose_message('code object from {}', source_path)
        # Best-effort write-back of the freshly compiled bytecode.
        if (not sys.dont_write_bytecode and bytecode_path is not None and
            source_mtime is not None):
            data = bytearray(_MAGIC_BYTES)
            data.extend(_w_long(source_mtime))
            data.extend(_w_long(len(source_bytes)))
            data.extend(marshal.dumps(code_object))
            try:
                self._cache_bytecode(source_path, bytecode_path, data)
                _verbose_message('wrote {!r}', bytecode_path)
            except NotImplementedError:
                pass
        return code_object
    def load_module(self, fullname):
        """Concrete implementation of Loader.load_module.

        Requires ExecutionLoader.get_filename and ResourceLoader.get_data to be
        implemented to load source code. Use of bytecode is dictated by whether
        get_code uses/writes bytecode.
        """
        return self._load_module(fullname)
class FileLoader:
    """Base file loader class which implements the loader protocol methods that
    require file system usage."""
    def __init__(self, fullname, path):
        """Cache the module name and the path to the file found by the
        finder."""
        self.name = fullname
        self.path = path
    @_check_name
    def load_module(self, fullname):
        """Load a module from a file."""
        # Issue #14857: Avoid the zero-argument form so the implementation
        # of that form can be updated without breaking the frozen module
        return super(FileLoader, self).load_module(fullname)
    @_check_name
    def get_filename(self, fullname):
        """Return the path to the source file as found by the finder."""
        return self.path
    def get_data(self, path):
        """Return the data from path as raw bytes."""
        # FileIO gives unbuffered binary reads without dragging in the full
        # io stack during bootstrap.
        with _io.FileIO(path, 'r') as file:
            return file.read()
class SourceFileLoader(FileLoader, SourceLoader):
    """Concrete implementation of SourceLoader using the file system."""
    def path_stats(self, path):
        """Return the metadata for the path."""
        st = _os.stat(path)
        return {'mtime': st.st_mtime, 'size': st.st_size}
    def _cache_bytecode(self, source_path, bytecode_path, data):
        """Write bytecode, transferring the source file's permission bits."""
        # Adapt between the two APIs
        try:
            mode = _os.stat(source_path).st_mode
        except OSError:
            mode = 0o666
        # We always ensure write access so we can update cached files
        # later even when the source files are read-only on Windows (#6074)
        mode |= 0o200
        return self.set_data(bytecode_path, data, _mode=mode)
    def set_data(self, path, data, *, _mode=0o666):
        """Write bytes data to a file."""
        parent, filename = _path_split(path)
        path_parts = []
        # Figure out what directories are missing.
        while parent and not _path_isdir(parent):
            parent, part = _path_split(parent)
            path_parts.append(part)
        # Create needed directories.
        for part in reversed(path_parts):
            parent = _path_join(parent, part)
            try:
                _os.mkdir(parent)
            except FileExistsError:
                # Probably another Python process already created the dir.
                continue
            except OSError as exc:
                # Could be a permission error, read-only filesystem: just forget
                # about writing the data.
                _verbose_message('could not create {!r}: {!r}', parent, exc)
                return
        try:
            # Atomic write so a concurrent reader never sees a partial file.
            _write_atomic(path, data, _mode)
            _verbose_message('created {!r}', path)
        except OSError as exc:
            # Same as above: just don't write the bytecode.
            _verbose_message('could not create {!r}: {!r}', path, exc)
class SourcelessFileLoader(FileLoader, _LoaderBasics):
    """Loader which handles sourceless file imports."""
    def load_module(self, fullname):
        """Load a bytecode-only module."""
        return self._load_module(fullname, sourceless=True)
    def get_code(self, fullname):
        """Return the code object deserialized from the bytecode file."""
        path = self.get_filename(fullname)
        raw = self.get_data(path)
        # Validate the header, then unmarshal the remaining payload.
        payload = self._bytes_from_bytecode(fullname, raw, path, None)
        code = marshal.loads(payload)
        if not isinstance(code, _code_type):
            raise ImportError("Non-code object in {}".format(path),
                              name=fullname, path=path)
        _verbose_message('code object from {!r}', path)
        return code
    def get_source(self, fullname):
        """Return None as there is no source code."""
        return None
# Filled in by _setup().
# File suffixes recognized for extension modules (e.g. shared libraries).
EXTENSION_SUFFIXES = []
class ExtensionFileLoader:
    """Loader for extension modules.

    The constructor is designed to work with FileFinder.
    """
    def __init__(self, name, path):
        # Cache the module name and file path supplied by the finder.
        self.name = name
        self.path = path
    @_check_name
    @set_package
    @set_loader
    def load_module(self, fullname):
        """Load an extension module."""
        # Remember reload state so a failed fresh load can be cleaned out of
        # sys.modules without discarding a previously imported module.
        is_reload = fullname in sys.modules
        try:
            module = _call_with_frames_removed(_imp.load_dynamic,
                                               fullname, self.path)
            _verbose_message('extension module loaded from {!r}', self.path)
            if self.is_package(fullname) and not hasattr(module, '__path__'):
                module.__path__ = [_path_split(self.path)[0]]
            return module
        except:
            if not is_reload and fullname in sys.modules:
                del sys.modules[fullname]
            raise
    def is_package(self, fullname):
        """Return True if the extension module is a package."""
        file_name = _path_split(self.path)[1]
        return any(file_name == '__init__' + suffix
                   for suffix in EXTENSION_SUFFIXES)
    def get_code(self, fullname):
        """Return None as an extension module cannot create a code object."""
        return None
    def get_source(self, fullname):
        """Return None as extension modules have no source code."""
        return None
class _NamespacePath:
    """Represents a namespace package's path.  It uses the module name
    to find its parent module, and from there it looks up the parent's
    __path__.  When this changes, the module's own path is recomputed,
    using path_finder.  For top-level modules, the parent module's path
    is sys.path."""
    def __init__(self, name, path, path_finder):
        self._name = name
        self._path = path
        # Snapshot of the parent's path, used to detect changes lazily.
        self._last_parent_path = tuple(self._get_parent_path())
        self._path_finder = path_finder
    def _find_parent_path_names(self):
        """Returns a tuple of (parent-module-name, parent-path-attr-name)"""
        parent, dot, me = self._name.rpartition('.')
        if dot == '':
            # This is a top-level module. sys.path contains the parent path.
            return 'sys', 'path'
        # Not a top-level module. parent-module.__path__ contains the
        #  parent path.
        return parent, '__path__'
    def _get_parent_path(self):
        # Look up the live parent path object from sys.modules.
        parent_module_name, path_attr_name = self._find_parent_path_names()
        return getattr(sys.modules[parent_module_name], path_attr_name)
    def _recalculate(self):
        # If the parent's path has changed, recalculate _path
        parent_path = tuple(self._get_parent_path()) # Make a copy
        if parent_path != self._last_parent_path:
            loader, new_path = self._path_finder(self._name, parent_path)
            # Note that no changes are made if a loader is returned, but we
            #  do remember the new parent path
            if loader is None:
                self._path = new_path
            self._last_parent_path = parent_path     # Save the copy
        return self._path
    def __iter__(self):
        return iter(self._recalculate())
    def __len__(self):
        return len(self._recalculate())
    def __repr__(self):
        return "_NamespacePath({!r})".format(self._path)
    def __contains__(self, item):
        return item in self._recalculate()
    def append(self, item):
        # Appends bypass recalculation; the next read will reconcile.
        self._path.append(item)
class NamespaceLoader:
    # Loader for PEP 420 namespace packages; 'path' is the list of
    # directory portions collected by the finders.
    def __init__(self, name, path, path_finder):
        self._path = _NamespacePath(name, path, path_finder)
    @classmethod
    def module_repr(cls, module):
        # Repr hook used for modules loaded by this loader.
        return "<module '{}' (namespace)>".format(module.__name__)
    @module_for_loader
    def load_module(self, module):
        """Load a namespace module."""
        _verbose_message('namespace module loaded with path {!r}', self._path)
        # A namespace package has no code to execute; just attach the
        # auto-updating __path__.
        module.__path__ = self._path
        return module
# Finders #####################################################################
class PathFinder:
    """Meta path finder for sys.path and package __path__ attributes."""
    @classmethod
    def invalidate_caches(cls):
        """Call the invalidate_caches() method on all path entry finders
        stored in sys.path_importer_caches (where implemented)."""
        for finder in sys.path_importer_cache.values():
            if hasattr(finder, 'invalidate_caches'):
                finder.invalidate_caches()
    @classmethod
    def _path_hooks(cls, path):
        """Search sequence of hooks for a finder for 'path'.

        If 'hooks' is false then use sys.path_hooks.
        """
        if not sys.path_hooks:
            _warnings.warn('sys.path_hooks is empty', ImportWarning)
        # First hook that accepts the path wins; ImportError means "not
        # mine, try the next hook".
        for hook in sys.path_hooks:
            try:
                return hook(path)
            except ImportError:
                continue
        else:
            return None
    @classmethod
    def _path_importer_cache(cls, path):
        """Get the finder for the path entry from sys.path_importer_cache.

        If the path entry is not in the cache, find the appropriate finder
        and cache it. If no finder is available, store None.
        """
        # An empty path entry means the current working directory.
        if path == '':
            path = '.'
        try:
            finder = sys.path_importer_cache[path]
        except KeyError:
            finder = cls._path_hooks(path)
            sys.path_importer_cache[path] = finder
        return finder
    @classmethod
    def _get_loader(cls, fullname, path):
        """Find the loader or namespace_path for this module/package name."""
        # If this ends up being a namespace package, namespace_path is
        #  the list of paths that will become its __path__
        namespace_path = []
        for entry in path:
            # Skip non-string entries (e.g. objects placed on sys.path).
            if not isinstance(entry, (str, bytes)):
                continue
            finder = cls._path_importer_cache(entry)
            if finder is not None:
                if hasattr(finder, 'find_loader'):
                    loader, portions = finder.find_loader(fullname)
                else:
                    # Legacy finder without namespace support.
                    loader = finder.find_module(fullname)
                    portions = []
                if loader is not None:
                    # We found a loader: return it immediately.
                    return loader, namespace_path
                # This is possibly part of a namespace package.
                #  Remember these path entries (if any) for when we
                #  create a namespace package, and continue iterating
                #  on path.
                namespace_path.extend(portions)
        else:
            return None, namespace_path
    @classmethod
    def find_module(cls, fullname, path=None):
        """Find the module on sys.path or 'path' based on sys.path_hooks and
        sys.path_importer_cache."""
        if path is None:
            path = sys.path
        loader, namespace_path = cls._get_loader(fullname, path)
        if loader is not None:
            return loader
        else:
            if namespace_path:
                # We found at least one namespace path.  Return a
                #  loader which can create the namespace package.
                return NamespaceLoader(fullname, namespace_path, cls._get_loader)
            else:
                return None
class FileFinder:
    """File-based finder.

    Interactions with the file system are cached for performance, being
    refreshed when the directory the finder is handling has been modified.
    """
    def __init__(self, path, *loader_details):
        """Initialize with the path to search on and a variable number of
        2-tuples containing the loader and the file suffixes the loader
        recognizes."""
        # Flatten (loader, suffixes) pairs into (suffix, loader) entries.
        loaders = []
        for loader, suffixes in loader_details:
            loaders.extend((suffix, loader) for suffix in suffixes)
        self._loaders = loaders
        # Base (directory) path
        self.path = path or '.'
        # mtime of -1 forces a cache refill on first use.
        self._path_mtime = -1
        self._path_cache = set()
        self._relaxed_path_cache = set()
    def invalidate_caches(self):
        """Invalidate the directory mtime."""
        self._path_mtime = -1
    # Legacy find_module() API implemented in terms of find_loader().
    find_module = _find_module_shim
    def find_loader(self, fullname):
        """Try to find a loader for the specified module, or the namespace
        package portions. Returns (loader, list-of-portions)."""
        is_namespace = False
        tail_module = fullname.rpartition('.')[2]
        try:
            mtime = _os.stat(self.path).st_mtime
        except OSError:
            mtime = -1
        # Refresh the directory listing cache when the directory changed.
        if mtime != self._path_mtime:
            self._fill_cache()
            self._path_mtime = mtime
        # tail_module keeps the original casing, for __file__ and friends
        if _relax_case():
            cache = self._relaxed_path_cache
            cache_module = tail_module.lower()
        else:
            cache = self._path_cache
            cache_module = tail_module
        # Check if the module is the name of a directory (and thus a package).
        if cache_module in cache:
            base_path = _path_join(self.path, tail_module)
            if _path_isdir(base_path):
                for suffix, loader in self._loaders:
                    init_filename = '__init__' + suffix
                    full_path = _path_join(base_path, init_filename)
                    if _path_isfile(full_path):
                        return (loader(fullname, full_path), [base_path])
                else:
                    # A namespace package, return the path if we don't also
                    #  find a module in the next section.
                    is_namespace = True
        # Check for a file w/ a proper suffix exists.
        for suffix, loader in self._loaders:
            full_path = _path_join(self.path, tail_module + suffix)
            _verbose_message('trying {}'.format(full_path), verbosity=2)
            if cache_module + suffix in cache:
                if _path_isfile(full_path):
                    return (loader(fullname, full_path), [])
        if is_namespace:
            _verbose_message('possible namespace for {}'.format(base_path))
            return (None, [base_path])
        return (None, [])
    def _fill_cache(self):
        """Fill the cache of potential modules and packages for this directory."""
        path = self.path
        try:
            contents = _os.listdir(path)
        except (FileNotFoundError, PermissionError, NotADirectoryError):
            # Directory has either been removed, turned into a file, or made
            # unreadable.
            contents = []
        # We store two cached versions, to handle runtime changes of the
        # PYTHONCASEOK environment variable.
        if not sys.platform.startswith('win'):
            self._path_cache = set(contents)
        else:
            # Windows users can import modules with case-insensitive file
            # suffixes (for legacy reasons). Make the suffix lowercase here
            # so it's done once instead of for every import. This is safe as
            # the specified suffixes to check against are always specified in a
            # case-sensitive manner.
            lower_suffix_contents = set()
            for item in contents:
                name, dot, suffix = item.partition('.')
                if dot:
                    new_name = '{}.{}'.format(name, suffix.lower())
                else:
                    new_name = name
                lower_suffix_contents.add(new_name)
            self._path_cache = lower_suffix_contents
        if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS):
            # Fully lower-cased view used when PYTHONCASEOK is honored.
            self._relaxed_path_cache = set(fn.lower() for fn in contents)
    @classmethod
    def path_hook(cls, *loader_details):
        """A class method which returns a closure to use on sys.path_hook
        which will return an instance using the specified loaders and the path
        called on the closure.

        If the path called on the closure is not a directory, ImportError is
        raised.
        """
        def path_hook_for_FileFinder(path):
            """Path hook for importlib.machinery.FileFinder."""
            if not _path_isdir(path):
                raise ImportError("only directories are supported", path=path)
            return cls(path, *loader_details)
        return path_hook_for_FileFinder
    def __repr__(self):
        return "FileFinder(%r)" % (self.path,)
# Import itself ###############################################################
class _ImportLockContext:
    """Context manager for the import lock."""
    def __enter__(self):
        """Acquire the import lock."""
        # Delegates to the interpreter-global import lock in _imp.
        _imp.acquire_lock()
    def __exit__(self, exc_type, exc_value, exc_traceback):
        """Release the import lock regardless of any raised exceptions."""
        # Returns None, so exceptions are never suppressed.
        _imp.release_lock()
def _resolve_name(name, package, level):
"""Resolve a relative module name to an absolute one."""
bits = package.rsplit('.', level - 1)
if len(bits) < level:
raise ValueError('attempted relative import beyond top-level package')
base = bits[0]
return '{}.{}'.format(base, name) if name else base
def _find_module(name, path):
    """Find a module's loader."""
    if not sys.meta_path:
        _warnings.warn('sys.meta_path is empty', ImportWarning)
    for finder in sys.meta_path:
        # Hold the import lock only around each individual finder call.
        with _ImportLockContext():
            loader = finder.find_module(name, path)
        if loader is None:
            continue
        # The parent import may have already imported this module.
        if name in sys.modules:
            return sys.modules[name].__loader__
        return loader
    return None
def _sanity_check(name, package, level):
"""Verify arguments are "sane"."""
if not isinstance(name, str):
raise TypeError("module name must be str, not {}".format(type(name)))
if level < 0:
raise ValueError('level must be >= 0')
if package:
if not isinstance(package, str):
raise TypeError("__package__ not set to a string")
elif package not in sys.modules:
msg = ("Parent module {!r} not loaded, cannot perform relative "
"import")
raise SystemError(msg.format(package))
if not name and level == 0:
raise ValueError("Empty module name")
# Template for "module not found" ImportError messages used below.
_ERR_MSG = 'No module named {!r}'
def _find_and_load_unlocked(name, import_):
    # Core import workhorse: imports the parent package first, finds a
    # loader, loads the module, then patches up parent attribute,
    # __package__ and __loader__.  Caller must hold this module's lock.
    path = None
    parent = name.rpartition('.')[0]
    if parent:
        if parent not in sys.modules:
            _call_with_frames_removed(import_, parent)
        # Crazy side-effects!
        if name in sys.modules:
            return sys.modules[name]
        # Backwards-compatibility; be nicer to skip the dict lookup.
        parent_module = sys.modules[parent]
        try:
            path = parent_module.__path__
        except AttributeError:
            # Parent exists but is not a package, so 'name' cannot exist.
            msg = (_ERR_MSG + '; {} is not a package').format(name, parent)
            raise ImportError(msg, name=name)
    loader = _find_module(name, path)
    if loader is None:
        exc = ImportError(_ERR_MSG.format(name), name=name)
        # TODO(brett): switch to a proper ModuleNotFound exception in Python
        # 3.4.
        exc._not_found = True
        raise exc
    elif name not in sys.modules:
        # The parent import may have already imported this module.
        loader.load_module(name)
        _verbose_message('import {!r} # {!r}', name, loader)
    # Backwards-compatibility; be nicer to skip the dict lookup.
    module = sys.modules[name]
    if parent:
        # Set the module as an attribute on its parent.
        parent_module = sys.modules[parent]
        setattr(parent_module, name.rpartition('.')[2], module)
    # Set __package__ if the loader did not.
    if getattr(module, '__package__', None) is None:
        try:
            module.__package__ = module.__name__
            if not hasattr(module, '__path__'):
                module.__package__ = module.__package__.rpartition('.')[0]
        except AttributeError:
            pass
    # Set loader if need be.
    if not hasattr(module, '__loader__'):
        try:
            module.__loader__ = loader
        except AttributeError:
            pass
    return module
def _find_and_load(name, import_):
    """Find and load the module, and release the import lock."""
    # Grab the per-module lock while the global import lock is still held,
    # then release the global lock so other imports can proceed; the
    # finally makes sure the global lock is dropped even if
    # _get_module_lock raises.
    try:
        lock = _get_module_lock(name)
    finally:
        _imp.release_lock()
    lock.acquire()
    try:
        return _find_and_load_unlocked(name, import_)
    finally:
        lock.release()
def _gcd_import(name, package=None, level=0):
    """Import and return the module based on its name, the package the call is
    being made from, and the level adjustment.
    This function represents the greatest common denominator of functionality
    between import_module and __import__. This includes setting __package__ if
    the loader did not.
    """
    _sanity_check(name, package, level)
    if level > 0:
        # Resolve a relative name into an absolute one.
        name = _resolve_name(name, package, level)
    _imp.acquire_lock()
    if name not in sys.modules:
        # _find_and_load releases the global import lock acquired above.
        return _find_and_load(name, _gcd_import)
    module = sys.modules[name]
    if module is None:
        # A None entry marks a module whose import previously failed.
        _imp.release_lock()
        message = ("import of {} halted; "
                   "None in sys.modules".format(name))
        raise ImportError(message, name=name)
    # NOTE(review): presumably waits for any in-progress import of this
    # module and releases the global import lock -- confirm against
    # _lock_unlock_module.
    _lock_unlock_module(name)
    return module
def _handle_fromlist(module, fromlist, import_):
"""Figure out what __import__ should return.
The import_ parameter is a callable which takes the name of module to
import. It is required to decouple the function from assuming importlib's
import implementation is desired.
"""
# The hell that is fromlist ...
# If a package was imported, try to import stuff from fromlist.
if hasattr(module, '__path__'):
if '*' in fromlist:
fromlist = list(fromlist)
fromlist.remove('*')
if hasattr(module, '__all__'):
fromlist.extend(module.__all__)
for x in fromlist:
if not hasattr(module, x):
from_name = '{}.{}'.format(module.__name__, x)
try:
_call_with_frames_removed(import_, from_name)
except ImportError as exc:
# Backwards-compatibility dictates we ignore failed
# imports triggered by fromlist for modules that don't
# exist.
# TODO(brett): In Python 3.4, have import raise
# ModuleNotFound and catch that.
if getattr(exc, '_not_found', False):
if exc.name == from_name:
continue
raise
return module
def _calc___package__(globals):
"""Calculate what __package__ should be.
__package__ is not guaranteed to be defined or could be set to None
to represent that its proper value is unknown.
"""
package = globals.get('__package__')
if package is None:
package = globals['__name__']
if '__path__' not in globals:
package = package.rpartition('.')[0]
return package
def _get_supported_file_loaders():
    """Returns a list of file-based module loaders.
    Each item is a tuple (loader, suffixes).
    """
    # Order matters: extension modules win over source, source over bytecode.
    return [
        (ExtensionFileLoader, _imp.extension_suffixes()),
        (SourceFileLoader, SOURCE_SUFFIXES),
        (SourcelessFileLoader, BYTECODE_SUFFIXES),
    ]
def __import__(name, globals=None, locals=None, fromlist=(), level=0):
    """Import a module.
    The 'globals' argument is used to infer where the import is occurring from
    to handle relative imports. The 'locals' argument is ignored. The
    'fromlist' argument specifies what should exist as attributes on the module
    being imported (e.g. ``from module import <fromlist>``). The 'level'
    argument represents the package location to import from in a relative
    import (e.g. ``from ..pkg import mod`` would have a 'level' of 2).
    """
    if level == 0:
        module = _gcd_import(name)
    else:
        globals_ = globals if globals is not None else {}
        package = _calc___package__(globals_)
        module = _gcd_import(name, package, level)
    if not fromlist:
        # Return up to the first dot in 'name'. This is complicated by the fact
        # that 'name' may be relative.
        if level == 0:
            return _gcd_import(name.partition('.')[0])
        elif not name:
            # ``from .. import *``-style import with an empty name: the
            # resolved package itself is the answer.
            return module
        else:
            # Figure out where to slice the module's name up to the first dot
            # in 'name'.
            cut_off = len(name) - len(name.partition('.')[0])
            # Slice end needs to be positive to alleviate need to special-case
            # when ``'.' not in name``.
            return sys.modules[module.__name__[:len(module.__name__)-cut_off]]
    else:
        return _handle_fromlist(module, fromlist, _gcd_import)
def _setup(sys_module, _imp_module):
    """Setup importlib by importing needed built-in modules and injecting them
    into the global namespace.
    As sys is needed for sys.modules access and _imp is needed to load built-in
    modules, those two modules must be explicitly passed in.
    """
    global _imp, sys, BYTECODE_SUFFIXES
    _imp = _imp_module
    sys = sys_module
    # Pick the bytecode suffix matching the interpreter's -O state.
    if sys.flags.optimize:
        BYTECODE_SUFFIXES = OPTIMIZED_BYTECODE_SUFFIXES
    else:
        BYTECODE_SUFFIXES = DEBUG_BYTECODE_SUFFIXES
    # Retroactively attach __loader__ to modules imported before importlib
    # took over (built-in and frozen modules only).
    module_type = type(sys)
    for name, module in sys.modules.items():
        if isinstance(module, module_type):
            if not hasattr(module, '__loader__'):
                if name in sys.builtin_module_names:
                    module.__loader__ = BuiltinImporter
                elif _imp.is_frozen(name):
                    module.__loader__ = FrozenImporter
    self_module = sys.modules[__name__]
    # Inject the built-in modules this bootstrap module itself relies on.
    for builtin_name in ('_io', '_warnings', 'builtins', 'marshal'):
        if builtin_name not in sys.modules:
            builtin_module = BuiltinImporter.load_module(builtin_name)
        else:
            builtin_module = sys.modules[builtin_name]
        setattr(self_module, builtin_name, builtin_module)
    # Pick the platform OS module and its path separator(s).
    os_details = ('posix', ['/']), ('nt', ['\\', '/']), ('os2', ['\\', '/'])
    for builtin_os, path_separators in os_details:
        # Assumption made in _path_join()
        assert all(len(sep) == 1 for sep in path_separators)
        path_sep = path_separators[0]
        if builtin_os in sys.modules:
            os_module = sys.modules[builtin_os]
            break
        else:
            try:
                os_module = BuiltinImporter.load_module(builtin_os)
                # TODO: rip out os2 code after 3.3 is released as per PEP 11
                if builtin_os == 'os2' and 'EMX GCC' in sys.version:
                    path_sep = path_separators[1]
                break
            except ImportError:
                continue
    else:
        # The for-loop found no usable OS module.
        raise ImportError('importlib requires posix or nt')
    try:
        thread_module = BuiltinImporter.load_module('_thread')
    except ImportError:
        # Python was built without threads
        thread_module = None
    weakref_module = BuiltinImporter.load_module('_weakref')
    if builtin_os == 'nt':
        winreg_module = BuiltinImporter.load_module('winreg')
        setattr(self_module, '_winreg', winreg_module)
    setattr(self_module, '_os', os_module)
    setattr(self_module, '_thread', thread_module)
    setattr(self_module, '_weakref', weakref_module)
    setattr(self_module, 'path_sep', path_sep)
    setattr(self_module, 'path_separators', set(path_separators))
    # Constants
    setattr(self_module, '_relax_case', _make_relax_case())
    EXTENSION_SUFFIXES.extend(_imp.extension_suffixes())
    if builtin_os == 'nt':
        SOURCE_SUFFIXES.append('.pyw')
        if '_d.pyd' in EXTENSION_SUFFIXES:
            WindowsRegistryFinder.DEBUG_BUILD = True
def _install(sys_module, _imp_module):
    """Install importlib as the implementation of import."""
    _setup(sys_module, _imp_module)
    supported_loaders = _get_supported_file_loaders()
    sys.path_hooks.extend([FileFinder.path_hook(*supported_loaders)])
    # Meta-path order matters: built-ins first, then frozen modules, then
    # (on Windows only) the registry finder, and finally the sys.path finder.
    sys.meta_path.append(BuiltinImporter)
    sys.meta_path.append(FrozenImporter)
    if _os.__name__ == 'nt':
        sys.meta_path.append(WindowsRegistryFinder)
    sys.meta_path.append(PathFinder)
|
markovg/nest-simulator | refs/heads/master | topology/examples/conncon_sources.py | 16 | # -*- coding: utf-8 -*-
#
# conncon_sources.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
NEST Topology Module Example
Create two 30x30 layers of iaf_psc_alpha neurons,
connect with convergent projection and rectangular mask,
visualize connection from target perspective.
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
'''
import nest
import nest.topology as topo
import pylab
pylab.ion()
nest.ResetKernel()
nest.set_verbosity('M_WARNING')
# create two 30x30 test layers of iaf_psc_alpha neurons with periodic
# boundary conditions (edge_wrap=True)
a = topo.CreateLayer({'columns': 30, 'rows': 30, 'extent': [3.0, 3.0],
                      'elements': 'iaf_psc_alpha', 'edge_wrap': True})
b = topo.CreateLayer({'columns': 30, 'rows': 30, 'extent': [3.0, 3.0],
                      'elements': 'iaf_psc_alpha', 'edge_wrap': True})
# convergent projection a -> b: 0.4 x 1.0 rectangular mask, 50% connection
# probability, uniform random weights, fixed 1.0 ms delay
topo.ConnectLayers(a, b, {'connection_type': 'convergent',
                          'mask': {'rectangular': {'lower_left': [-0.2, -0.5],
                                                   'upper_right': [0.2, 0.5]}},
                          'kernel': 0.5,
                          'weights': {'uniform': {'min': 0.5, 'max': 2.0}},
                          'delays': 1.0})
pylab.clf()
# plot sources of neurons in different grid locations
for tgt_pos in [[15, 15], [0, 0]]:
    # obtain node id for center
    tgt = topo.GetElement(b, tgt_pos)
    # obtain positions of all sources connected to the target
    # int() required to cast numpy.int64
    # NOTE(review): conn[0] is assumed to be the source GID of each
    # connection tuple -- confirm against nest.GetConnections docs.
    spos = tuple(zip(*[topo.GetPosition([int(conn[0])])[0] for conn in
                       nest.GetConnections(target=tgt)]))
    # scatter-plot
    pylab.scatter(spos[0], spos[1], 20, zorder=10)
    # mark target position with transparent red circle
    ctrpos = pylab.array(topo.GetPosition(tgt)[0])
    pylab.gca().add_patch(pylab.Circle(ctrpos, radius=0.1, zorder=99,
                                       fc='r', alpha=0.4, ec='none'))
    # mark mask position with open red rectangle
    pylab.gca().add_patch(
        pylab.Rectangle(ctrpos - (0.2, 0.5), 0.4, 1.0, zorder=1,
                        fc='none', ec='r', lw=3))
# mark layer edge
pylab.gca().add_patch(pylab.Rectangle((-1.5, -1.5), 3.0, 3.0, zorder=1,
                                      fc='none', ec='k', lw=3))
# beautify
pylab.axes().set_xticks(pylab.arange(-1.5, 1.55, 0.5))
pylab.axes().set_yticks(pylab.arange(-1.5, 1.55, 0.5))
pylab.grid(True)
pylab.axis([-2.0, 2.0, -2.0, 2.0])
pylab.axes().set_aspect('equal', 'box')
pylab.title('Connection sources')
|
SaganBolliger/nupic | refs/heads/master | tests/swarming/nupic/swarming/experiments/smart_speculation_temporal/description.py | 32 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [],
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalNextStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': { u'A': { 'fieldname': u'daynight',
'n': 300,
'name': u'daynight',
'type': 'SDRCategoryEncoder',
'w': 21},
u'B': { 'fieldname': u'daynight',
'n': 300,
'name': u'daynight',
'type': 'SDRCategoryEncoder',
'w': 21},
u'C': { 'fieldname': u'precip',
'n': 300,
'name': u'precip',
'type': 'SDRCategoryEncoder',
'w': 21},
u'D': { 'clipInput': True,
'fieldname': u'visitor_winloss',
'maxval': 0.78600000000000003,
'minval': 0.0,
'n': 150,
'name': u'visitor_winloss',
'type': 'AdaptiveScalarEncoder',
'w': 21},
u'E': { 'clipInput': True,
'fieldname': u'home_winloss',
'maxval': 0.69999999999999996,
'minval': 0.0,
'n': 150,
'name': u'home_winloss',
'type': 'AdaptiveScalarEncoder',
'w': 21},
u'F': { 'dayOfWeek': (7, 1),
'fieldname': u'timestamp',
'name': u'timestamp_dayOfWeek',
'type': 'DateEncoder'},
u'G': { 'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': (7, 1),
'type': 'DateEncoder'},
u'pred': { 'clipInput': True,
'fieldname': u'attendance',
'maxval': 36067,
'minval': 0,
'n': 150,
'name': u'attendance',
'type': 'AdaptiveScalarEncoder',
'w': 21}},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
        # Valid keys are the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
        # What percent of the column's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 1.0,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 15,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
  predictionSteps = int(round(aggregationDivide(
      config['predictAheadTime'], config['aggregationInfo'])))
  # NOTE(review): ``assert`` is stripped under ``python -O``; if this
  # invariant must always hold, an explicit exception would be safer.
  assert (predictionSteps >= 1)
  config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupicengine/cluster/database/StreamDef.json.
#
'dataset' : { u'info': u'baseball benchmark test',
u'streams': [ { u'columns': [ u'daynight',
u'precip',
u'home_winloss',
u'visitor_winloss',
u'attendance',
u'timestamp'],
u'info': u'OAK01.csv',
u'source': u'file://extra/baseball_stadium/OAK01reformatted.csv'}],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
#'iterationCount' : ITERATION_COUNT,
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'attendance', inferenceElement=InferenceElement.prediction,
metric='aae', params={'window': 1000}),
MetricSpec(field=u'attendance', inferenceElement=InferenceElement.prediction,
metric='trivial_aae', params={'window': 1000}),
MetricSpec(field=u'attendance', inferenceElement=InferenceElement.prediction,
metric='nupicScore_scalar', params={'frequencyWindow': 1000, 'movingAverageWindow': 1000}),
MetricSpec(field=u'attendance', inferenceElement=InferenceElement.prediction,
metric='nupicScore_scalar', params={'frequencyWindow': 1000})
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*nupicScore.*'],
}
# Single entry point consumed by the OPF experiment runner: bundles the
# model configuration and experiment control sections defined above.
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
                                                control=control)
|
ReganBell/QReview | refs/heads/master | networkx/linalg/__init__.py | 10 | from networkx.linalg.attrmatrix import *
import networkx.linalg.attrmatrix
from networkx.linalg.spectrum import *
import networkx.linalg.spectrum
from networkx.linalg.graphmatrix import *
import networkx.linalg.graphmatrix
from networkx.linalg.laplacianmatrix import *
import networkx.linalg.laplacianmatrix
from networkx.linalg.algebraicconnectivity import *
|
powellc/beltbadgers | refs/heads/master | beltbadgers/apps/belts/context_processors.py | 2 | from dojo.models import Student
def load_student(request):
    """Context processor exposing the current user's Student record.

    Returns a template context dict ``{'student': <Student or None>}``.
    ``student`` is None when no matching record exists (e.g. anonymous
    users, or users without a Student row).
    """
    try:
        student = Student.objects.get(user=request.user)
    except Exception:
        # A bare ``except:`` here would also swallow SystemExit and
        # KeyboardInterrupt; catching Exception keeps the original
        # best-effort behavior (any lookup failure -> None) without that.
        student = None
    return {'student': student}
KarenKawaii/2.7-python-dev-KarenKawaii | refs/heads/master | pythonexercises/introduction/hello.py | 1 | # -*- coding: utf-8 -*-
"""
This module is a Hello World.
"""
print "Hello World"
|
cmtm/networkx | refs/heads/master | networkx/algorithms/assortativity/connectivity.py | 7 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2011 by
# Jordi Torrents <jtorrents@milnou.net>
# Aric Hagberg <hagberg@lanl.gov>
# All rights reserved.
# BSD license.
#
#
# Authors: Jordi Torrents <jtorrents@milnou.net>
# Aric Hagberg <hagberg@lanl.gov>
from __future__ import division
from collections import defaultdict
import networkx as nx
__all__ = ['average_degree_connectivity',
'k_nearest_neighbors']
def average_degree_connectivity(G, source="in+out", target="in+out",
                                nodes=None, weight=None):
    """Compute the average degree connectivity of graph.

    The average degree connectivity is the average nearest neighbor
    degree of nodes with degree k.  For weighted graphs the weighted
    average neighbor degree of [1]_ is used: each neighbor's degree is
    weighted by the connecting edge's weight, and the sum is normalized
    by the node's weighted degree.

    Parameters
    ----------
    G : NetworkX graph
    source : "in"|"out"|"in+out" (default: "in+out")
        Directed graphs only. Degree type used for the source node.
    target : "in"|"out"|"in+out" (default: "in+out")
        Directed graphs only. Degree type used for the neighbors.
    nodes : list or iterable (optional)
        Compute neighbor connectivity for these nodes. The default is
        all nodes.
    weight : string or None, optional (default=None)
        The edge attribute that holds the numerical value used as a
        weight. If None, then each edge has weight 1.

    Returns
    -------
    d : dict
        A dictionary keyed by degree k with the value of average
        connectivity.

    Raises
    ------
    ValueError
        If either `source` or `target` is not one of 'in', 'out', or
        'in+out'.

    Examples
    --------
    >>> G=nx.path_graph(4)
    >>> G.edge[1][2]['weight'] = 3
    >>> nx.k_nearest_neighbors(G)
    {1: 2.0, 2: 1.5}
    >>> nx.k_nearest_neighbors(G, weight='weight')
    {1: 2.0, 2: 1.75}

    Notes
    -----
    This algorithm is sometimes called "k nearest neighbors" and is also
    available as `k_nearest_neighbors`.

    References
    ----------
    .. [1] A. Barrat, M. Barthélemy, R. Pastor-Satorras, and A. Vespignani,
       "The architecture of complex weighted networks".
       PNAS 101 (11): 3747–3752 (2004).
    """
    # Select which degree / neighbor functions apply for this graph type.
    if G.is_directed():
        if source not in ('in', 'out', 'in+out'):
            raise ValueError('source must be one of "in", "out", or "in+out"')
        if target not in ('in', 'out', 'in+out'):
            raise ValueError('target must be one of "in", "out", or "in+out"')
        degree_of = {'out': G.out_degree,
                     'in': G.in_degree,
                     'in+out': G.degree}
        nbrs_of = {'out': G.successors,
                   'in': G.predecessors,
                   'in+out': G.neighbors}
        source_degree = degree_of[source]
        target_degree = degree_of[target]
        neighbors = nbrs_of[source]
        # When walking in-edges, the edge weight lives on G[nbr][n].
        reverse = source == 'in'
    else:
        source_degree = target_degree = G.degree
        neighbors = G.neighbors
        reverse = False
    dsum = defaultdict(int)
    dnorm = defaultdict(int)
    # `nodes` may itself be a single node of the graph.
    source_nodes = source_degree(nodes)
    if nodes in G:
        source_nodes = [(nodes, source_degree(nodes))]
    for node, deg in source_nodes:
        nbr_degrees = target_degree(neighbors(node))
        if weight is None:
            total = sum(d for _, d in nbr_degrees)
        elif reverse:
            total = sum(G[nbr][node].get(weight, 1) * d
                        for nbr, d in nbr_degrees)
        else:
            total = sum(G[node][nbr].get(weight, 1) * d
                        for nbr, d in nbr_degrees)
        dnorm[deg] += source_degree(node, weight=weight)
        dsum[deg] += total
    # Normalize each degree class by its (possibly weighted) degree sum.
    dc = {}
    for deg, total in dsum.items():
        norm = dnorm[deg]
        dc[deg] = total / norm if total > 0 and norm > 0 else total
    return dc
# Historical alias for the same computation.
k_nearest_neighbors = average_degree_connectivity
|
nimbusproject/kazoo | refs/heads/master | kazoo/recipe/__init__.py | 82 | |
PCManticore/argus-ci | refs/heads/master | ci/tests.py | 2 | # Copyright 2015 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from argus.backends.heat import heat_backend
from argus.backends.tempest import manager
from argus.backends.tempest import cloud as tempest_cloud_backend
from argus.backends.tempest import tempest_backend
from argus.introspection.cloud import windows as introspection
from argus.recipes.cloud import windows as recipe
from argus.scenarios import base
from argus.scenarios.cloud import windows as windows_scenarios
from argus.tests.cloud import smoke
from argus.tests.cloud.windows import test_smoke
from argus import util
def _availability_zones():
    """Return the set of availability-zone names reported by the cloud.

    Credentials created for the API manager are always cleaned up,
    even when the listing call fails.
    """
    api = manager.APIManager()
    try:
        listing = api.availability_zone_client.list_availability_zones()
        return {zone['zoneName'] for zone in listing['availabilityZoneInfo']}
    finally:
        api.cleanup_credentials()
AVAILABILITY_ZONES = _availability_zones()
class BaseWindowsScenario(base.BaseScenario):
    # Common wiring shared by every Windows scenario below: a
    # tempest-backed Windows backend, the default introspection and
    # recipe implementations, and the plain HTTP metadata service.
    # Subclasses override individual attributes as needed.
    backend_type = tempest_backend.BaseWindowsTempestBackend
    introspection_type = introspection.InstanceIntrospection
    recipe_type = recipe.CloudbaseinitRecipe
    service_type = 'http'
    userdata = None
    # NOTE(review): class-level mutable default; assumed subclasses/readers
    # never mutate it in place -- confirm against base.BaseScenario usage.
    metadata = {}
# Plain smoke test against the default HTTP metadata service.
class ScenarioSmoke(BaseWindowsScenario):
    test_classes = (test_smoke.TestSmoke, )
# Smoke test deployed through a Heat stack, with heat-specific userdata.
class ScenarioSmokeHeat(BaseWindowsScenario):
    test_classes = (test_smoke.TestSmoke, test_smoke.TestHeatUserdata)
    backend_type = heat_backend.WindowsHeatBackend
    userdata = util.get_resource('windows/test_heat.ps1')
# Multipart userdata (scripts) exercised end to end.
class ScenarioMultipartSmoke(BaseWindowsScenario):
    test_classes = (test_smoke.TestScriptsUserdataSmoke,
                    smoke.TestSetTimezone)
    recipe_type = recipe.CloudbaseinitScriptRecipe
    userdata = util.get_resource('windows/multipart_userdata')
# Second multipart variant: hostname/timezone plus x86 script artifact.
class ScenarioMultipartSmokeWindowsPartTwo(BaseWindowsScenario):
    test_classes = (smoke.TestSetHostname,
                    smoke.TestSetTimezone,
                    smoke.TestPowershellMultipartX86TxtExists,
                    smoke.TestNoError)
    userdata = util.get_resource('windows/multipart_userdata_part_two')
# Smoke test where the admin user already exists before cloudbase-init runs.
class ScenarioUserAlreadyCreated(BaseWindowsScenario):
    test_classes = (test_smoke.TestSmoke, )
    recipe_type = recipe.CloudbaseinitCreateUserRecipe
# EC2-style userdata script combined with the regular smoke suite.
class ScenarioGenericSmoke(BaseWindowsScenario):
    test_classes = (test_smoke.TestEC2Userdata, test_smoke.TestSmoke)
    userdata = util.get_resource('windows/ec2script')
    metadata = {"admin_pass": "PASsw0r4&!="}
# Password posting verified on a rescued instance.
class ScenarioSmokeRescue(BaseWindowsScenario):
    backend_type = tempest_cloud_backend.RescueWindowsBackend
    test_classes = (smoke.TestPasswordPostedRescueSmoke,
                    smoke.TestNoError)
    metadata = {"admin_pass": "PASsw0r4&!="}
class ScenarioCloudstackSmokeUpdatePassword(
BaseWindowsScenario,
windows_scenarios.CloudstackWindowsScenario):
test_classes = (smoke.TestCloudstackUpdatePasswordSmoke,
smoke.TestNoError)
recipe_type = recipe.CloudbaseinitCloudstackRecipe
service_type = 'cloudstack'
metadata = {"admin_pass": "PASsw0r4&!="}
class ScenarioCloudstackMetadata(
BaseWindowsScenario,
windows_scenarios.CloudstackWindowsScenario):
test_classes = (test_smoke.TestSmoke, )
recipe_type = recipe.CloudbaseinitCloudstackRecipe
service_type = 'cloudstack'
class ScenarioEC2Metadata(BaseWindowsScenario,
windows_scenarios.EC2WindowsScenario):
test_classes = (test_smoke.TestSmoke, )
recipe_type = recipe.CloudbaseinitEC2Recipe
service_type = 'ec2'
class ScenarioMaasMetadata(BaseWindowsScenario,
windows_scenarios.MaasWindowsScenario):
test_classes = (test_smoke.TestSmoke, )
recipe_type = recipe.CloudbaseinitMaasRecipe
service_type = 'maas'
class ScenarioWinRMPlugin(BaseWindowsScenario):
    """WinRM certificate scenario.

    Test for checking that a fix for
    https://bugs.launchpad.net/cloudbase-init/+bug/1433174 works.
    """
    test_classes = (smoke.TestPasswordMetadataSmoke,
                    smoke.TestNoError,
                    test_smoke.TestCertificateWinRM)
    recipe_type = recipe.CloudbaseinitWinrmRecipe
    metadata = {"admin_pass": "PASsw0r4&!="}
    # The userdata is the certificate itself (see util.get_certificate).
    userdata = util.get_certificate()
class ScenarioX509PublicKeys(BaseWindowsScenario,
                             windows_scenarios.HTTPKeysWindowsScenario):
    """Checks public-key injection and the WinRM certificate over the
    HTTP metadata service, with no errors logged.
    """
    test_classes = (smoke.TestNoError,
                    smoke.TestPublicKeys,
                    test_smoke.TestCertificateWinRM)
    recipe_type = recipe.CloudbaseinitKeysRecipe
    metadata = {"admin_pass": "PASsw0r4&!="}
    service_type = 'http'
class ScenarioNextLogonAlwaysChange(BaseWindowsScenario):
    """Verifies the 'always change password at next logon' behavior."""
    recipe_type = recipe.AlwaysChangeLogonPasswordRecipe
    test_classes = (test_smoke.TestNextLogonPassword,
                    smoke.TestNoError)
class ScenarioNextLogonOnMetadataOnly(BaseWindowsScenario):
    """Verifies next-logon password change when the password comes only
    from metadata (clear-password recipe).
    """
    recipe_type = recipe.ClearPasswordLogonRecipe
    test_classes = (test_smoke.TestNextLogonPassword,
                    smoke.TestNoError)
    metadata = {"admin_pass": "PASsw0r4&!="}
class ScenarioLocalScripts(BaseWindowsScenario):
    """Base smoke tests plus local-scripts execution checks, using the
    local-scripts recipe.
    """
    test_classes = (test_smoke.TestSmoke,
                    test_smoke.TestLocalScripts)
    recipe_type = recipe.CloudbaseinitLocalScriptsRecipe
@unittest.skipIf('configdrive_vfat_drive' not in AVAILABILITY_ZONES,
                 'Needs special availability zone')
class ScenarioSmokeConfigdriveVfatDrive(BaseWindowsScenario):
    """Base smoke tests against a config-drive metadata source; only runs
    when the 'configdrive_vfat_drive' availability zone is available.
    """
    test_classes = (test_smoke.TestSmoke, )
    service_type = 'configdrive'
    availability_zone = 'configdrive_vfat_drive'
@unittest.skipIf('configdrive_vfat_cdrom' not in AVAILABILITY_ZONES,
                 'Needs special availability zone')
class ScenarioSmokeConfigdriveVfatCdrom(BaseWindowsScenario):
    """Base smoke tests against a config-drive metadata source; only runs
    when the 'configdrive_vfat_cdrom' availability zone is available.
    """
    test_classes = (test_smoke.TestSmoke, )
    service_type = 'configdrive'
    availability_zone = 'configdrive_vfat_cdrom'
@unittest.skipIf('configdrive_iso9660_drive' not in AVAILABILITY_ZONES,
                 'Needs special availability zone')
class ScenarioSmokeConfigdriveIso9660Drive(BaseWindowsScenario):
    """Base smoke tests against a config-drive metadata source; only runs
    when the 'configdrive_iso9660_drive' availability zone is available.
    """
    test_classes = (test_smoke.TestSmoke, )
    service_type = 'configdrive'
    availability_zone = 'configdrive_iso9660_drive'
@unittest.skipIf('configdrive_iso9660_cdrom' not in AVAILABILITY_ZONES,
                 'Needs special availability zone')
class ScenarioSmokeConfigdriveIso9660Cdrom(BaseWindowsScenario):
    """Base smoke tests against a config-drive metadata source; only runs
    when the 'configdrive_iso9660_cdrom' availability zone is available.
    """
    test_classes = (test_smoke.TestSmoke, )
    service_type = 'configdrive'
    availability_zone = 'configdrive_iso9660_cdrom'
@unittest.skipIf('static_network' not in AVAILABILITY_ZONES,
                 'Needs special availability zone')
class ScenarioNetworkConfig(BaseWindowsScenario):
    """Static-network configuration check; only runs when the
    'static_network' availability zone is available.
    """
    backend_type = tempest_cloud_backend.NetworkWindowsBackend
    test_classes = (smoke.TestStaticNetwork, )
    availability_zone = 'static_network'
|
yitian134/chromium | refs/heads/master | ppapi/generators/idl_node.py | 11 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Nodes for PPAPI IDL AST"""
#
# IDL Node
#
# IDL Node defines the IDLAttribute and IDLNode objects which are constructed
# by the parser as it processes the various 'productions'. The IDLAttribute
# objects are assigned to the IDLNode's property dictionary instead of being
# applied as children of The IDLNodes, so they do not exist in the final tree.
# The AST of IDLNodes is the output from the parsing state and will be used
# as the source data by the various generators.
#
import hashlib
import sys
from idl_log import ErrOut, InfoOut, WarnOut
from idl_propertynode import IDLPropertyNode
from idl_namespace import IDLNamespace
from idl_release import IDLRelease, IDLReleaseMap
# IDLAttribute
#
# A temporary object used by the parsing process to hold an Extended Attribute
# which will be passed as a child to a standard IDLNode.
#
class IDLAttribute(object):
    """Temporary parser object pairing an ExtAttribute name with a value.

    Instances are consumed during IDLNode construction, which copies them
    into the node's property dictionary instead of keeping them as
    children in the final tree.
    """

    def __init__(self, name, value):
        self.cls = 'ExtAttribute'
        self.name = name
        self.value = value

    def __str__(self):
        return '{0}={1}'.format(self.name, self.value)
#
# IDLNode
#
# This class implements the AST tree, providing the associations between
# parents and children. It also contains a namepsace and propertynode to
# allow for look-ups. IDLNode is derived from IDLRelease, so it is
# version aware.
#
class IDLNode(IDLRelease):
    """AST node produced by the PPAPI IDL parser.

    Provides parent/child associations, a property dictionary populated
    from ExtAttribute children, namespace/type look-ups and, via
    IDLRelease, awareness of the version range a node belongs to.

    Fixes applied:
      * Error() no longer raises a stray debugging Exception("huh?") when
        the node happens to be on line 46 (leftover debug code).
      * FindRange() now recurses into the parent's FindRange(); it used to
        call FindVersion(name, vmin, vmax), which takes only two arguments
        and would raise a TypeError.
    """
    # Set of object IDLNode types which have a name and belong in the namespace.
    NamedSet = set(['Enum', 'EnumItem', 'File', 'Function', 'Interface',
                    'Member', 'Param', 'Struct', 'Type', 'Typedef'])

    show_versions = False

    def __init__(self, cls, filename, lineno, pos, children=None):
        # Initialize with no starting or ending Version
        IDLRelease.__init__(self, None, None)
        self.cls = cls
        self.lineno = lineno
        self.pos = pos
        self.filename = filename
        self.hashes = {}
        self.deps = {}
        self.errors = 0
        self.namespace = None
        self.typelist = None
        self.parent = None
        self.property_node = IDLPropertyNode()
        # self.children is a list of children ordered as defined
        self.children = []
        # Process the passed in list of children, placing ExtAttributes into the
        # property dictionary, and nodes into the local child list in order. In
        # addition, add nodes to the namespace if the class is in the NamedSet.
        if not children:
            children = []
        for child in children:
            if child.cls == 'ExtAttribute':
                self.SetProperty(child.name, child.value)
            else:
                self.AddChild(child)

    #
    # String related functions
    #

    # Return a string representation of this node
    def __str__(self):
        name = self.GetName()
        ver = IDLRelease.__str__(self)
        if name is None:
            name = ''
        if not IDLNode.show_versions:
            ver = ''
        return '%s(%s%s)' % (self.cls, name, ver)

    # Return file and line number for where node was defined
    def Location(self):
        return '%s(%d)' % (self.filename, self.lineno)

    # Log an error for this object
    def Error(self, msg):
        self.errors += 1
        ErrOut.LogLine(self.filename, self.lineno, 0, ' %s %s' %
                       (str(self), msg))

    # Log a warning for this object
    def Warning(self, msg):
        WarnOut.LogLine(self.filename, self.lineno, 0, ' %s %s' %
                        (str(self), msg))

    def GetName(self):
        return self.GetProperty('NAME')

    def GetNameVersion(self):
        name = self.GetProperty('NAME', default='')
        ver = IDLRelease.__str__(self)
        return '%s%s' % (name, ver)

    # Dump this object and its children
    def Dump(self, depth=0, comments=False, out=sys.stdout):
        is_comment = self.cls in ('Comment', 'Copyright')
        # Skip this node if it's a comment, and we are not printing comments
        if not comments and is_comment:
            return
        tab = ''.rjust(depth * 2)
        if is_comment:
            out.write('%sComment\n' % tab)
            for line in self.GetName().split('\n'):
                out.write('%s "%s"\n' % (tab, line))
        else:
            out.write('%s%s\n' % (tab, self))
        properties = self.property_node.GetPropertyList()
        if properties:
            out.write('%s Properties\n' % tab)
            for p in properties:
                if is_comment and p == 'NAME':
                    # Skip printing the name for comments, since we printed above already
                    continue
                out.write('%s %s : %s\n' % (tab, p, self.GetProperty(p)))
        for child in self.children:
            child.Dump(depth + 1, comments=comments, out=out)

    #
    # Search related functions
    #

    # Check if node is of a given type
    def IsA(self, *typelist):
        return self.cls in typelist

    # Get a list of objects for this key
    def GetListOf(self, *keys):
        out = []
        for child in self.children:
            if child.cls in keys:
                out.append(child)
        return out

    # Get the first child matching one of the given classes, or None.
    def GetOneOf(self, *keys):
        out = self.GetListOf(*keys)
        if out:
            return out[0]
        return None

    def SetParent(self, parent):
        self.property_node.AddParent(parent)
        self.parent = parent

    def AddChild(self, node):
        node.SetParent(self)
        self.children.append(node)

    # Get a list of all children
    def GetChildren(self):
        return self.children

    # Get a list of all children of a given version
    def GetChildrenVersion(self, version):
        out = []
        for child in self.children:
            if child.IsVersion(version):
                out.append(child)
        return out

    # Get a list of all children in a given range
    def GetChildrenRange(self, vmin, vmax):
        out = []
        for child in self.children:
            if child.IsRange(vmin, vmax):
                out.append(child)
        return out

    def FindVersion(self, name, version):
        # Look up in this namespace first, then walk up through parents.
        node = self.namespace.FindNode(name, version)
        if not node and self.parent:
            node = self.parent.FindVersion(name, version)
        return node

    def FindRange(self, name, vmin, vmax):
        nodes = self.namespace.FindNodes(name, vmin, vmax)
        if not nodes and self.parent:
            # BUG FIX: previously called self.parent.FindVersion(name, vmin,
            # vmax), which takes only (name, version) and raised a TypeError.
            nodes = self.parent.FindRange(name, vmin, vmax)
        return nodes

    def GetType(self, release):
        # Resolve this node's type reference for the given release, if any.
        if not self.typelist:
            return None
        return self.typelist.FindRelease(release)

    def GetHash(self, release):
        # Hash this node's class, properties, resolved type and all
        # release-relevant children.  The sha1 object is memoized per release.
        hashval = self.hashes.get(release, None)
        if hashval is None:
            hashval = hashlib.sha1()
            hashval.update(self.cls)
            for key in self.property_node.GetPropertyList():
                val = self.GetProperty(key)
                hashval.update('%s=%s' % (key, str(val)))
            typeref = self.GetType(release)
            if typeref:
                hashval.update(typeref.GetHash(release))
            for child in self.GetChildren():
                if child.IsA('Copyright', 'Comment', 'Label'):
                    continue
                if not child.IsRelease(release):
                    continue
                hashval.update(child.GetHash(release))
            self.hashes[release] = hashval
        return hashval.hexdigest()

    def GetDeps(self, release):
        # Transitive dependency set (self + children + type refs), memoized.
        deps = self.deps.get(release, None)
        if deps is None:
            deps = set([self])
            for child in self.GetChildren():
                deps |= child.GetDeps(release)
            typeref = self.GetType(release)
            if typeref:
                deps |= typeref.GetDeps(release)
            self.deps[release] = deps
        return deps

    def GetVersion(self, release):
        filenode = self.GetProperty('FILE')
        if not filenode:
            return None
        return filenode.release_map.GetVersion(release)

    def GetRelease(self, version):
        filenode = self.GetProperty('FILE')
        if not filenode:
            return None
        return filenode.release_map.GetRelease(version)

    def GetUniqueReleases(self, releases):
        # Given a list of global release, return a subset of releases
        # for this object that change.
        last_hash = None
        builds = []
        filenode = self.GetProperty('FILE')
        file_releases = filenode.release_map.GetReleases()
        # Generate a set of unique releases for this object based on versions
        # available in this file's release labels.
        for rel in file_releases:
            # Check if this object is valid for the release in question.
            if not self.IsRelease(rel):
                continue
            # Only add it if the hash is different.
            cur_hash = self.GetHash(rel)
            if last_hash != cur_hash:
                builds.append(rel)
            last_hash = cur_hash
        # Remap the requested releases to releases in the unique build set to
        # use first available release names and remove duplicates.
        #   UNIQUE VERSION:  'M13', 'M14', 'M17'
        #   REQUESTED RANGE: 'M15', 'M16', 'M17', 'M18'
        #   REMAP RESULT:    'M14', 'M17'
        out_list = []
        build_len = len(builds)
        build_index = 0
        rel_len = len(releases)
        rel_index = 0
        while build_index < build_len and rel_index < rel_len:
            while rel_index < rel_len and releases[rel_index] < builds[build_index]:
                rel_index = rel_index + 1
            # If we've reached the end of the request list, we must be done
            if rel_index == rel_len:
                break
            # Check this current request
            cur = releases[rel_index]
            while build_index < build_len and cur >= builds[build_index]:
                build_index = build_index + 1
            out_list.append(builds[build_index - 1])
            rel_index = rel_index + 1
        return out_list

    def SetProperty(self, name, val):
        self.property_node.SetProperty(name, val)

    def GetProperty(self, name, default=None):
        return self.property_node.GetProperty(name, default)

    def Traverse(self, data, func):
        # Pre-order traversal: visit this node, then every descendant.
        func(self, data)
        for child in self.children:
            child.Traverse(data, func)
#
# IDLFile
#
# A specialized version of IDLNode which tracks errors and warnings.
#
class IDLFile(IDLNode):
    """A specialized version of IDLNode which tracks errors and warnings.

    The file's NAME and ERRORS are stored as properties, and the node
    carries a release map seeded with a single 'M13' label.
    """
    def __init__(self, name, children, errors=0):
        attrs = [IDLAttribute('NAME', name),
                 IDLAttribute('ERRORS', errors)]
        if not children: children = []
        # A File node always reports position line 1, offset 0.
        IDLNode.__init__(self, 'File', name, 1, 0, attrs + children)
        self.release_map = IDLReleaseMap([('M13', 1.0)])
#
# Tests
#
def StringTest():
    """Self-test for IDLNode naming and string conversion.

    Returns the number of failed checks (0 on success).
    """
    failures = 0
    name_str = 'MyName'
    text_str = 'MyNode(%s)' % name_str
    node = IDLNode('MyNode', 'no file', 1, 0,
                   [IDLAttribute('NAME', name_str)])
    if node.GetName() != name_str:
        ErrOut.Log('GetName returned >%s< not >%s<' % (node.GetName(), name_str))
        failures += 1
    if node.GetProperty('NAME') != name_str:
        ErrOut.Log('Failed to get name property.')
        failures += 1
    if str(node) != text_str:
        ErrOut.Log('str() returned >%s< not >%s<' % (str(node), text_str))
        failures += 1
    if not failures:
        InfoOut.Log('Passed StringTest')
    return failures
def ChildTest():
    """Self-test for parent/child wiring of IDLNode.

    Returns the number of failed checks (0 on success).
    """
    failures = 0
    kid = IDLNode('child', 'no file', 1, 0)
    parent = IDLNode('parent', 'no file', 1, 0, [kid])
    if kid.parent != parent:
        ErrOut.Log('Failed to connect parent.')
        failures += 1
    if [kid] != parent.GetChildren():
        ErrOut.Log('Failed GetChildren.')
        failures += 1
    if kid != parent.GetOneOf('child'):
        ErrOut.Log('Failed GetOneOf(child)')
        failures += 1
    if parent.GetOneOf('bogus'):
        ErrOut.Log('Failed GetOneOf(bogus)')
        failures += 1
    if not parent.IsA('parent'):
        ErrOut.Log('Expecting parent type')
        failures += 1
    # Duplicate children must both be kept, in order.
    parent = IDLNode('parent', 'no file', 1, 0, [kid, kid])
    if [kid, kid] != parent.GetChildren():
        ErrOut.Log('Failed GetChildren2.')
        failures += 1
    if not failures:
        InfoOut.Log('Passed ChildTest')
    return failures
def Main():
    """Run the module self-tests; return 0 on success, -1 on failure."""
    total = StringTest() + ChildTest()
    if not total:
        return 0
    ErrOut.Log('IDLNode failed with %d errors.' % total)
    return -1
# Run the self-tests when executed directly; exit status reflects the result.
if __name__ == '__main__':
    sys.exit(Main())
|
shipci/boto | refs/heads/develop | boto/s3/tagging.py | 236 | from boto import handler
import xml.sax
class Tag(object):
    """A single S3 bucket tag: a key/value pair with SAX parsing hooks."""

    # Maps XML element names to the attribute each one populates.
    _ELEMENT_ATTRS = {'Key': 'key', 'Value': 'value'}

    def __init__(self, key=None, value=None):
        self.key = key
        self.value = value

    def startElement(self, name, attrs, connection):
        # A Tag has no nested elements to hand off to.
        return None

    def endElement(self, name, value, connection):
        attr = self._ELEMENT_ATTRS.get(name)
        if attr is not None:
            setattr(self, attr, value)

    def to_xml(self):
        """Serialize this tag as a <Tag> XML fragment."""
        return '<Tag><Key>{0}</Key><Value>{1}</Value></Tag>'.format(
            self.key, self.value)

    def __eq__(self, other):
        return other.key == self.key and other.value == self.value
class TagSet(list):
    """A list of Tag objects, with SAX parsing hooks and XML output."""

    def startElement(self, name, attrs, connection):
        if name != 'Tag':
            return None
        # Hand parsing of the nested element off to a fresh Tag.
        tag = Tag()
        self.append(tag)
        return tag

    def endElement(self, name, value, connection):
        setattr(self, name, value)

    def add_tag(self, key, value):
        """Append a new Tag built from *key* and *value*."""
        self.append(Tag(key, value))

    def to_xml(self):
        """Serialize all contained tags as a <TagSet> XML fragment."""
        return '<TagSet>%s</TagSet>' % ''.join(tag.to_xml() for tag in self)
class Tags(list):
    """A container for the tags associated with a bucket."""

    def startElement(self, name, attrs, connection):
        if name != 'TagSet':
            return None
        # Hand parsing of the nested element off to a fresh TagSet.
        tag_set = TagSet()
        self.append(tag_set)
        return tag_set

    def endElement(self, name, value, connection):
        setattr(self, name, value)

    def to_xml(self):
        """Serialize all contained tag sets as a <Tagging> document."""
        return '<Tagging>%s</Tagging>' % ''.join(ts.to_xml() for ts in self)

    def add_tag_set(self, tag_set):
        """Append an existing TagSet to this container."""
        self.append(tag_set)
|
agoose77/hivesystem | refs/heads/master | manual/chess/tut-drone-2.py | 1 | import bee
import dragonfly
from dragonfly.commandhive import commandhive, commandapp
from components.drones.keyboardmove import keyboardmove
from components.drones.chessprocessor import chessprocessor
from components.drones.chesskeeper import chesskeeper
from components.drones.chessboard import chessboard
from components.drones.movereporter import movereporter
from direct.showbase.ShowBase import taskMgr
from panda3d.core import getModelPath
import os
# Let Panda3D resolve model assets relative to the current working directory.
getModelPath().prependPath(os.getcwd())
from bee import hivemodule
class myapp(commandapp):
    """Command app that advances the Panda3D task manager on every tick."""
    def on_tick(self):
        # Steps the task manager twice per tick -- presumably to keep the
        # renderer responsive relative to the command loop; TODO confirm.
        taskMgr.step()
        taskMgr.step()
class myhive(commandhive):
    """Command hive wiring together the chess drones."""
    _hivecontext = hivemodule.appcontext(myapp)
    # One keyboard mover per side, plus the chess logic/display drones.
    keyboardmove("White")
    keyboardmove("Black")
    chessprocessor()
    chesskeeper()
    chessboard()
    movereporter()
# Instantiate, build, place and run the hive.
m = myhive().getinstance()
m.build("m")
m.place()
m.close()
m.init()
m.run()
|
ConeyLiu/spark | refs/heads/master | python/pyspark/streaming/listener.py | 75 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Public API of this module.
__all__ = ["StreamingListener"]
class StreamingListener(object):
    """Base listener for Spark Streaming lifecycle events.

    Subclass and override the callbacks of interest; every default
    implementation is a no-op.  Each callback receives a single event
    object describing what happened.
    """

    def __init__(self):
        pass

    def onStreamingStarted(self, streamingStarted):
        """
        Called when the streaming has been started.
        """
        pass

    def onReceiverStarted(self, receiverStarted):
        """
        Called when a receiver has been started
        """
        pass

    def onReceiverError(self, receiverError):
        """
        Called when a receiver has reported an error
        """
        pass

    def onReceiverStopped(self, receiverStopped):
        """
        Called when a receiver has been stopped
        """
        pass

    def onBatchSubmitted(self, batchSubmitted):
        """
        Called when a batch of jobs has been submitted for processing.
        """
        pass

    def onBatchStarted(self, batchStarted):
        """
        Called when processing of a batch of jobs has started.
        """
        pass

    def onBatchCompleted(self, batchCompleted):
        """
        Called when processing of a batch of jobs has completed.
        """
        pass

    def onOutputOperationStarted(self, outputOperationStarted):
        """
        Called when processing of a job of a batch has started.
        """
        pass

    def onOutputOperationCompleted(self, outputOperationCompleted):
        """
        Called when processing of a job of a batch has completed
        """
        pass

    class Java:
        # Py4J marker: declares the JVM interface this Python object implements.
        implements = ["org.apache.spark.streaming.api.java.PythonStreamingListener"]
|
felixma/nova | refs/heads/master | nova/api/openstack/compute/legacy_v2/contrib/cells.py | 17 | # Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The cells extension."""
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_utils import strutils
from oslo_utils import timeutils
import six
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.cells import rpcapi as cells_rpcapi
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova import rpc
CONF = cfg.CONF
# Import this cell's name/capabilities options so the `info` action below
# can report them.
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('capabilities', 'nova.cells.opts', group='cells')
authorize = extensions.extension_authorizer('compute', 'cells')
def _filter_keys(item, keys):
"""Filters all model attributes except for keys
item is a dict
"""
return {k: v for k, v in six.iteritems(item) if k in keys}
def _fixup_cell_info(cell_info, keys):
    """If the transport_url is present in the cell, derive username,
    rpc_host, and rpc_port from it.

    Mutates cell_info in place: the 'transport_url' entry is always popped,
    and any of *keys* not already present are filled from the URL's first
    host (or None when the URL cannot be parsed).
    """
    if 'transport_url' not in cell_info:
        return

    # Disassemble the transport URL
    transport_url = cell_info.pop('transport_url')
    try:
        transport_url = rpc.get_transport_url(transport_url)
    except messaging.InvalidTransportURL:
        # Just go with None's
        for key in keys:
            cell_info.setdefault(key, None)
        return

    if not transport_url.hosts:
        return

    transport_host = transport_url.hosts[0]

    # Map output keys to the attribute names used by TransportHost.
    transport_field_map = {'rpc_host': 'hostname', 'rpc_port': 'port'}
    for key in keys:
        if key in cell_info:
            # Don't clobber values that survived _filter_keys.
            continue
        transport_field = transport_field_map.get(key, key)
        cell_info[key] = getattr(transport_host, transport_field)
def _scrub_cell(cell, detail=False):
    """Return a sanitized dict describing *cell* for API output.

    Secrets are dropped; transport information is re-derived from the
    cell's transport_url, and a 'type' of 'parent' or 'child' is added.
    """
    wanted = ['name', 'username', 'rpc_host', 'rpc_port']
    if detail:
        wanted = wanted + ['capabilities']

    info = _filter_keys(cell, wanted + ['transport_url'])
    _fixup_cell_info(info, wanted)
    if cell['is_parent']:
        info['type'] = 'parent'
    else:
        info['type'] = 'child'
    return info
class Controller(object):
    """Controller for Cell resources.

    Exposes list/show/create/update/delete for cells plus the `info`,
    `capacities` and `sync_instances` actions, all backed by the cells
    RPC API.
    """

    def __init__(self, ext_mgr):
        self.cells_rpcapi = cells_rpcapi.CellsAPI()
        self.ext_mgr = ext_mgr

    def _get_cells(self, ctxt, req, detail=False):
        """Return all cells."""
        # Ask the CellsManager for the most recent data
        items = self.cells_rpcapi.get_cell_info_for_neighbors(ctxt)
        # Honor any pagination limits from the request before scrubbing.
        items = common.limited(items, req)
        items = [_scrub_cell(item, detail=detail) for item in items]
        return dict(cells=items)

    @common.check_cells_enabled
    def index(self, req):
        """Return all cells in brief."""
        ctxt = req.environ['nova.context']
        authorize(ctxt)
        return self._get_cells(ctxt, req)

    @common.check_cells_enabled
    def detail(self, req):
        """Return all cells in detail."""
        ctxt = req.environ['nova.context']
        authorize(ctxt)
        return self._get_cells(ctxt, req, detail=True)

    @common.check_cells_enabled
    def info(self, req):
        """Return name and capabilities for this cell."""
        context = req.environ['nova.context']
        authorize(context)
        cell_capabs = {}
        my_caps = CONF.cells.capabilities
        for cap in my_caps:
            # Each configured capability is a 'key=value' string.
            key, value = cap.split('=')
            cell_capabs[key] = value
        cell = {'name': CONF.cells.name,
                'type': 'self',
                'rpc_host': None,
                'rpc_port': 0,
                'username': None,
                'capabilities': cell_capabs}
        return dict(cell=cell)

    @common.check_cells_enabled
    def capacities(self, req, id=None):
        """Return capacities for a given cell or all cells."""
        # TODO(kaushikc): return capacities as a part of cell info and
        # cells detail calls in v2.1, along with capabilities
        if not self.ext_mgr.is_loaded('os-cell-capacities'):
            raise exc.HTTPNotFound()
        context = req.environ['nova.context']
        authorize(context)
        try:
            capacities = self.cells_rpcapi.get_capacities(context,
                                                          cell_name=id)
        except exception.CellNotFound:
            msg = (_("Cell %(id)s not found.") % {'id': id})
            raise exc.HTTPNotFound(explanation=msg)
        return dict(cell={"capacities": capacities})

    @common.check_cells_enabled
    def show(self, req, id):
        """Return data about the given cell name. 'id' is a cell name."""
        context = req.environ['nova.context']
        authorize(context)
        try:
            cell = self.cells_rpcapi.cell_get(context, id)
        except exception.CellNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        return dict(cell=_scrub_cell(cell))

    @common.check_cells_enabled
    def delete(self, req, id):
        """Delete a child or parent cell entry. 'id' is a cell name."""
        context = req.environ['nova.context']
        authorize(context)
        # Action-specific policy check in addition to the extension-wide one.
        authorize(context, action="delete")
        # NOTE(eliqiao): back-compatible with db layer hard-code admin
        # permission checks.
        nova_context.require_admin_context(context)
        try:
            num_deleted = self.cells_rpcapi.cell_delete(context, id)
        except exception.CellsUpdateUnsupported as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
        if num_deleted == 0:
            raise exc.HTTPNotFound()
        return {}

    def _validate_cell_name(self, cell_name):
        """Validate cell name is not empty and doesn't contain '!',
        '.' or '@'.

        Raises HTTPBadRequest on invalid input.
        """
        if not cell_name:
            msg = _("Cell name cannot be empty")
            raise exc.HTTPBadRequest(explanation=msg)
        if '!' in cell_name or '.' in cell_name or '@' in cell_name:
            msg = _("Cell name cannot contain '!', '.' or '@'")
            raise exc.HTTPBadRequest(explanation=msg)

    def _validate_cell_type(self, cell_type):
        """Validate cell_type is 'parent' or 'child'."""
        if cell_type not in ['parent', 'child']:
            msg = _("Cell type must be 'parent' or 'child'")
            raise exc.HTTPBadRequest(explanation=msg)

    def _normalize_cell(self, cell, existing=None):
        """Normalize input cell data. Normalizations include:

        * Converting cell['type'] to is_parent boolean.
        * Merging existing transport URL with transport information.

        Mutates *cell* in place.
        """
        # Start with the cell type conversion
        if 'type' in cell:
            self._validate_cell_type(cell['type'])
            cell['is_parent'] = cell['type'] == 'parent'
            del cell['type']
        # Avoid cell type being overwritten to 'child'
        elif existing:
            cell['is_parent'] = existing['is_parent']
        else:
            cell['is_parent'] = False

        # Now we disassemble the existing transport URL...
        transport_url = existing.get('transport_url') if existing else None
        transport_url = rpc.get_transport_url(transport_url)

        if 'rpc_virtual_host' in cell:
            transport_url.virtual_host = cell.pop('rpc_virtual_host')

        if not transport_url.hosts:
            transport_url.hosts.append(messaging.TransportHost())
        transport_host = transport_url.hosts[0]

        if cell.get('rpc_port') is not None:
            try:
                cell['rpc_port'] = int(cell['rpc_port'])
            except ValueError:
                raise exc.HTTPBadRequest(
                    explanation=_('rpc_port must be integer'))

        # Copy over the input fields
        transport_field_map = {
            'username': 'username',
            'password': 'password',
            'hostname': 'rpc_host',
            'port': 'rpc_port',
        }
        for key, input_field in transport_field_map.items():
            # Only override the value if we're given an override
            if input_field in cell:
                setattr(transport_host, key, cell.pop(input_field))

        # Now set the transport URL
        cell['transport_url'] = str(transport_url)

    @common.check_cells_enabled
    def create(self, req, body):
        """Create a child cell entry."""
        context = req.environ['nova.context']
        authorize(context)
        authorize(context, action="create")
        # NOTE(eliqiao): back-compatible with db layer hard-code admin
        # permission checks.
        nova_context.require_admin_context(context)
        if 'cell' not in body:
            msg = _("No cell information in request")
            raise exc.HTTPBadRequest(explanation=msg)
        cell = body['cell']
        if 'name' not in cell:
            msg = _("No cell name in request")
            raise exc.HTTPBadRequest(explanation=msg)
        self._validate_cell_name(cell['name'])
        self._normalize_cell(cell)
        try:
            cell = self.cells_rpcapi.cell_create(context, cell)
        except exception.CellsUpdateUnsupported as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
        return dict(cell=_scrub_cell(cell))

    @common.check_cells_enabled
    def update(self, req, id, body):
        """Update a child cell entry. 'id' is the cell name to update."""
        context = req.environ['nova.context']
        authorize(context)
        authorize(context, action="update")
        # NOTE(eliqiao): back-compatible with db layer hard-code admin
        # permission checks.
        nova_context.require_admin_context(context)
        if 'cell' not in body:
            msg = _("No cell information in request")
            raise exc.HTTPBadRequest(explanation=msg)
        cell = body['cell']
        cell.pop('id', None)
        if 'name' in cell:
            self._validate_cell_name(cell['name'])
        try:
            # NOTE(Vek): There is a race condition here if multiple
            #            callers are trying to update the cell
            #            information simultaneously.  Since this
            #            operation is administrative in nature, and
            #            will be going away in the future, I don't see
            #            it as much of a problem...
            existing = self.cells_rpcapi.cell_get(context, id)
        except exception.CellNotFound:
            raise exc.HTTPNotFound()
        self._normalize_cell(cell, existing)
        try:
            cell = self.cells_rpcapi.cell_update(context, id, cell)
        except exception.CellNotFound:
            raise exc.HTTPNotFound()
        except exception.CellsUpdateUnsupported as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
        return dict(cell=_scrub_cell(cell))

    @common.check_cells_enabled
    def sync_instances(self, req, body):
        """Tell all cells to sync instance info."""
        context = req.environ['nova.context']
        authorize(context)
        authorize(context, action="sync_instances")
        project_id = body.pop('project_id', None)
        deleted = body.pop('deleted', False)
        updated_since = body.pop('updated_since', None)
        if body:
            # Reject any unrecognized keys left in the request body.
            msg = _("Only 'updated_since', 'project_id' and 'deleted' are "
                    "understood.")
            raise exc.HTTPBadRequest(explanation=msg)
        if isinstance(deleted, six.string_types):
            try:
                deleted = strutils.bool_from_string(deleted, strict=True)
            except ValueError as err:
                raise exc.HTTPBadRequest(explanation=six.text_type(err))
        if updated_since:
            try:
                timeutils.parse_isotime(updated_since)
            except ValueError:
                msg = _('Invalid changes-since value')
                raise exc.HTTPBadRequest(explanation=msg)
        self.cells_rpcapi.sync_instances(context, project_id=project_id,
                updated_since=updated_since, deleted=deleted)
updated_since=updated_since, deleted=deleted)
class Cells(extensions.ExtensionDescriptor):
    """Enables cells-related functionality such as adding neighbor cells,
    listing neighbor cells, and getting the capabilities of the local cell.
    """

    name = "Cells"
    alias = "os-cells"
    namespace = "http://docs.openstack.org/compute/ext/cells/api/v1.1"
    updated = "2013-05-14T00:00:00Z"

    def get_resources(self):
        """Register the os-cells resource and its extra actions."""
        # Non-CRUD actions exposed on the collection...
        coll_actions = {
            'detail': 'GET',
            'info': 'GET',
            'sync_instances': 'POST',
            'capacities': 'GET',
        }
        # ...and on individual members.
        memb_actions = {
            'capacities': 'GET',
        }

        res = extensions.ResourceExtension('os-cells',
                Controller(self.ext_mgr), collection_actions=coll_actions,
                member_actions=memb_actions)
        return [res]
|
proxysh/Safejumper-for-Mac | refs/heads/master | buildlinux/env32/lib/python2.7/site-packages/pyasn1/codec/__init__.py | 3653 | # This file is necessary to make this directory a package.
|
Cl3MM/metagoofil | refs/heads/master | hachoir_metadata/misc.py | 16 | from hachoir_metadata.metadata import RootMetadata, registerExtractor
from hachoir_metadata.safe import fault_tolerant
from hachoir_parser.container import SwfFile
from hachoir_parser.misc import TorrentFile, TrueTypeFontFile, OLE2_File, PcfFile
from hachoir_core.field import isString
from hachoir_core.error import warning
from hachoir_parser import guessParser
from hachoir_metadata.setter import normalizeString
class TorrentMetadata(RootMetadata):
    """Extract metadata from a BitTorrent .torrent file."""

    # Top-level bencoded keys copied straight into metadata attributes.
    KEY_TO_ATTR = {
        u"announce": "url",
        u"comment": "comment",
        u"creation_date": "creation_date",
    }
    # Keys of the nested "info" dictionary mapped to metadata attributes.
    INFO_TO_ATTR = {
        u"length": "file_size",
        u"name": "filename",
    }

    def extract(self, torrent):
        # The first field of the parser holds the root dictionary entries.
        for field in torrent[0]:
            self.processRoot(field)

    @fault_tolerant
    def processRoot(self, field):
        if field.name in self.KEY_TO_ATTR:
            key = self.KEY_TO_ATTR[field.name]
            value = field.value
            setattr(self, key, value)
        elif field.name == "info" and "value" in field:
            # Recurse into the "info" dictionary's entries.
            for field in field["value"]:
                self.processInfo(field)

    @fault_tolerant
    def processInfo(self, field):
        if field.name in self.INFO_TO_ATTR:
            key = self.INFO_TO_ATTR[field.name]
            value = field.value
            setattr(self, key, value)
        elif field.name == "piece_length":
            self.comment = "Piece length: %s" % field.display
class TTF_Metadata(RootMetadata):
    """Extract metadata from a TrueType font file (header + name records)."""

    # TrueType nameID values mapped to metadata attributes.
    NAMEID_TO_ATTR = {
        0: "copyright",   # Copyright notice
        3: "title",       # Unique font identifier
        5: "version",     # Version string
        8: "author",      # Manufacturer name
        11: "url",        # URL Vendor
        14: "copyright",  # License info URL
    }

    def extract(self, ttf):
        if "header" in ttf:
            self.extractHeader(ttf["header"])
        if "names" in ttf:
            self.extractNames(ttf["names"])

    @fault_tolerant
    def extractHeader(self, header):
        self.creation_date = header["created"].value
        self.last_modification = header["modified"].value
        self.comment = u"Smallest readable size in pixels: %s pixels" % header["lowest"].value
        self.comment = u"Font direction: %s" % header["font_dir"].display

    @fault_tolerant
    def extractNames(self, names):
        offset = names["offset"].value
        for header in names.array("header"):
            key = header["nameID"].value
            # Name data lives at offset + per-record offset (byte address).
            foffset = offset + header["offset"].value
            field = names.getFieldByAddress(foffset*8)
            if not field or not isString(field):
                continue
            value = field.value
            if key not in self.NAMEID_TO_ATTR:
                continue
            key = self.NAMEID_TO_ATTR[key]
            if key == "version" and value.startswith(u"Version "):
                # "Version 1.2" => "1.2"
                value = value[8:]
            setattr(self, key, value)
class OLE2_Metadata(RootMetadata):
    """Extract metadata from OLE2 (Microsoft Office) documents.

    Walks the compound-document streams: the SummaryInformation and
    DocumentSummaryInformation property sets, plus two Word-specific
    structures (FIB flags and the SttbSavedBy revision table).
    """

    # Summary-stream property id -> metadata attribute
    SUMMARY_ID_TO_ATTR = {
        2: "title", # Title
        3: "title", # Subject
        4: "author",
        6: "comment",
        8: "author", # Last saved by
        12: "creation_date",
        13: "last_modification",
        14: "nb_page",
        18: "producer",
    }
    # Summary property ids deliberately skipped
    IGNORE_SUMMARY = set((
        1, # Code page
    ))

    # Document-summary-stream property id -> metadata attribute
    DOC_SUMMARY_ID_TO_ATTR = {
        3: "title", # Subject
        14: "author", # Manager
    }
    # Document-summary property ids deliberately skipped
    IGNORE_DOC_SUMMARY = set((
        1, # Code page
    ))

    def extract(self, ole2):
        self._extract(ole2)

    def _extract(self, fieldset):
        """Recursively process a (mini)stream field set."""
        try:
            fieldset._feedAll()
        except StopIteration:
            pass
        # Small streams live in a nested "mini stream": recurse into it.
        if "root[0]" in fieldset:
            self._extract(self.getFragment(fieldset["root[0]"]))
        doc_summary = self.getField(fieldset, "doc_summary[0]")
        if doc_summary:
            self.useSummary(doc_summary, True)
        word_doc = self.getField(fieldset, "word_doc[0]")
        if word_doc:
            self.useWordDocument(word_doc)
        summary = self.getField(fieldset, "summary[0]")
        if summary:
            self.useSummary(summary, False)
        table = self.getField(fieldset, "table1[0]")
        if table:
            self.useTable(table)

    def getFragment(self, frag):
        """Parse a fragment's substream; fall back to the raw fragment."""
        stream = frag.getSubIStream()
        ministream = guessParser(stream)
        if not ministream:
            warning("Unable to create the OLE2 mini stream parser!")
            return frag
        return ministream

    def getField(self, fieldset, name):
        """Return the parsed substream for field `name`, or None if absent."""
        # _feedAll() is needed to make sure that we get all fragments
        # eg. summary[0], summary[1], ..., summary[n]
        try:
            fieldset._feedAll()
        except StopIteration:
            pass
        if name not in fieldset:
            return None
        field = fieldset[name]
        return self.getFragment(field)

    @fault_tolerant
    def useSummary(self, summary, is_doc_summary):
        """Process one property-set stream (summary or document summary)."""
        if "os" in summary:
            self.os = summary["os"].display
        if "section[0]" not in summary:
            return
        summary = summary["section[0]"]
        for property in summary.array("property_index"):
            self.useProperty(summary, property, is_doc_summary)

    @fault_tolerant
    def useWordDocument(self, doc):
        """Read the encryption flag from the Word File Information Block."""
        self.comment = "Encrypted: %s" % doc["FIB/fEncrypted"].value

    @fault_tolerant
    def useTable(self,table):
        """Extract the revision history (author/filename pairs) from Word's
        SttbSavedBy string table."""
        if 'SttbSavedBy' in table:
            arr = list(table['SttbSavedBy'].array('string'))
            # Entries alternate: author, file, author, file, ...
            for i in xrange(0, len(arr),2):
                self.revision_history = "Revision #%d: Author '%s', file '%s'"%(i//2, arr[i].value , arr[i+1].value)

    @fault_tolerant
    def useProperty(self, summary, property, is_doc_summary):
        """Map one property-index entry to a metadata attribute.

        Unknown ids are stored as "<display name>: <value>" comments.
        """
        field = summary.getFieldByAddress(property["offset"].value*8)
        if not field \
        or "value" not in field:
            return
        field = field["value"]
        if not field.hasValue():
            return

        # Get value
        value = field.value
        if isinstance(value, (str, unicode)):
            value = normalizeString(value)
        if not value:
            return

        # Get property identifier
        prop_id = property["id"].value
        if is_doc_summary:
            id_to_attr = self.DOC_SUMMARY_ID_TO_ATTR
            ignore = self.IGNORE_DOC_SUMMARY
        else:
            id_to_attr = self.SUMMARY_ID_TO_ATTR
            ignore = self.IGNORE_SUMMARY
        if prop_id in ignore:
            return

        # Get Hachoir metadata key
        try:
            key = id_to_attr[prop_id]
            use_prefix = False
        except LookupError:
            key = "comment"
            use_prefix = True
        if use_prefix:
            prefix = property["id"].display
            if (prefix in ("TotalEditingTime", "LastPrinted")) \
            and (not field):
                # Ignore null time delta
                return
            value = "%s: %s" % (prefix, value)
        else:
            if (key == "last_modification") and (not field):
                # Ignore null timestamp
                return
        setattr(self, key, value)
class PcfMetadata(RootMetadata):
    """Extract metadata from PCF (X11 bitmap font) files."""

    # BDF property name -> metadata attribute
    PROP_TO_KEY = {
        'CHARSET_REGISTRY': 'charset',
        'COPYRIGHT': 'copyright',
        'WEIGHT_NAME': 'font_weight',
        'FOUNDRY': 'author',
        'FONT': 'title',
        '_XMBDFED_INFO': 'producer',
    }

    def extract(self, pcf):
        if "properties" in pcf:
            self.useProperties(pcf["properties"])

    def useProperties(self, properties):
        """Walk the property table and copy every known entry."""
        total = properties["total_str_length"]
        # String data starts right after the total-length field.
        base = total.address + total.size
        for entry in properties.array("property"):
            # Look up the value string first, then the name string.
            value_field = properties.getFieldByAddress(base + entry["value_offset"].value*8)
            if not value_field:
                continue
            prop_value = value_field.value
            if not prop_value:
                continue
            name_field = properties.getFieldByAddress(base + entry["name_offset"].value*8)
            if not name_field:
                continue
            prop_name = name_field.value
            try:
                attr = self.PROP_TO_KEY[prop_name]
            except KeyError:
                warning("Skip %s=%r" % (prop_name, prop_value))
                continue
            setattr(self, attr, prop_value)
class SwfMetadata(RootMetadata):
    """Extract metadata from SWF (Flash) files."""

    def extract(self, swf):
        # Stage dimensions are expressed in twips (1/20 of a pixel).
        self.width = swf["rect/xmax"].value
        self.height = swf["rect/ymax"].value
        self.format_version = "flash version %s" % swf["version"].value
        self.frame_rate = swf["frame_rate"].value
        self.comment = "Frame count: %s" % swf["frame_count"].value
# Register every extractor defined in this module with the hachoir dispatcher.
registerExtractor(TorrentFile, TorrentMetadata)
registerExtractor(TrueTypeFontFile, TTF_Metadata)
registerExtractor(OLE2_File, OLE2_Metadata)
registerExtractor(PcfFile, PcfMetadata)
registerExtractor(SwfFile, SwfMetadata)
|
gnuhub/intellij-community | refs/heads/master | plugins/hg4idea/testData/bin/mercurial/sshpeer.py | 90 | # sshpeer.py - ssh repository proxy class for mercurial
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import re
from i18n import _
import util, error, wireproto
class remotelock(object):
    """Handle on a lock held by a remote repository.

    Releasing (explicitly or at garbage collection) unlocks the repo.
    """

    def __init__(self, repo):
        self.repo = repo

    def release(self):
        """Unlock the repository, then drop our reference to it."""
        self.repo.unlock()
        self.repo = None

    def __del__(self):
        # Best-effort release if the caller forgot to.
        if self.repo:
            self.release()
def _serverquote(s):
'''quote a string for the remote shell ... which we assume is sh'''
if re.match('[a-zA-Z0-9@%_+=:,./-]*$', s):
return s
return "'%s'" % s.replace("'", "'\\''")
class sshpeer(wireproto.wirepeer):
    """Wire-protocol peer talking to a remote repository over ssh.

    Spawns ``ssh host hg -R path serve --stdio`` and exchanges wire-protocol
    commands over the child's stdin/stdout pipes; stderr is forwarded to the
    local ui as "remote:" lines.
    """

    def __init__(self, ui, path, create=False):
        """Parse an ssh:// URL, optionally create the remote repo, and
        validate the connection by starting the remote server."""
        self._url = path
        self.ui = ui
        # Pipes to/from the remote 'hg serve --stdio' process.
        self.pipeo = self.pipei = self.pipee = None

        u = util.url(path, parsequery=False, parsefragment=False)
        if u.scheme != 'ssh' or not u.host or u.path is None:
            self._abort(error.RepoError(_("couldn't parse location %s") % path))

        self.user = u.user
        if u.passwd is not None:
            # ssh authentication is delegated to the ssh binary itself.
            self._abort(error.RepoError(_("password in URL not supported")))
        self.host = u.host
        self.port = u.port
        self.path = u.path or "."

        sshcmd = self.ui.config("ui", "ssh", "ssh")
        remotecmd = self.ui.config("ui", "remotecmd", "hg")

        args = util.sshargs(sshcmd, self.host, self.user, self.port)

        if create:
            # Run '<remotecmd> init <path>' on the remote side first.
            cmd = '%s %s %s' % (sshcmd, args,
                util.shellquote("%s init %s" %
                    (_serverquote(remotecmd), _serverquote(self.path))))
            ui.note(_('running %s\n') % cmd)
            res = util.system(cmd)
            if res != 0:
                self._abort(error.RepoError(_("could not create remote repo")))

        self.validate_repo(ui, sshcmd, args, remotecmd)

    def url(self):
        return self._url

    def validate_repo(self, ui, sshcmd, args, remotecmd):
        """Start the remote server and perform the hello/between handshake,
        skipping shell banner noise and collecting server capabilities."""
        # cleanup up previous run
        self.cleanup()

        cmd = '%s %s %s' % (sshcmd, args,
            util.shellquote("%s -R %s serve --stdio" %
                (_serverquote(remotecmd), _serverquote(self.path))))
        ui.note(_('running %s\n') % cmd)
        cmd = util.quotecommand(cmd)

        # while self.subprocess isn't used, having it allows the subprocess to
        # to clean up correctly later
        self.pipeo, self.pipei, self.pipee, self.subprocess = util.popen4(cmd)

        # skip any noise generated by remote shell
        self._callstream("hello")
        r = self._callstream("between", pairs=("%s-%s" % ("0"*40, "0"*40)))
        lines = ["", "dummy"]
        max_noise = 500
        while lines[-1] and max_noise:
            l = r.readline()
            self.readerr()
            # "1\n\n" marks the well-formed 'between' answer: handshake done.
            if lines[-1] == "1\n" and l == "\n":
                break
            if l:
                ui.debug("remote: ", l)
            lines.append(l)
            max_noise -= 1
        else:
            self._abort(error.RepoError(_('no suitable response from '
                                          'remote hg')))

        # The capabilities line arrives during the noise-skipping above.
        self._caps = set()
        for l in reversed(lines):
            if l.startswith("capabilities:"):
                self._caps.update(l[:-1].split(":")[1].split())
                break

    def _capabilities(self):
        return self._caps

    def readerr(self):
        """Drain the remote stderr pipe, echoing each line to the ui."""
        while True:
            size = util.fstat(self.pipee).st_size
            if size == 0:
                break
            s = self.pipee.read(size)
            if not s:
                break
            for l in s.splitlines():
                self.ui.status(_("remote: "), l, '\n')

    def _abort(self, exception):
        # Always tear the pipes down before propagating the error.
        self.cleanup()
        raise exception

    def cleanup(self):
        """Close all pipes, flushing any pending remote stderr output."""
        if self.pipeo is None:
            return
        self.pipeo.close()
        self.pipei.close()
        try:
            # read the error descriptor until EOF
            for l in self.pipee:
                self.ui.status(_("remote: "), l)
        except (IOError, ValueError):
            pass
        self.pipee.close()

    __del__ = cleanup

    def _callstream(self, cmd, **args):
        """Send `cmd` plus its wire arguments; return the input pipe so the
        caller can stream the response."""
        self.ui.debug("sending %s command\n" % cmd)
        self.pipeo.write("%s\n" % cmd)
        _func, names = wireproto.commands[cmd]
        keys = names.split()
        wireargs = {}
        for k in keys:
            if k == '*':
                # '*' swallows all remaining keyword arguments.
                wireargs['*'] = args
                break
            else:
                wireargs[k] = args[k]
                del args[k]
        # Each argument is sent as "<name> <length>\n<value>".
        for k, v in sorted(wireargs.iteritems()):
            self.pipeo.write("%s %d\n" % (k, len(v)))
            if isinstance(v, dict):
                for dk, dv in v.iteritems():
                    self.pipeo.write("%s %d\n" % (dk, len(dv)))
                    self.pipeo.write(dv)
            else:
                self.pipeo.write(v)
        self.pipeo.flush()

        return self.pipei

    def _call(self, cmd, **args):
        self._callstream(cmd, **args)
        return self._recv()

    def _callpush(self, cmd, fp, **args):
        """Run a push-style command: stream `fp` to the server in 4K chunks,
        then return (success-payload, error-payload)."""
        r = self._call(cmd, **args)
        if r:
            return '', r
        while True:
            d = fp.read(4096)
            if not d:
                break
            self._send(d)
        self._send("", flush=True)
        r = self._recv()
        if r:
            return '', r
        return self._recv(), ''

    def _decompress(self, stream):
        # ssh transport is expected to compress; nothing to do here.
        return stream

    def _recv(self):
        """Read one length-prefixed reply; a bare newline introduces an
        out-of-band error block terminated by '-\\n'."""
        l = self.pipei.readline()
        if l == '\n':
            err = []
            while True:
                line = self.pipee.readline()
                if line == '-\n':
                    break
                err.extend([line])
            if len(err) > 0:
                # strip the trailing newline added to the last line server-side
                err[-1] = err[-1][:-1]
            self._abort(error.OutOfBandError(*err))
        self.readerr()
        try:
            l = int(l)
        except ValueError:
            self._abort(error.ResponseError(_("unexpected response:"), l))
        return self.pipei.read(l)

    def _send(self, data, flush=False):
        # Mirror of _recv: length prefix, then payload.
        self.pipeo.write("%d\n" % len(data))
        if data:
            self.pipeo.write(data)
        if flush:
            self.pipeo.flush()
        self.readerr()

    def lock(self):
        self._call("lock")
        return remotelock(self)

    def unlock(self):
        self._call("unlock")

    def addchangegroup(self, cg, source, url, lock=None):
        '''Send a changegroup to the remote server.  Return an integer
        similar to unbundle(). DEPRECATED, since it requires locking the
        remote.'''
        d = self._call("addchangegroup")
        if d:
            self._abort(error.RepoError(_("push refused: %s") % d))
        while True:
            d = cg.read(4096)
            if not d:
                break
            self.pipeo.write(d)
            self.readerr()

        self.pipeo.flush()

        self.readerr()
        r = self._recv()
        if not r:
            return 1
        try:
            return int(r)
        except ValueError:
            self._abort(error.ResponseError(_("unexpected response:"), r))
instance = sshpeer
|
40223211/cadpbtest-0420 | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/imp.py | 637 | """This module provides the components needed to build your own __import__
function. Undocumented functions are obsolete.
In most cases it is preferred you consider using the importlib module's
functionality over this module.
"""
# (Probably) need to stay in _imp
from _imp import (lock_held, acquire_lock, release_lock,
get_frozen_object, is_frozen_package,
init_builtin, init_frozen, is_builtin, is_frozen,
_fix_co_filename)
try:
from _imp import load_dynamic
except ImportError:
# Platform doesn't support dynamic loading.
load_dynamic = None
# Directly exposed by this module
from importlib._bootstrap import new_module
from importlib._bootstrap import cache_from_source, source_from_cache
from importlib import _bootstrap
#fixme brython
#from importlib import machinery
import importlib.machinery as machinery
import os
import sys
import tokenize
import warnings
# DEPRECATED
# Legacy module-type codes returned by find_module()/consumed by load_module().
SEARCH_ERROR = 0
PY_SOURCE = 1        # .py source file
PY_COMPILED = 2      # .pyc/.pyo byte-code file
C_EXTENSION = 3      # dynamically loadable extension module
PY_RESOURCE = 4      # Macintosh resource (obsolete)
PKG_DIRECTORY = 5    # package directory
C_BUILTIN = 6        # built-in module
PY_FROZEN = 7        # frozen module
PY_CODERESOURCE = 8  # Macintosh code resource (obsolete)
IMP_HOOK = 9         # import hook
def get_magic():
    """Return the magic number for .pyc or .pyo files."""
    # Shared private constant of the importlib bootstrap machinery.
    return _bootstrap._MAGIC_BYTES
def get_tag():
    """Return the magic tag for .pyc or .pyo files."""
    # e.g. 'cpython-33'; used in __pycache__ file names.
    return sys.implementation.cache_tag
def get_suffixes():
    """**DEPRECATED** -- return (suffix, mode, type) triples for importable files."""
    warnings.warn('imp.get_suffixes() is deprecated; use the constants '
                  'defined on importlib.machinery instead',
                  DeprecationWarning, 2)
    extensions = [(s, 'rb', C_EXTENSION) for s in machinery.EXTENSION_SUFFIXES]
    source = [(s, 'U', PY_SOURCE) for s in machinery.SOURCE_SUFFIXES]
    bytecode = [(s, 'rb', PY_COMPILED) for s in machinery.BYTECODE_SUFFIXES]
    return extensions + source + bytecode
class NullImporter:
    """Null import object."""

    def __init__(self, path):
        # Reject paths this importer cannot meaningfully claim.
        if path == '':
            raise ImportError('empty pathname', path='')
        if os.path.isdir(path):
            raise ImportError('existing directory', path=path)

    def find_module(self, fullname):
        """Always returns None."""
        return None
class _HackedGetData:
"""Compatibiilty support for 'file' arguments of various load_*()
functions."""
def __init__(self, fullname, path, file=None):
super().__init__(fullname, path)
self.file = file
def get_data(self, path):
"""Gross hack to contort loader to deal w/ load_*()'s bad API."""
if self.file and path == self.path:
if not self.file.closed:
file = self.file
else:
self.file = file = open(self.path, 'r')
with file:
# Technically should be returning bytes, but
# SourceLoader.get_code() just passed what is returned to
# compile() which can handle str. And converting to bytes would
# require figuring out the encoding to decode to and
# tokenize.detect_encoding() only accepts bytes.
return file.read()
else:
return super().get_data(path)
class _LoadSourceCompatibility(_HackedGetData, _bootstrap.SourceFileLoader):

    """Compatibility support for implementing load_source()."""

    # Combines the open-file hack with a regular source-file loader.
    #brython fix me
    pass
def load_source(name, pathname, file=None):
    """**DEPRECATED** -- load a source module and return it.

    Thin wrapper over importlib.machinery.SourceFileLoader.
    """
    msg = ('imp.load_source() is deprecated; use '
           'importlib.machinery.SourceFileLoader(name, pathname).load_module()'
           ' instead')
    warnings.warn(msg, DeprecationWarning, 2)
    _LoadSourceCompatibility(name, pathname, file).load_module(name)
    module = sys.modules[name]
    # To allow reloading to potentially work, use a non-hacked loader which
    # won't rely on a now-closed file object.
    module.__loader__ = _bootstrap.SourceFileLoader(name, pathname)
    return module
class _LoadCompiledCompatibility(_HackedGetData,
                                 _bootstrap.SourcelessFileLoader):

    """Compatibility support for implementing load_compiled()."""

    # Combines the open-file hack with a byte-code-only loader.
    #brython fix me
    pass
def load_compiled(name, pathname, file=None):
    """**DEPRECATED** -- load a byte-compiled module and return it.

    Thin wrapper over importlib.machinery.SourcelessFileLoader.
    """
    msg = ('imp.load_compiled() is deprecated; use '
           'importlib.machinery.SourcelessFileLoader(name, pathname).'
           'load_module() instead ')
    warnings.warn(msg, DeprecationWarning, 2)
    _LoadCompiledCompatibility(name, pathname, file).load_module(name)
    module = sys.modules[name]
    # To allow reloading to potentially work, use a non-hacked loader which
    # won't rely on a now-closed file object.
    module.__loader__ = _bootstrap.SourcelessFileLoader(name, pathname)
    return module
def load_package(name, path):
    """**DEPRECATED** -- import the package rooted at `path` and return it.

    When `path` is a directory, locate its ``__init__`` file among the
    known source/byte-code suffixes before handing off to the loader.
    """
    msg = ('imp.load_package() is deprecated; use either '
           'importlib.machinery.SourceFileLoader() or '
           'importlib.machinery.SourcelessFileLoader() instead')
    warnings.warn(msg, DeprecationWarning, 2)
    if os.path.isdir(path):
        extensions = (machinery.SOURCE_SUFFIXES[:] +
                      machinery.BYTECODE_SUFFIXES[:])
        for extension in extensions:
            # Bug fix: the original rebound `path` inside the loop, so the
            # second candidate was joined onto the first ('pkg/__init__.py/
            # __init__.pyc') and byte-code-only packages were never found.
            init_path = os.path.join(path, '__init__' + extension)
            if os.path.exists(init_path):
                path = init_path
                break
        else:
            raise ValueError('{!r} is not a package'.format(path))
    return _bootstrap.SourceFileLoader(name, path).load_module(name)
def load_module(name, file, filename, details):
    """**DEPRECATED**

    Load a module, given information returned by find_module().

    The module name must include the full package name, if any.

    """
    suffix, mode, type_ = details
    with warnings.catch_warnings():
        # Silence the deprecation warnings raised by the load_*() helpers.
        warnings.simplefilter('ignore')
        if mode and (not mode.startswith(('r', 'U')) or '+' in mode):
            raise ValueError('invalid file open mode {!r}'.format(mode))
        elif file is None and type_ in {PY_SOURCE, PY_COMPILED}:
            msg = 'file object required for import (type code {})'.format(type_)
            raise ValueError(msg)
        elif type_ == PY_SOURCE:
            return load_source(name, filename, file)
        elif type_ == PY_COMPILED:
            return load_compiled(name, filename, file)
        elif type_ == C_EXTENSION and load_dynamic is not None:
            # load_dynamic is None when the platform lacks dynamic loading.
            if file is None:
                with open(filename, 'rb') as opened_file:
                    return load_dynamic(name, filename, opened_file)
            else:
                return load_dynamic(name, filename, file)
        elif type_ == PKG_DIRECTORY:
            return load_package(name, filename)
        elif type_ == C_BUILTIN:
            return init_builtin(name)
        elif type_ == PY_FROZEN:
            return init_frozen(name)
        else:
            msg = "Don't know how to import {} (type code {})".format(name, type_)
            raise ImportError(msg, name=name)
def find_module(name, path=None):
    """**DEPRECATED**

    Search for a module.

    If path is omitted or None, search for a built-in, frozen or special
    module and continue search in sys.path. The module name cannot
    contain '.'; to search for a submodule of a package, pass the
    submodule name and the package's __path__.

    """
    if not isinstance(name, str):
        raise TypeError("'name' must be a str, not {}".format(type(name)))
    elif not isinstance(path, (type(None), list)):
        # Backwards-compatibility
        raise RuntimeError("'list' must be None or a list, "
                           "not {}".format(type(name)))

    if path is None:
        # Built-in and frozen modules are reported without a file object.
        if is_builtin(name):
            return None, None, ('', '', C_BUILTIN)
        elif is_frozen(name):
            return None, None, ('', '', PY_FROZEN)
        else:
            path = sys.path

    for entry in path:
        # A directory containing an __init__ file is a package.
        package_directory = os.path.join(entry, name)
        for suffix in ['.py', machinery.BYTECODE_SUFFIXES[0]]:
            package_file_name = '__init__' + suffix
            file_path = os.path.join(package_directory, package_file_name)
            if os.path.isfile(file_path):
                return None, package_directory, ('', '', PKG_DIRECTORY)
        # Otherwise look for a plain module with any importable suffix.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            for suffix, mode, type_ in get_suffixes():
                file_name = name + suffix
                file_path = os.path.join(entry, file_name)
                if os.path.isfile(file_path):
                    break
            else:
                continue
        break  # Break out of outer loop when breaking out of inner loop.
    else:
        raise ImportError(_bootstrap._ERR_MSG.format(name), name=name)

    encoding = None
    if mode == 'U':
        # Detect the declared source encoding before reopening in text mode.
        with open(file_path, 'rb') as file:
            encoding = tokenize.detect_encoding(file.readline)[0]
    file = open(file_path, mode, encoding=encoding)
    return file, file_path, (suffix, mode, type_)
_RELOADING = {}  # modules currently being reloaded, to break re-entrancy


def reload(module):
    """Reload the module and return it.

    The module must have been successfully imported before.

    """
    if not module or type(module) != type(sys):
        raise TypeError("reload() argument must be module")
    name = module.__name__
    if name not in sys.modules:
        msg = "module {} not in sys.modules"
        raise ImportError(msg.format(name), name=name)
    if name in _RELOADING:
        # A reload of this module is already in progress: return it as-is.
        return _RELOADING[name]
    _RELOADING[name] = module
    try:
        parent_name = name.rpartition('.')[0]
        if parent_name and parent_name not in sys.modules:
            msg = "parent {!r} not in sys.modules"
            raise ImportError(msg.format(parent_name), name=parent_name)
        module.__loader__.load_module(name)
        # The module may have replaced itself in sys.modules!
        return sys.modules[module.__name__]
    finally:
        try:
            del _RELOADING[name]
        except KeyError:
            pass
|
photocrowd/dj-stripe | refs/heads/master | djstripe/migrations/0006_auto_20150602_1934.py | 13 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Make InvoiceItem.plan optional (nullable and blankable CharField)."""

    dependencies = [
        ('djstripe', '0005_charge_captured'),
    ]

    operations = [
        migrations.AlterField(
            model_name='invoiceitem',
            name='plan',
            field=models.CharField(max_length=100, null=True, blank=True),
            preserve_default=True,
        ),
    ]
|
albertjan/pypyjs | refs/heads/master | website/js/pypy.js-0.2.0/lib/modules/lib2to3/fixes/__init__.py | 2547 | # Dummy file to make this directory a package.
|
nathanielove/pdf-server | refs/heads/master | section/services.py | 1 | def recursive_search(section_dict, section_id):
    """Depth-first search a nested section tree for `section_id`.

    :param section_dict: dict with an 'id' key and a 'children' list of
        dicts of the same shape
    :param section_id: id to look for
    :return: the matching sub-dict, or None if absent
    """
    if section_dict['id'] == section_id:
        return section_dict
    else:
        # Recurse into each child until one subtree yields a match.
        for child_section in section_dict['children']:
            result = recursive_search(child_section, section_id)
            if result:
                return result
        return None
|
wndias/bc.repository | refs/heads/master | plugin.video.youtube/resources/lib/youtube/client/youtube.py | 2 | __author__ = 'bromix'
from resources.lib.kodion import simple_requests as requests
from .login_client import LoginClient
from ..helper.video_info import VideoInfo
class YouTube(LoginClient):
def __init__(self, config={}, language='en-US', region='US', items_per_page=50, access_token='', access_token_tv=''):
LoginClient.__init__(self, config=config, language=language, region=region, access_token=access_token,
access_token_tv=access_token_tv)
self._max_results = items_per_page
pass
    def get_max_results(self):
        # Page size used for all list requests.
        return self._max_results
    def get_language(self):
        # API 'hl' language code, e.g. 'en-US'.
        return self._language
    def get_region(self):
        # API 'regionCode', e.g. 'US'.
        return self._region
def calculate_next_page_token(self, page, max_result):
page -= 1
low = 'AEIMQUYcgkosw048'
high = 'ABCDEFGHIJKLMNOP'
len_low = len(low)
len_high = len(high)
position = page * max_result
overflow_token = 'Q'
if position >= 128:
overflow_token_iteration = position // 128
overflow_token = '%sE' % high[overflow_token_iteration]
pass
low_iteration = position % len_low
# at this position the iteration starts with 'I' again (after 'P')
if position >= 256:
multiplier = (position // 128) - 1
position -= 128 * multiplier
pass
high_iteration = (position / len_low) % len_high
return 'C%s%s%sAA' % (high[high_iteration], low[low_iteration], overflow_token)
    def update_watch_history(self, video_id):
        """Mark `video_id` as watched via the undocumented /user_watch
        endpoint, impersonating the TV (leanback) client."""
        headers = {'Host': 'www.youtube.com',
                   'Connection': 'keep-alive',
                   'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.36 Safari/537.36',
                   'Accept': 'image/webp,*/*;q=0.8',
                   'DNT': '1',
                   'Referer': 'https://www.youtube.com/tv',
                   'Accept-Encoding': 'gzip, deflate',
                   'Accept-Language': 'en-US,en;q=0.8,de;q=0.6'}

        params = {'noflv': '1',
                  'html5': '1',
                  'video_id': video_id,
                  'referrer': '',
                  'eurl': 'https://www.youtube.com/tv#/watch?v=%s' % video_id,
                  'skl': 'false',
                  'ns': 'yt',
                  'el': 'leanback',
                  'ps': 'leanback'}
        if self._access_token:
            params['access_token'] = self._access_token
            pass

        url = 'https://www.youtube.com/user_watch'

        # Fire-and-forget: the response body is intentionally ignored.
        result = requests.get(url, params=params, headers=headers, verify=False, allow_redirects=True)
        pass
    def get_video_streams(self, context, video_id):
        """Resolve playable stream infos for `video_id` and decorate each
        stream title with container/codec/bitrate details."""
        video_info = VideoInfo(context, access_token=self._access_token, language=self._language)

        video_streams = video_info.load_stream_infos(video_id)

        # update title
        for video_stream in video_streams:
            title = '[B]%s[/B] (%s;%s / %s@%d)' % (
                video_stream['title'], video_stream['container'], video_stream['video']['encoding'],
                video_stream['audio']['encoding'], video_stream['audio']['bitrate'])
            video_stream['title'] = title
            pass

        return video_streams
def remove_playlist(self, playlist_id):
params = {'id': playlist_id,
'mine': 'true'}
return self._perform_v3_request(method='DELETE', path='playlists', params=params)
def get_supported_languages(self, language=None):
_language = language
if not _language:
_language = self._language
pass
_language = _language.replace('-', '_')
params = {'part': 'snippet',
'hl': _language}
return self._perform_v3_request(method='GET', path='i18nLanguages', params=params)
def get_supported_regions(self, language=None):
_language = language
if not _language:
_language = self._language
pass
_language = _language.replace('-', '_')
params = {'part': 'snippet',
'hl': _language}
return self._perform_v3_request(method='GET', path='i18nRegions', params=params)
def rename_playlist(self, playlist_id, new_title, privacy_status='private'):
params = {'part': 'snippet,id,status'}
post_data = {'kind': 'youtube#playlist',
'id': playlist_id,
'snippet': {'title': new_title},
'status': {'privacyStatus': privacy_status}}
return self._perform_v3_request(method='PUT', path='playlists', params=params, post_data=post_data)
def create_playlist(self, title, privacy_status='private'):
params = {'part': 'snippet,status'}
post_data = {'kind': 'youtube#playlist',
'snippet': {'title': title},
'status': {'privacyStatus': privacy_status}}
return self._perform_v3_request(method='POST', path='playlists', params=params, post_data=post_data)
def get_video_rating(self, video_id):
if isinstance(video_id, list):
video_id = ','.join(video_id)
pass
params = {'id': video_id}
return self._perform_v3_request(method='GET', path='videos/getRating', params=params)
def rate_video(self, video_id, rating='like'):
"""
Rate a video
:param video_id: if of the video
:param rating: [like|dislike|none]
:return:
"""
params = {'id': video_id,
'rating': rating}
return self._perform_v3_request(method='POST', path='videos/rate', params=params)
def add_video_to_playlist(self, playlist_id, video_id):
params = {'part': 'snippet',
'mine': 'true'}
post_data = {'kind': 'youtube#playlistItem',
'snippet': {'playlistId': playlist_id,
'resourceId': {'kind': 'youtube#video',
'videoId': video_id}}}
return self._perform_v3_request(method='POST', path='playlistItems', params=params, post_data=post_data)
def remove_video_from_playlist(self, playlist_id, playlist_item_id):
params = {'id': playlist_item_id}
return self._perform_v3_request(method='DELETE', path='playlistItems', params=params)
def unsubscribe(self, subscription_id):
params = {'id': subscription_id}
return self._perform_v3_request(method='DELETE', path='subscriptions', params=params)
def subscribe(self, channel_id):
params = {'part': 'snippet'}
post_data = {'kind': 'youtube#subscription',
'snippet': {'resourceId': {'kind': 'youtube#channel',
'channelId': channel_id}}}
return self._perform_v3_request(method='POST', path='subscriptions', params=params, post_data=post_data)
def get_subscription(self, channel_id, order='alphabetical', page_token=''):
"""
:param channel_id: [channel-id|'mine']
:param order: ['alphabetical'|'relevance'|'unread']
:param page_token:
:return:
"""
params = {'part': 'snippet',
'maxResults': str(self._max_results),
'order': order}
if channel_id == 'mine':
params['mine'] = 'true'
pass
else:
params['channelId'] = channel_id
pass
if page_token:
params['pageToken'] = page_token
pass
return self._perform_v3_request(method='GET', path='subscriptions', params=params)
def get_guide_category(self, guide_category_id, page_token=''):
params = {'part': 'snippet,contentDetails,brandingSettings',
'maxResults': str(self._max_results),
'categoryId': guide_category_id,
'regionCode': self._region,
'hl': self._language}
if page_token:
params['pageToken'] = page_token
pass
return self._perform_v3_request(method='GET', path='channels', params=params)
def get_guide_categories(self, page_token=''):
params = {'part': 'snippet',
'maxResults': str(self._max_results),
'regionCode': self._region,
'hl': self._language}
if page_token:
params['pageToken'] = page_token
pass
return self._perform_v3_request(method='GET', path='guideCategories', params=params)
def get_popular_videos(self, page_token=''):
params = {'part': 'snippet',
'maxResults': str(self._max_results),
'regionCode': self._region,
'hl': self._language,
'chart': 'mostPopular'}
if page_token:
params['pageToken'] = page_token
pass
return self._perform_v3_request(method='GET', path='videos', params=params)
def get_video_category(self, video_category_id, page_token=''):
params = {'part': 'snippet,contentDetails',
'maxResults': str(self._max_results),
'videoCategoryId': video_category_id,
'chart': 'mostPopular',
'regionCode': self._region,
'hl': self._language}
if page_token:
params['pageToken'] = page_token
pass
return self._perform_v3_request(method='GET', path='videos', params=params)
def get_video_categories(self, page_token=''):
params = {'part': 'snippet',
'maxResults': str(self._max_results),
'regionCode': self._region,
'hl': self._language}
if page_token:
params['pageToken'] = page_token
pass
return self._perform_v3_request(method='GET', path='videoCategories', params=params)
def get_activities(self, channel_id, page_token=''):
params = {'part': 'snippet,contentDetails',
'maxResults': str(self._max_results),
'regionCode': self._region,
'hl': self._language}
if channel_id == 'home':
params['home'] = 'true'
pass
elif channel_id == 'mine':
params['mine'] = 'true'
pass
else:
params['channelId'] = channel_id
pass
if page_token:
params['pageToken'] = page_token
pass
return self._perform_v3_request(method='GET', path='activities', params=params)
def get_channel_sections(self, channel_id):
params = {'part': 'snippet,contentDetails',
'regionCode': self._region,
'hl': self._language}
if channel_id == 'mine':
params['mine'] = 'true'
pass
else:
params['channelId'] = channel_id
pass
return self._perform_v3_request(method='GET', path='channelSections', params=params)
def get_playlists_of_channel(self, channel_id, page_token=''):
params = {'part': 'snippet',
'maxResults': str(self._max_results)}
if channel_id != 'mine':
params['channelId'] = channel_id
pass
else:
params['mine'] = 'true'
pass
if page_token:
params['pageToken'] = page_token
pass
return self._perform_v3_request(method='GET', path='playlists', params=params)
    def get_playlist_item_id_of_video_id(self, playlist_id, video_id, page_token=''):
        """Find the playlistItem id that wraps `video_id` in `playlist_id`.

        Pages through the playlist 50 items at a time, recursing on the
        nextPageToken; returns None when the video is not in the playlist.
        """
        # Temporarily force the maximum page size to minimize request count.
        old_max_results = self._max_results
        self._max_results = 50
        json_data = self.get_playlist_items(playlist_id=playlist_id, page_token=page_token)
        self._max_results = old_max_results

        items = json_data.get('items', [])
        for item in items:
            playlist_item_id = item['id']
            playlist_video_id = item.get('snippet', {}).get('resourceId', {}).get('videoId', '')
            if playlist_video_id and playlist_video_id == video_id:
                return playlist_item_id
            pass

        next_page_token = json_data.get('nextPageToken', '')
        if next_page_token:
            # Not on this page: recurse into the next one.
            return self.get_playlist_item_id_of_video_id(playlist_id=playlist_id, video_id=video_id,
                                                         page_token=next_page_token)

        return None
def get_playlist_items(self, playlist_id, page_token=''):
# prepare params
params = {'part': 'snippet',
'maxResults': str(self._max_results),
'playlistId': playlist_id}
if page_token:
params['pageToken'] = page_token
pass
return self._perform_v3_request(method='GET', path='playlistItems', params=params)
def get_channel_by_username(self, username):
"""
Returns a collection of zero or more channel resources that match the request criteria.
:param channel_id: list or comma-separated list of the YouTube channel ID(s)
:return:
"""
params = {'part': 'id',
'forUsername': username}
return self._perform_v3_request(method='GET', path='channels', params=params)
def get_channels(self, channel_id):
"""
Returns a collection of zero or more channel resources that match the request criteria.
:param channel_id: list or comma-separated list of the YouTube channel ID(s)
:return:
"""
if isinstance(channel_id, list):
channel_id = ','.join(channel_id)
pass
params = {'part': 'snippet,contentDetails,brandingSettings'}
if channel_id != 'mine':
params['id'] = channel_id
pass
else:
params['mine'] = 'true'
pass
return self._perform_v3_request(method='GET', path='channels', params=params, quota_optimized=False)
def get_disliked_videos(self, page_token=''):
# prepare page token
if not page_token:
page_token = ''
pass
# prepare params
params = {'part': 'snippet',
'myRating': 'dislike',
'maxResults': str(self._max_results)}
if page_token:
params['pageToken'] = page_token
pass
return self._perform_v3_request(method='GET', path='videos', params=params)
def get_videos(self, video_id):
"""
Returns a list of videos that match the API request parameters
:param video_id: list of video ids
:return:
"""
if isinstance(video_id, list):
video_id = ','.join(video_id)
pass
params = {'part': 'snippet,contentDetails',
'id': video_id}
return self._perform_v3_request(method='GET', path='videos', params=params)
def get_playlists(self, playlist_id):
if isinstance(playlist_id, list):
playlist_id = ','.join(playlist_id)
pass
params = {'part': 'snippet,contentDetails',
'id': playlist_id}
return self._perform_v3_request(method='GET', path='playlists', params=params)
def get_live_events(self, event_type='live', order='relevance', page_token=''):
"""
:param event_type: one of: 'live', 'completed', 'upcoming'
:param order: one of: 'date', 'rating', 'relevance', 'title', 'videoCount', 'viewCount'
:param page_token:
:return:
"""
# prepare page token
if not page_token:
page_token = ''
pass
# prepare params
params = {'part': 'snippet',
'type': 'video',
'order': order,
'eventType': event_type,
'regionCode': self._region,
'hl': self._language,
'maxResults': str(self._max_results)}
if page_token:
params['pageToken'] = page_token
pass
return self._perform_v3_request(method='GET', path='search', params=params, quota_optimized=True)
def get_related_videos(self, video_id, page_token=''):
# prepare page token
if not page_token:
page_token = ''
pass
# prepare params
params = {'relatedToVideoId': video_id,
'part': 'snippet',
'type': 'video',
'regionCode': self._region,
'hl': self._language,
'maxResults': str(self._max_results)}
if page_token:
params['pageToken'] = page_token
pass
return self._perform_v3_request(method='GET', path='search', params=params, quota_optimized=True)
def search(self, q, search_type=['video', 'channel', 'playlist'], event_type='', page_token=''):
"""
Returns a collection of search results that match the query parameters specified in the API request. By default,
a search result set identifies matching video, channel, and playlist resources, but you can also configure
queries to only retrieve a specific type of resource.
:param q:
:param search_type: acceptable values are: 'video' | 'channel' | 'playlist'
:param event_type: 'live', 'completed', 'upcoming'
:param page_token: can be ''
:return:
"""
# prepare search type
if not search_type:
search_type = ''
pass
if isinstance(search_type, list):
search_type = ','.join(search_type)
pass
# prepare page token
if not page_token:
page_token = ''
pass
# prepare params
params = {'q': q,
'part': 'snippet',
'regionCode': self._region,
'hl': self._language,
'maxResults': str(self._max_results)}
if event_type and event_type in ['live', 'upcoming', 'completed']:
params['eventType'] = event_type
pass
if search_type:
params['type'] = search_type
pass
if page_token:
params['pageToken'] = page_token
pass
return self._perform_v3_request(method='GET', path='search', params=params, quota_optimized=False)
def get_my_subscriptions(self, page_token=None, offset=0):
    # Fetch the authenticated user's subscription feed via the internal
    # TV ('youtubei/v1') browse API, recursively following continuation
    # tokens until a full page of self._max_results items is collected.
    if not page_token:
        page_token = ''
        pass
    result = {'items': [],
              'next_page_token': page_token,
              'offset': offset}

    def _perform(_page_token, _offset, _result):
        # Issue one browse request; recurses on the continuation token when
        # the accumulated item list is still shorter than max_results.
        _post_data = {
            'context': {
                'client': {
                    'clientName': 'TVHTML5',
                    'clientVersion': '5.20150304',
                    'theme': 'CLASSIC',
                    'acceptRegion': '%s' % self._region,
                    'acceptLanguage': '%s' % self._language.replace('_', '-')
                },
                'user': {
                    'enableSafetyMode': False
                }
            },
            'browseId': 'FEsubscriptions'
        }
        if _page_token:
            _post_data['continuation'] = _page_token
            pass
        _json_data = self._perform_v1_tv_request(method='POST', path='browse', post_data=_post_data)
        # The first page and continuation pages use different response layouts.
        _data = _json_data.get('contents', {}).get('sectionListRenderer', {}).get('contents', [{}])[0].get(
            'shelfRenderer', {}).get('content', {}).get('horizontalListRenderer', {})
        if not _data:
            _data = _json_data.get('continuationContents', {}).get('horizontalListContinuation', {})
            pass
        _items = _data.get('items', [])
        if not _result:
            _result = {'items': []}
            pass
        # NOTE(review): this offset arithmetic assumes each response carries
        # at least max_results - len(items) entries -- confirm against the API.
        _new_offset = self._max_results - len(_result['items']) + _offset
        if _offset > 0:
            _items = _items[_offset:]
            pass
        _result['offset'] = _new_offset
        for _item in _items:
            _item = _item.get('gridVideoRenderer', {})
            if _item:
                _video_item = {'id': _item['videoId'],
                               'title': _item.get('title', {}).get('runs', [{}])[0].get('text', '')}
                _result['items'].append(_video_item)
                pass
            pass
        _continuations = _data.get('continuations', [{}])[0].get('nextContinuationData', {}).get('continuation', '')
        if _continuations and len(_result['items']) <= self._max_results:
            _result['next_page_token'] = _continuations
            if len(_result['items']) < self._max_results:
                # not enough items yet: follow the continuation recursively
                _result = _perform(_page_token=_continuations, _offset=0, _result=_result)
                pass
            pass
        # trim result
        if len(_result['items']) > self._max_results:
            _items = _result['items']
            _items = _items[:self._max_results]
            _result['items'] = _items
            _result['continue'] = True
            pass
        if len(_result['items']) < self._max_results:
            # final (short) page: drop the pagination bookkeeping keys
            if 'continue' in _result:
                del _result['continue']
                pass
            if 'next_page_token' in _result:
                del _result['next_page_token']
                pass
            if 'offset' in _result:
                del _result['offset']
                pass
            pass
        return _result

    return _perform(_page_token=page_token, _offset=offset, _result=result)
def _perform_v3_request(self, method='GET', headers=None, path=None, post_data=None, params=None,
                        allow_redirects=True, quota_optimized=True):
    """Perform a request against the YouTube Data API v3.

    :param method: HTTP verb: 'GET' | 'POST' | 'PUT' | 'DELETE'
    :param headers: extra headers merged over the defaults
    :param path: v3 endpoint path, e.g. 'search' or 'channels'
    :param post_data: JSON body for POST/PUT requests
    :param params: extra query parameters merged over the API key
    :param allow_redirects: passed through to requests
    :param quota_optimized: if True, 'channels' and 'search' requests use the
        shared quota key instead of the system-specific one
    :return: decoded JSON dict; {} for unknown methods or non-JSON responses
    """
    # first set the config for the corresponding system (Frodo, Gotham, Helix, ...)
    yt_config = self._config
    # in any case of these APIs we change the config to a common key to save some quota
    if quota_optimized and path in ['channels', 'search']:
        yt_config = self.CONFIGS['youtube-for-kodi-quota']
    # params
    if not params:
        params = {}
    _params = {'key': yt_config['key']}
    _params.update(params)
    # headers
    if not headers:
        headers = {}
    _headers = {'Host': 'www.googleapis.com',
                'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.36 Safari/537.36',
                'Accept-Encoding': 'gzip, deflate'}
    # a config can decide if a token is allowed
    if self._access_token and yt_config.get('token-allowed', True):
        _headers['Authorization'] = 'Bearer %s' % self._access_token
    _headers.update(headers)
    # url
    _url = 'https://www.googleapis.com/youtube/v3/%s' % path.strip('/')
    result = None
    # SECURITY NOTE: verify=False disables TLS certificate validation; kept
    # for backward compatibility but should be revisited.
    if method == 'GET':
        result = requests.get(_url, params=_params, headers=_headers, verify=False, allow_redirects=allow_redirects)
    elif method == 'POST':
        _headers['content-type'] = 'application/json'
        result = requests.post(_url, json=post_data, params=_params, headers=_headers, verify=False,
                               allow_redirects=allow_redirects)
    elif method == 'PUT':
        _headers['content-type'] = 'application/json'
        result = requests.put(_url, json=post_data, params=_params, headers=_headers, verify=False,
                              allow_redirects=allow_redirects)
    elif method == 'DELETE':
        result = requests.delete(_url, params=_params, headers=_headers, verify=False,
                                 allow_redirects=allow_redirects)
    if result is None:
        return {}
    if result.headers.get('content-type', '').startswith('application/json'):
        return result.json()
    # BUG FIX: previously fell off the end and implicitly returned None for
    # non-JSON responses; return {} so callers can always treat the result
    # as a mapping (consistent with the `result is None` branch).
    return {}
def _perform_v1_tv_request(self, method='GET', headers=None, path=None, post_data=None, params=None,
                           allow_redirects=True):
    """Perform a request against the internal 'youtubei/v1' TV API.

    :param method: HTTP verb: 'GET' | 'POST' | 'PUT' | 'DELETE'
    :param headers: extra headers merged over the TV client defaults
    :param path: v1 endpoint path, e.g. 'browse'
    :param post_data: JSON body for POST/PUT requests
    :param params: extra query parameters merged over the TV API key
    :param allow_redirects: passed through to requests
    :return: decoded JSON dict; {} for unknown methods or non-JSON responses
    """
    # params
    if not params:
        params = {}
    _params = {'key': self._config_tv['key']}
    _params.update(params)
    # headers emulating the TV web client
    if not headers:
        headers = {}
    _headers = {'Host': 'www.googleapis.com',
                'Connection': 'keep-alive',
                'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36',
                'Origin': 'https://www.youtube.com',
                'Accept': '*/*',
                'DNT': '1',
                'Referer': 'https://www.youtube.com/tv',
                'Accept-Encoding': 'gzip',
                'Accept-Language': 'en-US,en;q=0.8,de;q=0.6'}
    if self._access_token_tv:
        _headers['Authorization'] = 'Bearer %s' % self._access_token_tv
    _headers.update(headers)
    # url
    _url = 'https://www.googleapis.com/youtubei/v1/%s' % path.strip('/')
    result = None
    # SECURITY NOTE: verify=False disables TLS certificate validation; kept
    # for backward compatibility but should be revisited.
    if method == 'GET':
        result = requests.get(_url, params=_params, headers=_headers, verify=False, allow_redirects=allow_redirects)
    elif method == 'POST':
        _headers['content-type'] = 'application/json'
        result = requests.post(_url, json=post_data, params=_params, headers=_headers, verify=False,
                               allow_redirects=allow_redirects)
    elif method == 'PUT':
        _headers['content-type'] = 'application/json'
        result = requests.put(_url, json=post_data, params=_params, headers=_headers, verify=False,
                              allow_redirects=allow_redirects)
    elif method == 'DELETE':
        result = requests.delete(_url, params=_params, headers=_headers, verify=False,
                                 allow_redirects=allow_redirects)
    if result is None:
        return {}
    if result.headers.get('content-type', '').startswith('application/json'):
        return result.json()
    # BUG FIX: previously fell off the end and implicitly returned None for
    # non-JSON responses; return {} for a consistent mapping return type.
    return {}
pass
|
jody-frankowski/ansible | refs/heads/devel | lib/ansible/runner/lookup_plugins/fileglob.py | 176 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import glob
from ansible import utils
class LookupModule(object):
    """Ansible ``fileglob`` lookup: expand shell-style glob patterns and
    return only the matches that are regular files."""

    def __init__(self, basedir=None, **kwargs):
        # base directory used to resolve relative glob patterns
        self.basedir = basedir

    def run(self, terms, inject=None, **kwargs):
        """Return all regular files matching the given glob pattern(s)."""
        terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
        matches = []
        for pattern in terms:
            # resolve the pattern relative to the play's base directory
            expanded = utils.path_dwim(self.basedir, pattern)
            matches.extend(candidate for candidate in glob.glob(expanded)
                           if os.path.isfile(candidate))
        return matches
|
sumspr/scikit-learn | refs/heads/master | sklearn/linear_model/tests/test_base.py | 120 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data
from sklearn.utils import check_random_state
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
    # LinearRegression must recover y = x exactly on a simple dataset.
    X, Y = [[1], [2]], [1, 2]
    model = LinearRegression()
    model.fit(X, Y)
    assert_array_almost_equal(model.coef_, [1])
    assert_array_almost_equal(model.intercept_, [0])
    assert_array_almost_equal(model.predict(X), [1, 2])

    # degenerate input: a single sample must not crash and yields a
    # constant-zero fit
    X, Y = [[1]], [0]
    model = LinearRegression()
    model.fit(X, Y)
    assert_array_almost_equal(model.coef_, [0])
    assert_array_almost_equal(model.intercept_, [0])
    assert_array_almost_equal(model.predict(X), [0])
def test_fit_intercept():
    # coef_ must have the same shape whether or not an intercept is fit,
    # and the same ndim regardless of the number of features.
    X2 = np.array([[0.38349978, 0.61650022],
                   [0.58853682, 0.41146318]])
    X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
                   [0.08385139, 0.20692515, 0.70922346]])
    y = np.array([1, 1])

    lr2_no_icept = LinearRegression(fit_intercept=False).fit(X2, y)
    lr2_icept = LinearRegression(fit_intercept=True).fit(X2, y)
    lr3_no_icept = LinearRegression(fit_intercept=False).fit(X3, y)
    lr3_icept = LinearRegression(fit_intercept=True).fit(X3, y)

    assert_equal(lr2_icept.coef_.shape, lr2_no_icept.coef_.shape)
    assert_equal(lr3_icept.coef_.shape, lr3_no_icept.coef_.shape)
    assert_equal(lr2_no_icept.coef_.ndim, lr3_no_icept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
    "Test that linear regression also works with sparse data"
    rng = check_random_state(random_state)
    for _ in range(10):
        n = 100
        # identity design matrix: the fit must reproduce beta exactly
        X = sparse.eye(n, n)
        beta = rng.rand(n)
        y = X * beta[:, np.newaxis]

        ols = LinearRegression()
        ols.fit(X, y.ravel())
        assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
        assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
    "Test multiple-outcome linear regressions"
    X, y = make_regression(random_state=random_state)
    # duplicate the target into two identical output columns
    Y = np.vstack((y, y)).T
    n_features = X.shape[1]

    reg = LinearRegression(fit_intercept=True)
    reg.fit(X, Y)
    assert_equal(reg.coef_.shape, (2, n_features))
    Y_hat = reg.predict(X)

    # refitting on a single output must give the same per-column predictions
    reg.fit(X, y)
    y_hat = reg.predict(X)
    assert_array_almost_equal(np.vstack((y_hat, y_hat)).T, Y_hat, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
    "Test multiple-outcome linear regressions with sparse data"
    rng = check_random_state(random_state)
    X, y = make_sparse_uncorrelated(random_state=rng)
    X = sparse.coo_matrix(X)
    # duplicate the target into two identical output columns
    Y = np.vstack((y, y)).T
    n_features = X.shape[1]

    ols = LinearRegression()
    ols.fit(X, Y)
    assert_equal(ols.coef_.shape, (2, n_features))
    Y_hat = ols.predict(X)

    # refitting on a single output must give the same per-column predictions
    ols.fit(X, y.ravel())
    y_hat = ols.predict(X)
    assert_array_almost_equal(np.vstack((y_hat, y_hat)).T, Y_hat, decimal=3)
def test_center_data():
    # center_data must: pass data through when fit_intercept=False,
    # mean-center it when fit_intercept=True, and additionally rescale the
    # columns when normalize=True.
    n_samples = 200
    n_features = 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    expected_X_mean = np.mean(X, axis=0)
    # XXX: currently scaled to variance=n_samples
    expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
    expected_y_mean = np.mean(y, axis=0)
    # no centering requested: means/std are identities
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
                                                normalize=False)
    assert_array_almost_equal(X_mean, np.zeros(n_features))
    assert_array_almost_equal(y_mean, 0)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt, X)
    assert_array_almost_equal(yt, y)
    # centering only
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=False)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt, X - expected_X_mean)
    assert_array_almost_equal(yt, y - expected_y_mean)
    # centering + normalization
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=True)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, expected_X_std)
    assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
    assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
    # center_data and sparse_center_data must center multi-output y the same
    # way for dense and sparse X.
    n_samples = 200
    n_features = 3
    n_outputs = 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_outputs)
    expected_y_mean = np.mean(y, axis=0)
    args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
    for center, X in args:
        # fit_intercept=False: y passes through untouched
        _, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
                                     normalize=False)
        assert_array_almost_equal(y_mean, np.zeros(n_outputs))
        assert_array_almost_equal(yt, y)
        # fit_intercept=True: y is mean-centered per output column
        _, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
                                     normalize=False)
        assert_array_almost_equal(y_mean, expected_y_mean)
        assert_array_almost_equal(yt, y - y_mean)
        # normalize only affects X, not y
        _, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
                                     normalize=True)
        assert_array_almost_equal(y_mean, expected_y_mean)
        assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
    # With sample_weight, centering must use the weighted means of X and y.
    n_samples = 200
    n_features = 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    sample_weight = rng.rand(n_samples)
    expected_X_mean = np.average(X, axis=0, weights=sample_weight)
    expected_y_mean = np.average(y, axis=0, weights=sample_weight)
    # XXX: if normalize=True, should we expect a weighted standard deviation?
    # Currently not weighted, but calculated with respect to weighted mean
    # XXX: currently scaled to variance=n_samples
    expected_X_std = (np.sqrt(X.shape[0]) *
                      np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
    # weighted centering, no normalization
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=False,
                                                sample_weight=sample_weight)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt, X - expected_X_mean)
    assert_array_almost_equal(yt, y - expected_y_mean)
    # weighted centering + normalization
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=True,
                                                sample_weight=sample_weight)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, expected_X_std)
    assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
    assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
    # sparse_center_data must NOT densify X (Xt stays equal to X); only the
    # returned means/stds reflect the centering, and y is centered normally.
    n_samples = 200
    n_features = 2
    rng = check_random_state(0)
    # random_state not supported yet in sparse.rand
    X = sparse.rand(n_samples, n_features, density=.5)  # , random_state=rng
    X = X.tolil()
    y = rng.rand(n_samples)
    XA = X.toarray()
    # XXX: currently scaled to variance=n_samples
    expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])
    # no centering requested
    Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                       fit_intercept=False,
                                                       normalize=False)
    assert_array_almost_equal(X_mean, np.zeros(n_features))
    assert_array_almost_equal(y_mean, 0)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt.A, XA)
    assert_array_almost_equal(yt, y)
    # centering: X is left as-is (sparsity preserved), y is centered
    Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                       fit_intercept=True,
                                                       normalize=False)
    assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
    assert_array_almost_equal(y_mean, np.mean(y, axis=0))
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt.A, XA)
    assert_array_almost_equal(yt, y - np.mean(y, axis=0))
    # centering + normalization: columns of X are scaled but not shifted
    Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                       fit_intercept=True,
                                                       normalize=True)
    assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
    assert_array_almost_equal(y_mean, np.mean(y, axis=0))
    assert_array_almost_equal(X_std, expected_X_std)
    assert_array_almost_equal(Xt.A, XA / expected_X_std)
    assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
    # sparse_center_data must preserve the CSR format of its input.
    X, y = make_regression()
    X[X < 2.5] = 0.0
    csr_input = sparse.csr_matrix(X)
    centered, y, _, _, _ = sparse_center_data(csr_input, y, True)
    assert_equal(centered.getformat(), 'csr')
|
nuobit/odoo-addons | refs/heads/11.0 | account_asset_non_deductible/models/__init__.py | 8 | # Copyright NuoBiT Solutions, S.L. (<https://www.nuobit.com>)
# Eric Antones <eantones@nuobit.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
from . import account_invoice
|
rybrogaard/catmap | refs/heads/master | catmap/solvers/mean_field_solver.py | 1 | from solver_base import *
from catmap.data import templates
from copy import copy
import mpmath as mp
from catmap.functions import numerical_jacobian
class MeanFieldSolver(SolverBase):
"""Class for handling mean-field type kinetic models. Can be sub-classed to
get functionality for steady-state solutions, sabatier solutions, etc."""
def __init__(self,reaction_model=ReactionModel()):
SolverBase.__init__(self,reaction_model)
defaults = dict(
tolerance = 1e-35,
perturbation_size = 1e-14,
)
self._rxm.update(defaults)
self._log_strings = {
'jacobian_fail':
"stagnated or diverging (residual = ${resid})."+\
" Assuming Jacobian is 0.",
}
def get_rxn_rates(self, coverages, rate_constants):
    """Return the net rate of each elementary step.

    :param coverages: adsorbate coverages (ordered as adsorbate_names)
    :param rate_constants: rate constants for all elementary steps
    :return: list of net elementary-step rates
    """
    rates = self.elementary_rates(
        rate_constants,
        coverages,
        self.gas_pressures,
        self._mpfloat,
        self._matrix
    )
    # list() replaces the identity comprehension [ri for ri in rates]
    return list(rates)
def get_rate(self, rxn_parameters,
             coverages=None, verify_coverages=True,
             **coverage_kwargs):
    """Return elementary-step rates at (optionally steady-state) coverages.

    :param rxn_parameters: energetic parameters of the reaction model
    :param coverages: initial-guess coverages; falls back to the last
        stored coverage when omitted
    :param verify_coverages: if True, solve for self-consistent coverages
        via get_coverage before computing rates
    :raises ValueError: when no coverages are supplied and none are cached
    """
    if not coverages:
        coverages = self._coverage
    if not coverages:
        raise ValueError('Input coverages to use as an initial guess '
                         'for the solver.')
    # idiom fix: was "verify_coverages == True"
    if verify_coverages:
        coverages = self.get_coverage(
            rxn_parameters, coverages, **coverage_kwargs)
    # cache the (possibly refined) coverages for subsequent calls
    self._coverage = coverages
    rate_constants = self.get_rate_constants(rxn_parameters, coverages)
    rates = self.get_rxn_rates(coverages, rate_constants)
    return rates
def get_turnover_frequency(self, rxn_parameters, rates=None, verify_coverages=True):
    """Return the turnover frequency of each gas-phase species.

    The TOF of a gas is its net production rate: elementary steps with the
    gas on the reactant (initial-state) side contribute negatively, steps
    with it on the product (final-state) side contribute positively.
    The result is also cached on self._turnover_frequency.
    """
    rxn_parameters = list(rxn_parameters)
    if rates is None:
        rates = self.get_rate(rxn_parameters, verify_coverages=verify_coverages)

    turnover_freq = []
    for gas in self.gas_names:
        net_rate = 0
        for step_index, step in enumerate(self.elementary_rxns):
            if gas in step[0]:
                # consumed in the initial state
                net_rate += -1.0 * rates[step_index]
            elif gas in step[-1]:
                # produced in the final state
                net_rate += 1.0 * rates[step_index]
        turnover_freq.append(net_rate)

    self._turnover_frequency = turnover_freq
    return turnover_freq
def get_selectivity(self,rxn_parameters):
    # Selectivity of each gas: its production rate relative to the total
    # production rate (products), or its consumption rate relative to the
    # total consumption rate (reactants).
    tofs = self.get_turnover_frequency(rxn_parameters)
    # side effect: on first call, products/reactants are inferred from the
    # TOF signs and cached on the model for later calls
    if self.products is None:
        self.products = [g for g,r in zip(self.gas_names,tofs) if r >0]
    if self.reactants is None:
        self.reactants = [g for g,r in zip(self.gas_names,tofs) if r <=0]
    prod_rate = sum([max(r,0)
        for g,r in zip(self.gas_names,tofs) if g in self.products])
    reac_rate = sum([max(-r,0)
        for g,r in zip(self.gas_names,tofs) if g in self.reactants])
    selectivities = []
    for g,r in zip(self.gas_names,tofs):
        # the "and prod_rate"/"and reac_rate" guards avoid division by zero
        if g in self.products and prod_rate:
            sel = max(r,0)/prod_rate
        elif g in self.reactants and reac_rate:
            sel = max(-r,0)/reac_rate
        else:
            sel = 0
        selectivities.append(sel)
    self._selectivities = selectivities
    return selectivities
def get_rate_control(self, rxn_parameters):
    """Return the degree-of-rate-control matrix.

    DRC[i][j] = d(ln TOF_i)/d(-G_j/kT), evaluated by a numerical Jacobian
    of the turnover frequencies with respect to the energetic parameters.
    Rows with zero TOF yield all-zero rows.
    """
    kT = self._kB * self.temperature
    eps = self._mpfloat(self.perturbation_size)
    try:
        dRdG = numerical_jacobian(self.get_turnover_frequency, rxn_parameters, self._matrix, eps)
    except ValueError as strerror:  # "as" also keeps this valid on Python 3
        # extract the residual value from the message "...(residual = X)"
        resid = str(strerror).rsplit('=', 1)[1]
        resid = resid.replace(')', '')
        # BUG FIX: str.strip() returns a new string; the original discarded
        # the stripped result.
        resid = resid.strip()
        self.log('jacobian_fail', resid=resid)
        dRdG = np.zeros((len(self.gas_names), len(self.adsorbate_names + self.transition_state_names)))
    t0 = self.get_turnover_frequency(rxn_parameters)
    # convert d/dG to d/d(-G/kT)
    dRdG *= -kT
    dRdG = dRdG.tolist()
    DRC = []
    for ti, Ji in zip(t0, dRdG):
        if ti == 0:
            # zero TOF: rate control is undefined; report zeros
            DRC.append([0.0] * len(Ji))
        else:
            DRC.append([float(Jj / ti) for Jj in Ji])
    return DRC
def get_interacting_energies(self,rxn_parameters):
    # Return adsorbate/transition-state formation energies corrected by the
    # coverage-dependent interaction model at the current coverages.
    all_ads = self.adsorbate_names + self.transition_state_names
    N_ads = len(all_ads)
    # rxn_parameters packs [formation energies | interaction parameters]
    energies = rxn_parameters[:N_ads]
    eps_vector = rxn_parameters[N_ads:]
    # transition states are assigned zero coverage
    cvg = self._coverage + [0]*len(self.transition_state_names)
    E_int = self.interaction_function(cvg,energies,eps_vector,self.thermodynamics.adsorbate_interactions.interaction_response_function,False)[0]
    return E_int
def get_selectivity_control(self, rxn_parameters):
    """Return the degree-of-selectivity-control matrix.

    DSC[i][j] = d(ln S_i)/d(-G_j/kT), evaluated by a numerical Jacobian of
    the selectivities with respect to the energetic parameters.
    """
    kT = self._kB * self.temperature
    eps = self._mpfloat(self.perturbation_size)
    try:
        dSdG = numerical_jacobian(self.get_selectivity, rxn_parameters, self._matrix, eps)
    except ValueError as strerror:  # "as" also keeps this valid on Python 3
        resid = str(strerror).rsplit('=', 1)[1]
        resid = resid.replace(')', '')
        # BUG FIX: strip() result was previously discarded
        resid = resid.strip()
        self.log('jacobian_fail', resid=resid)
        # BUG FIX: the fallback matrix used to be assigned to dRdG, leaving
        # dSdG undefined (NameError) whenever the Jacobian failed.
        dSdG = np.zeros((len(self.gas_names), len(self.adsorbate_names + self.transition_state_names)))
    s0 = self.get_selectivity(rxn_parameters)
    # convert d/dG to d/d(-G/kT)
    dSdG *= -kT
    dSdG = dSdG.tolist()
    DSC = []
    for si, Ji in zip(s0, dSdG):
        if si == 0:
            # guard against division by zero (mirrors get_rate_control)
            DSC.append([0.0] * len(Ji))
        else:
            DSC.append([float(Jj / si) for Jj in Ji])
    return DSC
def get_rxn_order(self,rxn_parameters,epsilon=1e-10):
    # Reaction order of each TOF with respect to each gas pressure,
    # computed by a relative forward finite difference in pressure:
    # order = (dTOF/TOF) / (dP/P).
    current_tofs = self.get_turnover_frequency(rxn_parameters)
    current_Ps = [p for p in self.gas_pressures]
    DRC = []
    for i,p in enumerate(current_Ps):
        # perturb pressure i by a relative epsilon
        new_p = copy(current_Ps)
        new_p[i] = current_Ps[i]*(1+epsilon)
        self._rxm.gas_pressures = new_p ##HACK
        #setting self.gas_pressures = new_p inexplicably breaks the solver.
        new_tofs = self.get_turnover_frequency(rxn_parameters)
        DRC_i = []
        for j,old_tof,new_tof,gas in zip(
                range(0,len(current_tofs)),current_tofs,new_tofs,self.gas_names):
            if old_tof == 0:
                # avoid dividing by a zero baseline TOF
                if new_tof == 0:
                    dTOF = 0
                else:
                    dTOF = (new_tof-old_tof)/new_tof
            else:
                dTOF = (new_tof-old_tof)/old_tof
            dP = (new_p[i] - current_Ps[i])/current_Ps[i]
            DRC_i.append(float(dTOF/dP))
        DRC.append(DRC_i)
    # restore the unperturbed pressures
    self._rxm.gas_pressures = current_Ps ##HACK
    #setting self.gas_pressures = current_Ps inexplicably breaks the solver.
    self._rxn_order = DRC
    return DRC
def summary_text(self):
    """Return the solver's summary text (empty for the base mean-field solver)."""
    return ''
def rate_equation_term(self,species_list,rate_constant_string,d_wrt=None):
    """Function to compose a term in the rate equation - e.g. kf[1]*theta[0]*p[0]

    If d_wrt is an adsorbate name, the returned string is instead the
    partial derivative of the term with respect to that adsorbate's
    coverage, using d(empty site)/d(theta) = -1.
    """
    #This clause allows for multiple site types.
    site_indices={}
    # NOTE(review): site_indices is never used below -- looks like dead code.
    gas_idxs = [self.gas_names.index(gas)
            for gas in species_list if gas in self.gas_names]
    ads_idxs = [self.adsorbate_names.index(ads)
            for ads in species_list if ads in self.adsorbate_names]
    sites = [s for s in species_list
            if s in self.site_names] #allows for multiple site types
    if len(gas_idxs+ads_idxs+sites) != len(species_list):
        raise ValueError('Undefined species in '+','.join(species_list))
    rate_string = rate_constant_string
    if not d_wrt:
        # plain term: multiply by pressures, coverages and empty-site terms
        for id in gas_idxs:
            rate_string += '*p['+str(id)+']'
        for id in ads_idxs:
            rate_string += '*theta['+str(id)+']'
        for s in sites:
            rate_string += '*s['+str(self.site_names.index(s))+']'
        return rate_string
    else:
        # derivative with respect to adsorbate d_wrt (coverage theta[d_idx])
        d_idx = self.adsorbate_names.index(d_wrt)
        d_site = self.species_definitions[d_wrt]['site']
        if (d_idx in ads_idxs #expression is a function of d_wrt
                and d_site not in sites): #empty site not there -> easy
            multiplier = ads_idxs.count(d_idx) #get order
            ads_idxs.remove(d_idx) #reduce order by 1
            if multiplier != 1:
                rate_string = str(multiplier)+'*'+rate_string
        elif (d_site in sites #empty site appears,
                and d_idx not in ads_idxs): #but not the adsorbate
            multiplier = sites.count(d_site)
            multiplier = -1*multiplier #this accounds for the
            #fact that d_site/d_ads = -1
            sites.remove(d_site) #reduce the order of site by 1
            rate_string = str(multiplier)+'*'+rate_string
        elif (d_site in sites #function of adsorbate
                and d_idx in ads_idxs): #and function of site (1-theta_i)
            #need to use chain rule...
            ads_mult = ads_idxs.count(d_idx)
            site_mult = sites.count(d_site)*-1 #negative 1 to account for
            #d_site/d_ads
            ads_str = 'theta['+str(d_idx)+']'
            site_str = 's['+str(self.site_names.index(d_site))+']'
            sites = [s for s in sites if s != d_site] #remove d_site
            #from the site list
            ads_idxs = [a for a in ads_idxs if a != d_idx] #remove d_idx
            #for adsorbate list
            if (-site_mult-1):
                op = '*'
            else:
                op = ''
            # product rule: d(theta^a * s^b) = (b*theta*s^(b-1) + a*s*theta^(a-1))*...
            mult_rule = '('+str(site_mult)+'*'+ads_str+op+'*'.join(
                    [site_str]*(-site_mult-1)) # cvg*d_site
            mult_rule += ' + '+str(ads_mult)+'*'+site_str+'*'.join(
                    [ads_str]*(ads_mult-1))+ ')'
            rate_string += '*'+mult_rule
        else:
            # term does not depend on d_wrt at all
            return '0'
        # append the remaining (underived) pressure/coverage/site factors
        for id in gas_idxs:
            rate_string += '*p['+str(id)+']'
        for id in ads_idxs:
            rate_string += '*theta['+str(id)+']'
        for s in sites:
            rate_string += '*s['+str(self.site_names.index(s))+']'
        return rate_string
def site_string_list(self):
    """Function to compose an analytic expression for the coverage of empty sites"""
    site_strings=[]
    site_totals={}
    for site in self.site_names:
        # total coverage available on this site type
        site_totals[site] = self._mpfloat(self.species_definitions[site]['total'])
        site_idxs = [[idx,self.species_definitions[ads]['n_sites']] for idx,ads in enumerate(self.adsorbate_names)
                if self.species_definitions[ads]['site'] == site]
        # empty-site expression: total minus the coverage of every adsorbate
        # bound to this site type
        site_str = repr(site_totals[site])
        for idx_i,nsites in site_idxs:
            # NOTE(review): nsites is collected but unused -- multi-site
            # adsorbates (n_sites > 1) presumably should subtract
            # nsites*theta; confirm the intended behavior.
            site_str += ' - theta['+str(idx_i)+']'
        site_strings.append('('+site_str+')')
    return site_strings
def substitutions_dict(self):
    """Dictionary of substitutions needed for static compiled functions"""
    # Each value is a line of Python source text that gets substituted into
    # the solver's code templates.
    subdict = {}
    subdict['temperature'] = 'T = '+repr(self.temperature)
    subdict['kB'] = 'kB = '+repr(self._kB)
    subdict['h'] = 'h = '+repr(self._h)
    # one kB*T/h prefactor per elementary step
    subdict['prefactor_list'] = 'prefactor_list = [kB*T/h]*'+str(len(self.elementary_rxns))
    subdict['n_adsorbates'] = 'n_adsorbates = '+str(len(self.adsorbate_names))
    subdict['n_transition_states'] = 'n_transition_states = '+str(len(self.transition_state_names))
    # per-adsorbate maximum coverage, defaulting to the site total
    max_cvg_list = []
    for ads in self.adsorbate_names:
        site = self.species_definitions[ads]['site']
        default_max = self.species_definitions[site]['total']
        max_cvg_list.append(self.species_definitions[ads].get('max_coverage',default_max))
    subdict['max_coverage_list'] = 'max_coverage_list = ' + repr(max_cvg_list)
    # site_info_dict maps each site name to
    # [surface-species indices, total coverage, interaction response params]
    idx_dict = {}
    surf_species = self.adsorbate_names+self.transition_state_names
    for s in self.site_names:
        idxs = [surf_species.index(a) for a in surf_species if
                self.species_definitions[a]['site'] == s]
        if idxs:
            if self.adsorbate_interaction_model not in ['ideal',None]:
                default_params = getattr(
                    self.thermodynamics.adsorbate_interactions,
                    'interaction_response_parameters',{})
            else:
                default_params = {}
            F_params = self.species_definitions[s].get('interaction_response_parameters',default_params)
            idx_dict[s] = [idxs,self.species_definitions[s]['total'],F_params]
    subdict['site_info_dict'] = 'site_info_dict = ' + repr(idx_dict)
    return subdict
def rate_equations(self):
    """Compose analytical expressions for the reaction rates and
    change of surface species wrt time (dc/dt).
    Assumes:
    kf is defined as a list of forward rate-constants
    kr is defined as a list of reverse rate-constants
    theta is defined as a list of coverages
    p is defined as a list of pressures
    """
    site_strings = self.site_string_list()
    rate_strings = []
    # empty-site coverages first, so the r[i] expressions can reference s[..]
    rate_strings.append('s = [0]*'+str(len(self.site_names)))
    for i,s in enumerate(site_strings):
        rate_strings.append('s['+str(i)+'] = '+site_strings[i])
    # net rate of each elementary step: forward minus reverse term
    for i,rxn in enumerate(self.elementary_rxns):
        fRate_string = self.rate_equation_term(rxn[0],'kf['+str(i)+']')
        rRate_string = self.rate_equation_term(rxn[-1],'kr['+str(i)+']')
        rateString = 'r['+str(i)+'] = '+fRate_string + ' - ' + rRate_string
        rate_strings.append(rateString)
    # dtheta/dt of each adsorbate: signed sum of step rates
    dcdt_strings = []
    for i,ads in enumerate(self.adsorbate_names):
        dcdt_str = 'dtheta_dt['+str(i)+'] = '
        for j,rxn in enumerate(self.elementary_rxns):
            # negative count for consumption (IS), positive for production (FS)
            rxnCounts = [-1.0*rxn[0].count(ads), 1.0*rxn[-1].count(ads)]
            # NOTE(review): only the first nonzero count is used -- presumably
            # a species never appears on both sides of one step; confirm.
            rxnOrder = [o for o in rxnCounts if o]
            if rxnOrder:
                rxnOrder = rxnOrder[0]
                dcdt_str += ' + ' + str(int(rxnOrder))+'*r['+str(j)+']'
        if dcdt_str.endswith('= '):
            # adsorbate appears in no step: dtheta/dt = 0
            dcdt_str += '0'
        dcdt_strings.append(dcdt_str)
    all_strings = rate_strings + dcdt_strings
    return all_strings
def jacobian_equations(self,adsorbate_interactions=True):
    """Composes analytical expressions for the Jacobian matrix.
    Assumes:
    kf is defined as a list of forward rate-constants
    kr is defined as a list of reverse rate-constants
    theta is defined as a list of coverages
    p is defined as a list of pressures
    If the rate constants depend on coverage, use
    adsorbate_interactions = True.
    Assumes:
    kB is defined as Boltzmann's constant
    T is defined as the temperature
    dEf is defined as a list of lists where dEf[i][j] is the
    derivative of forward activation free energy i wrt coverage j
    dEr is defined as a list of lists where dEr[i][j] is the
    derivative of reverse activation free energy i wrt coverage j
    """
    site_strings = self.site_string_list()
    J_strings = []
    # empty-site coverage expressions first (referenced by the J entries)
    J_strings.append('s = [0]*'+str(len(self.site_names)))
    for i,s in enumerate(site_strings):
        J_strings.append('s['+str(i)+'] = '+site_strings[i])
    if adsorbate_interactions == True:
        # pre-divide rate constants by kB*T for the dE/dtheta chain-rule terms
        J_strings.append('kfkBT = [0]*'+str(len(self.elementary_rxns)))
        J_strings.append('krkBT = [0]*'+str(len(self.elementary_rxns)))
        for i in range(len(self.elementary_rxns)):
            J_strings.append('kfkBT['+str(i)+'] = kf['+str(i)+']/kBT')
            J_strings.append('krkBT['+str(i)+'] = kr['+str(i)+']/kBT')
    for i,ads_i in enumerate(self.adsorbate_names):
        for j,ads_j in enumerate(self.adsorbate_names):
            # J[i][j] = d(dtheta_i/dt)/d(theta_j)
            J_str = 'J['+str(i)+']['+str(j)+'] = 0'
            for k,rxn in enumerate(self.elementary_rxns):
                rxnCounts = [-1.0*rxn[0].count(ads_i),
                    1.0*rxn[-1].count(ads_i)]
                # NOTE(review): only the first nonzero count is used (see
                # rate_equations) -- confirm species never appear on both sides.
                rxnOrder = [o for o in rxnCounts if o]
                if rxnOrder:
                    rxnOrder = rxnOrder[0]
                    # derivative of the forward/reverse terms wrt theta_j
                    fRate_string = self.rate_equation_term(rxn[0],'kf['+str(k)+']',ads_j)
                    rRate_string = self.rate_equation_term(rxn[-1],'kr['+str(k)+']',ads_j)
                    if adsorbate_interactions == True:
                        # chain-rule contribution from coverage-dependent
                        # rate constants: dk/dtheta = -(k/kBT)*dE/dtheta
                        dfRate_string = self.rate_equation_term(rxn[0],'(kfkBT['+str(k)+'])*dEf['+str(k)+']['+str(j)+']')
                        drRate_string = self.rate_equation_term(rxn[-1],'(krkBT['+str(k)+'])*dEr['+str(k)+']['+str(j)+']')
                        fRate_string += ' + ' + dfRate_string
                        rRate_string += ' - ' + drRate_string
                    # combine forward/reverse derivatives, dropping '0' terms
                    if fRate_string != '0' and rRate_string != '0':
                        dr_dx = '('+fRate_string + ' - ' + rRate_string+')'
                    elif fRate_string != '0':
                        dr_dx = fRate_string
                    elif rRate_string != '0':
                        dr_dx = '-1*'+rRate_string
                    elif fRate_string == rRate_string == '0':
                        dr_dx = None
                    if dr_dx:
                        J_str += ' + ' + str(int(rxnOrder))+'*'+dr_dx
            J_strings.append(J_str)
    return J_strings
def reaction_energy_equations(self,adsorbate_interactions=True):
    """Composes a list of analytical expressions which give the reaction
    and activation energies for elementary steps. Note that while this
    is useful primarily for models with adsorbate-interactions
    (otherwise these energetics can easily be obtained by the reaction
    model itself), they are technically valid for all mean-field models.
    Assumes:
    Gf is a list of formation energies ordered as
    adsorbate_names+transition_state_names
    If model includes adsorbate interactions then use
    adsorbate_interactions = True to include dEa/dtheta in the output.
    Assumes:
    dGs is a matrix/array of derivatives of free energies wrt coverages
    such that dGs[:,i] is a vector of derivatives of the free energy
    of species i wrt each coverage ordered as adsorbate_names
    """
    # map each surface species to its index into the Gf/dGs arrays
    idx_dict = {}
    for i,ads in enumerate(self.adsorbate_names):
        idx_dict[ads] = str(i)
    for i,TS in enumerate(self.transition_state_names):
        idx_dict[TS] = str(i + len(self.adsorbate_names))
    expressions = []
    n_rxns = len(self.elementary_rxns)
    # pre-declare the generated lists that the emitted code fills in
    expressions.append('G_IS = [0]*'+str(n_rxns))
    expressions.append('G_TS = [0]*'+str(n_rxns))
    expressions.append('G_FS = [0]*'+str(n_rxns))
    expressions.append('G_af = [0]*'+str(n_rxns))
    expressions.append('G_ar = [0]*'+str(n_rxns))
    if adsorbate_interactions == True:
        expressions.append('dG_IS = [0]*'+str(n_rxns))
        expressions.append('dG_TS = [0]*'+str(n_rxns))
        expressions.append('dG_FS = [0]*'+str(n_rxns))

    def species_strings(state_list,list_name,include_constants=True,type='list'):
        # Translate a reaction state (list of species names) into a list of
        # source-code references into list_name (Gf/dGs), falling back to
        # gas/site energy constants when include_constants is True.
        species_strs = []
        for species in state_list:
            if species in idx_dict:
                if type == 'list':
                    species_strs.append(list_name+'['+idx_dict[species]+']')
                elif type == 'matrix_row':
                    species_strs.append(list_name+'['+idx_dict[species]+',:]')
                elif type == 'matrix_col':
                    species_strs.append(list_name+'[:,'+idx_dict[species]+']')
            elif include_constants == True:
                if species in self.gas_names:
                    idx = self.gas_names.index(species)
                    species_strs.append('gas_energies['+str(idx)+']')
                elif species in self.site_names:
                    idx = self.site_names.index(species)
                    species_strs.append('site_energies['+str(idx)+']')
                else:
                    raise ValueError('Undefined species '+species)
        if not species_strs:
            # empty state: contribute a zero vector of the right length
            species_strs = ['[0]*'+str(len(self.adsorbate_names))]
        return species_strs

    for i,rx in enumerate(self.elementary_rxns):
        IS = rx[0]
        FS = rx[-1]
        # a 3-state step [IS, TS, FS] has an explicit transition state
        if len(rx) > 2:
            TS = rx[1]
        else:
            TS = None
        txt = 'G_IS['+str(i)+'] = ' + ' + '.join(species_strings(IS,'Gf')) + '\n        '
        txt += 'G_FS['+str(i)+'] = ' + ' + '.join(species_strings(FS,'Gf')) + '\n        '
        if adsorbate_interactions == True:
            txt += 'dG_IS['+str(i)+'] = element_wise_addition([' + ' , '.join(species_strings(IS,'dGs',False,'list')) + '])\n        '
            txt += 'dG_FS['+str(i)+'] = element_wise_addition([' + ' , '.join(species_strings(FS,'dGs',False,'list')) + '])\n        '
        if TS:
            TS_strings = species_strings(TS,'Gf')
            txt += 'G_TS['+str(i)+'] = ' + ' + '.join(species_strings(TS,'Gf')) + '\n        '
            if adsorbate_interactions == True:
                txt += 'dG_TS['+str(i)+'] = element_wise_addition([' + ' , '.join(species_strings(TS,'dGs',False,'list')) + '])\n        '
        else:
            # barrierless step: the TS energy is the higher of IS and FS
            txt += 'G_TS['+str(i)+'] = max([G_IS['+str(i)+'],G_FS['+str(i)+']])' + '\n        '
            if adsorbate_interactions == True:
                txt += 'dG_TS['+str(i)+'] = None #determined later\n        '
        txt = txt.replace(' + 0', '')
        expressions.append(txt)
    return expressions
|
marcelometal/holmes-api | refs/heads/master | tests/unit/test_cache.py | 1 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import time
from gzip import GzipFile
from cStringIO import StringIO
from ujson import dumps
import msgpack
from preggy import expect
from tornado.testing import gen_test
from tornado.gen import Task
from holmes.cache import Cache
from holmes.models import Domain, Limiter, Page
from tests.unit.base import ApiTestCase
from tests.fixtures import (
DomainFactory, PageFactory, ReviewFactory, LimiterFactory,
DomainsViolationsPrefsFactory, KeyFactory
)
class CacheTestCase(ApiTestCase):
    """Integration tests for the asynchronous (tornado) Cache of the API server.

    Every test deletes the redis key it touches up-front so runs are
    independent; model factories create the DB rows that warm the cache
    on a miss.
    """

    @property
    def cache(self):
        # Cache instance wired into the running test server application.
        return self.server.application.cache

    def test_cache_is_in_server(self):
        expect(self.server.application.cache).to_be_instance_of(Cache)

    def test_cache_has_connection_to_redis(self):
        expect(self.server.application.cache.redis).not_to_be_null()

    def test_cache_has_connection_to_db(self):
        expect(self.server.application.cache.db).not_to_be_null()

    @gen_test
    def test_increment_active_review_count(self):
        """First read reports the 1 active review; increment bumps it to 2."""
        key = 'g.com-active-review-count'
        self.cache.redis.delete(key)

        gcom = DomainFactory.create(url='http://g.com', name='g.com')

        page = PageFactory.create(domain=gcom)
        ReviewFactory.create(
            is_active=True,
            is_complete=True,
            domain=gcom,
            page=page,
            number_of_violations=1
        )

        # inactive review: must not be counted
        page = PageFactory.create(domain=gcom)
        ReviewFactory.create(
            is_active=False,
            is_complete=True,
            domain=gcom,
            page=page,
            number_of_violations=3
        )

        page_count = yield self.cache.get_active_review_count('g.com')
        expect(page_count).to_equal(1)

        yield self.cache.increment_active_review_count('g.com')

        page_count = yield self.cache.get_active_review_count('g.com')
        expect(page_count).to_equal(2)

    @gen_test
    def test_can_get_active_review_count_for_domain(self):
        """Counts only active reviews; a second call is served with db detached."""
        self.db.query(Domain).delete()

        globocom = DomainFactory.create(url="http://globo.com", name="globo.com")
        DomainFactory.create(url="http://g1.globo.com", name="g1.globo.com")

        page = PageFactory.create(domain=globocom)
        ReviewFactory.create(is_active=True, is_complete=True, domain=globocom, page=page, number_of_violations=10)

        page2 = PageFactory.create(domain=globocom)
        ReviewFactory.create(is_active=True, is_complete=True, domain=globocom, page=page2, number_of_violations=10)
        ReviewFactory.create(is_active=False, is_complete=True, domain=globocom, page=page2, number_of_violations=10)

        count = yield self.cache.get_active_review_count('globo.com')
        expect(count).to_equal(2)

        # should get from cache
        self.cache.db = None
        count = yield self.cache.get_active_review_count('globo.com')
        expect(count).to_equal(2)

    @gen_test
    def test_can_store_processed_page_lock(self):
        yield self.cache.lock_page('http://www.globo.com')

        result = yield Task(self.cache.redis.get, 'http://www.globo.com-lock')
        expect(int(result)).to_equal(1)

    @gen_test
    def test_can_get_url_was_added(self):
        yield self.cache.lock_page('http://www.globo.com')

        result = yield self.cache.has_lock('http://www.globo.com')
        expect(result).to_be_true()

    @gen_test
    def test_release_lock_page(self):
        yield self.cache.lock_page('http://www.globo.com')

        result = yield self.cache.has_lock('http://www.globo.com')
        expect(result).to_be_true()

        yield self.cache.release_lock_page('http://www.globo.com')

        result = yield self.cache.has_lock('http://www.globo.com')
        expect(result).to_be_false()

    @gen_test
    def test_can_remove_domain_limiters_key(self):
        self.cache.redis.delete('domain-limiters')

        domains = yield Task(self.cache.redis.get, 'domain-limiters')
        expect(domains).to_be_null()

        yield Task(self.cache.redis.setex, 'domain-limiters', 10, 10)
        domains = yield Task(self.cache.redis.get, 'domain-limiters')
        expect(domains).to_equal('10')

        yield self.cache.remove_domain_limiters_key()
        domains = yield Task(self.cache.redis.get, 'domain-limiters')
        expect(domains).to_be_null()

    @gen_test
    def test_can_get_limit_usage(self):
        """Limit usage equals the cardinality of the per-url sorted set."""
        url = 'http://globo.com'
        key = 'limit-for-%s' % url
        self.cache.redis.delete(key)

        yield Task(self.cache.redis.zadd, key, {'a': 1, 'b': 2, 'c': 3})

        limit = yield Task(self.cache.redis.zcard, key)
        expect(limit).to_equal(3)

        limit = yield self.cache.get_limit_usage(url)
        expect(limit).to_equal(3)

    @gen_test
    def test_can_remove_limit_usage_by_domain(self):
        """Deleting by domain removes every 'limit-for-<domain>*' key, not only the exact match."""
        domain_url = 'http://globo.com'
        key1 = 'limit-for-%s' % domain_url
        self.cache.redis.delete(key1)
        key2 = 'limit-for-%s/sa/' % domain_url
        self.cache.redis.delete(key2)

        yield Task(self.cache.redis.zadd, key1, {'a': 1})
        yield Task(self.cache.redis.zadd, key2, {'b': 1})

        keys = yield Task(self.cache.redis.keys, 'limit-for-%s*' % domain_url)
        expect(keys).to_length(2)

        yield Task(self.cache.delete_limit_usage_by_domain, domain_url)

        keys = yield Task(self.cache.redis.keys, 'limit-for-%s*' % domain_url)
        expect(keys).to_length(0)

    @gen_test
    def test_increment_page_score(self):
        """Each increment adds 1 to the page's zscore in 'page-scores'."""
        # BUGFIX: previously deleted 'pages-score' (typo) while every read
        # below targets 'page-scores', so stale members could leak between runs.
        self.cache.redis.delete('page-scores')

        total = yield Task(self.cache.redis.zcard, 'page-scores')
        expect(int(total)).to_equal(0)

        yield self.cache.increment_page_score('page-1')
        score = yield Task(self.cache.redis.zscore, 'page-scores', 'page-1')
        expect(int(score)).to_equal(1)

        yield self.cache.increment_page_score('page-1')
        score = yield Task(self.cache.redis.zscore, 'page-scores', 'page-1')
        expect(int(score)).to_equal(2)

    @gen_test
    def test_can_delete_domain_violations_prefs(self):
        domain_url = 'globo.com'
        key = 'violations-prefs-%s' % domain_url
        self.cache.redis.delete(key)

        prefs = yield Task(self.cache.redis.get, key)
        expect(prefs).to_be_null()

        data = dumps([{'key': 'test', 'value': '10'}])
        yield Task(self.cache.redis.setex, key, 1, data)

        prefs = yield Task(self.cache.redis.get, key)
        expect(prefs).to_be_like(data)

        yield self.cache.delete_domain_violations_prefs(domain_url)

        prefs = yield Task(self.cache.redis.get, key)
        expect(prefs).to_be_null()

    @gen_test
    def test_add_next_job_bucket(self):
        """Jobs land in the 'next-job-bucket' sorted set in insertion order."""
        key = 'next-job-bucket'
        self.cache.redis.delete(key)

        prefs = yield Task(self.cache.redis.get, key)
        expect(prefs).to_be_null()

        for x in range(2):
            page = PageFactory.create(uuid='%d' % x, url='http://g%d.com' % x)
            yield Task(self.cache.add_next_job_bucket, page.uuid, page.url)

        data = yield Task(self.cache.redis.zrange, key, 0, 0)
        expect(data).to_be_like([dumps({"url": "http://g0.com", "page": "0"})])
        data = yield Task(self.cache.redis.zrange, key, 1, 1)
        expect(data).to_be_like([dumps({"url": "http://g1.com", "page": "1"})])

    @gen_test
    def test_can_get_next_job_list(self):
        key = 'next-job-bucket'
        self.cache.redis.delete(key)

        for x in range(2):
            page = PageFactory.create(uuid='%d' % x, url='http://g%d.com' % x)
            yield Task(self.cache.add_next_job_bucket, page.uuid, page.url)

        data = yield Task(self.cache.get_next_job_list, 1, 10)
        expect(data).to_equal([
            '{"url":"http:\\/\\/g0.com","page":"0"}',
            '{"url":"http:\\/\\/g1.com","page":"1"}'
        ])
class SyncCacheTestCase(ApiTestCase):
    """Integration tests for the synchronous (blocking) cache client.

    Mirrors CacheTestCase but drives the sync redis connection directly;
    each test clears the redis key(s) it touches so runs are independent.
    """

    def setUp(self):
        # Start from an empty DB so factory-created rows are the only fixtures.
        super(SyncCacheTestCase, self).setUp()
        self.db.query(Domain).delete()
        self.db.query(Page).delete()

    @property
    def sync_cache(self):
        # Fresh synchronous redis-backed cache per access.
        return self.connect_to_sync_redis()

    @property
    def config(self):
        return self.server.application.config

    def test_cache_has_connection_to_redis(self):
        expect(self.sync_cache.redis).not_to_be_null()

    def test_cache_has_connection_to_db(self):
        expect(self.sync_cache.db).not_to_be_null()

    def test_can_get_domain_limiters(self):
        """Limiters come from the DB on a miss, then from redis (db detached)."""
        self.db.query(Limiter).delete()
        self.sync_cache.redis.delete('domain-limiters')

        domains = self.sync_cache.get_domain_limiters()
        expect(domains).to_be_null()

        limiter = LimiterFactory.create(url='http://test.com/')
        LimiterFactory.create()
        LimiterFactory.create()

        domains = self.sync_cache.get_domain_limiters()
        expect(domains).to_length(3)
        expect(domains).to_include({limiter.url: limiter.value})

        # should get from cache
        self.sync_cache.db = None
        domains = self.sync_cache.get_domain_limiters()
        expect(domains).to_length(3)

    def test_can_set_domain_limiters(self):
        self.db.query(Limiter).delete()
        self.sync_cache.redis.delete('domain-limiters')

        domains = self.sync_cache.get_domain_limiters()
        expect(domains).to_be_null()

        limiters = [{u'http://test.com/': 10}]
        self.sync_cache.set_domain_limiters(limiters, 120)

        domains = self.sync_cache.get_domain_limiters()
        expect(domains).to_length(1)
        expect(domains).to_include(limiters[0])

    def test_has_key(self):
        self.sync_cache.redis.delete('my-key')

        has_my_key = self.sync_cache.has_key('my-key')
        expect(has_my_key).to_be_false()

        self.sync_cache.redis.setex('my-key', 10, '')

        has_my_key = self.sync_cache.has_key('my-key')
        expect(has_my_key).to_be_true()

    def test_get_domain_name(self):
        """Accepts a plain name, a Domain instance, or '' (falls back to 'page')."""
        testcom = self.sync_cache.get_domain_name('test.com')
        expect(testcom).to_equal('test.com')

        gcom = DomainFactory.create(url='http://g.com', name='g.com')
        domain_name = self.sync_cache.get_domain_name(gcom)
        expect(domain_name).to_equal('g.com')

        empty_domain_name = self.sync_cache.get_domain_name('')
        expect(empty_domain_name).to_equal('page')

    def test_increment_active_review_count(self):
        """Counter lands on '1' after the first increment, '2' after the second."""
        key = 'g.com-active-review-count'
        self.sync_cache.redis.delete(key)

        gcom = DomainFactory.create(url='http://g.com', name='g.com')

        page = PageFactory.create(domain=gcom)
        ReviewFactory.create(
            is_active=True,
            is_complete=True,
            domain=gcom,
            page=page,
            number_of_violations=1
        )

        # inactive review: must not be counted
        page = PageFactory.create(domain=gcom)
        ReviewFactory.create(
            is_active=False,
            is_complete=True,
            domain=gcom,
            page=page,
            number_of_violations=3
        )

        self.sync_cache.increment_active_review_count(gcom.name)
        active_review_count = self.sync_cache.redis.get(key)
        expect(active_review_count).to_equal('1')

        self.sync_cache.increment_active_review_count(gcom.name)
        active_review_count = self.sync_cache.redis.get(key)
        expect(active_review_count).to_equal('2')

    def test_increment_count(self):
        """increment_count seeds the counter via the callback, then increments."""
        key = 'g.com-my-key'
        self.sync_cache.redis.delete(key)

        gcom = DomainFactory.create(url="http://g.com", name="g.com")
        PageFactory.create(domain=gcom)

        self.sync_cache.increment_count(
            'my-key',
            gcom.name,
            lambda domain: domain.get_page_count(self.db)
        )
        page_count = self.sync_cache.redis.get(key)
        expect(page_count).to_equal('1')

        self.sync_cache.increment_count(
            'my-key',
            gcom.name,
            lambda domain: domain.get_page_count(self.db)
        )
        page_count = self.sync_cache.redis.get(key)
        expect(page_count).to_equal('2')

    def test_get_active_review_count(self):
        """Only active reviews count; second call served with db detached."""
        self.sync_cache.redis.delete('g.com-active-review-count')

        gcom = DomainFactory.create(url="http://g.com", name="g.com")
        DomainFactory.create(url="http://g1.globo.com", name="g1.globo.com")

        page = PageFactory.create(domain=gcom)
        page2 = PageFactory.create(domain=gcom)

        ReviewFactory.create(
            is_active=True,
            is_complete=True,
            domain=gcom,
            page=page,
            number_of_violations=10
        )
        ReviewFactory.create(
            is_active=True,
            is_complete=True,
            domain=gcom,
            page=page2,
            number_of_violations=10
        )
        ReviewFactory.create(
            is_active=False,
            is_complete=True,
            domain=gcom,
            page=page2,
            number_of_violations=10
        )

        count = self.sync_cache.get_active_review_count(gcom.name)
        expect(count).to_equal(2)

        # should get from cache
        self.sync_cache.db = None
        count = self.sync_cache.get_active_review_count(gcom.name)
        expect(count).to_equal(2)

    def test_get_count(self):
        key = 'g.com-my-key'
        self.sync_cache.redis.delete(key)

        gcom = DomainFactory.create(url="http://g.com", name="g.com")
        PageFactory.create(domain=gcom)

        count = self.sync_cache.get_count(
            key,
            gcom.name,
            int(self.config.PAGE_COUNT_EXPIRATION_IN_SECONDS),
            lambda domain: domain.get_page_count(self.db)
        )
        expect(count).to_equal(1)

        # should get from cache
        self.sync_cache.db = None
        count = self.sync_cache.get_count(
            key,
            gcom.name,
            int(self.config.PAGE_COUNT_EXPIRATION_IN_SECONDS),
            lambda domain: domain.get_page_count(self.db)
        )
        expect(count).to_equal(1)

    def test_get_request_with_url_not_cached(self):
        url = 'http://g.com/test.html'
        key = 'urls-%s' % url
        self.sync_cache.redis.delete(key)

        url, response = self.sync_cache.get_request(url)
        expect(url).to_equal('http://g.com/test.html')
        expect(response).to_be_null()

    def test_get_request_with_url_cached(self):
        """A msgpack entry with a gzipped body round-trips into a response object."""
        url = 'http://g.com/test.html'
        key = 'urls-%s' % url
        self.sync_cache.redis.delete(key)

        # gzip an empty body — presumably the cache stores bodies compressed;
        # this mirrors what get_request expects to decode.
        out = StringIO()
        with GzipFile(fileobj=out, mode="w") as f:
            f.write('')
        text = out.getvalue()

        value = msgpack.packb({
            'url': url,
            'body': text,
            'status_code': 200,
            'headers': None,
            'cookies': None,
            'effective_url': 'http://g.com/test.html',
            'error': None,
            'request_time': str(100)
        })
        self.sync_cache.redis.setex(
            key,
            10,
            value
        )

        url, response = self.sync_cache.get_request(url)
        expect(url).to_equal('http://g.com/test.html')
        expect(response.status_code).to_equal(200)
        expect(response.effective_url).to_equal(url)
        expect(response.request_time).to_equal(100)

    def test_set_request(self):
        """set_request stores a 2xx response so get_request can replay it."""
        test_url = 'http://g.com/test.html'
        key = 'urls-%s' % test_url
        self.sync_cache.redis.delete(key)

        url, response = self.sync_cache.get_request(test_url)
        expect(url).to_equal('http://g.com/test.html')
        expect(response).to_be_null()

        self.sync_cache.set_request(
            url=url,
            status_code=200,
            headers={'X-HEADER': 'test'},
            cookies=None,
            text='',
            effective_url='http://g.com/test.html',
            error=None,
            request_time=100,
            expiration=5
        )

        url, response = self.sync_cache.get_request(test_url)
        expect(url).to_equal('http://g.com/test.html')
        expect(response.status_code).to_equal(200)
        expect(response.headers.get('X-HEADER')).to_equal('test')
        expect(response.cookies).to_be_null()
        expect(response.effective_url).to_equal(url)
        expect(response.error).to_be_null()
        expect(response.request_time).to_equal(100)

    def test_set_request_with_status_code_greater_than_399(self):
        # error responses (>= 400) are not cached
        test_url = 'http://g.com/test.html'
        key = 'urls-%s' % test_url
        self.sync_cache.redis.delete(key)

        self.sync_cache.set_request(
            url=test_url,
            status_code=500,
            headers=None,
            cookies=None,
            text=None,
            effective_url=None,
            error=None,
            request_time=1,
            expiration=5
        )

        url, response = self.sync_cache.get_request(test_url)
        expect(url).to_equal('http://g.com/test.html')
        expect(response).to_be_null()

    def test_set_request_with_status_code_less_than_100(self):
        # status codes below 100 are not cached either
        test_url = 'http://g.com/test.html'
        key = 'urls-%s' % test_url
        self.sync_cache.redis.delete(key)

        self.sync_cache.set_request(
            url=test_url,
            status_code=99,
            headers=None,
            cookies=None,
            text=None,
            effective_url=None,
            error=None,
            request_time=1,
            expiration=5
        )

        url, response = self.sync_cache.get_request(test_url)
        expect(url).to_equal('http://g.com/test.html')
        expect(response).to_be_null()

    def test_lock_next_job(self):
        test_url = 'http://g.com/test.html'
        key = '%s-next-job-lock' % test_url
        self.sync_cache.redis.delete(key)

        lock = self.sync_cache.lock_next_job(test_url, 5)
        expect(lock.acquire()).to_be_true()

    def test_has_next_job_lock(self):
        """Second acquisition attempt while the lock is held yields None."""
        test_url = 'http://g.com/test.html'
        key = '%s-next-job-lock' % test_url
        self.sync_cache.redis.delete(key)

        lock = self.sync_cache.lock_next_job(test_url, 20)
        expect(lock).not_to_be_null()

        has_next_job_lock = self.sync_cache.has_next_job_lock(test_url, 20)
        expect(has_next_job_lock).not_to_be_null()
        has_next_job_lock = self.sync_cache.has_next_job_lock(test_url, 20)
        expect(has_next_job_lock).to_be_null()

    def test_release_next_job(self):
        test_url = 'http://g.com/test.html'
        key = '%s-next-job-lock' % test_url
        self.sync_cache.redis.delete(key)

        has_next_job_lock = self.sync_cache.has_next_job_lock(test_url, 5)
        expect(has_next_job_lock).not_to_be_null()

        self.sync_cache.release_next_job(has_next_job_lock)

        # lock can be taken again once released
        lock = self.sync_cache.has_next_job_lock(test_url, 5)
        expect(lock).not_to_be_null()

    def test_increment_page_score(self):
        self.sync_cache.redis.delete('page-scores')

        total = self.sync_cache.redis.zcard('page-scores')
        expect(total).to_equal(0)

        self.sync_cache.increment_page_score('page-1')
        score = self.sync_cache.redis.zscore('page-scores', 'page-1')
        expect(score).to_equal(1)

        self.sync_cache.increment_page_score('page-1')
        score = self.sync_cache.redis.zscore('page-scores', 'page-1')
        expect(score).to_equal(2)

    def test_seized_pages_score(self):
        """seized_pages_score returns the members and empties the sorted set."""
        self.sync_cache.redis.delete('page-scores')

        for i in range(3):
            self.sync_cache.increment_page_score('page-%d' % i)

        total = self.sync_cache.redis.zcard('page-scores')
        expect(total).to_equal(3)

        values = self.sync_cache.seized_pages_score()
        expect(values).to_length(3)

        # set is drained after being seized
        total = self.sync_cache.redis.zcard('page-scores')
        expect(total).to_equal(0)

    def test_lock_update_pages_score(self):
        self.sync_cache.redis.delete('update-pages-score-lock')

        lock = self.sync_cache.lock_update_pages_score(5)
        expect(lock.acquire()).to_be_true()

    def test_has_update_pages_lock(self):
        """Second acquisition attempt while the lock is held yields None."""
        self.sync_cache.redis.delete('update-pages-score-lock')

        lock = self.sync_cache.lock_update_pages_score(20)
        expect(lock).not_to_be_null()

        has_update_pages_lock = self.sync_cache.has_update_pages_lock(20)
        expect(has_update_pages_lock).not_to_be_null()
        has_update_pages_lock = self.sync_cache.has_update_pages_lock(20)
        expect(has_update_pages_lock).to_be_null()

    def test_release_update_pages_lock(self):
        self.sync_cache.redis.delete('update-pages-score-lock')

        has_update_pages_lock = self.sync_cache.has_update_pages_lock(5)
        expect(has_update_pages_lock).not_to_be_null()

        self.sync_cache.release_update_pages_lock(has_update_pages_lock)

        # lock can be taken again once released
        lock = self.sync_cache.has_update_pages_lock(5)
        expect(lock).not_to_be_null()

    def test_can_delete_domain_violations_prefs(self):
        domain_url = 'globo.com'
        key = 'violations-prefs-%s' % domain_url
        self.sync_cache.redis.delete(key)

        prefs = self.sync_cache.redis.get(key)
        expect(prefs).to_be_null()

        data = dumps([{'key': 'test', 'value': '10'}])
        self.sync_cache.redis.setex(key, 10, data)

        prefs = self.sync_cache.redis.get(key)
        expect(prefs).to_be_like(data)

        self.sync_cache.delete_domain_violations_prefs(domain_url)

        prefs = self.sync_cache.redis.get(key)
        expect(prefs).to_be_null()

    def test_can_get_domain_violations_prefs(self):
        """Prefs come from the DB on a miss, then from redis (db detached)."""
        domain = DomainFactory.create(name='globo.com')
        self.sync_cache.redis.delete( 'violations-prefs-%s' % domain.name)

        for i in range(3):
            DomainsViolationsPrefsFactory.create(
                domain=domain,
                key=KeyFactory.create(name='some.random.%d' % i),
                value='v%d' % i
            )

        prefs = self.sync_cache.get_domain_violations_prefs('globo.com')
        expect(prefs).to_equal([
            {'value': u'v0', 'key': u'some.random.0'},
            {'value': u'v1', 'key': u'some.random.1'},
            {'value': u'v2', 'key': u'some.random.2'}
        ])

        # should get from cache
        self.sync_cache.db = None
        prefs = self.sync_cache.get_domain_violations_prefs('globo.com')
        expect(prefs).to_equal([
            {'value': u'v0', 'key': u'some.random.0'},
            {'value': u'v1', 'key': u'some.random.1'},
            {'value': u'v2', 'key': u'some.random.2'}
        ])

    def test_add_next_job_bucket(self):
        """Jobs land in the 'next-job-bucket' sorted set in insertion order."""
        key = 'next-job-bucket'
        self.sync_cache.redis.delete(key)

        prefs = self.sync_cache.redis.get(key)
        expect(prefs).to_be_null()

        for x in range(2):
            page = PageFactory.create(uuid='%d' %x, url='http://g%d.com' % x)
            self.sync_cache.add_next_job_bucket(page.uuid, page.url)

        data = self.sync_cache.redis.zrange(key, 0, 0)
        expect(data).to_be_like([
            dumps({"url": "http://g0.com", "page": "0"})
        ])
        data = self.sync_cache.redis.zrange(key, 1, 1)
        expect(data).to_be_like([
            dumps({"url": "http://g1.com", "page": "1"})
        ])

    def test_get_next_job_bucket(self):
        """Jobs pop back in score (insertion-time) order, then None when empty."""
        key = 'next-job-bucket'
        self.sync_cache.redis.delete(key)

        prefs = self.sync_cache.redis.get(key)
        expect(prefs).to_be_null()

        for x in range(2):
            page = PageFactory.create(uuid='%d' %x, url='http://g%d.com' % x)
            # old redis-py zadd signature: (key, score, member)
            self.sync_cache.redis.zadd(
                'next-job-bucket',
                time.time(),
                dumps({'page': str(page.uuid), 'url': page.url})
            )

        data = self.sync_cache.get_next_job_bucket()
        expect(data).to_be_like(
            dumps({"url": "http://g0.com", "page": "0"})
        )
        data = self.sync_cache.get_next_job_bucket()
        expect(data).to_be_like(
            dumps({"url": "http://g1.com", "page": "1"})
        )
        data = self.sync_cache.get_next_job_bucket()
        expect(data).to_be_null()
|
direvus/ansible | refs/heads/devel | lib/ansible/modules/storage/netapp/netapp_e_storage_system.py | 21 | #!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: netapp_e_storage_system
version_added: "2.2"
short_description: NetApp E-Series Web Services Proxy manage storage arrays
description:
- Manage the arrays accessible via a NetApp Web Services Proxy for NetApp E-series storage arrays.
options:
api_username:
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
required: true
api_password:
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
required: true
api_url:
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
required: true
validate_certs:
description:
- Should https certificates be validated?
type: bool
default: 'yes'
ssid:
description:
- The ID of the array to manage. This value must be unique for each array.
required: true
state:
description:
- Whether the specified array should be configured on the Web Services Proxy or not.
required: true
choices: ['present', 'absent']
controller_addresses:
description:
- The list addresses for the out-of-band management adapter or the agent host. Mutually exclusive of array_wwn parameter.
required: true
array_wwn:
description:
- The WWN of the array to manage. Only necessary if in-band managing multiple arrays on the same agent host. Mutually exclusive of
controller_addresses parameter.
array_password:
description:
- The management password of the array to manage, if set.
enable_trace:
description:
- Enable trace logging for SYMbol calls to the storage system.
type: bool
default: 'no'
meta_tags:
description:
- Optional meta tags to associate to this storage system
author: Kevin Hulquest (@hulquest)
'''
EXAMPLES = '''
---
- name: Presence of storage system
netapp_e_storage_system:
ssid: "{{ item.key }}"
state: present
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
controller_addresses:
- "{{ item.value.address1 }}"
- "{{ item.value.address2 }}"
with_dict: "{{ storage_systems }}"
when: check_storage_system
'''
RETURN = '''
msg:
description: State of request
type: string
returned: always
sample: 'Storage system removed.'
'''
import json
from datetime import datetime as dt, timedelta
from time import sleep
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    """Issue an HTTP request via ansible's open_url and JSON-decode the body.

    Returns a ``(status_code, data)`` tuple where *data* is the parsed JSON
    body (or the original *data* argument when the body is empty). Raises
    ``Exception`` on a >= 400 status unless *ignore_errors* is True.
    """
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError as err:
        # HTTP errors still carry a readable body; fall through and parse it.
        r = err.fp

    # Pre-initialize so the except clause below can safely reference it even
    # when r.read() itself raises (previously a NameError in that path).
    raw_data = None
    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
        else:
            raw_data = None
    except Exception:
        # was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
        if ignore_errors:
            pass
        else:
            raise Exception(raw_data)

    resp_code = r.getcode()

    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    else:
        return resp_code, data
def do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, request_body, timeout):
    """POST a storage-system registration to the proxy and wait for contact.

    On a 201 the proxy reports 'neverContacted' until it reaches the array;
    poll once a second until the status changes or *timeout* seconds elapse.
    Returns ``(status, last_response)``.
    """
    rc, resp = request(api_url + "/storage-systems", data=request_body, headers=post_headers,
                       method='POST', url_username=api_usr, url_password=api_pwd,
                       validate_certs=validate_certs)

    return_resp = resp
    status = resp['status'] if 'status' in resp else None

    if rc == 201:
        status = 'neverContacted'
        deadline = dt.utcnow() + timedelta(seconds=timeout)

        while status == 'neverContacted':
            if dt.utcnow() > deadline:
                raise Exception("web proxy timed out waiting for array status")

            sleep(1)
            rc, system_resp = request(api_url + "/storage-systems/%s" % ssid,
                                      headers=dict(Accept="application/json"), url_username=api_usr,
                                      url_password=api_pwd, validate_certs=validate_certs,
                                      ignore_errors=True)
            status = system_resp['status']
            return_resp = system_resp

    return status, return_resp
def main():
    """Ensure a storage array is registered with (state=present) or removed
    from (state=absent) the Web Services Proxy, reporting `changed` to Ansible.
    """
    argument_spec = basic_auth_argument_spec()
    argument_spec.update(dict(
        state=dict(required=True, choices=['present', 'absent']),
        ssid=dict(required=True, type='str'),
        controller_addresses=dict(type='list'),
        array_wwn=dict(required=False, type='str'),
        array_password=dict(required=False, type='str', no_log=True),
        array_status_timeout_sec=dict(default=60, type='int'),
        enable_trace=dict(default=False, type='bool'),
        meta_tags=dict(type='list')
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[['controller_addresses', 'array_wwn']],
        required_if=[('state', 'present', ['controller_addresses'])]
    )

    p = module.params

    state = p['state']
    ssid = p['ssid']
    controller_addresses = p['controller_addresses']
    array_wwn = p['array_wwn']
    array_password = p['array_password']
    array_status_timeout_sec = p['array_status_timeout_sec']
    validate_certs = p['validate_certs']
    meta_tags = p['meta_tags']
    enable_trace = p['enable_trace']

    api_usr = p['api_username']
    api_pwd = p['api_password']
    api_url = p['api_url']

    changed = False
    array_exists = False

    # Probe the proxy for the array; a 404 is expected (ignore_errors=True).
    try:
        (rc, resp) = request(api_url + "/storage-systems/%s" % ssid, headers=dict(Accept="application/json"),
                             url_username=api_usr, url_password=api_pwd, validate_certs=validate_certs,
                             ignore_errors=True)
    except Exception as err:
        module.fail_json(msg="Error accessing storage-system with id [%s]. Error [%s]" % (ssid, to_native(err)))

    array_exists = True
    array_detail = resp

    if rc == 200:
        if state == 'absent':
            changed = True
            array_exists = False

        elif state == 'present':
            # Re-register when the configured controller addresses differ.
            current_addresses = frozenset(i for i in (array_detail['ip1'], array_detail['ip2']) if i)
            if set(controller_addresses) != current_addresses:
                changed = True
            if array_detail['wwn'] != array_wwn and array_wwn is not None:
                module.fail_json(
                    msg='It seems you may have specified a bad WWN. The storage system ID you specified, %s, currently has the WWN of %s' %
                        (ssid, array_detail['wwn'])
                )

    elif rc == 404:
        if state == 'present':
            changed = True
            array_exists = False
        else:
            changed = False
            module.exit_json(changed=changed, msg="Storage system was not present.")

    if changed and not module.check_mode:
        if state == 'present':
            if not array_exists:
                # add the array
                array_add_req = dict(
                    id=ssid,
                    controllerAddresses=controller_addresses,
                    metaTags=meta_tags,
                    enableTrace=enable_trace
                )

                if array_wwn:
                    array_add_req['wwn'] = array_wwn

                if array_password:
                    array_add_req['password'] = array_password

                post_headers = dict(Accept="application/json")
                post_headers['Content-Type'] = 'application/json'
                request_data = json.dumps(array_add_req)

                try:
                    (rc, resp) = do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, request_data,
                                         array_status_timeout_sec)
                except Exception as err:
                    module.fail_json(msg="Failed to add storage system. Id[%s]. Request body [%s]. Error[%s]." %
                                         (ssid, request_data, to_native(err)))

            else:  # array exists, modify...
                post_headers = dict(Accept="application/json")
                post_headers['Content-Type'] = 'application/json'
                post_body = dict(
                    controllerAddresses=controller_addresses,
                    removeAllTags=True,
                    enableTrace=enable_trace,
                    metaTags=meta_tags
                )

                try:
                    (rc, resp) = do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, post_body,
                                         array_status_timeout_sec)
                except Exception as err:
                    module.fail_json(msg="Failed to update storage system. Id[%s]. Request body [%s]. Error[%s]." %
                                         (ssid, post_body, to_native(err)))

        elif state == 'absent':
            # delete the array
            try:
                (rc, resp) = request(api_url + "/storage-systems/%s" % ssid, method='DELETE',
                                     url_username=api_usr,
                                     url_password=api_pwd, validate_certs=validate_certs)
            except Exception as err:
                module.fail_json(msg="Failed to remove storage array. Id[%s]. Error[%s]." % (ssid, to_native(err)))

            if rc == 422:
                # BUGFIX: user-facing message previously read "presnt"
                module.exit_json(changed=changed, msg="Storage system was not present.")
            if rc == 204:
                module.exit_json(changed=changed, msg="Storage system removed.")

    module.exit_json(changed=changed, **resp)
if __name__ == '__main__':
main()
|
boyombo/django-stations | refs/heads/master | stations/heathen/migrations/0003_auto_20161128_0519.py | 1 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-11-28 05:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 1.9.5).

    Adds the Industry model, three next-of-kin contact fields on Member
    (nok_email / nok_name / nok_phone), and a nullable FK from Member to
    Industry.
    """

    dependencies = [
        ('heathen', '0002_member_gender'),
    ]

    operations = [
        migrations.CreateModel(
            name='Industry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
            ],
        ),
        migrations.AddField(
            model_name='member',
            name='nok_email',
            field=models.EmailField(blank=True, max_length=254, null=True),
        ),
        migrations.AddField(
            model_name='member',
            name='nok_name',
            field=models.CharField(blank=True, max_length=50),
        ),
        migrations.AddField(
            model_name='member',
            name='nok_phone',
            field=models.CharField(blank=True, max_length=20),
        ),
        migrations.AddField(
            model_name='member',
            name='industry',
            # nullable so existing Member rows survive the migration
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='heathen.Industry'),
        ),
    ]
|
Deepomatic/dmake | refs/heads/master | test/test_link_names.py | 1 | import pytest
from dmake.deepobuild import LinkNames, NeededServiceSerializer, DockerLinkSerializer
from dmake.serializer import ValidationError
# docker_links
def test_docker_links_different_simple_service_same_file():
    """multiple docker_links can have different link names"""
    LinkNames.reset()
    for link_name in ('foo', 'foo2'):
        DockerLinkSerializer()._validate_('dmake.yml', None, {'image_name': 'foo', 'link_name': link_name})
def test_docker_links_different_simple_service_different_files():
    """multiple docker_links can have different link names"""
    LinkNames.reset()
    for dmake_file, link_name in (('dmake.yml', 'foo'), ('dmake2.yml', 'foo2')):
        DockerLinkSerializer()._validate_(dmake_file, None, {'image_name': 'foo', 'link_name': link_name})
def test_docker_links_same_simple_service_same_file():
    """multiple docker_links *cannot* have same link_name"""
    LinkNames.reset()
    serializer = DockerLinkSerializer()
    serializer._validate_('dmake.yml', None, {'image_name': 'foo', 'link_name': 'foo'})

    with pytest.raises(ValidationError) as excinfo:
        DockerLinkSerializer()._validate_('dmake.yml', None, {'image_name': 'foo', 'link_name': 'foo'})
    assert excinfo.value.args[0] == "Duplicate link name 'foo' with different definitions: 'docker_link' in 'dmake.yml', was previously defined as 'docker_link' in 'dmake.yml'"
def test_docker_links_same_simple_service_different_files():
    """multiple docker_links *cannot* have same link_name"""
    LinkNames.reset()
    serializer = DockerLinkSerializer()
    serializer._validate_('dmake.yml', None, {'image_name': 'foo', 'link_name': 'foo'})

    with pytest.raises(ValidationError) as excinfo:
        DockerLinkSerializer()._validate_('dmake2.yml', None, {'image_name': 'foo', 'link_name': 'foo'})
    assert excinfo.value.args[0] == "Duplicate link name 'foo' with different definitions: 'docker_link' in 'dmake2.yml', was previously defined as 'docker_link' in 'dmake.yml'"
# needed_services
def test_needed_services_same_simple_service_same_file():
    """multiple services can need same simple service"""
    LinkNames.reset()
    for _ in range(2):
        NeededServiceSerializer()._validate_('dmake.yml', None, {'service_name': 'foo', 'link_name': 'foo'})
def test_needed_services_same_simple_service_different_files():
    """multiple services can need same simple service"""
    LinkNames.reset()
    for dmake_file in ('dmake.yml', 'dmake2.yml'):
        NeededServiceSerializer()._validate_(dmake_file, None, {'service_name': 'foo', 'link_name': 'foo'})
def test_needed_services_same_simple_service_different_link_names_same_file():
"""multiple services can need same simple service with different link names"""
LinkNames.reset()
NeededServiceSerializer()._validate_('dmake.yml', None, {'service_name': 'foo', 'link_name': 'foo'})
NeededServiceSerializer()._validate_('dmake.yml', None, {'service_name': 'foo', 'link_name': 'foo2'})
def test_needed_services_same_simple_service_different_link_names_different_files():
"""multiple services can need same simple service with different link names"""
LinkNames.reset()
NeededServiceSerializer()._validate_('dmake.yml', None, {'service_name': 'foo', 'link_name': 'foo'})
NeededServiceSerializer()._validate_('dmake2.yml', None, {'service_name': 'foo', 'link_name': 'foo2'})
def test_needed_services_different_service_name_same_file():
"""multiple services *cannot* need different services with same link_name"""
LinkNames.reset()
NeededServiceSerializer()._validate_('dmake.yml', None, {'service_name': 'foo', 'link_name': 'foo'})
with pytest.raises(ValidationError) as excinfo:
NeededServiceSerializer()._validate_('dmake.yml', None, {'service_name': 'foo2', 'link_name': 'foo'})
assert "Duplicate link name 'foo' with different definitions: 'needed_link' in 'dmake.yml', was previously defined as 'needed_link' in 'dmake.yml'" == excinfo.value.args[0]
def test_needed_services_different_service_name_different_files():
"""multiple services *cannot* need different services with same link_name"""
LinkNames.reset()
NeededServiceSerializer()._validate_('dmake.yml', None, {'service_name': 'foo', 'link_name': 'foo'})
with pytest.raises(ValidationError) as excinfo:
NeededServiceSerializer()._validate_('dmake2.yml', None, {'service_name': 'foo2', 'link_name': 'foo'})
assert "Duplicate link name 'foo' with different definitions: 'needed_link' in 'dmake2.yml', was previously defined as 'needed_link' in 'dmake.yml'" == excinfo.value.args[0]
def test_needed_services_simple_specialized_env_same_file():
"""multiple services *cannot* need simple and specialized for same link name"""
LinkNames.reset()
NeededServiceSerializer()._validate_('dmake.yml', None, {'service_name': 'foo', 'link_name': 'foo'})
with pytest.raises(ValidationError) as excinfo:
NeededServiceSerializer()._validate_('dmake.yml', None, {'service_name': 'foo', 'link_name': 'foo', 'env': {'FOO': 'bar'}})
assert "Duplicate link name 'foo' with different definitions: 'needed_link' in 'dmake.yml', was previously defined as 'needed_link' in 'dmake.yml'" == excinfo.value.args[0]
def test_needed_services_simple_specialized_env_different_files():
"""multiple services *cannot* need simple and specialized for same link name"""
LinkNames.reset()
NeededServiceSerializer()._validate_('dmake.yml', None, {'service_name': 'foo', 'link_name': 'foo'})
with pytest.raises(ValidationError) as excinfo:
NeededServiceSerializer()._validate_('dmake2.yml', None, {'service_name': 'foo', 'link_name': 'foo', 'env': {'FOO': 'bar'}})
assert "Duplicate link name 'foo' with different definitions: 'needed_link' in 'dmake2.yml', was previously defined as 'needed_link' in 'dmake.yml'" == excinfo.value.args[0]
def test_needed_services_same_specialized_env_service_same_file():
"""multiple services can need same specialized"""
LinkNames.reset()
NeededServiceSerializer()._validate_('dmake.yml', None, {'service_name': 'foo', 'link_name': 'foo', 'env': {'FOO': 'bar'}})
NeededServiceSerializer()._validate_('dmake.yml', None, {'service_name': 'foo', 'link_name': 'foo', 'env': {'FOO': 'bar'}})
def test_needed_services_same_specialized_env_service_different_files():
"""multiple services can need same specialized"""
LinkNames.reset()
NeededServiceSerializer()._validate_('dmake.yml', None, {'service_name': 'foo', 'link_name': 'foo', 'env': {'FOO': 'bar'}})
NeededServiceSerializer()._validate_('dmake2.yml', None, {'service_name': 'foo', 'link_name': 'foo', 'env': {'FOO': 'bar'}})
def test_needed_services_different_specialized_env_service_same_file():
"""multiple services *cannot* need different specialized env for same link name"""
LinkNames.reset()
NeededServiceSerializer()._validate_('dmake.yml', None, {'service_name': 'foo', 'link_name': 'foo', 'env': {'FOO': 'bar'}})
with pytest.raises(ValidationError) as excinfo:
NeededServiceSerializer()._validate_('dmake.yml', None, {'service_name': 'foo', 'link_name': 'foo', 'env': {'FOO': 'bar2'}})
assert "Duplicate link name 'foo' with different definitions: 'needed_link' in 'dmake.yml', was previously defined as 'needed_link' in 'dmake.yml'" == excinfo.value.args[0]
def test_needed_services_different_specialized_env_service_different_files():
"""multiple services *cannot* need different specialized env for same link name"""
LinkNames.reset()
NeededServiceSerializer()._validate_('dmake.yml', None, {'service_name': 'foo', 'link_name': 'foo', 'env': {'FOO': 'bar'}})
with pytest.raises(ValidationError) as excinfo:
NeededServiceSerializer()._validate_('dmake2.yml', None, {'service_name': 'foo', 'link_name': 'foo', 'env': {'FOO': 'bar2'}})
assert "Duplicate link name 'foo' with different definitions: 'needed_link' in 'dmake2.yml', was previously defined as 'needed_link' in 'dmake.yml'" == excinfo.value.args[0]
def test_needed_services_same_specialized_env_different_env_exports_service_same_file():
"""multiple services can need same specialized but different env_exports"""
LinkNames.reset()
NeededServiceSerializer()._validate_('dmake.yml', None, {'service_name': 'foo', 'link_name': 'foo', 'env': {'FOO': 'bar'}, 'env_exports': {'EXPORT': '1'}})
NeededServiceSerializer()._validate_('dmake.yml', None, {'service_name': 'foo', 'link_name': 'foo', 'env': {'FOO': 'bar'}, 'env_exports': {'EXPORT': '2'}})
def test_needed_services_same_specialized_env_different_env_exports_service_different_files():
"""multiple services can need same specialized but different env_exports"""
LinkNames.reset()
NeededServiceSerializer()._validate_('dmake.yml', None, {'service_name': 'foo', 'link_name': 'foo', 'env': {'FOO': 'bar'}, 'env_exports': {'EXPORT': '1'}})
NeededServiceSerializer()._validate_('dmake2.yml', None, {'service_name': 'foo', 'link_name': 'foo', 'env': {'FOO': 'bar'}, 'env_exports': {'EXPORT': '2'}})
def test_needed_services_same_specialized_env_service_different_link_name_same_file():
"""multiple services can need same specialized service with different link names"""
LinkNames.reset()
NeededServiceSerializer()._validate_('dmake.yml', None, {'service_name': 'foo', 'link_name': 'foo', 'env': {'FOO': 'bar'}})
NeededServiceSerializer()._validate_('dmake.yml', None, {'service_name': 'foo', 'link_name': 'foo2', 'env': {'FOO': 'bar2'}})
def test_needed_services_same_specialized_env_service_different_link_name_different_files():
"""multiple services can need same specialized service with different link names"""
LinkNames.reset()
NeededServiceSerializer()._validate_('dmake.yml', None, {'service_name': 'foo', 'link_name': 'foo', 'env': {'FOO': 'bar'}})
NeededServiceSerializer()._validate_('dmake2.yml', None, {'service_name': 'foo', 'link_name': 'foo2', 'env': {'FOO': 'bar2'}})
# mix needed_services and docker_links
def test_mix_links_different_link_names_same_file():
"""mix docker_links, needed_services can have different link names"""
LinkNames.reset()
DockerLinkSerializer()._validate_('dmake.yml', None, {'image_name': 'foo', 'link_name': 'foo'})
NeededServiceSerializer()._validate_('dmake.yml', None, {'service_name': 'foo', 'link_name': 'foo2'})
def test_mix_links_different_link_names_different_files():
"""mix docker_links, needed_services can have different link names"""
LinkNames.reset()
DockerLinkSerializer()._validate_('dmake.yml', None, {'image_name': 'foo', 'link_name': 'foo'})
NeededServiceSerializer()._validate_('dmake2.yml', None, {'service_name': 'foo', 'link_name': 'foo2'})
def test_mix_links_same_link_names_same_file():
"""mix docker_links, needed_services *cannot* have same link name"""
LinkNames.reset()
DockerLinkSerializer()._validate_('dmake.yml', None, {'image_name': 'foo', 'link_name': 'foo'})
with pytest.raises(ValidationError) as excinfo:
NeededServiceSerializer()._validate_('dmake.yml', None, {'service_name': 'foo', 'link_name': 'foo'})
assert "Duplicate link name 'foo' with different definitions: 'needed_link' in 'dmake.yml', was previously defined as 'docker_link' in 'dmake.yml'" == excinfo.value.args[0]
def test_mix_links_same_link_names_different_files():
"""mix docker_links, needed_services *cannot* have same link name"""
LinkNames.reset()
DockerLinkSerializer()._validate_('dmake.yml', None, {'image_name': 'foo', 'link_name': 'foo'})
with pytest.raises(ValidationError) as excinfo:
NeededServiceSerializer()._validate_('dmake2.yml', None, {'service_name': 'foo', 'link_name': 'foo'})
assert "Duplicate link name 'foo' with different definitions: 'needed_link' in 'dmake2.yml', was previously defined as 'docker_link' in 'dmake.yml'" == excinfo.value.args[0]
|
OpenUpgrade-dev/OpenUpgrade | refs/heads/8.0 | addons/payment_buckaroo/tests/test_buckaroo.py | 321 | # -*- coding: utf-8 -*-
from lxml import objectify
import urlparse
import openerp
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment.tests.common import PaymentAcquirerCommon
from openerp.addons.payment_buckaroo.controllers.main import BuckarooController
from openerp.tools import mute_logger
@openerp.tests.common.at_install(False)
@openerp.tests.common.post_install(False)
class BuckarooCommon(PaymentAcquirerCommon):
    """Shared fixture: resolves web.base.url and the demo Buckaroo acquirer id."""
    def setUp(self):
        super(BuckarooCommon, self).setUp()
        cr, uid = self.cr, self.uid
        self.base_url = self.registry('ir.config_parameter').get_param(cr, uid, 'web.base.url')
        # get the buckaroo account
        model, self.buckaroo_id = self.registry('ir.model.data').get_object_reference(cr, uid, 'payment_buckaroo', 'payment_acquirer_buckaroo')
@openerp.tests.common.at_install(False)
@openerp.tests.common.post_install(False)
class BuckarooForm(BuckarooCommon):
    """Form rendering and server-to-server feedback tests for Buckaroo.

    All signatures and post data below are fixed test vectors that must match
    the acquirer's test-environment website key byte for byte.
    """
    def test_10_Buckaroo_form_render(self):
        cr, uid, context = self.cr, self.uid, {}
        # be sure not to do stupid things
        buckaroo = self.payment_acquirer.browse(self.cr, self.uid, self.buckaroo_id, None)
        self.assertEqual(buckaroo.environment, 'test', 'test without test environment')
        # ----------------------------------------
        # Test: button direct rendering
        # ----------------------------------------
        # Expected hidden <input> values of the rendered payment form.
        form_values = {
            'add_returndata': None,
            'Brq_websitekey': buckaroo.brq_websitekey,
            'Brq_amount': '2240.0',
            'Brq_currency': 'EUR',
            'Brq_invoicenumber': 'SO004',
            'Brq_signature': '1b8c10074c622d965272a91a9e88b5b3777d2474', # update me
            'brq_test': 'True',
            'Brq_return': '%s' % urlparse.urljoin(self.base_url, BuckarooController._return_url),
            'Brq_returncancel': '%s' % urlparse.urljoin(self.base_url, BuckarooController._cancel_url),
            'Brq_returnerror': '%s' % urlparse.urljoin(self.base_url, BuckarooController._exception_url),
            'Brq_returnreject': '%s' % urlparse.urljoin(self.base_url, BuckarooController._reject_url),
            'Brq_culture': 'en-US',
        }
        # render the button
        res = self.payment_acquirer.render(
            cr, uid, self.buckaroo_id,
            'SO004', 2240.0, self.currency_euro_id,
            partner_id=None,
            partner_values=self.buyer_values,
            context=context)
        # check form result
        tree = objectify.fromstring(res)
        self.assertEqual(tree.get('action'), 'https://testcheckout.buckaroo.nl/html/', 'Buckaroo: wrong form POST url')
        for form_input in tree.input:
            if form_input.get('name') in ['submit']:
                continue
            self.assertEqual(
                form_input.get('value'),
                form_values[form_input.get('name')],
                'Buckaroo: wrong value for input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')])
            )
        # ----------------------------------------
        # Test2: button using tx + validation
        # ----------------------------------------
        # create a new draft tx
        tx_id = self.payment_transaction.create(
            cr, uid, {
                'amount': 2240.0,
                'acquirer_id': self.buckaroo_id,
                'currency_id': self.currency_euro_id,
                'reference': 'SO004',
                'partner_id': self.buyer_id,
            }, context=context
        )
        # render the button
        # The bogus reference 'should_be_erased' must be replaced by the tx's
        # own reference when tx_id is given.
        res = self.payment_acquirer.render(
            cr, uid, self.buckaroo_id,
            'should_be_erased', 2240.0, self.currency_euro,
            tx_id=tx_id,
            partner_id=None,
            partner_values=self.buyer_values,
            context=context)
        # check form result
        tree = objectify.fromstring(res)
        self.assertEqual(tree.get('action'), 'https://testcheckout.buckaroo.nl/html/', 'Buckaroo: wrong form POST url')
        for form_input in tree.input:
            if form_input.get('name') in ['submit']:
                continue
            self.assertEqual(
                form_input.get('value'),
                form_values[form_input.get('name')],
                'Buckaroo: wrong value for form input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')])
            )
    @mute_logger('openerp.addons.payment_buckaroo.models.buckaroo', 'ValidationError')
    def test_20_buckaroo_form_management(self):
        cr, uid, context = self.cr, self.uid, {}
        # be sure not to do stupid thing
        buckaroo = self.payment_acquirer.browse(self.cr, self.uid, self.buckaroo_id, None)
        self.assertEqual(buckaroo.environment, 'test', 'test without test environment')
        # typical data posted by buckaroo after client has successfully paid
        buckaroo_post_data = {
            'BRQ_RETURNDATA': u'',
            'BRQ_AMOUNT': u'2240.00',
            'BRQ_CURRENCY': u'EUR',
            'BRQ_CUSTOMER_NAME': u'Jan de Tester',
            'BRQ_INVOICENUMBER': u'SO004',
            'BRQ_PAYMENT': u'573311D081B04069BD6336001611DBD4',
            'BRQ_PAYMENT_METHOD': u'paypal',
            'BRQ_SERVICE_PAYPAL_PAYERCOUNTRY': u'NL',
            'BRQ_SERVICE_PAYPAL_PAYEREMAIL': u'fhe@openerp.com',
            'BRQ_SERVICE_PAYPAL_PAYERFIRSTNAME': u'Jan',
            'BRQ_SERVICE_PAYPAL_PAYERLASTNAME': u'Tester',
            'BRQ_SERVICE_PAYPAL_PAYERMIDDLENAME': u'de',
            'BRQ_SERVICE_PAYPAL_PAYERSTATUS': u'verified',
            'BRQ_SIGNATURE': u'175d82dd53a02bad393fee32cb1eafa3b6fbbd91',
            'BRQ_STATUSCODE': u'190',
            'BRQ_STATUSCODE_DETAIL': u'S001',
            'BRQ_STATUSMESSAGE': u'Transaction successfully processed',
            'BRQ_TEST': u'true',
            'BRQ_TIMESTAMP': u'2014-05-08 12:41:21',
            'BRQ_TRANSACTIONS': u'D6106678E1D54EEB8093F5B3AC42EA7B',
            'BRQ_WEBSITEKEY': u'5xTGyGyPyl',
        }
        # should raise error about unknown tx
        with self.assertRaises(ValidationError):
            self.payment_transaction.form_feedback(cr, uid, buckaroo_post_data, 'buckaroo', context=context)
        tx_id = self.payment_transaction.create(
            cr, uid, {
                'amount': 2240.0,
                'acquirer_id': self.buckaroo_id,
                'currency_id': self.currency_euro_id,
                'reference': 'SO004',
                'partner_name': 'Norbert Buyer',
                'partner_country_id': self.country_france_id,
            }, context=context
        )
        # validate it
        self.payment_transaction.form_feedback(cr, uid, buckaroo_post_data, 'buckaroo', context=context)
        # check state
        tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
        self.assertEqual(tx.state, 'done', 'Buckaroo: validation did not put tx into done state')
        self.assertEqual(tx.buckaroo_txnid, buckaroo_post_data.get('BRQ_TRANSACTIONS'), 'Buckaroo: validation did not update tx payid')
        # reset tx
        tx.write({'state': 'draft', 'date_validate': False, 'buckaroo_txnid': False})
        # now buckaroo post is ok: try to modify the SHASIGN
        buckaroo_post_data['BRQ_SIGNATURE'] = '54d928810e343acf5fb0c3ee75fd747ff159ef7a'
        with self.assertRaises(ValidationError):
            self.payment_transaction.form_feedback(cr, uid, buckaroo_post_data, 'buckaroo', context=context)
        # simulate an error
        buckaroo_post_data['BRQ_STATUSCODE'] = 2
        buckaroo_post_data['BRQ_SIGNATURE'] = '4164b52adb1e6a2221d3d8a39d8c3e18a9ecb90b'
        self.payment_transaction.form_feedback(cr, uid, buckaroo_post_data, 'buckaroo', context=context)
        # check state
        tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
        self.assertEqual(tx.state, 'error', 'Buckaroo: erroneous validation did not put tx into error state')
|
Spiderlover/Toontown | refs/heads/master | toontown/launcher/QuickStartLauncher.py | 6 | import sys
# Restrict module lookup to the current directory only -- presumably so the
# game's packaged modules take precedence; TODO confirm against launcher docs.
sys.path = ['']
# Importing Phase2 runs its module-level side effects before the launcher starts.
import Phase2
from toontown.launcher.QuickLauncher import QuickLauncher
# Constructing the launcher kicks off the bootstrap; the log line marks that
# this script ran to completion.
launcher = QuickLauncher()
launcher.notify.info('Reached end of StartQuickLauncher.py.')
|
40223211/cadb_g7_w18test | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/importlib/__init__.py | 610 | """A pure Python implementation of import."""
__all__ = ['__import__', 'import_module', 'invalidate_caches']
# Bootstrap help #####################################################
# Until bootstrapping is complete, DO NOT import any modules that attempt
# to import importlib._bootstrap (directly or indirectly). Since this
# partially initialised package would be present in sys.modules, those
# modules would get an uninitialised copy of the source version, instead
# of a fully initialised version (either the frozen one or the one
# initialised below if the frozen one is not available).
import _imp # Just the builtin component, NOT the full Python module
import sys
from . import machinery #fix me brython
# Prefer the frozen (C-accelerated) bootstrap; fall back to the pure-Python
# copy shipped with this package and wire it up by hand.
try:
    import _frozen_importlib as _bootstrap
except ImportError:
    from . import _bootstrap
    _bootstrap._setup(sys, _imp)
else:
    # importlib._bootstrap is the built-in import, ensure we don't create
    # a second copy of the module.
    _bootstrap.__name__ = 'importlib._bootstrap'
    _bootstrap.__package__ = 'importlib'
    _bootstrap.__file__ = __file__.replace('__init__.py', '_bootstrap.py')
    sys.modules['importlib._bootstrap'] = _bootstrap
# To simplify imports in test code
_w_long = _bootstrap._w_long
_r_long = _bootstrap._r_long
# Fully bootstrapped at this point, import whatever you like, circular
# dependencies and startup overhead minimisation permitting :)
# Public API #########################################################
from ._bootstrap import __import__
def invalidate_caches():
    """Call the invalidate_caches() method on all meta path finders stored in
    sys.meta_path (where implemented)."""
    for finder in sys.meta_path:
        # EAFP: finders without the hook are simply skipped.
        try:
            invalidate = finder.invalidate_caches
        except AttributeError:
            continue
        invalidate()
def find_loader(name, path=None):
    """Find the loader for the specified module.

    sys.modules is consulted first: if the module was already imported, its
    ``__loader__`` is returned, and ValueError is raised when that attribute
    is None. Otherwise sys.meta_path is searched for a suitable loader with
    the value of 'path' given to the finders; None is returned if no loader
    could be found.

    Dotted names do not have their parent packages implicitly imported. You
    will most likely need to explicitly import all parent packages in the
    proper order for a submodule to get the correct loader.
    """
    try:
        loader = sys.modules[name].__loader__
    except KeyError:
        # Not imported yet: delegate to the meta path machinery.
        return _bootstrap._find_module(name, path)
    if loader is None:
        raise ValueError('{}.__loader__ is None'.format(name))
    return loader
def import_module(name, package=None):
    """Import a module.

    The 'package' argument is required when performing a relative import. It
    specifies the package to use as the anchor point from which to resolve the
    relative import to an absolute import.
    """
    if name.startswith('.') and not package:
        raise TypeError("relative imports require the 'package' argument")
    # The number of leading dots gives the relative-import level.
    level = len(name) - len(name.lstrip('.'))
    return _bootstrap._gcd_import(name[level:], package, level)
#need at least one import hook for importlib stuff to work.
# Brython-specific: install a default finder on sys.meta_path so the import
# machinery above always has at least one hook to consult.
import basehook
sys.meta_path.append(basehook.BaseHook())
|
ormnv/os_final_project | refs/heads/master | django/utils/_os.py | 112 | import os
import stat
import sys
from os.path import join, normcase, normpath, abspath, isabs, sep, dirname
from django.utils.encoding import force_text
from django.utils import six
# On non-Windows platforms the WindowsError builtin does not exist; provide a
# stub subclass of Exception so `except WindowsError` clauses below stay valid.
try:
    WindowsError = WindowsError
except NameError:
    class WindowsError(Exception):
        pass
if not six.PY3:
    # Python 2 only: encoding used to convert between text and byte paths.
    fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
# Under Python 2, define our own abspath function that can handle joining
# unicode paths to a current working directory that has non-ASCII characters
# in it. This isn't necessary on Windows since the Windows version of abspath
# handles this correctly. It also handles drive letters differently than the
# pure Python implementation, so it's best not to replace it.
if six.PY3 or os.name == 'nt':
    abspathu = abspath
else:
    def abspathu(path):
        """
        Version of os.path.abspath that uses the unicode representation
        of the current working directory, thus avoiding a UnicodeDecodeError
        in join when the cwd has non-ASCII characters.
        """
        if not isabs(path):
            path = join(os.getcwdu(), path)
        return normpath(path)
def upath(path):
    """Return *path* as text (decode bytestrings on Python 2)."""
    if six.PY3:
        return path
    return path.decode(fs_encoding)
def npath(path):
    """Return a native path: text on Python 3, a bytestring on Python 2."""
    if six.PY3 or isinstance(path, bytes):
        return path
    return path.encode(fs_encoding)
def safe_join(base, *paths):
    """
    Joins one or more path components to the base path component intelligently.
    Returns a normalized, absolute version of the final path.
    The final path must be located inside of the base path component (otherwise
    a ValueError is raised).
    """
    base = force_text(base)
    paths = [force_text(p) for p in paths]
    final_path = abspathu(join(base, *paths))
    base_path = abspathu(base)
    # Security check: reject paths that escape base (directory traversal).
    # Ensure final_path starts with base_path (using normcase to ensure we
    # don't false-negative on case insensitive operating systems like Windows),
    # further, one of the following conditions must be true:
    #  a) The next character is the path separator (to prevent conditions like
    #     safe_join("/dir", "/../d"))
    #  b) The final path must be the same as the base path.
    #  c) The base path must be the most root path (meaning either "/" or "C:\\")
    if (not normcase(final_path).startswith(normcase(base_path + sep)) and
        normcase(final_path) != normcase(base_path) and
        dirname(normcase(base_path)) != normcase(base_path)):
        raise ValueError('The joined path (%s) is located outside of the base '
                         'path component (%s)' % (final_path, base_path))
    return final_path
def rmtree_errorhandler(func, path, exc_info):
    """
    On Windows, some files are read-only (e.g. in .svn dirs), so when
    rmtree() tries to remove them, an exception is thrown.
    We catch that here, remove the read-only attribute, and hopefully
    continue without problems.
    """
    exctype, value = exc_info[:2]
    # looking for a windows error
    # NB: a bare `raise` re-raises the exception currently being handled.
    if exctype is not WindowsError or 'Access is denied' not in str(value):
        raise
    # file type should currently be read only
    if ((os.stat(path).st_mode & stat.S_IREAD) != stat.S_IREAD):
        raise
    # convert to read/write
    os.chmod(path, stat.S_IWRITE)
    # use the original function to repeat the operation
    func(path)
|
atosorigin/ansible | refs/heads/devel | test/units/module_utils/facts/hardware/test_linux.py | 93 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from units.compat import unittest
from units.compat.mock import Mock, patch
from ansible.module_utils.facts import timeout
from ansible.module_utils.facts.hardware import linux
from . linux_data import LSBLK_OUTPUT, LSBLK_OUTPUT_2, LSBLK_UUIDS, MTAB, MTAB_ENTRIES, BIND_MOUNTS, STATVFS_INFO, UDEVADM_UUID, UDEVADM_OUTPUT
# Load the canned `findmnt` output used by the bind-mount tests below.
with open(os.path.join(os.path.dirname(__file__), '../fixtures/findmount_output.txt')) as f:
    FINDMNT_OUTPUT = f.read()
# NOTE(review): GET_MOUNT_SIZE is never referenced in this file -- looks vestigial.
GET_MOUNT_SIZE = {}
def mock_get_mount_size(mountpoint):
    # Stand-in for linux.get_mount_size: serve canned statvfs data from the
    # fixture table, defaulting to an empty dict for unknown mountpoints.
    return STATVFS_INFO.get(mountpoint, {})
class TestFactsLinuxHardwareGetMountFacts(unittest.TestCase):
    """Unit tests for LinuxHardware mount-fact collection.

    Every external probe (mtab, findmnt, lsblk, udevadm, statvfs) is patched
    out with canned fixture data, so no real system state is touched.
    """
    # FIXME: mock.patch instead
    def setUp(self):
        timeout.GATHER_TIMEOUT = 10
    def tearDown(self):
        timeout.GATHER_TIMEOUT = None
    @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._mtab_entries', return_value=MTAB_ENTRIES)
    @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._find_bind_mounts', return_value=BIND_MOUNTS)
    @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._lsblk_uuid', return_value=LSBLK_UUIDS)
    @patch('ansible.module_utils.facts.hardware.linux.get_mount_size', side_effect=mock_get_mount_size)
    @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._udevadm_uuid', return_value=UDEVADM_UUID)
    # mock.patch passes the mocks bottom-up: the decorator closest to the
    # function supplies the first mock argument. The parameter names below
    # follow that documented order (the previous names were rotated by one,
    # so each name referred to the wrong mock).
    def test_get_mount_facts(self,
                             mock_udevadm_uuid,
                             mock_get_mount_size,
                             mock_lsblk_uuid,
                             mock_find_bind_mounts,
                             mock_mtab_entries):
        module = Mock()
        # Returns a LinuxHardware-ish
        lh = linux.LinuxHardware(module=module, load_on_init=False)
        # Nothing returned, just self.facts modified as a side effect
        mount_facts = lh.get_mount_facts()
        self.assertIsInstance(mount_facts, dict)
        self.assertIn('mounts', mount_facts)
        self.assertIsInstance(mount_facts['mounts'], list)
        self.assertIsInstance(mount_facts['mounts'][0], dict)
        # Expected facts for /home, derived from the MTAB_ENTRIES /
        # STATVFS_INFO fixtures.
        home_expected = {'block_available': 1001578731,
                         'block_size': 4096,
                         'block_total': 105871006,
                         'block_used': 5713133,
                         'device': '/dev/mapper/fedora_dhcp129--186-home',
                         'fstype': 'ext4',
                         'inode_available': 26860880,
                         'inode_total': 26902528,
                         'inode_used': 41648,
                         'mount': '/home',
                         'options': 'rw,seclabel,relatime,data=ordered',
                         'size_available': 410246647808,
                         'size_total': 433647640576,
                         'uuid': 'N/A'}
        home_info = [x for x in mount_facts['mounts'] if x['mount'] == '/home'][0]
        self.maxDiff = 4096
        self.assertDictEqual(home_info, home_expected)
    @patch('ansible.module_utils.facts.hardware.linux.get_file_content', return_value=MTAB)
    def test_get_mtab_entries(self, mock_get_file_content):
        module = Mock()
        lh = linux.LinuxHardware(module=module, load_on_init=False)
        mtab_entries = lh._mtab_entries()
        self.assertIsInstance(mtab_entries, list)
        self.assertIsInstance(mtab_entries[0], list)
        self.assertEqual(len(mtab_entries), 38)
    @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_findmnt', return_value=(0, FINDMNT_OUTPUT, ''))
    def test_find_bind_mounts(self, mock_run_findmnt):
        module = Mock()
        lh = linux.LinuxHardware(module=module, load_on_init=False)
        bind_mounts = lh._find_bind_mounts()
        # If bind_mounts becomes another seq type, feel free to change
        self.assertIsInstance(bind_mounts, set)
        self.assertEqual(len(bind_mounts), 1)
        self.assertIn('/not/a/real/bind_mount', bind_mounts)
    @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_findmnt', return_value=(37, '', ''))
    def test_find_bind_mounts_non_zero(self, mock_run_findmnt):
        # A failing findmnt must yield an empty result, not an error.
        module = Mock()
        lh = linux.LinuxHardware(module=module, load_on_init=False)
        bind_mounts = lh._find_bind_mounts()
        self.assertIsInstance(bind_mounts, set)
        self.assertEqual(len(bind_mounts), 0)
    def test_find_bind_mounts_no_findmnts(self):
        # findmnt binary missing entirely: also an empty result.
        module = Mock()
        module.get_bin_path = Mock(return_value=None)
        lh = linux.LinuxHardware(module=module, load_on_init=False)
        bind_mounts = lh._find_bind_mounts()
        self.assertIsInstance(bind_mounts, set)
        self.assertEqual(len(bind_mounts), 0)
    @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(0, LSBLK_OUTPUT, ''))
    def test_lsblk_uuid(self, mock_run_lsblk):
        module = Mock()
        lh = linux.LinuxHardware(module=module, load_on_init=False)
        lsblk_uuids = lh._lsblk_uuid()
        self.assertIsInstance(lsblk_uuids, dict)
        self.assertIn(b'/dev/loop9', lsblk_uuids)
        self.assertIn(b'/dev/sda1', lsblk_uuids)
        self.assertEqual(lsblk_uuids[b'/dev/sda1'], b'32caaec3-ef40-4691-a3b6-438c3f9bc1c0')
    @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(37, LSBLK_OUTPUT, ''))
    def test_lsblk_uuid_non_zero(self, mock_run_lsblk):
        module = Mock()
        lh = linux.LinuxHardware(module=module, load_on_init=False)
        lsblk_uuids = lh._lsblk_uuid()
        self.assertIsInstance(lsblk_uuids, dict)
        self.assertEqual(len(lsblk_uuids), 0)
    def test_lsblk_uuid_no_lsblk(self):
        module = Mock()
        module.get_bin_path = Mock(return_value=None)
        lh = linux.LinuxHardware(module=module, load_on_init=False)
        lsblk_uuids = lh._lsblk_uuid()
        self.assertIsInstance(lsblk_uuids, dict)
        self.assertEqual(len(lsblk_uuids), 0)
    @patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(0, LSBLK_OUTPUT_2, ''))
    def test_lsblk_uuid_dev_with_space_in_name(self, mock_run_lsblk):
        module = Mock()
        lh = linux.LinuxHardware(module=module, load_on_init=False)
        lsblk_uuids = lh._lsblk_uuid()
        self.assertIsInstance(lsblk_uuids, dict)
        self.assertIn(b'/dev/loop0', lsblk_uuids)
        self.assertIn(b'/dev/sda1', lsblk_uuids)
        self.assertEqual(lsblk_uuids[b'/dev/mapper/an-example-mapper with a space in the name'], b'84639acb-013f-4d2f-9392-526a572b4373')
        self.assertEqual(lsblk_uuids[b'/dev/sda1'], b'32caaec3-ef40-4691-a3b6-438c3f9bc1c0')
    def test_udevadm_uuid(self):
        module = Mock()
        module.run_command = Mock(return_value=(0, UDEVADM_OUTPUT, '')) # (rc, out, err)
        lh = linux.LinuxHardware(module=module, load_on_init=False)
        udevadm_uuid = lh._udevadm_uuid('mock_device')
        self.assertEqual(udevadm_uuid, '57b1a3e7-9019-4747-9809-7ec52bba9179')
|
daniaki/Enrich2 | refs/heads/py3 | enrich2/base/__init__.py | 1 | # Copyright 2016-2017 Alan F Rubin, Daniel Esposito
#
# This file is part of Enrich2.
#
# Enrich2 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Enrich2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Enrich2. If not, see <http://www.gnu.org/licenses/>.
"""
Enrich2 base module
===================
This module contains core classes, constants and utility methods used by
other Enrich2 classes and methods.
"""
# Submodules considered part of the public API (consumed by
# `from enrich2.base import *`).
__all__ = [
    "config_constants",
    "constants",
    "dataframe",
    "storemanager",
    "utils"
]
|
OCA/geospatial | refs/heads/12.0 | test_base_geoengine/__manifest__.py | 1 | # Copyright 2019 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
# Odoo addon manifest: this bare dict literal is evaluated by the module loader.
{
    'name': 'test-base-geoengine',
    'version': '12.0.1.0.0',
    'category': 'Tests',
    'author': "Camptocamp,ACSONE SA/NV,Odoo Community Association (OCA)",
    'license': 'AGPL-3',
    'website': 'https://github.com/OCA/geospatial',
    # Modules that must be installed before this one.
    'depends': [
        'base_geoengine',
    ],
    # Data files loaded at install/upgrade time, in order.
    'data': [
        'views.xml',
        'security/ir.model.access.csv',
    ],
    'installable': True,
    'auto_install': False,
}
|
cogmission/nupic | refs/heads/master | examples/tp/hello_tm.py | 8 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# Python 2 print statement: emits the tutorial banner verbatim when the
# script starts.
print """
This program shows how to access the Temporal Memory directly by demonstrating
how to create a TM instance, train it with vectors, get predictions, and
inspect the state.
The code here runs a very simple version of sequence learning, with one
cell per column. The TP is trained with the simple sequence A->B->C->D->E
HOMEWORK: once you have understood exactly what is going on here, try changing
cellsPerColumn to 4. What is the difference between once cell per column and 4
cells per column?
PLEASE READ THROUGH THE CODE COMMENTS - THEY EXPLAIN THE OUTPUT IN DETAIL
"""
# Can't live without numpy
import numpy
# izip for maximum efficiency
from itertools import izip as zip, count
# Python implementation of Temporal Memory
from nupic.research.temporal_memory import TemporalMemory as TM
# Utility routine for printing the input vector
def formatRow(x):
  """Render *x* as a string of its elements for display.

  A space is inserted after every 10th element so columns group visually,
  and a trailing space is always appended (matches the original output).

  Fix: the original built the string with ``+=`` in a loop, which is
  potentially quadratic; collect parts and join once instead.
  """
  parts = []
  for idx, value in enumerate(x):
    if idx > 0 and idx % 10 == 0:
      parts.append(' ')
    parts.append(str(value))
  parts.append(' ')
  return ''.join(parts)
# Step 1: create Temporal Pooler instance with appropriate parameters
# 50 minicolumns with 2 cells each.  permanenceDecrement=0.0 disables
# forgetting so this short demo sequence is learned quickly.
tm = TM(columnDimensions = (50,),
        cellsPerColumn=2,
        initialPermanence=0.5,
        connectedPermanence=0.5,
        minThreshold=10,
        maxNewSynapseCount=20,
        permanenceIncrement=0.1,
        permanenceDecrement=0.0,
        activationThreshold=8,
        )
# Step 2: create input vectors to feed to the temporal pooler. Each input vector
# must be numberOfCols wide. Here we create a simple sequence of 5 vectors
# representing the sequence A -> B -> C -> D -> E
# Each "letter" is an SDR of 10 contiguous active columns; the five SDRs do
# not overlap, so the patterns are maximally distinct.
x = numpy.zeros((5, tm.numberOfColumns()), dtype="uint32")
x[0, 0:10] = 1   # Input SDR representing "A", corresponding to columns 0-9
x[1, 10:20] = 1  # Input SDR representing "B", corresponding to columns 10-19
x[2, 20:30] = 1  # Input SDR representing "C", corresponding to columns 20-29
x[3, 30:40] = 1  # Input SDR representing "D", corresponding to columns 30-39
x[4, 40:50] = 1  # Input SDR representing "E", corresponding to columns 40-49
# Step 3: send this simple sequence to the temporal memory for learning
# We repeat the sequence 10 times
for i in range(10):
  # Send each letter in the sequence in order
  for j in range(5):
    # NOTE(review): the comprehension reuses the names `i` and `j`; under
    # Python 2 comprehension variables leak and rebind the loop variables
    # after this line runs.  Harmless here because neither is read again
    # before reassignment, but fragile.
    activeColumns = set([i for i, j in zip(count(), x[j]) if j == 1])
    # The compute method performs one step of learning and/or inference. Note:
    # here we just perform learning but you can perform prediction/inference and
    # learning in the same step if you want (online learning).
    tm.compute(activeColumns, learn = True)
    # The following print statements can be ignored.
    # Useful for tracing internal states
    print("active cells " + str(tm.getActiveCells()))
    print("predictive cells " + str(tm.getPredictiveCells()))
    print("winner cells " + str(tm.getWinnerCells()))
    print("# of active segments " + str(tm.connections.numSegments()))
  # The reset command tells the TP that a sequence just ended and essentially
  # zeros out all the states. It is not strictly necessary but it's a bit
  # messier without resets, and the TP learns quicker with resets.
  tm.reset()
#######################################################################
#
# Step 3: send the same sequence of vectors and look at predictions made by
# temporal memory
for j in range(5):
  print "\n\n--------","ABCDE"[j],"-----------"
  print "Raw input vector : " + formatRow(x[j])
  # NOTE(review): as in the training loop, the comprehension reuses `i`/`j`
  # and Python 2 leaks them; `j` is rebound after this line but is only read
  # again at the top of the next iteration, so this is harmless.
  activeColumns = set([i for i, j in zip(count(), x[j]) if j == 1])
  # Send each vector to the TM, with learning turned off
  tm.compute(activeColumns, learn = False)
  # The following print statements prints out the active cells, predictive
  # cells, active segments and winner cells.
  #
  # What you should notice is that the columns where active state is 1
  # represent the SDR for the current input pattern and the columns where
  # predicted state is 1 represent the SDR for the next expected pattern
  print "\nAll the active and predicted cells:"
  print("active cells " + str(tm.getActiveCells()))
  print("predictive cells " + str(tm.getPredictiveCells()))
  print("winner cells " + str(tm.getWinnerCells()))
  print("# of active segments " + str(tm.connections.numSegments()))
  activeColumnsIndeces = [tm.columnForCell(i) for i in tm.getActiveCells()]
  predictedColumnIndeces = [tm.columnForCell(i) for i in tm.getPredictiveCells()]
  # Reconstructing the active and inactive columns with 1 as active and 0 as
  # inactive representation.
  actColState = ['1' if i in activeColumnsIndeces else '0' for i in range(tm.numberOfColumns())]
  actColStr = ("".join(actColState))
  predColState = ['1' if i in predictedColumnIndeces else '0' for i in range(tm.numberOfColumns())]
  predColStr = ("".join(predColState))
  # For convenience the cells are grouped
  # 10 at a time. When there are multiple cells per column the printout
  # is arranged so the cells in a column are stacked together
  print "Active columns: " + formatRow(actColStr)
  print "Predicted columns: " + formatRow(predColStr)
  # predictedCells[c][i] represents the state of the i'th cell in the c'th
  # column. To see if a column is predicted, we can simply take the OR
  # across all the cells in that column. In numpy we can do this by taking
  # the max along axis 1.
|
anaran/olympia | refs/heads/master | apps/perf/urls.py | 6 | from django.conf.urls import patterns, url
from . import views
# URL routes for the perf app: the app root ('^$') maps to the index view.
# NOTE(review): `patterns()` was removed in Django 1.10; fine for the Django
# version this tree pins, but worth flagging for future upgrades.
urlpatterns = patterns('',
    url('^$', views.index, name='perf.index'),
)
|
donkirkby/django | refs/heads/master | django/conf/locale/lv/formats.py | 504 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Raw strings: the backslash-escaped letters (\g\a\d\a, \g) are emitted as
# literal text ("gada" = Latvian "of the year"), not as format characters.
DATE_FORMAT = r'Y. \g\a\d\a j. F'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = r'Y. \g\a\d\a j. F, H:i'
YEAR_MONTH_FORMAT = r'Y. \g. F'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = r'j.m.Y'
SHORT_DATETIME_FORMAT = 'j.m.Y H:i'
FIRST_DAY_OF_WEEK = 1  # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
    '%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y',  # '2006-10-25', '25.10.2006', '25.10.06'
]
TIME_INPUT_FORMATS = [
    '%H:%M:%S',     # '14:30:59'
    '%H:%M:%S.%f',  # '14:30:59.000200'
    '%H:%M',        # '14:30'
    '%H.%M.%S',     # '14.30.59'
    '%H.%M.%S.%f',  # '14.30.59.000200'
    '%H.%M',        # '14.30'
]
DATETIME_INPUT_FORMATS = [
    '%Y-%m-%d %H:%M:%S',     # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f',  # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M',        # '2006-10-25 14:30'
    '%d.%m.%Y %H:%M:%S',     # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f',  # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M',        # '25.10.2006 14:30'
    '%d.%m.%Y',              # '25.10.2006'
    '%d.%m.%y %H:%M:%S',     # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M:%S.%f',  # '25.10.06 14:30:59.000200'
    '%d.%m.%y %H:%M',        # '25.10.06 14:30'
    '%d.%m.%y %H.%M.%S',     # '25.10.06 14.30.59'
    '%d.%m.%y %H.%M.%S.%f',  # '25.10.06 14.30.59.000200'
    '%d.%m.%y %H.%M',        # '25.10.06 14.30'
    '%d.%m.%y',              # '25.10.06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' ' # Non-breaking space
NUMBER_GROUPING = 3
|
batermj/algorithm-challenger | refs/heads/master | code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/test/test_asyncio/test_futures.py | 1 | """Tests for futures.py."""
import concurrent.futures
import gc
import re
import sys
import threading
import unittest
from unittest import mock
import asyncio
from asyncio import futures
from test.test_asyncio import utils as test_utils
from test import support
def tearDownModule():
    # Restore the default event loop policy after this module's tests run.
    asyncio.set_event_loop_policy(None)
def _fakefunc(f):
    # Identity callback: exists only so its repr appears in Future repr tests.
    return f
def first_cb():
    # Named no-op callback; its qualname is matched in repr tests.
    pass
def last_cb():
    # Named no-op callback; its qualname is matched in repr tests.
    pass
class DuckFuture:
    """Duck-typed stand-in for asyncio.Future.

    Implements the Future protocol (cancel/result/exception/__iter__ and the
    ``_asyncio_future_blocking`` marker) without inheriting from Future, so
    tests can verify that asyncio accepts protocol-compatible objects.
    """
    _asyncio_future_blocking = False
    _state_cancelled = False
    _state_result = None
    _state_exception = None

    def cancel(self):
        # Refuse to cancel once a result/exception was set.
        if self.done():
            return False
        self._state_cancelled = True
        return True

    def cancelled(self):
        return self._state_cancelled

    def done(self):
        # Done once cancelled, or once a (non-None) result or exception
        # has been recorded.
        return (self._state_cancelled
                or self._state_result is not None
                or self._state_exception is not None)

    def result(self):
        assert not self.cancelled()
        if self._state_exception is not None:
            raise self._state_exception
        return self._state_result

    def exception(self):
        assert not self.cancelled()
        return self._state_exception

    def set_result(self, result):
        assert not self.done()
        assert result is not None
        self._state_result = result

    def set_exception(self, exception):
        assert not self.done()
        assert exception is not None
        self._state_exception = exception

    def __iter__(self):
        # Generator protocol used by `yield from fut`: suspend until done,
        # then deliver the result via StopIteration.
        if not self.done():
            self._asyncio_future_blocking = True
            yield self
        assert self.done()
        return self.result()
class DuckTests(test_utils.TestCase):
    # Verifies that asyncio helpers accept duck-typed futures (objects that
    # implement the Future protocol without subclassing Future).

    def setUp(self):
        super().setUp()
        self.loop = self.new_test_loop()
        self.addCleanup(self.loop.close)

    def test_wrap_future(self):
        # wrap_future() must pass a future-like object through unchanged.
        f = DuckFuture()
        g = asyncio.wrap_future(f)
        assert g is f

    def test_ensure_future(self):
        # ensure_future() must likewise return the duck-typed future itself.
        f = DuckFuture()
        g = asyncio.ensure_future(f)
        assert g is f
class BaseFutureTests:
def _new_future(self, *args, **kwargs):
return self.cls(*args, **kwargs)
def setUp(self):
super().setUp()
self.loop = self.new_test_loop()
self.addCleanup(self.loop.close)
def test_isfuture(self):
class MyFuture:
_asyncio_future_blocking = None
def __init__(self):
self._asyncio_future_blocking = False
self.assertFalse(asyncio.isfuture(MyFuture))
self.assertTrue(asyncio.isfuture(MyFuture()))
self.assertFalse(asyncio.isfuture(1))
# As `isinstance(Mock(), Future)` returns `False`
self.assertFalse(asyncio.isfuture(mock.Mock()))
f = self._new_future(loop=self.loop)
self.assertTrue(asyncio.isfuture(f))
self.assertFalse(asyncio.isfuture(type(f)))
# As `isinstance(Mock(Future), Future)` returns `True`
self.assertTrue(asyncio.isfuture(mock.Mock(type(f))))
f.cancel()
def test_initial_state(self):
f = self._new_future(loop=self.loop)
self.assertFalse(f.cancelled())
self.assertFalse(f.done())
f.cancel()
self.assertTrue(f.cancelled())
def test_init_constructor_default_loop(self):
asyncio.set_event_loop(self.loop)
f = self._new_future()
self.assertIs(f._loop, self.loop)
self.assertIs(f.get_loop(), self.loop)
def test_constructor_positional(self):
# Make sure Future doesn't accept a positional argument
self.assertRaises(TypeError, self._new_future, 42)
def test_uninitialized(self):
# Test that C Future doesn't crash when Future.__init__()
# call was skipped.
fut = self.cls.__new__(self.cls, loop=self.loop)
self.assertRaises(asyncio.InvalidStateError, fut.result)
fut = self.cls.__new__(self.cls, loop=self.loop)
self.assertRaises(asyncio.InvalidStateError, fut.exception)
fut = self.cls.__new__(self.cls, loop=self.loop)
with self.assertRaises((RuntimeError, AttributeError)):
fut.set_result(None)
fut = self.cls.__new__(self.cls, loop=self.loop)
with self.assertRaises((RuntimeError, AttributeError)):
fut.set_exception(Exception)
fut = self.cls.__new__(self.cls, loop=self.loop)
with self.assertRaises((RuntimeError, AttributeError)):
fut.cancel()
fut = self.cls.__new__(self.cls, loop=self.loop)
with self.assertRaises((RuntimeError, AttributeError)):
fut.add_done_callback(lambda f: None)
fut = self.cls.__new__(self.cls, loop=self.loop)
with self.assertRaises((RuntimeError, AttributeError)):
fut.remove_done_callback(lambda f: None)
fut = self.cls.__new__(self.cls, loop=self.loop)
try:
repr(fut)
except (RuntimeError, AttributeError):
pass
fut = self.cls.__new__(self.cls, loop=self.loop)
try:
fut.__await__()
except RuntimeError:
pass
fut = self.cls.__new__(self.cls, loop=self.loop)
try:
iter(fut)
except RuntimeError:
pass
fut = self.cls.__new__(self.cls, loop=self.loop)
self.assertFalse(fut.cancelled())
self.assertFalse(fut.done())
def test_cancel(self):
f = self._new_future(loop=self.loop)
self.assertTrue(f.cancel())
self.assertTrue(f.cancelled())
self.assertTrue(f.done())
self.assertRaises(asyncio.CancelledError, f.result)
self.assertRaises(asyncio.CancelledError, f.exception)
self.assertRaises(asyncio.InvalidStateError, f.set_result, None)
self.assertRaises(asyncio.InvalidStateError, f.set_exception, None)
self.assertFalse(f.cancel())
def test_result(self):
f = self._new_future(loop=self.loop)
self.assertRaises(asyncio.InvalidStateError, f.result)
f.set_result(42)
self.assertFalse(f.cancelled())
self.assertTrue(f.done())
self.assertEqual(f.result(), 42)
self.assertEqual(f.exception(), None)
self.assertRaises(asyncio.InvalidStateError, f.set_result, None)
self.assertRaises(asyncio.InvalidStateError, f.set_exception, None)
self.assertFalse(f.cancel())
def test_exception(self):
exc = RuntimeError()
f = self._new_future(loop=self.loop)
self.assertRaises(asyncio.InvalidStateError, f.exception)
# StopIteration cannot be raised into a Future - CPython issue26221
self.assertRaisesRegex(TypeError, "StopIteration .* cannot be raised",
f.set_exception, StopIteration)
f.set_exception(exc)
self.assertFalse(f.cancelled())
self.assertTrue(f.done())
self.assertRaises(RuntimeError, f.result)
self.assertEqual(f.exception(), exc)
self.assertRaises(asyncio.InvalidStateError, f.set_result, None)
self.assertRaises(asyncio.InvalidStateError, f.set_exception, None)
self.assertFalse(f.cancel())
def test_exception_class(self):
f = self._new_future(loop=self.loop)
f.set_exception(RuntimeError)
self.assertIsInstance(f.exception(), RuntimeError)
def test_yield_from_twice(self):
f = self._new_future(loop=self.loop)
def fixture():
yield 'A'
x = yield from f
yield 'B', x
y = yield from f
yield 'C', y
g = fixture()
self.assertEqual(next(g), 'A') # yield 'A'.
self.assertEqual(next(g), f) # First yield from f.
f.set_result(42)
self.assertEqual(next(g), ('B', 42)) # yield 'B', x.
# The second "yield from f" does not yield f.
self.assertEqual(next(g), ('C', 42)) # yield 'C', y.
def test_future_repr(self):
self.loop.set_debug(True)
f_pending_debug = self._new_future(loop=self.loop)
frame = f_pending_debug._source_traceback[-1]
self.assertEqual(
repr(f_pending_debug),
f'<{self.cls.__name__} pending created at {frame[0]}:{frame[1]}>')
f_pending_debug.cancel()
self.loop.set_debug(False)
f_pending = self._new_future(loop=self.loop)
self.assertEqual(repr(f_pending), f'<{self.cls.__name__} pending>')
f_pending.cancel()
f_cancelled = self._new_future(loop=self.loop)
f_cancelled.cancel()
self.assertEqual(repr(f_cancelled), f'<{self.cls.__name__} cancelled>')
f_result = self._new_future(loop=self.loop)
f_result.set_result(4)
self.assertEqual(
repr(f_result), f'<{self.cls.__name__} finished result=4>')
self.assertEqual(f_result.result(), 4)
exc = RuntimeError()
f_exception = self._new_future(loop=self.loop)
f_exception.set_exception(exc)
self.assertEqual(
repr(f_exception),
f'<{self.cls.__name__} finished exception=RuntimeError()>')
self.assertIs(f_exception.exception(), exc)
def func_repr(func):
filename, lineno = test_utils.get_function_source(func)
text = '%s() at %s:%s' % (func.__qualname__, filename, lineno)
return re.escape(text)
f_one_callbacks = self._new_future(loop=self.loop)
f_one_callbacks.add_done_callback(_fakefunc)
fake_repr = func_repr(_fakefunc)
self.assertRegex(
repr(f_one_callbacks),
r'<' + self.cls.__name__ + r' pending cb=\[%s\]>' % fake_repr)
f_one_callbacks.cancel()
self.assertEqual(repr(f_one_callbacks),
f'<{self.cls.__name__} cancelled>')
f_two_callbacks = self._new_future(loop=self.loop)
f_two_callbacks.add_done_callback(first_cb)
f_two_callbacks.add_done_callback(last_cb)
first_repr = func_repr(first_cb)
last_repr = func_repr(last_cb)
self.assertRegex(repr(f_two_callbacks),
r'<' + self.cls.__name__ + r' pending cb=\[%s, %s\]>'
% (first_repr, last_repr))
f_many_callbacks = self._new_future(loop=self.loop)
f_many_callbacks.add_done_callback(first_cb)
for i in range(8):
f_many_callbacks.add_done_callback(_fakefunc)
f_many_callbacks.add_done_callback(last_cb)
cb_regex = r'%s, <8 more>, %s' % (first_repr, last_repr)
self.assertRegex(
repr(f_many_callbacks),
r'<' + self.cls.__name__ + r' pending cb=\[%s\]>' % cb_regex)
f_many_callbacks.cancel()
self.assertEqual(repr(f_many_callbacks),
f'<{self.cls.__name__} cancelled>')
def test_copy_state(self):
from asyncio.futures import _copy_future_state
f = self._new_future(loop=self.loop)
f.set_result(10)
newf = self._new_future(loop=self.loop)
_copy_future_state(f, newf)
self.assertTrue(newf.done())
self.assertEqual(newf.result(), 10)
f_exception = self._new_future(loop=self.loop)
f_exception.set_exception(RuntimeError())
newf_exception = self._new_future(loop=self.loop)
_copy_future_state(f_exception, newf_exception)
self.assertTrue(newf_exception.done())
self.assertRaises(RuntimeError, newf_exception.result)
f_cancelled = self._new_future(loop=self.loop)
f_cancelled.cancel()
newf_cancelled = self._new_future(loop=self.loop)
_copy_future_state(f_cancelled, newf_cancelled)
self.assertTrue(newf_cancelled.cancelled())
def test_iter(self):
fut = self._new_future(loop=self.loop)
def coro():
yield from fut
def test():
arg1, arg2 = coro()
with self.assertRaisesRegex(RuntimeError, "await wasn't used"):
test()
fut.cancel()
def test_log_traceback(self):
fut = self._new_future(loop=self.loop)
with self.assertRaisesRegex(ValueError, 'can only be set to False'):
fut._log_traceback = True
@mock.patch('asyncio.base_events.logger')
def test_tb_logger_abandoned(self, m_log):
fut = self._new_future(loop=self.loop)
del fut
self.assertFalse(m_log.error.called)
@mock.patch('asyncio.base_events.logger')
def test_tb_logger_not_called_after_cancel(self, m_log):
fut = self._new_future(loop=self.loop)
fut.set_exception(Exception())
fut.cancel()
del fut
self.assertFalse(m_log.error.called)
@mock.patch('asyncio.base_events.logger')
def test_tb_logger_result_unretrieved(self, m_log):
fut = self._new_future(loop=self.loop)
fut.set_result(42)
del fut
self.assertFalse(m_log.error.called)
@mock.patch('asyncio.base_events.logger')
def test_tb_logger_result_retrieved(self, m_log):
fut = self._new_future(loop=self.loop)
fut.set_result(42)
fut.result()
del fut
self.assertFalse(m_log.error.called)
@mock.patch('asyncio.base_events.logger')
def test_tb_logger_exception_unretrieved(self, m_log):
fut = self._new_future(loop=self.loop)
fut.set_exception(RuntimeError('boom'))
del fut
test_utils.run_briefly(self.loop)
support.gc_collect()
self.assertTrue(m_log.error.called)
@mock.patch('asyncio.base_events.logger')
def test_tb_logger_exception_retrieved(self, m_log):
fut = self._new_future(loop=self.loop)
fut.set_exception(RuntimeError('boom'))
fut.exception()
del fut
self.assertFalse(m_log.error.called)
@mock.patch('asyncio.base_events.logger')
def test_tb_logger_exception_result_retrieved(self, m_log):
fut = self._new_future(loop=self.loop)
fut.set_exception(RuntimeError('boom'))
self.assertRaises(RuntimeError, fut.result)
del fut
self.assertFalse(m_log.error.called)
def test_wrap_future(self):
def run(arg):
return (arg, threading.get_ident())
ex = concurrent.futures.ThreadPoolExecutor(1)
f1 = ex.submit(run, 'oi')
f2 = asyncio.wrap_future(f1, loop=self.loop)
res, ident = self.loop.run_until_complete(f2)
self.assertTrue(asyncio.isfuture(f2))
self.assertEqual(res, 'oi')
self.assertNotEqual(ident, threading.get_ident())
ex.shutdown(wait=True)
def test_wrap_future_future(self):
f1 = self._new_future(loop=self.loop)
f2 = asyncio.wrap_future(f1)
self.assertIs(f1, f2)
def test_wrap_future_use_global_loop(self):
with mock.patch('asyncio.futures.events') as events:
events.get_event_loop = lambda: self.loop
def run(arg):
return (arg, threading.get_ident())
ex = concurrent.futures.ThreadPoolExecutor(1)
f1 = ex.submit(run, 'oi')
f2 = asyncio.wrap_future(f1)
self.assertIs(self.loop, f2._loop)
ex.shutdown(wait=True)
def test_wrap_future_cancel(self):
f1 = concurrent.futures.Future()
f2 = asyncio.wrap_future(f1, loop=self.loop)
f2.cancel()
test_utils.run_briefly(self.loop)
self.assertTrue(f1.cancelled())
self.assertTrue(f2.cancelled())
def test_wrap_future_cancel2(self):
f1 = concurrent.futures.Future()
f2 = asyncio.wrap_future(f1, loop=self.loop)
f1.set_result(42)
f2.cancel()
test_utils.run_briefly(self.loop)
self.assertFalse(f1.cancelled())
self.assertEqual(f1.result(), 42)
self.assertTrue(f2.cancelled())
def test_future_source_traceback(self):
self.loop.set_debug(True)
future = self._new_future(loop=self.loop)
lineno = sys._getframe().f_lineno - 1
self.assertIsInstance(future._source_traceback, list)
self.assertEqual(future._source_traceback[-2][:3],
(__file__,
lineno,
'test_future_source_traceback'))
@mock.patch('asyncio.base_events.logger')
def check_future_exception_never_retrieved(self, debug, m_log):
self.loop.set_debug(debug)
def memory_error():
try:
raise MemoryError()
except BaseException as exc:
return exc
exc = memory_error()
future = self._new_future(loop=self.loop)
future.set_exception(exc)
future = None
test_utils.run_briefly(self.loop)
support.gc_collect()
if sys.version_info >= (3, 4):
regex = f'^{self.cls.__name__} exception was never retrieved\n'
exc_info = (type(exc), exc, exc.__traceback__)
m_log.error.assert_called_once_with(mock.ANY, exc_info=exc_info)
else:
regex = r'^Future/Task exception was never retrieved\n'
m_log.error.assert_called_once_with(mock.ANY, exc_info=False)
message = m_log.error.call_args[0][0]
self.assertRegex(message, re.compile(regex, re.DOTALL))
def test_future_exception_never_retrieved(self):
self.check_future_exception_never_retrieved(False)
def test_future_exception_never_retrieved_debug(self):
self.check_future_exception_never_retrieved(True)
def test_set_result_unless_cancelled(self):
fut = self._new_future(loop=self.loop)
fut.cancel()
futures._set_result_unless_cancelled(fut, 2)
self.assertTrue(fut.cancelled())
def test_future_stop_iteration_args(self):
fut = self._new_future(loop=self.loop)
fut.set_result((1, 2))
fi = fut.__iter__()
result = None
try:
fi.send(None)
except StopIteration as ex:
result = ex.args[0]
else:
self.fail('StopIteration was expected')
self.assertEqual(result, (1, 2))
def test_future_iter_throw(self):
fut = self._new_future(loop=self.loop)
fi = iter(fut)
self.assertRaises(TypeError, fi.throw,
Exception, Exception("elephant"), 32)
self.assertRaises(TypeError, fi.throw,
Exception("elephant"), Exception("elephant"))
self.assertRaises(TypeError, fi.throw, list)
def test_future_del_collect(self):
class Evil:
def __del__(self):
gc.collect()
for i in range(100):
fut = self._new_future(loop=self.loop)
fut.set_result(Evil())
@unittest.skipUnless(hasattr(futures, '_CFuture'),
                     'requires the C _asyncio module')
class CFutureTests(BaseFutureTests, test_utils.TestCase):
    # Runs the shared future test suite against the C-accelerated Future.
    try:
        cls = futures._CFuture
    except AttributeError:
        # Pure-Python build: the attribute is absent and skipUnless
        # disables the whole class anyway.
        cls = None

    def test_future_del_segfault(self):
        # Regression test: deleting these C-level attributes must raise
        # AttributeError rather than crash the interpreter.
        fut = self._new_future(loop=self.loop)
        with self.assertRaises(AttributeError):
            del fut._asyncio_future_blocking
        with self.assertRaises(AttributeError):
            del fut._log_traceback
@unittest.skipUnless(hasattr(futures, '_CFuture'),
                     'requires the C _asyncio module')
class CSubFutureTests(BaseFutureTests, test_utils.TestCase):
    # Same suite run against a Python subclass of the C Future, exercising
    # the subclassing paths of the C implementation.
    try:
        class CSubFuture(futures._CFuture):
            pass
        cls = CSubFuture
    except AttributeError:
        cls = None
class PyFutureTests(BaseFutureTests, test_utils.TestCase):
    # Same suite run against the pure-Python Future implementation.
    cls = futures._PyFuture
class BaseFutureDoneCallbackTests():
def setUp(self):
super().setUp()
self.loop = self.new_test_loop()
def run_briefly(self):
test_utils.run_briefly(self.loop)
def _make_callback(self, bag, thing):
# Create a callback function that appends thing to bag.
def bag_appender(future):
bag.append(thing)
return bag_appender
def _new_future(self):
raise NotImplementedError
def test_callbacks_remove_first_callback(self):
bag = []
f = self._new_future()
cb1 = self._make_callback(bag, 42)
cb2 = self._make_callback(bag, 17)
cb3 = self._make_callback(bag, 100)
f.add_done_callback(cb1)
f.add_done_callback(cb2)
f.add_done_callback(cb3)
f.remove_done_callback(cb1)
f.remove_done_callback(cb1)
self.assertEqual(bag, [])
f.set_result('foo')
self.run_briefly()
self.assertEqual(bag, [17, 100])
self.assertEqual(f.result(), 'foo')
def test_callbacks_remove_first_and_second_callback(self):
bag = []
f = self._new_future()
cb1 = self._make_callback(bag, 42)
cb2 = self._make_callback(bag, 17)
cb3 = self._make_callback(bag, 100)
f.add_done_callback(cb1)
f.add_done_callback(cb2)
f.add_done_callback(cb3)
f.remove_done_callback(cb1)
f.remove_done_callback(cb2)
f.remove_done_callback(cb1)
self.assertEqual(bag, [])
f.set_result('foo')
self.run_briefly()
self.assertEqual(bag, [100])
self.assertEqual(f.result(), 'foo')
def test_callbacks_remove_third_callback(self):
bag = []
f = self._new_future()
cb1 = self._make_callback(bag, 42)
cb2 = self._make_callback(bag, 17)
cb3 = self._make_callback(bag, 100)
f.add_done_callback(cb1)
f.add_done_callback(cb2)
f.add_done_callback(cb3)
f.remove_done_callback(cb3)
f.remove_done_callback(cb3)
self.assertEqual(bag, [])
f.set_result('foo')
self.run_briefly()
self.assertEqual(bag, [42, 17])
self.assertEqual(f.result(), 'foo')
def test_callbacks_invoked_on_set_result(self):
bag = []
f = self._new_future()
f.add_done_callback(self._make_callback(bag, 42))
f.add_done_callback(self._make_callback(bag, 17))
self.assertEqual(bag, [])
f.set_result('foo')
self.run_briefly()
self.assertEqual(bag, [42, 17])
self.assertEqual(f.result(), 'foo')
def test_callbacks_invoked_on_set_exception(self):
bag = []
f = self._new_future()
f.add_done_callback(self._make_callback(bag, 100))
self.assertEqual(bag, [])
exc = RuntimeError()
f.set_exception(exc)
self.run_briefly()
self.assertEqual(bag, [100])
self.assertEqual(f.exception(), exc)
def test_remove_done_callback(self):
bag = []
f = self._new_future()
cb1 = self._make_callback(bag, 1)
cb2 = self._make_callback(bag, 2)
cb3 = self._make_callback(bag, 3)
# Add one cb1 and one cb2.
f.add_done_callback(cb1)
f.add_done_callback(cb2)
# One instance of cb2 removed. Now there's only one cb1.
self.assertEqual(f.remove_done_callback(cb2), 1)
# Never had any cb3 in there.
self.assertEqual(f.remove_done_callback(cb3), 0)
# After this there will be 6 instances of cb1 and one of cb2.
f.add_done_callback(cb2)
for i in range(5):
f.add_done_callback(cb1)
# Remove all instances of cb1. One cb2 remains.
self.assertEqual(f.remove_done_callback(cb1), 6)
self.assertEqual(bag, [])
f.set_result('foo')
self.run_briefly()
self.assertEqual(bag, [2])
self.assertEqual(f.result(), 'foo')
def test_remove_done_callbacks_list_mutation(self):
# see http://bugs.python.org/issue28963 for details
fut = self._new_future()
fut.add_done_callback(str)
for _ in range(63):
fut.add_done_callback(id)
class evil:
def __eq__(self, other):
fut.remove_done_callback(id)
return False
fut.remove_done_callback(evil())
def test_schedule_callbacks_list_mutation_1(self):
# see http://bugs.python.org/issue28963 for details
def mut(f):
f.remove_done_callback(str)
fut = self._new_future()
fut.add_done_callback(mut)
fut.add_done_callback(str)
fut.add_done_callback(str)
fut.set_result(1)
test_utils.run_briefly(self.loop)
def test_schedule_callbacks_list_mutation_2(self):
# see http://bugs.python.org/issue30828 for details
fut = self._new_future()
fut.add_done_callback(str)
for _ in range(63):
fut.add_done_callback(id)
max_extra_cbs = 100
extra_cbs = 0
class evil:
def __eq__(self, other):
nonlocal extra_cbs
extra_cbs += 1
if extra_cbs < max_extra_cbs:
fut.add_done_callback(id)
return False
fut.remove_done_callback(evil())
@unittest.skipUnless(hasattr(futures, '_CFuture'),
                     'requires the C _asyncio module')
class CFutureDoneCallbackTests(BaseFutureDoneCallbackTests,
                               test_utils.TestCase):
    # Done-callback suite against the C-accelerated Future.
    def _new_future(self):
        return futures._CFuture(loop=self.loop)
@unittest.skipUnless(hasattr(futures, '_CFuture'),
                     'requires the C _asyncio module')
class CSubFutureDoneCallbackTests(BaseFutureDoneCallbackTests,
                                  test_utils.TestCase):
    # Done-callback suite against a Python subclass of the C Future.
    def _new_future(self):
        class CSubFuture(futures._CFuture):
            pass
        return CSubFuture(loop=self.loop)
class PyFutureDoneCallbackTests(BaseFutureDoneCallbackTests,
                                test_utils.TestCase):
    # Done-callback suite against the pure-Python Future.
    def _new_future(self):
        return futures._PyFuture(loop=self.loop)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
Yasumoto/commons | refs/heads/master | tests/python/twitter/common/concurrent/test_concurrent.py | 12 | import time
from functools import partial
from Queue import Empty, Queue
import pytest
from twitter.common.concurrent import deadline, defer, Timeout
from twitter.common.contextutil import Timer
def test_deadline_default_timeout():
  # A callable sleeping 0.5s must exceed deadline()'s default timeout and
  # raise Timeout.  (assumes the default timeout is < 0.5s — TODO confirm)
  timeout = partial(time.sleep, 0.5)
  with pytest.raises(Timeout):
    deadline(timeout)
def test_deadline_custom_timeout():
  # An explicit 0.1s deadline must fire before the 0.2s sleep completes.
  timeout = partial(time.sleep, 0.2)
  with pytest.raises(Timeout):
    deadline(timeout, 0.1)
def test_deadline_no_timeout():
  # A callable that returns immediately must pass its value through.
  assert 'success' == deadline(lambda: 'success')
def test_defer():
  # defer() must run func asynchronously, but no sooner than `delay` seconds;
  # the Queue hand-off synchronizes with the deferred thread.
  DELAY = 0.5
  results = Queue(maxsize=1)
  def func():
    results.put_nowait('success')
  defer(func, delay=DELAY)
  with Timer() as timer:
    assert results.get() == 'success'
  assert timer.elapsed >= DELAY
|
tinfoil/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/views/__init__.py | 6014 | # Required for Python to search this directory for module files
|
malev/deps | refs/heads/master | setup.py | 1 | from setuptools import setup, find_packages
# Packaging metadata for the `deps` environment.yml parser CLI.
setup(
    name='deps',
    version='0.1.0',
    author='Marcos Vanetta',
    author_email='marcosvanetta@gmail.com',
    url='http://github.com/malev/deps',
    description='Environment.yml parser',
    packages=find_packages(),
    install_requires=['pyyaml'],
    # Installs a `deps` console script dispatching to deps.cli:main.
    entry_points={
        'console_scripts': [
            'deps = deps.cli:main',
        ]
    }
)
|
AndrewGrossman/django | refs/heads/master | tests/m2m_through/models.py | 115 | from datetime import datetime
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# M2M described on one of the models
@python_2_unicode_compatible
class Person(models.Model):
    # Plain target model shared by all the through-relationships below.
    name = models.CharField(max_length=128)

    class Meta:
        ordering = ('name',)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Group(models.Model):
    # Declares three M2M fields to Person, each with an explicit through model.
    name = models.CharField(max_length=128)
    members = models.ManyToManyField(Person, through='Membership')
    custom_members = models.ManyToManyField(Person, through='CustomMembership', related_name="custom")
    nodefaultsnonulls = models.ManyToManyField(Person, through='TestNoDefaultsOrNulls', related_name="testnodefaultsnonulls")

    class Meta:
        ordering = ('name',)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Membership(models.Model):
    # Through model for Group.members: one FK to each side plus extra
    # payload columns (join date, optional invite reason).
    person = models.ForeignKey(Person)
    group = models.ForeignKey(Group)
    date_joined = models.DateTimeField(default=datetime.now)
    invite_reason = models.CharField(max_length=64, null=True)

    class Meta:
        ordering = ('date_joined', 'invite_reason', 'group')

    def __str__(self):
        return "%s is a member of %s" % (self.person.name, self.group.name)
@python_2_unicode_compatible
class CustomMembership(models.Model):
    # Through model with customized storage: non-default FK column name,
    # custom related_name, a nullable FK to another through row, and an
    # explicit db_table.
    person = models.ForeignKey(Person, db_column="custom_person_column", related_name="custom_person_related_name")
    group = models.ForeignKey(Group)
    weird_fk = models.ForeignKey(Membership, null=True)
    date_joined = models.DateTimeField(default=datetime.now)

    def __str__(self):
        return "%s is a member of %s" % (self.person.name, self.group.name)

    class Meta:
        db_table = "test_table"
class TestNoDefaultsOrNulls(models.Model):
    # Through model whose extra column has no default and disallows NULL.
    person = models.ForeignKey(Person)
    group = models.ForeignKey(Group)
    nodefaultnonull = models.CharField(max_length=5)
@python_2_unicode_compatible
class PersonSelfRefM2M(models.Model):
    # Self-referential, non-symmetrical M2M through Friendship.
    name = models.CharField(max_length=5)
    friends = models.ManyToManyField('self', through="Friendship", symmetrical=False)

    def __str__(self):
        return self.name
class Friendship(models.Model):
    # Through model for the self-referential M2M above; distinct related_names
    # disambiguate the two FKs to the same model.
    first = models.ForeignKey(PersonSelfRefM2M, related_name="rel_from_set")
    second = models.ForeignKey(PersonSelfRefM2M, related_name="rel_to_set")
    date_friended = models.DateTimeField()
# Custom through link fields
@python_2_unicode_compatible
class Event(models.Model):
    # Uses through_fields because Invitation has two FKs to Person
    # (inviter and invitee), so the link fields must be named explicitly.
    title = models.CharField(max_length=50)
    invitees = models.ManyToManyField(Person, through='Invitation', through_fields=('event', 'invitee'), related_name='events_invited')

    def __str__(self):
        return self.title
class Invitation(models.Model):
    # Through model with two FKs to Person; through_fields on Event picks
    # which one is the M2M target.
    event = models.ForeignKey(Event, related_name='invitations')
    # field order is deliberately inverted. the target field is "invitee".
    inviter = models.ForeignKey(Person, related_name='invitations_sent')
    invitee = models.ForeignKey(Person, related_name='invitations')
@python_2_unicode_compatible
class Employee(models.Model):
    # Self-referential M2M whose through model declares its FKs in inverted
    # order; through_fields names (source, target) explicitly.
    name = models.CharField(max_length=5)
    subordinates = models.ManyToManyField('self', through="Relationship", through_fields=('source', 'target'), symmetrical=False)

    class Meta:
        ordering = ('pk',)

    def __str__(self):
        return self.name
class Relationship(models.Model):
    # field order is deliberately inverted.
    another = models.ForeignKey(Employee, related_name="rel_another_set", null=True)
    target = models.ForeignKey(Employee, related_name="rel_target_set")
    source = models.ForeignKey(Employee, related_name="rel_source_set")
class Ingredient(models.Model):
    """Target of a through M2M keyed on a non-pk unique field (iname)."""
    iname = models.CharField(max_length=20, unique=True)

    class Meta:
        ordering = ('iname',)


class Recipe(models.Model):
    rname = models.CharField(max_length=20, unique=True)
    ingredients = models.ManyToManyField(
        Ingredient, through='RecipeIngredient', related_name='recipes',
    )

    class Meta:
        ordering = ('rname',)


class RecipeIngredient(models.Model):
    """Through model whose FKs use to_field (non-pk columns) on both ends."""
    ingredient = models.ForeignKey(Ingredient, to_field='iname')
    recipe = models.ForeignKey(Recipe, to_field='rname')
|
aferrero2707/PhotoFlow | refs/heads/stable | src/external/rawspeed/docs/sphinx-pyexec.py | 3 | try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import sys
from docutils.parsers.rst import Directive
from docutils import nodes
from sphinx.util import nested_parse_with_titles
from docutils.statemachine import StringList
class ExecDirective(Directive):
    """reST directive ``exec``: run the directive body as Python code and
    parse whatever it printed (stdout + stderr) as nested reStructuredText.
    """
    has_content = True
    required_arguments = 0

    def execute_code(cls, code):
        """Execute *code*, capturing stdout/stderr; return the combined text.

        Note: despite its first parameter being named ``cls``, this is bound
        as a plain instance method and receives the directive instance.
        """
        codeOut = StringIO()
        codeErr = StringIO()
        sys.stdout = codeOut
        sys.stderr = codeErr
        try:
            exec(code)
        finally:
            # Always restore the real streams — the original version left
            # sys.stdout/sys.stderr redirected forever when the executed
            # snippet raised, silencing all later output.
            sys.stdout = sys.__stdout__
            sys.stderr = sys.__stderr__
        return codeOut.getvalue() + codeErr.getvalue()

    def run(self):
        """Execute the directive content and return the parsed output nodes."""
        self.assert_has_content()
        code = '\n'.join(self.content)
        code_results = self.execute_code(code)
        # normalise line endings before re-parsing as reST
        sl = StringList(code_results.replace("\r", "").split("\n"))
        node = nodes.paragraph()
        nested_parse_with_titles(self.state, sl, node)
        return [node]
def setup(app):
    # Sphinx extension entry point: register the ``exec`` directive.
    app.add_directive('exec', ExecDirective)
|
tizianasellitto/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/pywebsocket/src/example/abort_wsh.py | 465 | # Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from mod_pywebsocket import handshake
def web_socket_do_extra_handshake(request):
    """Accept the handshake unconditionally (no extra validation)."""
    pass


def web_socket_transfer_data(request):
    """Abort immediately; exercises the framework's handling of
    AbortedByUserException raised from a data-transfer handler."""
    raise handshake.AbortedByUserException(
        "Aborted in web_socket_transfer_data")
# vi:sts=4 sw=4 et
|
40223151/2015cd0505 | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/time.py | 518 | from browser import window
import javascript
# use Javascript Date constructor
date = javascript.JSConstructor(window.Date)

# daylight = 0  # FIXME: should be non-zero when a DST timezone is defined

##############################################
# Added to pass some tests.
# Timezone information may not always be available in the browser,
# so we assume it can be missing.
_STRUCT_TM_ITEMS = 9
##############################################
def _get_day_of_year(arg):
"""
Get the day position in the year starting from 1
Parameters
----------
arg : tuple
Returns
-------
int with the correct day of the year starting from 1
"""
ml = [31,28,31,30,31,30,31,31,30,31,30,31]
if arg[0]%4==0:
ml[1] += 1
i=1
yday=0
while i<arg[1]:
yday += ml[i-1]
i += 1
yday += arg[2]
return yday
def _get_week_of_year(arg):
    """
    Get the week position in the year starting from 0. All days in a new
    year preceding the first Monday are considered to be in week 0.

    Parameters
    ----------
    arg : tuple
        struct_time-like sequence; uses year/month/day (arg[0..2]) and
        the day-of-year field (arg[7]).

    Returns
    -------
    int with the correct iso week (weeks starting on Monday) of the year.
    """
    # JS Date months are 0-based, hence arg[1]-1.
    # NOTE(review): d1 is computed but never used — looks like a leftover.
    d1 = date(arg[0], arg[1]-1, arg[2])
    d0 = date(arg[0], 0, 1)
    # getDay(): 0 == Sunday; remap so Monday == 1 .. Sunday == 7
    firstday = d0.getDay()
    if firstday == 0 : firstday = 7
    # length of the (possibly partial) first week of the year
    firstweek = 8 - firstday
    doy = arg[7]
    if firstday != 1:
        doy = doy - firstweek
    if doy % 7 == 0:
        week_number = doy // 7
    else:
        week_number = doy // 7 + 1
    return week_number
def _check_struct_time(t):
mm = t[1]
if mm == 0: mm = 1
if -1 > mm > 13: raise ValueError("month out of range")
dd = t[2]
if dd == 0: dd = 1
if -1 > dd > 32: raise ValueError("day of month out of range")
hh = t[3]
if -1 > hh > 24: raise ValueError("hour out of range")
minu = t[4]
if -1 > minu > 60: raise ValueError("minute out of range")
ss = t[5]
if -1 > ss > 62: raise ValueError("seconds out of range")
wd = t[6] % 7
if wd < -2: raise ValueError("day of week out of range")
dy = t[7]
if dy == 0: dy = 1
if -1 > dy > 367: raise ValueError("day of year out of range")
return t[0], mm, dd, hh, minu, ss, wd, dy, t[-1]
def _is_dst(secs = None):
    """Return 1 if the instant *secs* (seconds since the epoch; default:
    now) falls in daylight saving time in the browser's locale, else 0."""
    d = date()
    if secs is not None:
        d = date(secs*1000)   # JS Date takes milliseconds
    # calculate if we are in daylight savings time or not.
    # borrowed from http://stackoverflow.com/questions/11887934/check-if-daylight-saving-time-is-in-effect-and-if-it-is-for-how-many-hours
    jan = date(d.getFullYear(), 0, 1)
    jul = date(d.getFullYear(), 6, 1)
    # DST is in effect when the current offset is smaller than the larger
    # of the January/July offsets (abs() covers both hemispheres)
    dst = int(d.getTimezoneOffset() < max(abs(jan.getTimezoneOffset()), abs(jul.getTimezoneOffset())))
    return dst
def _get_tzname():
    "check if timezone is available, if not return a tuple of empty str"
    d = date()
    d = d.toTimeString()   # e.g. "10:12:35 GMT+0200 (CEST)"
    try:
        # extract the zone name between the parentheses
        d = d.split('(')[1].split(')')[0]
        # the DST zone name is not exposed by the JS API
        return (d, 'NotAvailable')
    except:
        # NOTE(review): bare except — only IndexError is expected here
        return ('', '')
def _set_altzone():
    """Compute the DST timezone offset in seconds west of UTC.

    Relies on the module-level ``timezone`` value computed below, so it
    must only be called after that assignment has run.
    """
    d = date()
    jan = date(d.getFullYear(), 0, 1)
    jul = date(d.getFullYear(), 6, 1)
    # the DST offset differs from the standard offset by the Jan/Jul delta
    result = timezone - (jan.getTimezoneOffset() - jul.getTimezoneOffset()) * 60
    return result
def _check_input(t):
    """
    Normalise *t* to a 9-item tuple of time fields.

    Accepts a struct_time, a 9-tuple, or a falsy value (None/()), in
    which case the current local time is used.  Raises TypeError for
    wrong lengths or unsupported types.
    """
    if not t:
        # no argument supplied: use the current local time
        return localtime().args
    if isinstance(t, struct_time):
        if len(t.args) != 9:
            raise TypeError(
                "function takes exactly 9 arguments ({} given)".format(len(t.args)))
        return t.args
    if isinstance(t, tuple):
        if len(t) != 9:
            # the original read ``t.args`` here, which raised AttributeError
            # on plain tuples instead of the intended TypeError
            raise TypeError(
                "function takes exactly 9 arguments ({} given)".format(len(t)))
        return t
    raise TypeError("Tuple or struct_time argument required")
## end of helper functions
##############################################

##############################################
## Values depending on the timezone of the browser,
## computed once at import time.
daylight = _is_dst()
timezone = date().getTimezoneOffset() * 60   # seconds west of UTC
tzname = _get_tzname()
altzone = _set_altzone() if daylight else timezone
##############################################
def asctime(t = None):
    """Return a string such as 'Sun Jun 20 23:21:05 1993' for the
    struct_time/9-tuple *t* (default: current local time)."""
    # tm_wday is 0 == Monday; month index is 1-based
    weekdays = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu",
                4: "Fri", 5: "Sat", 6: "Sun"}
    months = {1:'Jan',2:'Feb',3:'Mar',4:'Apr',5:'May',6:'Jun',
              7:'Jul',8:'Aug',9:'Sep',10:'Oct',11:'Nov',12:'Dec'}
    t = _check_input(t)
    t = _check_struct_time(t)
    result = "%s %s %2d %02d:%02d:%02d %d" % (
        weekdays[t[6]], months[t[1]], t[2], t[3], t[4], t[5], t[0])
    return result
def ctime(timestamp=None):
    """Return a string like 'Sun Jun 20 23:21:05 1993' for *timestamp*
    (seconds since the epoch; default: now), implemented directly on the
    JS Date API rather than via localtime()/asctime()."""
    if timestamp is None:
        timestamp = date().getTime() / 1000.
    d = date(0)
    d.setUTCSeconds(timestamp)
    # determine whether DST was in effect at that instant (same heuristic
    # as _is_dst but without abs() — NOTE(review): verify for
    # southern-hemisphere timezones)
    jan = date(d.getFullYear(), 0, 1)
    jul = date(d.getFullYear(), 6, 1)
    dst = int(d.getTimezoneOffset() < max(jan.getTimezoneOffset(), jul.getTimezoneOffset()))
    # shift the timestamp so the UTC accessors below yield local time
    # NOTE(review): the fixed (1 + dst)-hour shift looks locale-specific —
    # confirm this is correct outside UTC+1 locales
    d = date(0)
    d.setUTCSeconds(timestamp + (1 + dst) * 3600)
    # JS getUTCDay(): 0 == Sunday; getUTCMonth(): 0-based
    weekdays = {1: "Mon", 2: "Tue", 3: "Wed", 4: "Thu",
                5: "Fri", 6: "Sat", 0: "Sun"}
    months = {0:'Jan',1:'Feb',2:'Mar',3:'Apr',4:'May',5:'Jun',
              6:'Jul',7:'Aug',8:'Sep',9:'Oct',10:'Nov',11:'Dec'}
    result = "%s %s %2d %02d:%02d:%02d %d" % (weekdays[d.getUTCDay()],
        months[d.getUTCMonth()], d.getUTCDate(),
        d.getUTCHours(), d.getUTCMinutes(), d.getUTCSeconds(),
        d.getUTCFullYear())
    return result
def gmtime(secs = None):
    """Return the UTC struct_time for *secs* seconds since the epoch
    (default: now)."""
    d = date()
    if secs is not None:
        d = date(secs*1000)   # JS Date takes milliseconds
    # JS getUTCDay(): 0 == Sunday; struct_time wants Monday == 0
    wday = d.getUTCDay() - 1 if d.getUTCDay() - 1 >= 0 else 6
    tmp = struct_time([d.getUTCFullYear(),
        d.getUTCMonth()+1, d.getUTCDate(),
        d.getUTCHours(), d.getUTCMinutes(), d.getUTCSeconds(),
        wday, 0, 0])
    # fill in tm_yday (left as 0 above); tm_isdst is always 0 for UTC
    tmp.args[7] = _get_day_of_year(tmp.args)
    return tmp
def localtime(secs = None):
    """Return the local-time struct_time for *secs* seconds since the
    epoch (default: now)."""
    d = date()
    if secs is not None:
        d = date(secs * 1000)   # JS Date takes milliseconds
    dst = _is_dst(secs)
    # JS getDay(): 0 == Sunday; struct_time wants Monday == 0
    wday = d.getDay() - 1 if d.getDay() - 1 >= 0 else 6
    tmp = struct_time([d.getFullYear(),
        d.getMonth()+1, d.getDate(),
        d.getHours(), d.getMinutes(), d.getSeconds(),
        wday, 0, dst])
    # fill in tm_yday (left as 0 above)
    tmp.args[7] = _get_day_of_year(tmp.args)
    return tmp
def mktime(t):
    """Inverse of localtime(): seconds since the epoch (float) for a local
    struct_time or 9-tuple."""
    if isinstance(t, struct_time):
        # JS Date months are 0-based, hence tm_mon - 1
        d1 = date(t.tm_year, t.tm_mon - 1, t.tm_mday,
            t.tm_hour, t.tm_min, t.tm_sec, 0).getTime()
    elif isinstance(t, tuple):
        d1 = date(t[0], t[1] - 1, t[2], t[3], t[4], t[5], 0).getTime()
    else:
        raise ValueError("Tuple or struct_time argument required")
    # epoch origin expressed through the same local-time constructor
    d2 = date(0).getTime()
    return (d1 - d2) / 1000.
def monotonic():
    """Monotonic clock in seconds, backed by performance.now()."""
    return javascript.JSObject(window.performance.now)()/1000.


def perf_counter():
    """Performance counter in seconds; falls back to wall-clock time, the
    best source available in the browser."""
    return float(date().getTime()/1000.0)


def time():
    """Seconds since the epoch as a float."""
    return float(date().getTime()/1000)


def sleep(secs):
    # Busy-wait: the browser offers no blocking sleep, so this burns CPU
    # and freezes the page for the whole duration.
    start = date().getTime()
    while date().getTime() - start < secs * 1000.:
        pass
def strftime(_format, t=None):
    """
    Format the struct_time/9-tuple *t* (default: current local time)
    according to *_format*.

    Supports a common subset of the C89 directives.  Substitution is done
    with successive string replacements, so ``%%`` is handled last.
    """
    def _pad(value, width):
        # zero-pad a number on the left to the requested width
        res = str(value)
        while len(res) < width:
            res = '0' + res
        return res
    t = _check_input(t)
    t = _check_struct_time(t)
    YY = _pad(t[0], 4)
    yy = _pad(t[0], 4)[2:]
    mm = _pad(t[1], 2)
    dd = _pad(t[2], 2)
    HH = t[3]
    HH24 = _pad(HH, 2)
    # 12-hour clock: hours 0 and 12 both display as 12.  The original
    # compared the *padded string* with the int 0, so midnight rendered
    # as "00" and the correction never fired.
    HH12 = _pad(HH % 12 or 12, 2)
    AMPM = 'AM' if 0 <= HH < 12 else 'PM'
    MM = _pad(t[4], 2)
    SS = _pad(t[5], 2)
    DoY = _pad(t[7], 3)
    # %w wants Sunday == 0, while tm_wday has Monday == 0
    w = t[6] + 1 if t[6] < 6 else 0
    W = _pad(_get_week_of_year(t), 2)
    abb_weekdays = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']
    full_weekdays = ['Sunday', 'Monday', 'Tuesday', 'Wednesday',
                     'Thursday', 'Friday', 'Saturday']
    abb_months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                  'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    full_months = ['January', 'February', 'March', 'April', 'May', 'June',
                   'July', 'August', 'September', 'October', 'November', 'December']
    res = _format
    res = res.replace("%H", HH24)
    res = res.replace("%I", HH12)
    res = res.replace("%p", AMPM)
    res = res.replace("%M", MM)
    res = res.replace("%S", SS)
    res = res.replace("%Y", YY)
    res = res.replace("%y", yy)
    res = res.replace("%m", mm)
    res = res.replace("%d", dd)
    res = res.replace("%a", abb_weekdays[w])
    res = res.replace("%A", full_weekdays[w])
    res = res.replace("%b", abb_months[int(mm) - 1])
    res = res.replace("%B", full_months[int(mm) - 1])
    res = res.replace("%j", DoY)
    # str() is required: str.replace rejects an int replacement argument,
    # so the original raised TypeError whenever %w appeared in the format
    res = res.replace("%w", str(w))
    res = res.replace("%W", W)
    res = res.replace("%x", mm + '/' + dd + '/' + yy)
    res = res.replace("%X", HH24 + ':' + MM + ':' + SS)
    res = res.replace("%c", abb_weekdays[w] + ' ' + abb_months[int(mm) - 1] +
                      ' ' + dd + ' ' + HH24 + ':' + MM + ':' + SS + ' ' + YY)
    res = res.replace("%%", '%')
    return res
class struct_time:
    """
    Minimal stand-in for ``time.struct_time``: a 9-item sequence with
    named read-only accessors (tm_year .. tm_isdst).

    Unlike CPython's constructor, this one takes the 9-item sequence as a
    single argument, which is how the rest of this module builds instances.
    """
    def __init__(self, args):
        if len(args) != 9:
            raise TypeError(
                "time.struct_time() takes a 9-sequence (%s-sequence given)" % len(args))
        # stored as-is; gmtime()/localtime() mutate args[7] after creation
        self.args = args

    @property
    def tm_year(self):
        # year, e.g. 1993
        return self.args[0]

    @property
    def tm_mon(self):
        # month, 1..12
        return self.args[1]

    @property
    def tm_mday(self):
        # day of month, 1..31
        return self.args[2]

    @property
    def tm_hour(self):
        # hour, 0..23
        return self.args[3]

    @property
    def tm_min(self):
        # minute, 0..59
        return self.args[4]

    @property
    def tm_sec(self):
        # second, 0..61 (leap seconds allowed)
        return self.args[5]

    @property
    def tm_wday(self):
        # weekday, 0 == Monday
        return self.args[6]

    @property
    def tm_yday(self):
        # day of year, 1..366
        return self.args[7]

    @property
    def tm_isdst(self):
        # 1 if DST, 0 if not, -1 if unknown
        return self.args[8]

    def __getitem__(self, i):
        return self.args[i]

    def __iter__(self):
        return iter(self.args)

    def __repr__(self):
        # Matches CPython's field names: the original misspelt the class
        # name ("structime") and used "tm_day" instead of "tm_mday".
        return ("time.struct_time(tm_year={}, tm_mon={}, tm_mday={}, " +
                "tm_hour={}, tm_min={}, tm_sec={}, tm_wday={}, " +
                "tm_yday={}, tm_isdst={})").format(*self.args)

    def __str__(self):
        return self.__repr__()
def to_struct_time(ptuple):
    """
    Convert the packed result object produced by ``_strptime`` into a
    struct_time, appending tm_yday (1..366) and tm_isdst (-1, unknown).

    *ptuple* exposes the 7 parsed fields as a mutable sequence via its
    ``arg`` attribute.
    """
    arg = ptuple.arg
    month_lengths = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    year = arg[0]
    # Full Gregorian leap-year rule; a bare ``% 4`` test miscounts
    # century years such as 1900.
    if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
        month_lengths[1] += 1
    # rank of the day in the year, 1-based
    yday = sum(month_lengths[:arg[1] - 1]) + arg[2]
    arg.append(yday)
    arg.append(-1)   # DST information is not known at this point
    return struct_time(arg)
def strptime(string, _format):
    """Parse *string* according to *_format* and return a struct_time.

    Delegates to the Brython port of ``_strptime``; ``to_struct_time`` is
    passed where a datetime class is normally expected, so the parsed
    fields come back already converted.
    """
    import _strptime
    # NOTE(review): the result is wrapped in a 1-item list and handed to
    # struct_time, which requires a 9-sequence — this looks suspect but is
    # kept as-is; verify against _strptime._strptime_datetime's contract.
    return struct_time([_strptime._strptime_datetime(to_struct_time, string, _format)])
# All the clock_xx machinery shouldn't work in the browser so some
# NotImplementedErrors or messages are shown
_clock_msg = """Browser cannot access CPU. See '%s'"""


def _clock_xx(url):
    # single choke-point: every clock_* entry raises with a link to the
    # relevant CPython documentation
    raise NotImplementedError(_clock_msg % url)


# unsupported functions raise when called ...
clock = lambda: _clock_xx("https://docs.python.org/3/library/time.html#time.clock")
clock_getres = lambda: _clock_xx("https://docs.python.org/3/library/time.html#time.clock_getres")
clock_gettime = lambda: _clock_xx("https://docs.python.org/3/library/time.html#time.clock_gettime")
clock_settime = lambda: _clock_xx("https://docs.python.org/3/library/time.html#time.clock_settime")
# ... while the CLOCK_* constants are plain explanatory strings
CLOCK_HIGHRES = _clock_msg % "https://docs.python.org/3/library/time.html#time.CLOCK_HIGHRES"
CLOCK_MONOTONIC = _clock_msg % "https://docs.python.org/3/library/time.html#time.CLOCK_MONOTONIC"
CLOCK_MONOTONIC_RAW = _clock_msg % "https://docs.python.org/3/library/time.html#time.CLOCK_MONOTONIC_RAW"
CLOCK_PROCESS_CPUTIME_ID = _clock_msg % "https://docs.python.org/3/library/time.html#time.CLOCK_PROCESS_CPUTIME_ID"
CLOCK_REALTIME = _clock_msg % "https://docs.python.org/3/library/time.html#time.CLOCK_REALTIME"
CLOCK_THREAD_CPUTIME_ID = _clock_msg % "https://docs.python.org/3/library/time.html#time.CLOCK_THREAD_CPUTIME_ID"
get_clock_info = lambda: _clock_xx("https://docs.python.org/3/library/time.html#time.get_clock_info")
process_time = lambda: _clock_xx("https://docs.python.org/3/library/time.html#time.process_time")
def tzset():
    """Not supported: the process-wide timezone cannot be changed from
    JavaScript."""
    raise NotImplementedError()
|
gltn/stdm | refs/heads/master | stdm/tests/data/utils.py | 1 | from sqlalchemy import create_engine
from stdm import data
from stdm.data.configuration.columns import (
DateColumn,
ForeignKeyColumn,
GeometryColumn,
IntegerColumn,
LookupColumn,
MultipleSelectColumn,
TextColumn,
VarCharColumn
)
from stdm.data.configuration.entity import entity_factory
from stdm.data.configuration.social_tenure import SocialTenure
from stdm.data.configuration.value_list import value_list_factory
from stdm.data.connection import DatabaseConnection
from stdm.security.user import User
# Names used by the test fixtures below.
BASIC_PROFILE = 'Basic'
PERSON_ENTITY = 'person'
SPATIAL_UNIT_ENTITY = 'spatial_unit'
SPATIAL_UNIT_ENTITY_2 = 'spatial_unit2'
HOUSEHOLD_ENTITY = 'household'
SURVEYOR_ENTITY = 'suveyor'   # NOTE(review): probable typo for 'surveyor'; kept as-is
COMMUNITY_ENTITY = 'community'

# Test-database connection settings.  Hard-coded credentials are tolerable
# only because this is a test fixture — never do this in production code.
DB_USER = 'postgres'
DB_PASS = 'admin'
DB_PORT = 5467
DB_SERVER = 'localhost'
DB_NAME = 'stdm'

# Default keyword arguments for creating a fully-featured entity.
full_entity_opt_args = {
    'create_id_column': True,
    'supports_documents': True,
    'is_global': False,
    'is_proxy': False
}
def create_profile(config, name):
    """Create (but do not register) a profile named *name* on *config*."""
    return config.create_profile(name)


def create_entity(profile, name, **kwargs):
    """Create an entity on *profile* using the standard entity factory."""
    return profile.create_entity(name, entity_factory, **kwargs)


def create_value_list(profile, name):
    """Create a lookup (value list) entity on *profile*."""
    return profile.create_entity(name, value_list_factory)
# Convention below: create_* helpers only build objects; the matching
# add_* helpers also register them on the profile/config.

def create_basic_profile(config):
    return create_profile(config, 'Basic')


def add_basic_profile(config):
    basic_profile = create_basic_profile(config)
    config.add_profile(basic_profile)
    return basic_profile


def create_person_entity(profile):
    entity = create_entity(profile, PERSON_ENTITY, **full_entity_opt_args)
    return entity


def create_spatial_unit_entity(profile):
    """Spatial-unit entity with a polygon geometry column."""
    entity = create_entity(profile, SPATIAL_UNIT_ENTITY, **full_entity_opt_args)
    add_geometry_column('geom_poly', entity)
    return entity


def create_spatial_unit_entity2(profile):
    """Second spatial-unit entity (used for multi-spatial-unit STR tests)."""
    entity = create_entity(profile, SPATIAL_UNIT_ENTITY_2, **full_entity_opt_args)
    add_geometry_column('geom_poly_2', entity)
    return entity


def create_surveyor_entity(profile):
    return create_entity(profile, SURVEYOR_ENTITY, **full_entity_opt_args)


def create_community_entity(profile):
    return create_entity(profile, COMMUNITY_ENTITY, **full_entity_opt_args)


def add_surveyor_entity(profile):
    surveyor = create_surveyor_entity(profile)
    profile.add_entity(surveyor)
    return surveyor


def add_community_entity(profile):
    community = create_community_entity(profile)
    profile.add_entity(community)
    return community


def add_person_entity(profile):
    entity = create_person_entity(profile)
    profile.add_entity(entity)
    return entity


def add_spatial_unit_entity(profile):
    entity = create_spatial_unit_entity(profile)
    profile.add_entity(entity)
    return entity


def add_spatial_unit_entity_2(profile):
    entity = create_spatial_unit_entity2(profile)
    profile.add_entity(entity)
    return entity


def add_geometry_column(name, entity):
    """Attach a POLYGON geometry column named *name* to *entity*."""
    geom_col = GeometryColumn(name, entity, GeometryColumn.POLYGON)
    entity.add_column(geom_col)
    return geom_col
def set_profile_social_tenure(profile):
    """Register one party (person) and one spatial unit on the profile's
    social tenure relationship."""
    party = add_person_entity(profile)
    spatial_unit = add_spatial_unit_entity(profile)
    profile.set_social_tenure_attr(SocialTenure.PARTY, [party])
    profile.set_social_tenure_attr(SocialTenure.SPATIAL_UNIT, [spatial_unit])


def create_relation(profile, **kwargs):
    """Create (but do not register) an entity relation on *profile*."""
    return profile.create_entity_relation(**kwargs)


def create_household_entity(profile):
    entity = create_entity(profile, HOUSEHOLD_ENTITY, **full_entity_opt_args)
    return entity


def add_household_entity(profile):
    entity = create_household_entity(profile)
    profile.add_entity(entity)
    return entity


def create_gender_lookup(entity):
    """Value list with 'Male'/'Female', created on the entity's profile."""
    gender_value_list = create_value_list(entity.profile, 'gender')
    gender_value_list.add_value('Male')
    gender_value_list.add_value('Female')
    return gender_value_list


def create_secondary_tenure_lookup(profile):
    """Value list of secondary tenure types (grazing/farming/fishing)."""
    sec_tenure_value_list = create_value_list(profile, 'secondary_tenure')
    sec_tenure_value_list.add_value('Grazing')
    sec_tenure_value_list.add_value('Farming')
    sec_tenure_value_list.add_value('Fishing')
    return sec_tenure_value_list


def add_secondary_tenure_value_list(profile):
    tenure_vl = create_secondary_tenure_lookup(profile)
    profile.add_entity(tenure_vl)
    return tenure_vl
def append_person_columns(entity):
    """Add household FK id, names and a gender lookup column to *entity*."""
    household_id = IntegerColumn('household_id', entity)
    first_name = VarCharColumn('first_name', entity, maximum=30)
    last_name = VarCharColumn('last_name', entity, maximum=30)
    # Create gender lookup column and attach value list
    gender = LookupColumn('gender', entity)
    gender_value_list = create_gender_lookup(entity)
    gender.value_list = gender_value_list
    entity.add_column(household_id)
    entity.add_column(first_name)
    entity.add_column(last_name)
    entity.add_column(gender)


def append_surveyor_columns(surveyor):
    """Add first/last name columns to the surveyor entity."""
    first_name = VarCharColumn('first_name', surveyor, maximum=30)
    last_name = VarCharColumn('last_name', surveyor, maximum=30)
    surveyor.add_column(first_name)
    surveyor.add_column(last_name)


def append_community_columns(community):
    """Add the community name column."""
    name = VarCharColumn('comm_name', community, maximum=100)
    community.add_column(name)
def populate_configuration(config):
    """Build the full 'Basic' test profile on *config*: entities, lookups,
    a parent/child relation, STR party/spatial-unit mappings and custom
    tenure attribute entities.  Ordering matters: entities must exist
    before relations and STR attributes reference them."""
    profile = add_basic_profile(config)
    rel = create_relation(profile)
    person_entity = add_person_entity(profile)
    append_person_columns(person_entity)
    household_entity = add_household_entity(profile)
    # household (parent) 1..* person (child), joined on person.household_id
    rel.parent = household_entity
    rel.child = person_entity
    rel.child_column = 'household_id'
    rel.parent_column = 'id'
    profile.add_entity_relation(rel)
    # Add save option lookup
    save_options = create_value_list(profile, 'save_options')
    save_options.add_value('House')
    save_options.add_value('Bank')
    save_options.add_value('SACCO')
    profile.add_entity(save_options)
    # Add save options to multiple select column to person entity
    save_options_column = MultipleSelectColumn('save_location', person_entity)
    save_options_column.value_list = save_options
    person_entity.add_column(save_options_column)
    # Add community entity
    community = add_community_entity(profile)
    append_community_columns(community)
    # Append surveyor columns
    surveyor = add_surveyor_entity(profile)
    append_surveyor_columns(surveyor)
    spatial_unit = add_spatial_unit_entity(profile)
    spatial_unit_2 = add_spatial_unit_entity_2(profile)
    # Add foreign key linking spatial unit to surveyor
    surveyor_id_col = ForeignKeyColumn('surveyor_id', spatial_unit)
    surveyor_id_col.set_entity_relation_attr('parent', surveyor)
    surveyor_id_col.set_entity_relation_attr('parent_column', 'id')
    spatial_unit.add_column(surveyor_id_col)
    # Set STR entities
    profile.set_social_tenure_attr(SocialTenure.PARTY, [person_entity, community])
    profile.set_social_tenure_attr(
        SocialTenure.SPATIAL_UNIT,
        [spatial_unit, spatial_unit_2]
    )
    # Create and add secondary tenure lookup
    sec_tenure_vl = add_secondary_tenure_value_list(profile)
    # Map secondary tenure to spatial unit 2
    profile.social_tenure.add_spatial_tenure_mapping(spatial_unit_2, sec_tenure_vl)
    # Create custom attr entity for primary tenure lookup
    primary_tenure_vl = profile.social_tenure.tenure_type_collection
    p_custom_ent = profile.social_tenure.custom_attribute_entity(
        primary_tenure_vl
    )
    if p_custom_ent is None:
        profile.social_tenure.initialize_custom_attributes_entity(
            primary_tenure_vl
        )
        # Set entity
        p_custom_ent = profile.social_tenure.custom_attribute_entity(
            primary_tenure_vl
        )
    # Add custom attributes
    constitution_ref_col = TextColumn('constitution_ref', p_custom_ent)
    p_custom_ent.add_column(constitution_ref_col)
    # Create secondary tenure custom attributes entity
    s_custom_ent = profile.social_tenure.custom_attribute_entity(
        sec_tenure_vl
    )
    if s_custom_ent is None:
        profile.social_tenure.initialize_custom_attributes_entity(
            sec_tenure_vl
        )
        # Set entity
        s_custom_ent = profile.social_tenure.custom_attribute_entity(
            sec_tenure_vl
        )
    application_date_col = DateColumn('application_date', s_custom_ent)
    s_custom_ent.add_column(application_date_col)
def create_db_connection():
    """Build a DatabaseConnection to the test database using the module's
    credential constants."""
    db_conn = DatabaseConnection(DB_SERVER, DB_PORT, DB_NAME)
    user = User(DB_USER, DB_PASS)
    db_conn.User = user
    return db_conn


def create_alchemy_engine():
    """Create an SQLAlchemy engine for the test database and register the
    connection with the stdm data layer."""
    db_conn = create_db_connection()
    connection_str = db_conn.toAlchemyConnection()
    # Set STDMDb instance
    data.app_dbconn = db_conn
    return create_engine(connection_str, echo=False)
|
JoeEnnever/thrift | refs/heads/master | contrib/fb303/py/fb303_scripts/fb303_simple_mgmt.py | 171 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys, os
from optparse import OptionParser
from thrift.Thrift import *
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from fb303 import *
from fb303.ttypes import *
def service_ctrl(
        command,
        port,
        trans_factory = None,
        prot_factory = None):
    """
    service_ctrl is a generic function to execute standard fb303 functions

    @param command: one of stop, start, reload, status, counters, name, alive
    @param port: service's port
    @param trans_factory: TTransportFactory to use for obtaining a TTransport. Default is
                          TBufferedTransportFactory
    @param prot_factory: TProtocolFactory to use for obtaining a TProtocol. Default is
                         TBinaryProtocolFactory

    Returns a shell-style exit code: 0 success, 2 alive, 3 failure/dead,
    4 insufficient privileges.  (Python 2 source — print statements.)
    """
    if command in ["status"]:
        try:
            status = fb303_wrapper('status', port, trans_factory, prot_factory)
            status_details = fb303_wrapper('get_status_details', port, trans_factory, prot_factory)
            msg = fb_status_string(status)
            if (len(status_details)):
                msg += " - %s" % status_details
            print msg
            # exit code 2 == running, 3 == anything else
            if (status == fb_status.ALIVE):
                return 2
            else:
                return 3
        except:
            # NOTE(review): bare except hides the real error (and even
            # KeyboardInterrupt); deliberate best-effort behaviour kept.
            print "Failed to get status"
            return 3

    # scalar commands
    if command in ["version","alive","name"]:
        try:
            result = fb303_wrapper(command, port, trans_factory, prot_factory)
            print result
            return 0
        except:
            print "failed to get ",command
            return 3

    # counters
    if command in ["counters"]:
        try:
            counters = fb303_wrapper('counters', port, trans_factory, prot_factory)
            for counter in counters:
                print "%s: %d" % (counter, counters[counter])
            return 0
        except:
            print "failed to get counters"
            return 3

    # Only root should be able to run the following commands
    if os.getuid() == 0:
        # async commands
        if command in ["stop","reload"] :
            try:
                fb303_wrapper(command, port, trans_factory, prot_factory)
                return 0
            except:
                print "failed to tell the service to ", command
                return 3
    else:
        if command in ["stop","reload"]:
            print "root privileges are required to stop or reload the service."
            return 4

    # unrecognised command: print usage
    print "The following commands are available:"
    for command in ["counters","name","version","alive","status"]:
        print "\t%s" % command
    print "The following commands are available for users with root privileges:"
    for command in ["stop","reload"]:
        print "\t%s" % command

    return 0;
def fb303_wrapper(command, port, trans_factory = None, prot_factory = None):
    """
    Open a connection to the fb303 service on localhost:*port*, run a
    single *command* and return its result (None for fire-and-forget
    commands such as 'reload' and 'stop').

    @param trans_factory: optional TTransportFactory (default: buffered)
    @param prot_factory:  optional TProtocolFactory (default: binary)
    """
    sock = TSocket.TSocket('localhost', port)

    # use input transport factory if provided
    if (trans_factory is None):
        trans = TTransport.TBufferedTransport(sock)
    else:
        trans = trans_factory.getTransport(sock)

    # use input protocol factory if provided
    if (prot_factory is None):
        prot = TBinaryProtocol.TBinaryProtocol(trans)
    else:
        prot = prot_factory.getProtocol(trans)

    # initialize client and open transport
    fb303_client = FacebookService.Client(prot, prot)
    trans.open()

    try:
        if (command == 'reload'):
            fb303_client.reinitialize()
        elif (command == 'stop'):
            fb303_client.shutdown()
        elif (command == 'status'):
            return fb303_client.getStatus()
        elif (command == 'version'):
            return fb303_client.getVersion()
        elif (command == 'get_status_details'):
            return fb303_client.getStatusDetails()
        elif (command == 'counters'):
            return fb303_client.getCounters()
        elif (command == 'name'):
            return fb303_client.getName()
        elif (command == 'alive'):
            return fb303_client.aliveSince()
    finally:
        # The original only closed the transport on the fall-through path,
        # leaking the socket for every command that returned a value.
        trans.close()
def fb_status_string(status_enum):
    """Map an fb_status enum value to its display name (None if unknown)."""
    names = {
        fb_status.DEAD: "DEAD",
        fb_status.STARTING: "STARTING",
        fb_status.ALIVE: "ALIVE",
        fb_status.STOPPING: "STOPPING",
        fb_status.STOPPED: "STOPPED",
        fb_status.WARNING: "WARNING",
    }
    # dict lookup instead of an if-chain; .get returns None for unknown
    # values, exactly like falling off the end of the original chain
    return names.get(status_enum)
def main():
    """Parse command-line options and run the requested fb303 command,
    exiting with its shell status code."""
    parser = OptionParser()
    commands = ["stop", "counters", "status", "reload", "version", "name", "alive"]
    parser.add_option("-c", "--command", dest="command", help="execute this API",
                      choices=commands, default="status")
    # type="int" so a port given on the command line is parsed to an int
    # instead of reaching the socket layer as a string (the default was
    # already an int, so only CLI-supplied values were affected)
    parser.add_option("-p", "--port", dest="port", type="int",
                      help="the service's port",
                      default=9082)
    (options, args) = parser.parse_args()
    status = service_ctrl(options.command, options.port)
    sys.exit(status)
# Script entry point.
if __name__ == '__main__':
    main()
|
stutivarshney/Bal-Aveksha | refs/heads/master | WebServer/BalAvekshaEnv/lib/python3.5/site-packages/wheel/bdist_wheel.py | 232 | """
Create a wheel (.whl) distribution.
A wheel is a built archive format.
"""
import csv
import hashlib
import os
import subprocess
import warnings
import shutil
import json
import wheel
try:
import sysconfig
except ImportError: # pragma nocover
# Python < 2.7
import distutils.sysconfig as sysconfig
import pkg_resources
safe_name = pkg_resources.safe_name
safe_version = pkg_resources.safe_version
from shutil import rmtree
from email.generator import Generator
from distutils.util import get_platform
from distutils.core import Command
from distutils.sysconfig import get_python_version
from distutils import log as logger
from .pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag
from .util import native, open_for_csv
from .archive import archive_wheelfile
from .pkginfo import read_pkg_info, write_pkg_info
from .metadata import pkginfo_to_dict
from . import pep425tags, metadata
def safer_name(name):
    """Escaped project name with '-' normalised to '_' (wheel filename rules)."""
    return safe_name(name).replace('-', '_')


def safer_version(version):
    """Escaped version string with '-' normalised to '_' (wheel filename rules)."""
    return safe_version(version).replace('-', '_')
class bdist_wheel(Command):
    # distutils command implementing `setup.py bdist_wheel`

    description = 'create a wheel distribution'

    # (long option, short option, help text) triples, distutils-style
    user_options = [('bdist-dir=', 'b',
                     "temporary directory for creating the distribution"),
                    ('plat-name=', 'p',
                     "platform name to embed in generated filenames "
                     "(default: %s)" % get_platform()),
                    ('keep-temp', 'k',
                     "keep the pseudo-installation tree around after " +
                     "creating the distribution archive"),
                    ('dist-dir=', 'd',
                     "directory to put final built distributions in"),
                    ('skip-build', None,
                     "skip rebuilding everything (for testing/debugging)"),
                    ('relative', None,
                     "build the archive using relative paths"
                     "(default: false)"),
                    ('owner=', 'u',
                     "Owner name used when creating a tar file"
                     " [default: current user]"),
                    ('group=', 'g',
                     "Group name used when creating a tar file"
                     " [default: current group]"),
                    ('universal', None,
                     "make a universal wheel"
                     " (default: false)"),
                    ('python-tag=', None,
                     "Python implementation compatibility tag"
                     " (default: py%s)" % get_impl_ver()[0]),
                    ]

    boolean_options = ['keep-temp', 'skip-build', 'relative', 'universal']
    def initialize_options(self):
        """Set every option to its pre-finalization default (distutils API)."""
        self.bdist_dir = None
        self.data_dir = None
        self.plat_name = None
        self.plat_tag = None
        self.format = 'zip'
        self.keep_temp = False
        self.dist_dir = None
        self.distinfo_dir = None
        self.egginfo_dir = None
        self.root_is_pure = None
        self.skip_build = None
        self.relative = False
        self.owner = None
        self.group = None
        self.universal = False
        # e.g. 'py2' or 'py3', derived from the running interpreter
        self.python_tag = 'py' + get_impl_ver()[0]
        self.plat_name_supplied = False
    def finalize_options(self):
        """Resolve option defaults that depend on other commands (distutils API)."""
        if self.bdist_dir is None:
            # default build location: <bdist_base>/wheel
            bdist_base = self.get_finalized_command('bdist').bdist_base
            self.bdist_dir = os.path.join(bdist_base, 'wheel')

        self.data_dir = self.wheel_dist_name + '.data'
        # remember whether --plat-name was given before bdist fills it in
        self.plat_name_supplied = self.plat_name is not None

        need_options = ('dist_dir', 'plat_name', 'skip_build')
        self.set_undefined_options('bdist',
                                   *zip(need_options, need_options))

        # pure wheels contain no compiled extensions or C libraries
        self.root_is_pure = not (self.distribution.has_ext_modules()
                                 or self.distribution.has_c_libraries())

        # Support legacy [wheel] section for setting universal
        wheel = self.distribution.get_option_dict('wheel')
        if 'universal' in wheel:
            # please don't define this in your global configs
            val = wheel['universal'][1].strip()
            if val.lower() in ('1', 'true', 'yes'):
                self.universal = True
    @property
    def wheel_dist_name(self):
        """Return distribution full name with - replaced with _"""
        # e.g. "my_project-1_2_3"
        return '-'.join((safer_name(self.distribution.get_name()),
                         safer_version(self.distribution.get_version())))
    def get_tag(self):
        """Return the (python tag, abi tag, platform tag) triple for this build."""
        # bdist sets self.plat_name if unset, we should only use it for purepy
        # wheels if the user supplied it.
        if self.plat_name_supplied:
            plat_name = self.plat_name
        elif self.root_is_pure:
            plat_name = 'any'
        else:
            plat_name = self.plat_name or get_platform()
            # tag characters must be filename-safe: '-' and '.' become '_'
            plat_name = plat_name.replace('-', '_').replace('.', '_')

        if self.root_is_pure:
            if self.universal:
                impl = 'py2.py3'
            else:
                impl = self.python_tag
            tag = (impl, 'none', plat_name)
        else:
            impl_name = get_abbr_impl()
            impl_ver = get_impl_ver()
            # PEP 3149
            abi_tag = str(get_abi_tag()).lower()
            tag = (impl_name + impl_ver, abi_tag, plat_name)
            supported_tags = pep425tags.get_supported(
                supplied_platform=plat_name if self.plat_name_supplied else None)
            # XXX switch to this alternate implementation for non-pure:
            # sanity check: the computed tag must be the most-preferred
            # supported tag on this interpreter
            assert tag == supported_tags[0]
        return tag
    def get_archive_basename(self):
        """Return archive name without extension"""
        # wheel filename layout: {name}-{version}-{python}-{abi}-{platform}
        impl_tag, abi_tag, plat_tag = self.get_tag()
        archive_basename = "%s-%s-%s-%s" % (
            self.wheel_dist_name,
            impl_tag,
            abi_tag,
            plat_tag)
        return archive_basename
    def run(self):
        """Build the project, 'install' it into a staging tree, convert the
        metadata to .dist-info form, and archive the tree as a .whl."""
        build_scripts = self.reinitialize_command('build_scripts')
        build_scripts.executable = 'python'
        if not self.skip_build:
            self.run_command('build')
        # Stage a full install under bdist_dir; compilation of .pyc files
        # is deferred to install time on the target machine.
        install = self.reinitialize_command('install',
                                            reinit_subcommands=True)
        install.root = self.bdist_dir
        install.compile = False
        install.skip_build = self.skip_build
        install.warn_dir = False
        # A wheel without setuptools scripts is more cross-platform.
        # Use the (undocumented) `no_ep` option to setuptools'
        # install_scripts command to avoid creating entry point scripts.
        install_scripts = self.reinitialize_command('install_scripts')
        install_scripts.no_ep = True
        # Use a custom scheme for the archive, because we have to decide
        # at installation time which scheme to use.
        for key in ('headers', 'scripts', 'data', 'purelib', 'platlib'):
            setattr(install,
                    'install_' + key,
                    os.path.join(self.data_dir, key))
        basedir_observed = ''
        if os.name == 'nt':
            # win32 barfs if any of these are ''; could be '.'?
            # (distutils.command.install:change_roots bug)
            basedir_observed = os.path.normpath(os.path.join(self.data_dir, '..'))
            self.install_libbase = self.install_lib = basedir_observed
        # Whichever of purelib/platlib applies is installed at the root of
        # the archive; the other scheme dirs live under data_dir (above).
        setattr(install,
                'install_purelib' if self.root_is_pure else 'install_platlib',
                basedir_observed)
        logger.info("installing to %s", self.bdist_dir)
        self.run_command('install')
        archive_basename = self.get_archive_basename()
        pseudoinstall_root = os.path.join(self.dist_dir, archive_basename)
        if not self.relative:
            archive_root = self.bdist_dir
        else:
            archive_root = os.path.join(
                self.bdist_dir,
                self._ensure_relative(install.install_base))
        # Convert the egg-info produced by install into .dist-info metadata.
        self.set_undefined_options(
            'install_egg_info', ('target', 'egginfo_dir'))
        self.distinfo_dir = os.path.join(self.bdist_dir,
                                         '%s.dist-info' % self.wheel_dist_name)
        self.egg2dist(self.egginfo_dir,
                      self.distinfo_dir)
        self.write_wheelfile(self.distinfo_dir)
        # RECORD must be written last: it hashes every other file.
        self.write_record(self.bdist_dir, self.distinfo_dir)
        # Make the archive
        if not os.path.exists(self.dist_dir):
            os.makedirs(self.dist_dir)
        wheel_name = archive_wheelfile(pseudoinstall_root, archive_root)
        # Sign the archive
        if 'WHEEL_TOOL' in os.environ:
            subprocess.call([os.environ['WHEEL_TOOL'], 'sign', wheel_name])
        # Add to 'Distribution.dist_files' so that the "upload" command works
        getattr(self.distribution, 'dist_files', []).append(
            ('bdist_wheel', get_python_version(), wheel_name))
        if not self.keep_temp:
            if self.dry_run:
                logger.info('removing %s', self.bdist_dir)
            else:
                rmtree(self.bdist_dir)
    def write_wheelfile(self, wheelfile_base, generator='bdist_wheel (' + wheel.__version__ + ')'):
        """Write the WHEEL metadata file into *wheelfile_base*.

        Records the wheel spec version, the generating tool, the purelib
        flag, and one 'Tag' header per expanded compatibility tag.
        """
        from email.message import Message
        msg = Message()
        msg['Wheel-Version'] = '1.0'  # of the spec
        msg['Generator'] = generator
        msg['Root-Is-Purelib'] = str(self.root_is_pure).lower()
        # Doesn't work for bdist_wininst
        impl_tag, abi_tag, plat_tag = self.get_tag()
        # Compressed tags like 'py2.py3' expand to one Tag header per
        # combination; email.Message permits repeated header names.
        for impl in impl_tag.split('.'):
            for abi in abi_tag.split('.'):
                for plat in plat_tag.split('.'):
                    msg['Tag'] = '-'.join((impl, abi, plat))
        wheelfile_path = os.path.join(wheelfile_base, 'WHEEL')
        logger.info('creating %s', wheelfile_path)
        with open(wheelfile_path, 'w') as f:
            Generator(f, maxheaderlen=0).flatten(msg)
def _ensure_relative(self, path):
# copied from dir_util, deleted
drive, path = os.path.splitdrive(path)
if path[0:1] == os.sep:
path = drive + path[1:]
return path
    def _pkginfo_to_metadata(self, egg_info_path, pkginfo_path):
        # Thin indirection over wheel.metadata's PKG-INFO conversion.
        # NOTE(review): likely exists as an override/test seam — verify.
        return metadata.pkginfo_to_metadata(egg_info_path, pkginfo_path)
def license_file(self):
"""Return license filename from a license-file key in setup.cfg, or None."""
metadata = self.distribution.get_option_dict('metadata')
if not 'license_file' in metadata:
return None
return metadata['license_file'][1]
def setupcfg_requirements(self):
"""Generate requirements from setup.cfg as
('Requires-Dist', 'requirement; qualifier') tuples. From a metadata
section in setup.cfg:
[metadata]
provides-extra = extra1
extra2
requires-dist = requirement; qualifier
another; qualifier2
unqualified
Yields
('Provides-Extra', 'extra1'),
('Provides-Extra', 'extra2'),
('Requires-Dist', 'requirement; qualifier'),
('Requires-Dist', 'another; qualifier2'),
('Requires-Dist', 'unqualified')
"""
metadata = self.distribution.get_option_dict('metadata')
# our .ini parser folds - to _ in key names:
for key, title in (('provides_extra', 'Provides-Extra'),
('requires_dist', 'Requires-Dist')):
if not key in metadata:
continue
field = metadata[key]
for line in field[1].splitlines():
line = line.strip()
if not line:
continue
yield (title, line)
    def add_requirements(self, metadata_path):
        """Add additional requirements from setup.cfg to file metadata_path"""
        additional = list(self.setupcfg_requirements())
        if not additional: return
        pkg_info = read_pkg_info(metadata_path)
        if 'Provides-Extra' in pkg_info or 'Requires-Dist' in pkg_info:
            # setup.cfg wins wholesale over setup.py: drop existing headers
            # before appending the setup.cfg-derived ones.
            warnings.warn('setup.cfg requirements overwrite values from setup.py')
            del pkg_info['Provides-Extra']
            del pkg_info['Requires-Dist']
        for k, v in additional:
            # Assignment on an email-style message appends repeated headers.
            pkg_info[k] = v
        write_pkg_info(metadata_path, pkg_info)
    def egg2dist(self, egginfo_path, distinfo_path):
        """Convert an .egg-info directory into a .dist-info directory"""
        def adios(p):
            """Appropriately delete directory, file or link."""
            if os.path.exists(p) and not os.path.islink(p) and os.path.isdir(p):
                shutil.rmtree(p)
            elif os.path.exists(p):
                os.unlink(p)
        # Start from a clean target.
        adios(distinfo_path)
        if not os.path.exists(egginfo_path):
            # There is no egg-info. This is probably because the egg-info
            # file/directory is not named matching the distribution name used
            # to name the archive file. Check for this case and report
            # accordingly.
            import glob
            pat = os.path.join(os.path.dirname(egginfo_path), '*.egg-info')
            possible = glob.glob(pat)
            err = "Egg metadata expected at %s but not found" % (egginfo_path,)
            if possible:
                alt = os.path.basename(possible[0])
                err += " (%s found - possible misnamed archive file?)" % (alt,)
            raise ValueError(err)
        if os.path.isfile(egginfo_path):
            # .egg-info is a single file
            pkginfo_path = egginfo_path
            pkg_info = self._pkginfo_to_metadata(egginfo_path, egginfo_path)
            os.mkdir(distinfo_path)
        else:
            # .egg-info is a directory
            pkginfo_path = os.path.join(egginfo_path, 'PKG-INFO')
            pkg_info = self._pkginfo_to_metadata(egginfo_path, pkginfo_path)
            # ignore common egg metadata that is useless to wheel
            shutil.copytree(egginfo_path, distinfo_path,
                            ignore=lambda x, y: set(('PKG-INFO',
                                                     'requires.txt',
                                                     'SOURCES.txt',
                                                     'not-zip-safe',)))
            # delete dependency_links if it is only whitespace
            dependency_links_path = os.path.join(distinfo_path, 'dependency_links.txt')
            with open(dependency_links_path, 'r') as dependency_links_file:
                dependency_links = dependency_links_file.read().strip()
            if not dependency_links:
                adios(dependency_links_path)
        write_pkg_info(os.path.join(distinfo_path, 'METADATA'), pkg_info)
        # XXX deprecated. Still useful for current distribute/setuptools.
        metadata_path = os.path.join(distinfo_path, 'METADATA')
        self.add_requirements(metadata_path)
        # XXX intentionally a different path than the PEP.
        metadata_json_path = os.path.join(distinfo_path, 'metadata.json')
        pymeta = pkginfo_to_dict(metadata_path,
                                 distribution=self.distribution)
        if 'description' in pymeta:
            # The long description moves out of the dict into its own file.
            description_filename = 'DESCRIPTION.rst'
            description_text = pymeta.pop('description')
            description_path = os.path.join(distinfo_path,
                                            description_filename)
            with open(description_path, "wb") as description_file:
                description_file.write(description_text.encode('utf-8'))
            pymeta['extensions']['python.details']['document_names']['description'] = description_filename
        # XXX heuristically copy any LICENSE/LICENSE.txt?
        license = self.license_file()
        if license:
            license_filename = 'LICENSE.txt'
            # NOTE(review): copies into self.distinfo_dir rather than the
            # distinfo_path argument; equal in the run() flow, but verify.
            shutil.copy(license, os.path.join(self.distinfo_dir, license_filename))
            pymeta['extensions']['python.details']['document_names']['license'] = license_filename
        with open(metadata_json_path, "w") as metadata_json:
            json.dump(pymeta, metadata_json, sort_keys=True)
        # The staged egg-info is no longer needed.
        adios(egginfo_path)
def write_record(self, bdist_dir, distinfo_dir):
from wheel.util import urlsafe_b64encode
record_path = os.path.join(distinfo_dir, 'RECORD')
record_relpath = os.path.relpath(record_path, bdist_dir)
def walk():
for dir, dirs, files in os.walk(bdist_dir):
dirs.sort()
for f in sorted(files):
yield os.path.join(dir, f)
def skip(path):
"""Wheel hashes every possible file."""
return (path == record_relpath)
with open_for_csv(record_path, 'w+') as record_file:
writer = csv.writer(record_file)
for path in walk():
relpath = os.path.relpath(path, bdist_dir)
if skip(relpath):
hash = ''
size = ''
else:
with open(path, 'rb') as f:
data = f.read()
digest = hashlib.sha256(data).digest()
hash = 'sha256=' + native(urlsafe_b64encode(digest))
size = len(data)
record_path = os.path.relpath(
path, bdist_dir).replace(os.path.sep, '/')
writer.writerow((record_path, hash, size))
|
pra85/calibre | refs/heads/master | src/calibre/ebooks/mobi/writer2/resources.py | 3 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from calibre.ebooks.mobi import MAX_THUMB_DIMEN, MAX_THUMB_SIZE
from calibre.ebooks.mobi.utils import (rescale_image, mobify_image,
write_font_record)
from calibre.ebooks import generate_masthead
from calibre.ebooks.oeb.base import OEB_RASTER_IMAGES
from calibre.utils.imghdr import what
PLACEHOLDER_GIF = b'GIF89a\x01\x00\x01\x00\x80\x00\x00\x00\x00\x00\xff\xff\xff!\xf9\x04\x01\x00\x00\x00\x00,\x00\x00\x00\x00\x01\x00\x01\x00@\x02\x01D\x00;'
class Resources(object):
    """Collect and serialize image/font resource records for MOBI output.

    Builds self.records (raw resource blobs; slot 0 reserved for the
    masthead when one exists), self.item_map (manifest href -> 1-based
    record index) and self.mime_map (href -> image mime type).
    Python 2 code: uses `unicode` and `dict.iteritems`.
    """

    def __init__(self, oeb, opts, is_periodical, add_fonts=False,
            process_images=True):
        self.oeb, self.log, self.opts = oeb, oeb.log, opts
        self.is_periodical = is_periodical
        self.process_images = process_images
        # href -> 1-based index of the corresponding record.
        self.item_map = {}
        self.records = []
        self.mime_map = {}
        self.masthead_offset = 0
        # Indices of image records that are actually referenced.
        self.used_image_indices = set()
        # Indices of all image records (superset of used_image_indices).
        self.image_indices = set()
        self.cover_offset = self.thumbnail_offset = None
        self.has_fonts = False
        self.add_resources(add_fonts)

    def process_image(self, data):
        """Return image data mobified or rescaled per user preference."""
        if not self.process_images:
            return data
        return (mobify_image(data) if self.opts.mobi_keep_original_images else
                rescale_image(data))

    def add_resources(self, add_fonts):
        """Scan the manifest, appending image (and optionally font) records."""
        oeb = self.oeb
        oeb.logger.info('Serializing resources...')
        index = 1
        # Record 0 is reserved for the masthead: either the one named in
        # the guide (filled in later) or a generated default for periodicals.
        mh_href = None
        if 'masthead' in oeb.guide and oeb.guide['masthead'].href:
            mh_href = oeb.guide['masthead'].href
            self.records.append(None)
            index += 1
            self.used_image_indices.add(0)
            self.image_indices.add(0)
        elif self.is_periodical:
            # Generate a default masthead
            data = generate_masthead(unicode(self.oeb.metadata['title'][0]))
            self.records.append(data)
            self.used_image_indices.add(0)
            self.image_indices.add(0)
            index += 1
        cover_href = self.cover_offset = self.thumbnail_offset = None
        if (oeb.metadata.cover and
                unicode(oeb.metadata.cover[0]) in oeb.manifest.ids):
            cover_id = unicode(oeb.metadata.cover[0])
            item = oeb.manifest.ids[cover_id]
            cover_href = item.href
        for item in self.oeb.manifest.values():
            if item.media_type not in OEB_RASTER_IMAGES:
                continue
            try:
                data = self.process_image(item.data)
            except:
                self.log.warn('Bad image file %r' % item.href)
                continue
            else:
                if mh_href and item.href == mh_href:
                    # Fill in the reserved masthead slot.
                    self.records[0] = data
                    continue
                self.image_indices.add(len(self.records))
                self.records.append(data)
                self.item_map[item.href] = index
                self.mime_map[item.href] = 'image/%s'%what(None, data)
                index += 1
                if cover_href and item.href == cover_href:
                    # Offsets are 0-based record indices (item_map is 1-based).
                    self.cover_offset = self.item_map[item.href] - 1
                    self.used_image_indices.add(self.cover_offset)
                    try:
                        data = rescale_image(item.data, dimen=MAX_THUMB_DIMEN,
                            maxsizeb=MAX_THUMB_SIZE)
                    except:
                        self.log.warn('Failed to generate thumbnail')
                    else:
                        self.image_indices.add(len(self.records))
                        self.records.append(data)
                        self.thumbnail_offset = index - 1
                        self.used_image_indices.add(self.thumbnail_offset)
                        index += 1
            finally:
                # Free decoded image data regardless of outcome.
                item.unload_data_from_memory()
        if add_fonts:
            for item in self.oeb.manifest.values():
                if item.href and item.href.rpartition('.')[-1].lower() in {
                        'ttf', 'otf'} and isinstance(item.data, bytes):
                    self.records.append(write_font_record(item.data))
                    self.item_map[item.href] = len(self.records)
                    self.has_fonts = True

    def add_extra_images(self):
        '''
        Add any images that were created after the call to add_resources()
        '''
        for item in self.oeb.manifest.values():
            if (item.media_type not in OEB_RASTER_IMAGES or item.href in self.item_map):
                continue
            try:
                data = self.process_image(item.data)
            except:
                self.log.warn('Bad image file %r' % item.href)
            else:
                self.records.append(data)
                self.item_map[item.href] = len(self.records)
            finally:
                item.unload_data_from_memory()

    def serialize(self, records, used_images):
        """Append all records to *records*, blanking unreferenced images.

        Image records never referenced by the text are replaced with a
        1x1 placeholder GIF to save space.
        """
        used_image_indices = self.used_image_indices | {
            v-1 for k, v in self.item_map.iteritems() if k in used_images}
        for i in self.image_indices-used_image_indices:
            self.records[i] = PLACEHOLDER_GIF
        records.extend(self.records)

    def __bool__(self):
        # Truthy iff at least one resource record was collected.
        return bool(self.records)
    __nonzero__ = __bool__  # Python 2 truthiness protocol
|
aterrel/blaze | refs/heads/master | blaze/expr/__init__.py | 1 | from .table import TableExpr
|
cryptobanana/ansible | refs/heads/devel | lib/ansible/module_utils/network/aireos/aireos.py | 85 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.connection import exec_command
# Cache of 'show run-config' output keyed by the exact command string, so
# repeated get_config() calls within one module run hit the device only once.
_DEVICE_CONFIGS = {}

# Connection options accepted under the 'provider' dict; credentials fall
# back to the standard ANSIBLE_NET_* environment variables.
aireos_provider_spec = {
    'host': dict(),
    'port': dict(type='int'),
    'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
    'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
    'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
    'timeout': dict(type='int'),
}
aireos_argument_spec = {
    'provider': dict(type='dict', options=aireos_provider_spec)
}
# Same options accepted at the top level for backwards compatibility,
# deprecated and scheduled for removal in Ansible 2.9.
aireos_top_spec = {
    'host': dict(removed_in_version=2.9),
    'port': dict(removed_in_version=2.9, type='int'),
    'username': dict(removed_in_version=2.9),
    'password': dict(removed_in_version=2.9, no_log=True),
    'ssh_keyfile': dict(removed_in_version=2.9, type='path'),
    'timeout': dict(removed_in_version=2.9, type='int'),
}
aireos_argument_spec.update(aireos_top_spec)
def sanitize(resp):
    """Strip leading/trailing whitespace from every line of a device response.

    AireOS pads lines with leading whitespace (which netcfg would misparse as
    parent/child structure) and unused trailing whitespace; remove both, then
    trim the result as a whole.
    """
    stripped_lines = (line.strip() for line in resp.splitlines())
    return '\n'.join(stripped_lines).strip()
def get_provider_argspec():
    """Return the argument spec for the 'provider' connection options."""
    return aireos_provider_spec
def check_args(module, warnings):
    """Hook for provider-specific argument validation; no-op for AireOS."""
    pass
def get_config(module, flags=None):
    """Return the device running config ('show run-config commands'), cached.

    *flags* are appended to the base command; the sanitized output is
    memoized in _DEVICE_CONFIGS per exact command string.
    """
    flags = [] if flags is None else flags
    cmd = 'show run-config commands '
    cmd += ' '.join(flags)
    cmd = cmd.strip()
    try:
        return _DEVICE_CONFIGS[cmd]
    except KeyError:
        # Cache miss: run the command on the device and store the result.
        rc, out, err = exec_command(module, cmd)
        if rc != 0:
            module.fail_json(msg='unable to retrieve current config', stderr=to_text(err, errors='surrogate_then_replace'))
        cfg = sanitize(to_text(out, errors='surrogate_then_replace').strip())
        _DEVICE_CONFIGS[cmd] = cfg
        return cfg
def to_commands(module, commands):
    """Normalize *commands* into dicts with command/prompt/answer keys."""
    spec = {
        'command': dict(key=True),
        'prompt': dict(),
        'answer': dict()
    }
    transform = ComplexList(spec, module)
    return transform(commands)
def run_commands(module, commands, check_rc=True):
    """Run each command on the device and return the sanitized outputs.

    Fails the module on the first non-zero return code when *check_rc*
    is set; otherwise the (sanitized) output is collected regardless.
    """
    responses = list()
    commands = to_commands(module, to_list(commands))
    for cmd in commands:
        cmd = module.jsonify(cmd)
        rc, out, err = exec_command(module, cmd)
        if check_rc and rc != 0:
            module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), rc=rc)
        responses.append(sanitize(to_text(out, errors='surrogate_then_replace')))
    return responses
def load_config(module, commands):
    """Enter config mode, push *commands*, then leave with 'end'.

    'end' lines inside *commands* are skipped because config mode is
    closed explicitly at the bottom.
    """
    rc, out, err = exec_command(module, 'config')
    if rc != 0:
        # NOTE(review): passes stdout (out), not stderr, as 'err' here —
        # possibly intentional; verify against exec_command's contract.
        module.fail_json(msg='unable to enter configuration mode', err=to_text(out, errors='surrogate_then_replace'))
    for command in to_list(commands):
        if command == 'end':
            continue
        rc, out, err = exec_command(module, command)
        if rc != 0:
            module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), command=command, rc=rc)
    exec_command(module, 'end')
|
iborko/fmindex | refs/heads/master | sandbox/OccTrivial.py | 1 | """
Provides a trivial Occurence table implementation,
for testing purposes.
"""
import numpy as np
import logging
log = logging.getLogger(__name__)
class OccTrivial(object):
    """Trivial occurrence (rank) table over a string, for testing.

    occ[c, i] holds the number of occurrences of alphabet character c in
    string[:i], built in O(|alphabet| * n) time and space.
    """

    def __init__(self, string):
        """Build the occurrence table for *string*."""
        super(OccTrivial, self).__init__()

        # get the string alphabet
        self.alphabet = sorted(set(string))
        int_string = [self.alphabet.index(x) for x in string]

        # Column i of occ holds per-character counts over string[:i].
        # dtype=int replaces the np.int alias removed in NumPy 1.24;
        # np.int was simply the builtin int, so behavior is unchanged.
        self.occ = np.zeros((len(self.alphabet), len(string) + 1), dtype=int)
        for ind in range(1, len(string) + 1):
            # Carry over the previous column, then bump the current char.
            self.occ[:, ind] = self.occ[:, ind - 1].copy()
            self.occ[int_string[ind - 1], ind] += 1

    def rank(self, c, i):
        """Return the number of occurrences of character *c* in string[:i]."""
        return self.occ[self.alphabet.index(c), i]
def main():
    """Demo entry point: build an Occ table for a sample string and log it."""
    logging.basicConfig(level=logging.DEBUG)
    log.info('Displaying a basic Trivial Occ table')
    sample = 'TGGACCACGTTGCAGCCCCA'
    table = OccTrivial(sample)
    log.info('Occ for string %s is :\n%r', sample, table.occ)


if __name__ == '__main__':
    main()
|
Arkapravo/morse-0.6 | refs/heads/master | src/morse/middleware/ros/read_vw_twist.py | 1 | import roslib; roslib.load_manifest('roscpp'); roslib.load_manifest('rospy'); roslib.load_manifest('geometry_msgs'); roslib.load_manifest('rosgraph_msgs')
import rospy
import std_msgs
import math
from geometry_msgs.msg import Twist
def init_extra_module(self, component_instance, function, mw_data):
    """ Setup the middleware connection with this data

    Prepare the middleware to handle the serialised data as necessary:
    registers *function* as an input function of the component and
    subscribes to Twist messages on the '<robot>/<component>' topic,
    routing them through callback_wp.
    """
    component_name = component_instance.blender_obj.name
    parent_name = component_instance.robot_parent.blender_obj.name

    # Add the new method to the component
    component_instance.input_functions.append(function)
    self._topics.append(rospy.Subscriber(parent_name + "/" + component_name, Twist, callback_wp, component_instance))
def callback_wp(data, component_instance):
    """ROS subscriber callback, invoked for each published Twist message.

    Copies linear.x into local_data["v"] (forward speed) and angular.z
    into local_data["w"] (yaw rate).
    """
    component_instance.local_data["v"] = data.linear.x
    component_instance.local_data["w"] = data.angular.z
def read_twist(self, component_instance):
    """Intentional no-op: dummy per-frame input function for Waypoints.

    The component's data is updated asynchronously by the ROS subscriber
    callback (callback_wp), so this hook has nothing to do.
    """
|
ehashman/oh-mainline | refs/heads/master | vendor/packages/Django/tests/regressiontests/m2m_through_regress/__init__.py | 45382 | |
alisidd/tensorflow | refs/heads/asgd-dc | tensorflow/python/kernel_tests/random_shuffle_queue_test.py | 65 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.Queue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import re
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class RandomShuffleQueueTest(test.TestCase):
def setUp(self):
# Useful for debugging when a test times out.
super(RandomShuffleQueueTest, self).setUp()
tf_logging.error("Starting: %s", self._testMethodName)
def tearDown(self):
super(RandomShuffleQueueTest, self).tearDown()
tf_logging.error("Finished: %s", self._testMethodName)
def testEnqueue(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
self.assertAllEqual(0, q.size().eval())
enqueue_op.run()
self.assertAllEqual(1, q.size().eval())
def testEnqueueWithShape(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=tensor_shape.TensorShape([3, 2]))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
self.assertAllEqual(1, q.size().eval())
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
def testEnqueueManyWithShape(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(
10, 5, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertAllEqual(4, q.size().eval())
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, shapes=tensor_shape.TensorShape([3]))
q2.enqueue(([1, 2, 3],))
q2.enqueue_many(([[1, 2, 3]],))
def testScalarShapes(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (1,)])
q.enqueue_many([[1, 2, 3, 4], [[5], [6], [7], [8]]]).run()
q.enqueue([9, [10]]).run()
dequeue_t = q.dequeue()
results = []
for _ in range(2):
a, b = sess.run(dequeue_t)
results.append((a, b))
a, b = sess.run(q.dequeue_many(3))
for i in range(3):
results.append((a[i], b[i]))
self.assertItemsEqual([(1, [5]), (2, [6]), (3, [7]), (4, [8]), (9, [10])],
results)
def testParallelEnqueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
sess.run(enqueue_op)
threads = [
self.checkedThread(
target=enqueue, args=(e,)) for e in enqueue_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
vals = [dequeued_t.eval() for _ in xrange(len(elems))]
self.assertItemsEqual(elems, vals)
def testEnqueueAndBlockingDequeue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(3, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
sess.run(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(sess.run(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, results)
def testMultiEnqueueAndDequeue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.int32, dtypes_lib.float32))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
results = []
for _ in xrange(len(elems)):
x, y = sess.run(dequeued_t)
results.append((x, y))
self.assertItemsEqual(elems, results)
def testQueueSizeEmpty(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
self.assertEqual(0, q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual([1], size.eval())
dequeued_t.op.run()
self.assertEqual([0], size.eval())
def testEnqueueMany(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
results = []
for _ in range(8):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + elems, results)
def testEmptyEnqueueMany(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
empty_t = constant_op.constant(
[], dtype=dtypes_lib.float32, shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual(0, size_t.eval())
enqueue_op.run()
self.assertEqual(0, size_t.eval())
def testEmptyDequeueMany(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueUpTo(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_up_to(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueManyWithNoShape(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((constant_op.constant(
[10.0, 20.0], shape=(1, 2)),))
dequeued_t = q.dequeue_many(0)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
dequeued_t.eval()
enqueue_op.run()
# RandomShuffleQueue does not make any attempt to support DequeueMany
# with unspecified shapes, even if a shape could be inferred from the
# elements enqueued.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
dequeued_t.eval()
def testEmptyDequeueUpToWithNoShape(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((constant_op.constant(
[10.0, 20.0], shape=(1, 2)),))
dequeued_t = q.dequeue_up_to(0)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
dequeued_t.eval()
enqueue_op.run()
# RandomShuffleQueue does not make any attempt to support DequeueUpTo
# with unspecified shapes, even if a shape could be inferred from the
# elements enqueued.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
dequeued_t.eval()
def testMultiEnqueueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
results = []
for _ in range(8):
float_val, int_val = sess.run(dequeued_t)
results.append((float_val, [int_val[0], int_val[1]]))
expected = list(zip(float_elems, int_elems)) * 2
self.assertItemsEqual(expected, results)
def testDequeueMany(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(5)
enqueue_op.run()
results = dequeued_t.eval().tolist()
results.extend(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testDequeueUpToNoBlocking(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(5)
enqueue_op.run()
results = dequeued_t.eval().tolist()
results.extend(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testMultiDequeueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
results = []
float_val, int_val = sess.run(dequeued_t)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_t)
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_single_t)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
results.append((float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_single_t)
results.append((float_val, int_val.tolist()))
self.assertItemsEqual(zip(float_elems, int_elems), results)
def testMultiDequeueUpToNoBlocking(self):
  """dequeue_up_to on a two-component queue; its static shape is unknown."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(
        10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
    float_elems = [
        10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
    ]
    int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
                 [15, 16], [17, 18], [19, 20]]
    enqueue_op = q.enqueue_many((float_elems, int_elems))
    dequeued_t = q.dequeue_up_to(4)
    dequeued_single_t = q.dequeue()

    enqueue_op.run()

    results = []
    float_val, int_val = sess.run(dequeued_t)
    # dequeue_up_to has undefined shape.
    self.assertEqual([None], dequeued_t[0].get_shape().as_list())
    self.assertEqual([None, 2], dequeued_t[1].get_shape().as_list())
    results.extend(zip(float_val, int_val.tolist()))
    float_val, int_val = sess.run(dequeued_t)
    results.extend(zip(float_val, int_val.tolist()))

    # Drain the remaining two elements one at a time.
    float_val, int_val = sess.run(dequeued_single_t)
    self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
    self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
    results.append((float_val, int_val.tolist()))
    float_val, int_val = sess.run(dequeued_single_t)
    results.append((float_val, int_val.tolist()))

    self.assertItemsEqual(zip(float_elems, int_elems), results)
def testHighDimension(self):
  """Rank-4 elements survive a round trip through the shuffle queue."""
  with self.test_session():
    shuffle_q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.int32, (
        (4, 4, 4, 4)))
    data = np.array(
        [np.full((4, 4, 4, 4), value) for value in range(10)], np.int32)
    enqueue = shuffle_q.enqueue_many((data,))
    dequeue_all = shuffle_q.dequeue_many(10)
    enqueue.run()
    self.assertItemsEqual(dequeue_all.eval().tolist(), data.tolist())
def testParallelEnqueueMany(self):
  """Ten threads each enqueue 100 elements; all 1000 must arrive."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(
        1000, 0, dtypes_lib.float32, shapes=())
    elems = [10.0 * x for x in range(100)]
    enqueue_op = q.enqueue_many((elems,))
    dequeued_t = q.dequeue_many(1000)

    # Enqueue 100 items in parallel on 10 threads.
    def enqueue():
      sess.run(enqueue_op)

    threads = [self.checkedThread(target=enqueue) for _ in range(10)]
    for thread in threads:
      thread.start()
    for thread in threads:
      thread.join()

    self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
  """Ten threads each dequeue_many(100); together they drain 1000 items."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(
        1000, 0, dtypes_lib.float32, shapes=())
    elems = [10.0 * x for x in range(1000)]
    enqueue_op = q.enqueue_many((elems,))
    dequeued_t = q.dequeue_many(100)
    enqueue_op.run()

    # Dequeue 100 items in parallel on 10 threads.
    dequeued_elems = []

    def dequeue():
      dequeued_elems.extend(sess.run(dequeued_t))

    threads = [self.checkedThread(target=dequeue) for _ in range(10)]
    for thread in threads:
      thread.start()
    for thread in threads:
      thread.join()
    self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpTo(self):
  """Ten threads each dequeue_up_to(100); together they drain 1000 items."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(
        1000, 0, dtypes_lib.float32, shapes=())
    elems = [10.0 * x for x in range(1000)]
    enqueue_op = q.enqueue_many((elems,))
    dequeued_t = q.dequeue_up_to(100)
    enqueue_op.run()

    # Dequeue 100 items in parallel on 10 threads.
    dequeued_elems = []

    def dequeue():
      dequeued_elems.extend(sess.run(dequeued_t))

    threads = [self.checkedThread(target=dequeue) for _ in range(10)]
    for thread in threads:
      thread.start()
    for thread in threads:
      thread.join()
    self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpToRandomPartition(self):
  """Threads with random dequeue_up_to sizes partition the whole queue."""
  with self.test_session() as sess:
    dequeue_sizes = [random.randint(50, 150) for _ in xrange(10)]
    total_elements = sum(dequeue_sizes)
    q = data_flow_ops.RandomShuffleQueue(
        total_elements, 0, dtypes_lib.float32, shapes=())

    elems = [10.0 * x for x in xrange(total_elements)]
    enqueue_op = q.enqueue_many((elems,))
    dequeue_ops = [q.dequeue_up_to(size) for size in dequeue_sizes]
    enqueue_op.run()

    # Dequeue random number of items in parallel on 10 threads.
    dequeued_elems = []

    def dequeue(dequeue_op):
      dequeued_elems.extend(sess.run(dequeue_op))

    threads = []
    for dequeue_op in dequeue_ops:
      threads.append(self.checkedThread(target=dequeue, args=(dequeue_op,)))
    for thread in threads:
      thread.start()
    for thread in threads:
      thread.join()
    self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueMany(self):
  """dequeue_many blocks until a concurrent enqueue supplies elements."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    dequeued_t = q.dequeue_many(4)

    dequeued_elems = []

    def enqueue():
      # The enqueue_op should run after the dequeue op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      sess.run(enqueue_op)

    def dequeue():
      dequeued_elems.extend(sess.run(dequeued_t).tolist())

    enqueue_thread = self.checkedThread(target=enqueue)
    dequeue_thread = self.checkedThread(target=dequeue)
    enqueue_thread.start()
    dequeue_thread.start()
    enqueue_thread.join()
    dequeue_thread.join()

    self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
  """dequeue_up_to blocks until a concurrent enqueue supplies elements."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    dequeued_t = q.dequeue_up_to(4)

    dequeued_elems = []

    def enqueue():
      # The enqueue_op should run after the dequeue op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      sess.run(enqueue_op)

    def dequeue():
      dequeued_elems.extend(sess.run(dequeued_t).tolist())

    enqueue_thread = self.checkedThread(target=enqueue)
    dequeue_thread = self.checkedThread(target=dequeue)
    enqueue_thread.start()
    dequeue_thread.start()
    enqueue_thread.join()
    dequeue_thread.join()

    self.assertItemsEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
  """dequeue_many accepts a batch size produced by another queue."""
  with self.test_session():
    # A queue of integer batch sizes.
    counts = [random.randint(1, 10) for _ in range(100)]
    count_queue = data_flow_ops.RandomShuffleQueue(100, 0, dtypes_lib.int32)
    enqueue_counts = count_queue.enqueue_many((counts,))
    num_elements = sum(counts)

    # A queue holding num_elements scalar values.
    values = [random.randint(0, 100) for _ in range(num_elements)]
    value_queue = data_flow_ops.RandomShuffleQueue(num_elements, 0,
                                                   dtypes_lib.int32, ((),))
    enqueue_values = value_queue.enqueue_many((values,))

    # Each evaluation first dequeues a count, then that many values.
    batch = value_queue.dequeue_many(count_queue.dequeue())

    enqueue_counts.run()
    enqueue_values.run()

    dequeued = []
    for _ in counts:
      dequeued.extend(batch.eval())
    self.assertItemsEqual(values, dequeued)
def testDequeueUpToWithTensorParameter(self):
  """dequeue_up_to accepts a batch size produced by another queue."""
  with self.test_session():
    # A queue of integer batch sizes.
    counts = [random.randint(1, 10) for _ in range(100)]
    count_queue = data_flow_ops.RandomShuffleQueue(100, 0, dtypes_lib.int32)
    enqueue_counts = count_queue.enqueue_many((counts,))
    num_elements = sum(counts)

    # A queue holding num_elements scalar values.
    values = [random.randint(0, 100) for _ in range(num_elements)]
    value_queue = data_flow_ops.RandomShuffleQueue(num_elements, 0,
                                                   dtypes_lib.int32, ((),))
    enqueue_values = value_queue.enqueue_many((values,))

    # Each evaluation first dequeues a count, then up to that many values.
    batch = value_queue.dequeue_up_to(count_queue.dequeue())

    enqueue_counts.run()
    enqueue_values.run()

    dequeued = []
    for _ in counts:
      dequeued.extend(batch.eval())
    self.assertItemsEqual(values, dequeued)
def testDequeueFromClosedQueue(self):
  """After close, remaining elements dequeue; then OutOfRange is raised."""
  with self.test_session():
    queue = data_flow_ops.RandomShuffleQueue(10, 2, dtypes_lib.float32)
    values = [10.0, 20.0, 30.0, 40.0]
    enqueue = queue.enqueue_many((values,))
    close = queue.close()
    dequeue = queue.dequeue()

    enqueue.run()
    close.run()
    got = []
    for _ in values:
      got.append(dequeue.eval())
    self.assertItemsEqual([[v] for v in values], got)

    # A closed, drained queue must report OutOfRange.
    with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                 "is closed and has insufficient"):
      dequeue.eval()
def testBlockingDequeueFromClosedQueue(self):
  """min_after_dequeue blocks the last dequeues until the queue closes."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 2, dtypes_lib.float32)
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    close_op = q.close()
    dequeued_t = q.dequeue()

    enqueue_op.run()

    results = []

    def dequeue():
      for _ in elems:
        results.append(sess.run(dequeued_t))
      self.assertItemsEqual(elems, results)
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                   "is closed and has insufficient"):
        sess.run(dequeued_t)

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    # The dequeue thread blocked when it hit the min_size requirement.
    self.assertEqual(len(results), 2)
    close_op.run()
    dequeue_thread.join()
    # Once the queue is closed, the min_size requirement is lifted.
    self.assertEqual(len(results), 4)
def testBlockingDequeueFromClosedEmptyQueue(self):
  """A dequeue blocked on an empty queue fails once the queue is closed."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
    close_op = q.close()
    dequeued_t = q.dequeue()

    finished = []  # Needs to be a mutable type

    def dequeue():
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                   "is closed and has insufficient"):
        sess.run(dequeued_t)
      finished.append(True)

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    self.assertEqual(len(finished), 0)
    close_op.run()
    dequeue_thread.join()
    self.assertEqual(len(finished), 1)
def testBlockingDequeueManyFromClosedQueue(self):
  """dequeue_many succeeds once, then fails after the queue is closed."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    close_op = q.close()
    dequeued_t = q.dequeue_many(4)

    enqueue_op.run()

    progress = []  # Must be mutable

    def dequeue():
      self.assertItemsEqual(elems, sess.run(dequeued_t))
      progress.append(1)
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                   "is closed and has insufficient"):
        sess.run(dequeued_t)
      progress.append(2)

    self.assertEqual(len(progress), 0)
    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    for _ in range(100):
      time.sleep(0.01)
      if len(progress) == 1:
        break
    self.assertEqual(len(progress), 1)
    time.sleep(0.01)
    close_op.run()
    dequeue_thread.join()
    self.assertEqual(len(progress), 2)
def testBlockingDequeueUpToFromClosedQueueReturnsRemainder(self):
  """dequeue_up_to(3) blocks on 1 remaining element; close releases it.

  Uses assertEqual instead of the deprecated unittest alias assertEquals.
  """
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    close_op = q.close()
    dequeued_t = q.dequeue_up_to(3)

    enqueue_op.run()

    results = []

    def dequeue():
      results.extend(sess.run(dequeued_t))
      self.assertEqual(3, len(results))
      # The second call blocks until the close, then returns the remainder.
      results.extend(sess.run(dequeued_t))
      self.assertEqual(4, len(results))

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    close_op.run()
    dequeue_thread.join()
    self.assertItemsEqual(results, elems)
def testBlockingDequeueUpToSmallerThanMinAfterDequeue(self):
  """A blocked dequeue_up_to returns fewer than requested after close.

  Uses assertEqual instead of the deprecated unittest alias assertEquals.
  """
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(
        capacity=10,
        min_after_dequeue=2,
        dtypes=dtypes_lib.float32,
        shapes=((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    close_op = q.close()
    dequeued_t = q.dequeue_up_to(3)

    enqueue_op.run()

    results = []

    def dequeue():
      results.extend(sess.run(dequeued_t))
      self.assertEqual(3, len(results))
      # min_after_dequeue is 2, we ask for 3 elements, and we end up only
      # getting the remaining 1.
      results.extend(sess.run(dequeued_t))
      self.assertEqual(4, len(results))

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    close_op.run()
    dequeue_thread.join()
    self.assertItemsEqual(results, elems)
def testBlockingDequeueManyFromClosedQueueWithElementsRemaining(self):
  """A failed dequeue_many must release any elements it had reserved."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    close_op = q.close()
    dequeued_t = q.dequeue_many(3)
    cleanup_dequeue_t = q.dequeue_many(q.size())

    enqueue_op.run()

    results = []

    def dequeue():
      results.extend(sess.run(dequeued_t))
      self.assertEqual(len(results), 3)
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                   "is closed and has insufficient"):
        sess.run(dequeued_t)
      # While the last dequeue failed, we want to ensure that it returns
      # any elements that it potentially reserved to dequeue. Thus the
      # next cleanup should return a single element.
      results.extend(sess.run(cleanup_dequeue_t))

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    close_op.run()
    dequeue_thread.join()
    self.assertEqual(len(results), 4)
def testBlockingDequeueManyFromClosedEmptyQueue(self):
  """dequeue_many on a closed queue with too few elements raises."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
    close_op = q.close()
    dequeued_t = q.dequeue_many(4)

    def dequeue():
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                   "is closed and has insufficient"):
        sess.run(dequeued_t)

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    close_op.run()
    dequeue_thread.join()
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
  """dequeue_up_to on a closed queue with too few elements raises."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
    close_op = q.close()
    dequeued_t = q.dequeue_up_to(4)

    def dequeue():
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
                                   "is closed and has insufficient"):
        sess.run(dequeued_t)

    dequeue_thread = self.checkedThread(target=dequeue)
    dequeue_thread.start()
    # The close_op should run after the dequeue_thread has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    close_op.run()
    dequeue_thread.join()
def testEnqueueToClosedQueue(self):
  """Enqueueing to a closed queue raises CancelledError."""
  with self.test_session():
    queue = data_flow_ops.RandomShuffleQueue(10, 4, dtypes_lib.float32)
    enqueue = queue.enqueue((10.0,))
    close = queue.close()

    enqueue.run()
    close.run()

    # Any further enqueue must be rejected.
    with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
      enqueue.run()
def testEnqueueManyToClosedQueue(self):
  """enqueue_many to a closed queue raises CancelledError."""
  with self.test_session():
    queue = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
    values = [10.0, 20.0, 30.0, 40.0]
    enqueue = queue.enqueue_many((values,))
    close = queue.close()

    enqueue.run()
    close.run()

    # Any further enqueue must be rejected.
    with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
      enqueue.run()
def testBlockingEnqueueToFullQueue(self):
  """An enqueue on a full queue blocks until an element is dequeued."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    blocking_enqueue_op = q.enqueue((50.0,))
    dequeued_t = q.dequeue()

    enqueue_op.run()

    def blocking_enqueue():
      sess.run(blocking_enqueue_op)

    thread = self.checkedThread(target=blocking_enqueue)
    thread.start()
    # The dequeue ops should run after the blocking_enqueue_op has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    results = []
    for _ in elems:
      results.append(dequeued_t.eval())
    results.append(dequeued_t.eval())
    self.assertItemsEqual(elems + [50.0], results)
    # There wasn't room for 50.0 in the queue when the first element was
    # dequeued.
    self.assertNotEqual(50.0, results[0])
    thread.join()
def testBlockingEnqueueManyToFullQueue(self):
  """enqueue_many past capacity blocks until elements are dequeued."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
    dequeued_t = q.dequeue()

    enqueue_op.run()

    def blocking_enqueue():
      sess.run(blocking_enqueue_op)

    thread = self.checkedThread(target=blocking_enqueue)
    thread.start()
    # The dequeue ops should run after the blocking_enqueue_op has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    results = []
    for _ in elems:
      time.sleep(0.01)
      results.append(dequeued_t.eval())
    results.append(dequeued_t.eval())
    results.append(dequeued_t.eval())
    self.assertItemsEqual(elems + [50.0, 60.0], results)
    # There wasn't room for 50.0 or 60.0 in the queue when the first
    # element was dequeued.
    self.assertNotEqual(50.0, results[0])
    self.assertNotEqual(60.0, results[0])
    # Similarly for 60.0 and the second element.
    self.assertNotEqual(60.0, results[1])
    thread.join()
def testBlockingEnqueueToClosedQueue(self):
  """close waits behind a pending enqueue; later enqueues are cancelled."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0, 40.0]
    enqueue_op = q.enqueue_many((elems,))
    blocking_enqueue_op = q.enqueue((50.0,))
    dequeued_t = q.dequeue()
    close_op = q.close()

    enqueue_op.run()

    def blocking_enqueue():
      # Expect the operation to succeed since it will complete
      # before the queue is closed.
      sess.run(blocking_enqueue_op)
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(errors_impl.CancelledError, "closed"):
        sess.run(blocking_enqueue_op)

    thread1 = self.checkedThread(target=blocking_enqueue)
    thread1.start()
    # The close_op should run after the first blocking_enqueue_op has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)

    def blocking_close():
      sess.run(close_op)

    thread2 = self.checkedThread(target=blocking_close)
    thread2.start()
    # Wait for the close op to block before unblocking the enqueue.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    results = []
    # Dequeue to unblock the first blocking_enqueue_op, after which the
    # close will complete.
    results.append(dequeued_t.eval())
    self.assertTrue(results[0] in elems)
    thread2.join()
    thread1.join()
def testBlockingEnqueueManyToClosedQueue(self):
  """A partially-applied enqueue_many interacts correctly with close."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
    elems = [10.0, 20.0, 30.0]
    enqueue_op = q.enqueue_many((elems,))
    blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
    close_op = q.close()
    size_t = q.size()

    enqueue_op.run()
    self.assertEqual(size_t.eval(), 3)

    def blocking_enqueue():
      # This will block until the dequeue after the close.
      sess.run(blocking_enqueue_op)
      # At this point the close operation will become unblocked, so the
      # next enqueue will fail.
      with self.assertRaisesRegexp(errors_impl.CancelledError, "closed"):
        sess.run(blocking_enqueue_op)

    thread1 = self.checkedThread(target=blocking_enqueue)
    thread1.start()
    # The close_op should run after the blocking_enqueue_op has blocked.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    # First blocking_enqueue_op of blocking_enqueue has enqueued 1 of 2
    # elements, and is blocked waiting for one more element to be dequeued.
    self.assertEqual(size_t.eval(), 4)

    def blocking_close():
      sess.run(close_op)

    thread2 = self.checkedThread(target=blocking_close)
    thread2.start()
    # The close_op should run before the second blocking_enqueue_op
    # has started.
    # TODO(mrry): Figure out how to do this without sleeping.
    time.sleep(0.1)
    # Unblock the first blocking_enqueue_op in blocking_enqueue.
    q.dequeue().eval()
    thread2.join()
    thread1.join()
def testSharedQueueSameSession(self):
  """Two queues with the same shared_name share underlying state."""
  with self.test_session():
    q1 = data_flow_ops.RandomShuffleQueue(
        1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")
    q1.enqueue((10.0,)).run()

    # TensorFlow TestCase adds a default graph seed (=87654321). We check if
    # the seed computed from the default graph seed is reproduced.
    seed = 887634792
    q2 = data_flow_ops.RandomShuffleQueue(
        1,
        0,
        dtypes_lib.float32, ((),),
        shared_name="shared_queue",
        seed=seed)

    q1_size_t = q1.size()
    q2_size_t = q2.size()

    # Both handles observe the single shared element.
    self.assertEqual(q1_size_t.eval(), 1)
    self.assertEqual(q2_size_t.eval(), 1)

    self.assertEqual(q2.dequeue().eval(), 10.0)

    self.assertEqual(q1_size_t.eval(), 0)
    self.assertEqual(q2_size_t.eval(), 0)

    q2.enqueue((20.0,)).run()

    self.assertEqual(q1_size_t.eval(), 1)
    self.assertEqual(q2_size_t.eval(), 1)

    self.assertEqual(q1.dequeue().eval(), 20.0)

    self.assertEqual(q1_size_t.eval(), 0)
    self.assertEqual(q2_size_t.eval(), 0)
def testSharedQueueSameSessionGraphSeedNone(self):
  """A shared queue accepts a previously-set seed when none is provided."""
  with self.test_session():
    q1 = data_flow_ops.RandomShuffleQueue(
        1,
        0,
        dtypes_lib.float32, ((),),
        shared_name="shared_queue",
        seed=98765432)
    q1.enqueue((10.0,)).run()

    # If both graph and op seeds are not provided, the default value must be
    # used, and in case a shared queue is already created, the second queue op
    # must accept any previous seed value.
    random_seed.set_random_seed(None)
    q2 = data_flow_ops.RandomShuffleQueue(
        1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")

    q1_size_t = q1.size()
    q2_size_t = q2.size()

    self.assertEqual(q1_size_t.eval(), 1)
    self.assertEqual(q2_size_t.eval(), 1)
def testIncompatibleSharedQueueErrors(self):
  """Opening a shared queue with mismatched attributes fails at run time."""
  with self.test_session():
    # Mismatched capacity.
    q_a_1 = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.float32, shared_name="q_a")
    q_a_2 = data_flow_ops.RandomShuffleQueue(
        15, 5, dtypes_lib.float32, shared_name="q_a")
    q_a_1.queue_ref.op.run()
    with self.assertRaisesOpError("capacity"):
      q_a_2.queue_ref.op.run()

    # Mismatched min_after_dequeue.
    q_b_1 = data_flow_ops.RandomShuffleQueue(
        10, 0, dtypes_lib.float32, shared_name="q_b")
    q_b_2 = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.float32, shared_name="q_b")
    q_b_1.queue_ref.op.run()
    with self.assertRaisesOpError("min_after_dequeue"):
      q_b_2.queue_ref.op.run()

    # Mismatched component dtype.
    q_c_1 = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.float32, shared_name="q_c")
    q_c_2 = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.int32, shared_name="q_c")
    q_c_1.queue_ref.op.run()
    with self.assertRaisesOpError("component types"):
      q_c_2.queue_ref.op.run()

    # Unshaped vs. shaped.
    q_d_1 = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.float32, shared_name="q_d")
    q_d_2 = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
    q_d_1.queue_ref.op.run()
    with self.assertRaisesOpError("component shapes"):
      q_d_2.queue_ref.op.run()

    # Shaped vs. unshaped.
    q_e_1 = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
    q_e_2 = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.float32, shared_name="q_e")
    q_e_1.queue_ref.op.run()
    with self.assertRaisesOpError("component shapes"):
      q_e_2.queue_ref.op.run()

    # Mismatched shapes.
    q_f_1 = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_f")
    q_f_2 = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_f")
    q_f_1.queue_ref.op.run()
    with self.assertRaisesOpError("component shapes"):
      q_f_2.queue_ref.op.run()

    # Mismatched number of components.
    q_g_1 = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.float32, shared_name="q_g")
    q_g_2 = data_flow_ops.RandomShuffleQueue(
        10, 5, (dtypes_lib.float32, dtypes_lib.int32), shared_name="q_g")
    q_g_1.queue_ref.op.run()
    with self.assertRaisesOpError("component types"):
      q_g_2.queue_ref.op.run()

    # Mismatched random seeds.
    q_h_1 = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.float32, seed=12, shared_name="q_h")
    q_h_2 = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.float32, seed=21, shared_name="q_h")
    q_h_1.queue_ref.op.run()
    with self.assertRaisesOpError("random seeds"):
      q_h_2.queue_ref.op.run()
def testSelectQueue(self):
  """from_list dispatches enqueue/dequeue to a dynamically chosen queue."""
  with self.test_session():
    queue_count = 10
    queues = [
        data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
        for _ in xrange(queue_count)
    ]
    # Enqueue/Dequeue through a randomly selected queue each round.
    for _ in xrange(20):
      chosen = data_flow_ops.RandomShuffleQueue.from_list(
          np.random.randint(queue_count), queues)
      chosen.enqueue((10.,)).run()
      self.assertEqual(chosen.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
  """from_list with an out-of-range index fails at run time."""
  with self.test_session():
    first = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
    second = data_flow_ops.RandomShuffleQueue(15, 0, dtypes_lib.float32)
    bad_selection = data_flow_ops.RandomShuffleQueue.from_list(
        3, [first, second])
    with self.assertRaisesOpError("is not in"):
      bad_selection.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
  """Run `dequeue_op`, expecting it to be cancelled (e.g. session close)."""
  with self.assertRaisesOpError("was cancelled"):
    sess.run(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
  """Run `dequeue_many_op`, expecting it to be cancelled."""
  with self.assertRaisesOpError("was cancelled"):
    sess.run(dequeue_many_op)
def _blockingDequeueUpTo(self, sess, dequeue_up_to_op):
  """Run `dequeue_up_to_op`, expecting it to be cancelled."""
  with self.assertRaisesOpError("was cancelled"):
    sess.run(dequeue_up_to_op)
def _blockingEnqueue(self, sess, enqueue_op):
  """Run `enqueue_op`, expecting it to be cancelled."""
  with self.assertRaisesOpError("was cancelled"):
    sess.run(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
  """Run `enqueue_many_op`, expecting it to be cancelled."""
  with self.assertRaisesOpError("was cancelled"):
    sess.run(enqueue_many_op)
def testResetOfBlockingOperation(self):
  """Closing the session cancels every blocked queue operation."""
  with self.test_session() as sess:
    # An empty queue: all dequeue flavors will block.
    q_empty = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, (
        (),))
    dequeue_op = q_empty.dequeue()
    dequeue_many_op = q_empty.dequeue_many(1)
    dequeue_up_to_op = q_empty.dequeue_up_to(1)

    # A full queue: all enqueue flavors will block.
    q_full = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, ((),))
    sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
    enqueue_op = q_full.enqueue((6.0,))
    enqueue_many_op = q_full.enqueue_many(([6.0],))

    threads = [
        self.checkedThread(
            self._blockingDequeue, args=(sess, dequeue_op)),
        self.checkedThread(
            self._blockingDequeueMany, args=(sess, dequeue_many_op)),
        self.checkedThread(
            self._blockingDequeueUpTo, args=(sess, dequeue_up_to_op)),
        self.checkedThread(
            self._blockingEnqueue, args=(sess, enqueue_op)),
        self.checkedThread(
            self._blockingEnqueueMany, args=(sess, enqueue_many_op))
    ]
    for t in threads:
      t.start()
    time.sleep(0.1)
    sess.close()  # Will cancel the blocked operations.
    for t in threads:
      t.join()
def testDequeueManyInDifferentOrders(self):
  """Different seeds (and pre/post close) yield four distinct orders."""
  with self.test_session():
    # Specify seeds to make the test deterministic
    # (https://en.wikipedia.org/wiki/Taxicab_number).
    q1 = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.int32, ((),), seed=1729)
    q2 = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.int32, ((),), seed=87539319)
    enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
    enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
    deq1 = q1.dequeue_many(5)
    deq2 = q2.dequeue_many(5)

    enq1.run()
    enq1.run()
    enq2.run()
    enq2.run()

    results = [[], [], [], []]

    results[0].extend(deq1.eval())
    results[1].extend(deq2.eval())

    q1.close().run()
    q2.close().run()

    results[2].extend(deq1.eval())
    results[3].extend(deq2.eval())

    # No two should match
    for i in range(1, 4):
      for j in range(i):
        self.assertNotEqual(results[i], results[j])
def testDequeueUpToInDifferentOrders(self):
  """dequeue_up_to orders differ across seeds and across close."""
  with self.test_session():
    # Specify seeds to make the test deterministic
    # (https://en.wikipedia.org/wiki/Taxicab_number).
    q1 = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.int32, ((),), seed=1729)
    q2 = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.int32, ((),), seed=87539319)
    enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
    enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
    deq1 = q1.dequeue_up_to(5)
    deq2 = q2.dequeue_up_to(5)

    enq1.run()
    enq1.run()
    enq2.run()
    enq2.run()

    results = [[], [], [], []]

    results[0].extend(deq1.eval())
    results[1].extend(deq2.eval())

    q1.close().run()
    q2.close().run()

    results[2].extend(deq1.eval())
    results[3].extend(deq2.eval())

    # No two should match
    for i in range(1, 4):
      for j in range(i):
        self.assertNotEqual(results[i], results[j])
def testDequeueInDifferentOrders(self):
  """Single-element dequeue orders differ across seeds and across close."""
  with self.test_session():
    # Specify seeds to make the test deterministic
    # (https://en.wikipedia.org/wiki/Taxicab_number).
    q1 = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.int32, ((),), seed=1729)
    q2 = data_flow_ops.RandomShuffleQueue(
        10, 5, dtypes_lib.int32, ((),), seed=87539319)
    enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
    enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
    deq1 = q1.dequeue()
    deq2 = q2.dequeue()

    enq1.run()
    enq1.run()
    enq2.run()
    enq2.run()

    results = [[], [], [], []]

    for _ in range(5):
      results[0].append(deq1.eval())
      results[1].append(deq2.eval())

    q1.close().run()
    q2.close().run()

    for _ in range(5):
      results[2].append(deq1.eval())
      results[3].append(deq2.eval())

    # No two should match
    for i in range(1, 4):
      for j in range(i):
        self.assertNotEqual(results[i], results[j])
def testBigEnqueueMany(self):
  """enqueue_many larger than capacity completes incrementally."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.int32, ((),))
    elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    enq = q.enqueue_many((elem,))
    deq = q.dequeue()
    size_op = q.size()

    enq_done = []

    def blocking_enqueue():
      enq_done.append(False)
      # This will fill the queue and then block until enough dequeues happen.
      sess.run(enq)
      enq_done.append(True)

    thread = self.checkedThread(target=blocking_enqueue)
    thread.start()

    # The enqueue should start and then block.
    results = []
    results.append(deq.eval())  # Will only complete after the enqueue starts.
    self.assertEqual(len(enq_done), 1)
    self.assertEqual(sess.run(size_op), 5)

    for _ in range(3):
      results.append(deq.eval())

    time.sleep(0.1)
    self.assertEqual(len(enq_done), 1)
    self.assertEqual(sess.run(size_op), 5)

    # This dequeue will unblock the thread.
    results.append(deq.eval())
    time.sleep(0.1)
    self.assertEqual(len(enq_done), 2)
    thread.join()

    for i in range(5):
      self.assertEqual(size_op.eval(), 5 - i)
      results.append(deq.eval())
      self.assertEqual(size_op.eval(), 5 - i - 1)

    self.assertItemsEqual(elem, results)
def testBigDequeueMany(self):
  """dequeue_many(4) on a capacity-2 queue completes as enqueues arrive."""
  with self.test_session() as sess:
    q = data_flow_ops.RandomShuffleQueue(2, 0, dtypes_lib.int32, ((),))
    elem = np.arange(4, dtype=np.int32)
    enq_list = [q.enqueue((e,)) for e in elem]
    deq = q.dequeue_many(4)

    results = []

    def blocking_dequeue():
      # Will only complete after 4 enqueues complete.
      results.extend(sess.run(deq))

    thread = self.checkedThread(target=blocking_dequeue)
    thread.start()
    # The dequeue should start and then block.
    for enq in enq_list:
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      self.assertEqual(len(results), 0)
      sess.run(enq)

    # Enough enqueued to unblock the dequeue
    thread.join()
    self.assertItemsEqual(elem, results)
# Standard TensorFlow test runner entry point.
if __name__ == "__main__":
  test.main()
|
wevote/WebAppPublic | refs/heads/master | star/views_admin.py | 1 | # star/views_admin.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .models import StarItem
from .serializers import StarItemSerializer
from rest_framework.views import APIView
from rest_framework.response import Response
import wevote_functions.admin
logger = wevote_functions.admin.get_logger(__name__)
# This page does not need to be protected.
# NOTE: @login_required() throws an error. Needs to be figured out if we ever want to secure this page.
class ExportStarItemDataView(APIView):
    """Read-only endpoint returning every StarItem row as serialized JSON."""

    def get(self, request, format=None):
        star_items = StarItem.objects.all()
        payload = StarItemSerializer(star_items, many=True).data
        return Response(payload)
|
Bionetbook/bionetbook | refs/heads/master | bnbapp/bionetbook/profiles/tests/test_middleware.py | 2 | from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from profiles.models import Profile
class ProfileMiddlewareTests(TestCase):
    """Check that logged-in users without a profile are prompted to create one."""

    def setUp(self):
        # A user with no associated Profile record.
        self.user = User.objects.create_user(
            username="testuser",
            password="password",
            email="test@example.com",
        )

    def test_my_profile_existence(self):
        self.assertTrue(
            self.client.login(username="testuser", password="password"))
        response = self.client.get(reverse("dashboard"), follow=True)
        self.assertContains(response, "Please fill out your profile.")
yongtang/tensorflow | refs/heads/master | tensorflow/python/kernel_tests/sparse_ops_test.py | 5 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Python ops defined in sparse_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
  """Turn a dense ndarray into a SparseTensor by masking small entries.

  NOTE: mutates `x` in place (entries below `thresh` are zeroed).

  Returns:
    A `(SparseTensor, nnz)` pair, where `nnz` is the number of kept values.
  """
  x[x < thresh] = 0
  nonzero = np.where(x)
  sp_indices = np.vstack(nonzero).astype(index_dtype).T
  sp_values = x[nonzero]
  sp = sparse_tensor.SparseTensor(
      indices=sp_indices, values=sp_values, dense_shape=x.shape)
  return sp, len(sp_values)
class SparseToIndicatorTest(test_util.TensorFlowTestCase):
  """Tests for sparse_ops.sparse_to_indicator.

  Fix: `np.bool` (a deprecated alias of the builtin `bool`, removed in
  NumPy 1.24) is replaced with `bool`; behavior is identical.
  """

  def _SparseTensor_5x6(self, dtype):
    """Rank-2 SparseTensor whose values are used as indicator column ids."""
    ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
    val = np.array([0, 10, 13, 14, 32, 33])
    shape = np.array([5, 6])
    return sparse_tensor.SparseTensor(
        constant_op.constant(ind, dtypes.int64),
        constant_op.constant(val, dtype),
        constant_op.constant(shape, dtypes.int64))

  def _SparseTensor_2x3x4(self, dtype):
    """Rank-3 SparseTensor; exercises the higher-rank indicator path."""
    # Includes two entries with the form [1, 1, x] : 150.
    ind = np.array([[0, 0, 1], [0, 1, 0], [0, 1, 2], [1, 0, 3], [1, 1, 0],
                    [1, 1, 1], [1, 1, 2], [1, 2, 2]])
    val = np.array([1, 10, 12, 103, 150, 149, 150, 122])
    shape = np.array([2, 3, 4])
    return sparse_tensor.SparseTensor(
        constant_op.constant(ind, dtypes.int64),
        constant_op.constant(val, dtype),
        constant_op.constant(shape, dtypes.int64))

  def testInt32(self):
    with test_util.force_cpu():
      sp_input = self._SparseTensor_5x6(dtypes.int32)
      output = sparse_ops.sparse_to_indicator(sp_input, 50)
      expected_output = np.zeros((5, 50), dtype=bool)
      expected_trues = ((0, 0), (1, 10), (1, 13), (1, 14), (3, 32), (3, 33))
      for expected_true in expected_trues:
        expected_output[expected_true] = True
      self.assertAllEqual(output, expected_output)

  def testInt64(self):
    with test_util.force_cpu():
      sp_input = self._SparseTensor_5x6(dtypes.int64)
      output = sparse_ops.sparse_to_indicator(sp_input, 50)
      expected_output = np.zeros((5, 50), dtype=bool)
      expected_trues = [(0, 0), (1, 10), (1, 13), (1, 14), (3, 32), (3, 33)]
      for expected_true in expected_trues:
        expected_output[expected_true] = True
      self.assertAllEqual(output, expected_output)

  def testHigherRank(self):
    with test_util.force_cpu():
      sp_input = self._SparseTensor_2x3x4(dtypes.int64)
      output = sparse_ops.sparse_to_indicator(sp_input, 200)
      expected_output = np.zeros((2, 3, 200), dtype=bool)
      # NOTE: the duplicate value 150 maps to a single True entry.
      expected_trues = [(0, 0, 1), (0, 1, 10), (0, 1, 12), (1, 0, 103),
                        (1, 1, 149), (1, 1, 150), (1, 2, 122)]
      for expected_true in expected_trues:
        expected_output[expected_true] = True
      self.assertAllEqual(output, expected_output)
class SparseMergeTest(test_util.TensorFlowTestCase):
  """Tests for sparse_ops.sparse_merge (ids + values -> one SparseTensor)."""

  def _SparseTensorValue_3x50(self, indices_dtype, values_dtype):
    """Build parallel (ids, values) SparseTensorValues with unsorted entries."""
    # NOTE: This input is intentionally not sorted to validate the
    # already_sorted flag below.
    ind = np.array([[0, 0], [1, 0], [1, 2], [2, 0], [2, 1], [1, 1]])
    # NB: these are not sorted
    indices = np.array([0, 13, 10, 33, 32, 14])
    values = np.array([-3, 4, 1, 9, 5, 1])
    shape = np.array([3, 3])
    indices = sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64),
        np.array(indices, indices_dtype), np.array(shape, np.int64))
    values = sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64),
        np.array(values, values_dtype), np.array(shape, np.int64))
    return indices, values

  def _SparseTensor_3x50(self, indices_dtype, values_dtype):
    """Same fixture as above, but as SparseTensor objects."""
    indices, values = self._SparseTensorValue_3x50(indices_dtype, values_dtype)
    return (sparse_tensor.SparseTensor.from_value(indices),
            sparse_tensor.SparseTensor.from_value(values))

  def _AssertResultsSorted(self, output, vocab_size):
    # Expected result when sparse_merge re-orders into canonical order.
    self.assertAllEqual(output.indices,
                        [[0, 0], [1, 10], [1, 13], [1, 14], [2, 32], [2, 33]])
    self.assertAllEqual(output.values, [-3, 1, 4, 1, 5, 9])
    self.assertAllEqual(output.dense_shape, [3, vocab_size])

  def _AssertResultsNotSorted(self, output, vocab_size):
    # Expected result when already_sorted=True preserves the input order.
    self.assertAllEqual(output.indices,
                        [[0, 0], [1, 13], [1, 10], [2, 33], [2, 32], [1, 14]])
    self.assertAllEqual(output.values, [-3, 4, 1, 9, 5, 1])
    self.assertAllEqual(output.dense_shape, [3, vocab_size])

  def testInt32AndFloat32(self):
    """sparse_merge accepts SparseTensorValue and SparseTensor inputs alike."""
    vocab_size = 50
    indices_v, values_v = self._SparseTensorValue_3x50(np.int32, np.float32)
    with test_util.force_cpu():
      for indices in (indices_v,
                      sparse_tensor.SparseTensor.from_value(indices_v)):
        for values in (values_v,
                       sparse_tensor.SparseTensor.from_value(values_v)):
          sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
          output = self.evaluate(sp_output)
          self._AssertResultsSorted(output, vocab_size)

  def testInt64AndFloat32(self):
    vocab_size = 50
    with test_util.force_cpu():
      indices, values = self._SparseTensor_3x50(np.int64, np.float32)
      sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
      output = self.evaluate(sp_output)
      self._AssertResultsSorted(output, vocab_size)

  def testInt64AndFloat64(self):
    vocab_size = 50
    with test_util.force_cpu():
      indices, values = self._SparseTensor_3x50(np.int64, np.float64)
      sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
      output = self.evaluate(sp_output)
      self._AssertResultsSorted(output, vocab_size)

  def testInt32AndFloat32NonCanonicalOrder(self):
    """already_sorted=True must skip reordering, keeping input order."""
    vocab_size = 50
    with test_util.force_cpu():
      indices, values = self._SparseTensor_3x50(np.int32, np.float32)
      sp_output = sparse_ops.sparse_merge(
          indices, values, vocab_size, already_sorted=True)
      output = self.evaluate(sp_output)
      self._AssertResultsNotSorted(output, vocab_size)

  def testInt64AndFloat32NonCanonicalOrder(self):
    vocab_size = 50
    with test_util.force_cpu():
      indices, values = self._SparseTensor_3x50(np.int64, np.float32)
      sp_output = sparse_ops.sparse_merge(
          indices, values, vocab_size, already_sorted=True)
      output = self.evaluate(sp_output)
      self._AssertResultsNotSorted(output, vocab_size)

  def testInt64AndFloat64NonCanonicalOrder(self):
    """vocab_size may also be passed as a Tensor, not just a Python int."""
    vocab_size = 50
    vocab_size_tensor = constant_op.constant(vocab_size, dtypes.int64)
    with test_util.force_cpu():
      indices, values = self._SparseTensor_3x50(np.int64, np.float64)
      sp_output = sparse_ops.sparse_merge(
          indices, values, vocab_size_tensor, already_sorted=True)
      output = self.evaluate(sp_output)
      self._AssertResultsNotSorted(output, vocab_size)

  def testShouldSetLastDimensionInDynamicShape(self):
    """Even with a feed-time dense_shape, the vocab dim is statically known."""
    with ops.Graph().as_default():
      shape = constant_op.constant([2, 2], dtype=dtypes.int64)
      dynamic_shape = array_ops.placeholder_with_default(shape, shape=[2])
      ids = sparse_tensor.SparseTensor(
          indices=[[0, 0], [0, 1]],
          values=[1, 3],
          dense_shape=dynamic_shape)
      values = sparse_tensor.SparseTensor(
          indices=[[0, 0], [0, 1]],
          values=[0.4, 0.7],
          dense_shape=dynamic_shape)
      merged = sparse_ops.sparse_merge(
          sp_ids=ids, sp_values=values, vocab_size=5)
      self.assertEqual(5, merged.get_shape()[1])
class SparseMergeHighDimTest(test_util.TensorFlowTestCase):
  """Tests for sparse_ops.sparse_merge with a list of id tensors
  (multi-dimensional vocab)."""

  def _SparseTensor_3x50(self, indices_dtype, values_dtype):
    """Build two parallel id tensors plus a values tensor (all unsorted)."""
    # NOTE: This input is intentionally not sorted to validate the
    # already_sorted flag below.
    ind = np.array([[0, 0], [1, 0], [1, 2], [2, 0], [2, 1], [1, 1]])
    # NB: these are not sorted
    indices0 = np.array([0, 13, 10, 33, 32, 14])
    indices1 = np.array([12, 4, 0, 0, 1, 30])
    values = np.array([-3, 4, 1, 9, 5, 1])
    shape = np.array([3, 3])
    indices0 = sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64),
        np.array(indices0, indices_dtype), np.array(shape, np.int64))
    indices1 = sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64),
        np.array(indices1, indices_dtype), np.array(shape, np.int64))
    values = sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64),
        np.array(values, values_dtype), np.array(shape, np.int64))
    return ([sparse_tensor.SparseTensor.from_value(indices0),
             sparse_tensor.SparseTensor.from_value(indices1)],
            sparse_tensor.SparseTensor.from_value(values))

  def _AssertResultsSorted(self, output, vocab_size):
    # Indices combine row, id0 and id1; vocab_size is a 2-element list.
    self.assertAllEqual(
        output.indices,
        [[0, 0, 12], [1, 10, 0], [1, 13, 4], [1, 14, 30], [2, 32, 1],
         [2, 33, 0]])
    self.assertAllEqual(output.values, [-3, 1, 4, 1, 5, 9])
    self.assertAllEqual(output.dense_shape, [3] + vocab_size)

  def testInt64AndFloat32(self):
    vocab_size = [50, 31]
    with test_util.force_cpu():
      indices, values = self._SparseTensor_3x50(np.int64, np.float32)
      sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
      output = self.evaluate(sp_output)
      self._AssertResultsSorted(output, vocab_size)

  def testInt64AndFloat64(self):
    vocab_size = [50, 31]
    with test_util.force_cpu():
      indices, values = self._SparseTensor_3x50(np.int64, np.float64)
      sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
      output = self.evaluate(sp_output)
      self._AssertResultsSorted(output, vocab_size)

  def testInt64AndFloat64Shape(self):
    # Same as above but with a tighter second vocab dimension (30 vs 31).
    vocab_size = [50, 30]
    with test_util.force_cpu():
      indices, values = self._SparseTensor_3x50(np.int64, np.float64)
      sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
      output = self.evaluate(sp_output)
      self._AssertResultsSorted(output, vocab_size)
class SparseRetainTest(test_util.TensorFlowTestCase):
  """Tests for sparse_ops.sparse_retain.

  Fix: `np.bool` (a deprecated alias of the builtin `bool`, removed in
  NumPy 1.24) is replaced with `bool`; behavior is identical.
  """

  def _SparseTensorValue_5x6(self):
    """Canonical 5x6 fixture with six nonzero entries."""
    ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
    val = np.array([0, 10, 13, 14, 32, 33])
    shape = np.array([5, 6])
    return sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64),
        np.array(val, np.int32), np.array(shape, np.int64))

  def _SparseTensor_5x6(self):
    return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_5x6())

  def testBasic(self):
    """Keeping a subset of entries preserves their indices and values."""
    with test_util.force_cpu():
      for sp_input in (self._SparseTensorValue_5x6(), self._SparseTensor_5x6()):
        to_retain = np.array([1, 0, 0, 1, 1, 0], dtype=bool)
        sp_output = sparse_ops.sparse_retain(sp_input, to_retain)
        output = self.evaluate(sp_output)
        self.assertAllEqual(output.indices, [[0, 0], [1, 4], [3, 2]])
        self.assertAllEqual(output.values, [0, 14, 32])
        self.assertAllEqual(output.dense_shape, [5, 6])

  def testRetainNone(self):
    """Retaining nothing yields an empty tensor with the original shape."""
    with test_util.force_cpu():
      sp_input = self._SparseTensor_5x6()
      to_retain = np.zeros((6,), dtype=bool)
      sp_output = sparse_ops.sparse_retain(sp_input, to_retain)
      output = self.evaluate(sp_output)
      self.assertAllEqual(output.indices, np.array([]).reshape((0, 2)))
      self.assertAllEqual(output.values, [])
      self.assertAllEqual(output.dense_shape, [5, 6])

  def testMismatchedRetainShape(self):
    """to_retain must have one element per nonzero entry (6 here, not 5)."""
    with test_util.force_cpu():
      sp_input = self._SparseTensor_5x6()
      to_retain = np.array([1, 0, 0, 1, 0], dtype=bool)
      with self.assertRaises(ValueError):
        sparse_ops.sparse_retain(sp_input, to_retain)
class SparseResetShapeTest(test_util.TensorFlowTestCase):
  """Tests for sparse_ops.sparse_reset_shape (re-shape / tight bounding box)."""

  # Shared 2x5x6 fixture: indices, values, and dense shape.
  _IND_2_5_6 = np.array(
      [[0, 0, 0], [0, 1, 0], [0, 1, 3], [1, 1, 4], [1, 3, 2], [1, 3, 3]],
      dtype=np.int64)
  _VAL_2_5_6 = np.array([0, 10, 13, 14, 32, 33], dtype=np.int32)
  _SHP_2_5_6 = np.array([2, 5, 6], dtype=np.int64)

  def _SparseTensor_2x5x6(self):
    return sparse_tensor.SparseTensor(
        constant_op.constant(self._IND_2_5_6, dtypes.int64),
        constant_op.constant(self._VAL_2_5_6, dtypes.int32),
        constant_op.constant(self._SHP_2_5_6, dtypes.int64))

  def _SparseTensor_2x5x6_Empty(self):
    # Same dense shape as above but with zero nonzero entries.
    return sparse_tensor.SparseTensor(
        constant_op.constant(
            np.empty(shape=[0, 3], dtype=np.int64), dtypes.int64),
        constant_op.constant(np.empty(shape=[0], dtype=np.int32), dtypes.int32),
        constant_op.constant(self._SHP_2_5_6, dtypes.int64))

  def _SparseTensorValue_2x5x6(self):
    return sparse_tensor.SparseTensorValue(self._IND_2_5_6, self._VAL_2_5_6,
                                           self._SHP_2_5_6)

  def testStaticShapeInfoPreservedWhenNewShapeIsProvidedAndStatic(self):
    sp_input = self._SparseTensor_2x5x6()
    new_shape = np.array([3, 6, 7], dtype=np.int64)
    sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
    self.assertAllEqual([3, 6, 7], sp_output.get_shape())

  def testBasic(self):
    """Growing the shape keeps indices and values untouched."""
    with test_util.force_cpu():
      sp_input = self._SparseTensor_2x5x6()
      new_shape = np.array([3, 6, 7], dtype=np.int64)
      sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
      output = self.evaluate(sp_output)
      self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
                                           [1, 1, 4], [1, 3, 2], [1, 3, 3]])
      self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
      self.assertAllEqual(output.dense_shape, [3, 6, 7])

  def testInputUnavailableInGraphConstructionOk(self):
    # A SparseTensorValue (no graph tensors) also works as input.
    with test_util.force_cpu():
      sp_input = self._SparseTensorValue_2x5x6()
      new_shape = np.array([3, 6, 7], dtype=np.int64)
      sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
      output = self.evaluate(sp_output)
      self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
                                           [1, 1, 4], [1, 3, 2], [1, 3, 3]])
      self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
      self.assertAllEqual(output.dense_shape, [3, 6, 7])

  @test_util.run_deprecated_v1
  def testFeedInputUnavailableInGraphConstructionOk(self):
    # Input arrives only at feed time through a sparse_placeholder.
    with self.session(use_gpu=False) as sess:
      sp_input = array_ops.sparse_placeholder(dtype=dtypes.int32)
      new_shape = np.array([3, 6, 7], dtype=np.int64)
      sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
      output = sess.run(sp_output,
                        feed_dict={sp_input: self._SparseTensorValue_2x5x6()})
      self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
                                           [1, 1, 4], [1, 3, 2], [1, 3, 3]])
      self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
      self.assertAllEqual(output.dense_shape, [3, 6, 7])

  def testTightBoundingBox(self):
    """With no new_shape, the result shrinks to the tight bounding box."""
    with test_util.force_cpu():
      sp_input = self._SparseTensor_2x5x6()
      sp_output = sparse_ops.sparse_reset_shape(sp_input)
      output = self.evaluate(sp_output)
      self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
                                           [1, 1, 4], [1, 3, 2], [1, 3, 3]])
      self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
      self.assertAllEqual(output.dense_shape, [2, 4, 5])

  def testTightBoundingBoxEmpty(self):
    # Bounding box of an empty tensor collapses every dimension to 0.
    with test_util.force_cpu():
      sp_input = self._SparseTensor_2x5x6_Empty()
      sp_output = sparse_ops.sparse_reset_shape(sp_input)
      output = self.evaluate(sp_output)
      self.assertAllEqual(output.indices.shape, [0, 3])
      self.assertAllEqual(output.values.shape, [0])
      self.assertAllEqual(output.dense_shape, [0, 0, 0])

  def testInvalidRank(self):
    # new_shape must have the same rank as the input (3 here, not 2).
    with test_util.force_cpu():
      sp_input = self._SparseTensor_2x5x6()
      new_shape = np.array([3, 7], dtype=np.int64)
      with self.assertRaises(ValueError):
        sparse_ops.sparse_reset_shape(sp_input, new_shape)

  @test_util.run_deprecated_v1
  def testInvalidRankNewShapeUnavailableInGraphConstruction(self):
    # Rank mismatch only detectable at run time -> op-level error.
    with self.session(use_gpu=False) as sess:
      new_shape = array_ops.placeholder(dtype=dtypes.int64)
      sp_input = self._SparseTensor_2x5x6()
      out = sparse_ops.sparse_reset_shape(sp_input, new_shape)
      with self.assertRaisesOpError("x == y did not hold element-wise"):
        sess.run(out, feed_dict={new_shape: np.array([3, 7], dtype=np.int64)})

  def testInvalidDimensionSizeStatic(self):
    # Shrinking a dimension below existing indices fails at graph build time.
    sp_input = self._SparseTensor_2x5x6()
    new_shape = np.array([3, 7, 5], dtype=np.int64)
    with self.assertRaisesRegex(ValueError, "should have dimension sizes"):
      sparse_ops.sparse_reset_shape(sp_input, new_shape)

  @test_util.run_deprecated_v1
  def testInvalidDimensionSizeDynamic(self):
    # The same failure surfaces at run time when new_shape is fed.
    with self.session(use_gpu=False) as sess:
      sp_input = self._SparseTensor_2x5x6()
      new_shape = array_ops.placeholder(dtype=dtypes.int32)
      out = sparse_ops.sparse_reset_shape(sp_input, new_shape)
      with self.assertRaisesOpError("x <= y did not hold element-wise"):
        sess.run(out, feed_dict={new_shape: [3, 7, 5]})

  @test_util.run_deprecated_v1
  def testInvalidDimensionSizeInputUnavailableInGraphConstruction(self):
    sp_input = array_ops.sparse_placeholder(dtype=dtypes.int32)
    with self.session(use_gpu=False) as sess:
      new_shape = np.array([3, 7, 5], dtype=np.int64)
      out = sparse_ops.sparse_reset_shape(sp_input, new_shape)
      with self.assertRaisesOpError("x <= y did not hold element-wise"):
        sess.run(out, feed_dict={sp_input: self._SparseTensorValue_2x5x6()})
class SparseFillEmptyRowsTest(test_util.TensorFlowTestCase):
  """Tests for sparse_ops.sparse_fill_empty_rows.

  Fix: `np.bool` (a deprecated alias of the builtin `bool`, removed in
  NumPy 1.24) is replaced with `bool`; behavior is identical.
  """

  def _SparseTensorValue_5x6(self, dtype=np.int32):
    """5x6 fixture; rows 2 and 4 are empty."""
    ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
    val = np.array([0, 10, 13, 14, 32, 33])
    shape = np.array([5, 6])
    return sparse_tensor.SparseTensorValue(
        np.array(ind, np.int64), np.array(val, dtype), np.array(
            shape, np.int64))

  def _SparseTensor_5x6(self):
    return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_5x6())

  def _SparseTensor_String5x6(self):
    """String-valued variant of the 5x6 fixture."""
    ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
    val = np.array(["a", "b", "c", "d", "e", "f"])
    shape = np.array([5, 6])
    return sparse_tensor.SparseTensor(
        constant_op.constant(ind, dtypes.int64),
        constant_op.constant(val, dtypes.string),
        constant_op.constant(shape, dtypes.int64))

  def _SparseTensor_2x6(self):
    """2x6 fixture with no empty rows."""
    ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4]])
    val = np.array([0, 10, 13, 14])
    shape = np.array([2, 6])
    return sparse_tensor.SparseTensor(
        constant_op.constant(ind, dtypes.int64),
        constant_op.constant(val, dtypes.int32),
        constant_op.constant(shape, dtypes.int64))

  def testFillNumber(self):
    """Empty rows gain a single default entry at column 0."""
    with test_util.use_gpu():
      for sp_input in (self._SparseTensorValue_5x6(), self._SparseTensor_5x6()):
        sp_output, empty_row_indicator = (
            sparse_ops.sparse_fill_empty_rows(sp_input, -1))
        output, empty_row_indicator_out = self.evaluate(
            [sp_output, empty_row_indicator])
        self.assertAllEqual(
            output.indices,
            [[0, 0], [1, 0], [1, 3], [1, 4], [2, 0], [3, 2], [3, 3], [4, 0]])
        self.assertAllEqual(output.values, [0, 10, 13, 14, -1, 32, 33, -1])
        self.assertAllEqual(output.dense_shape, [5, 6])
        self.assertAllEqual(empty_row_indicator_out,
                            np.array([0, 0, 1, 0, 1]).astype(bool))

  @test_util.run_deprecated_v1
  def testFillFloat(self):
    """Float fill: also checks gradients w.r.t. values and default_value."""
    with self.session():
      values = constant_op.constant(
          [0.0, 10.0, 13.0, 14.0, 32.0, 33.0], dtype=dtypes.float64)
      default_value = constant_op.constant(-1.0, dtype=dtypes.float64)
      sp_input = sparse_tensor.SparseTensorValue(
          indices=np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]]),
          values=values,
          dense_shape=np.array([5, 6]))
      sp_output, empty_row_indicator = (sparse_ops.sparse_fill_empty_rows(
          sp_input, default_value))
      output, empty_row_indicator_out = self.evaluate(
          [sp_output, empty_row_indicator])
      self.assertAllEqual(output.indices, [[0, 0], [1, 0], [1, 3], [1, 4],
                                           [2, 0], [3, 2], [3, 3], [4, 0]])
      self.assertAllClose(output.values, [0, 10, 13, 14, -1, 32, 33, -1])
      self.assertAllEqual(output.dense_shape, [5, 6])
      self.assertAllEqual(empty_row_indicator_out,
                          np.array([0, 0, 1, 0, 1]).astype(bool))
      values_grad_err = gradient_checker.compute_gradient_error(
          values, values.shape.as_list(), sp_output.values, [8], delta=1e-8)
      self.assertGreater(values_grad_err, 0)
      self.assertLess(values_grad_err, 1e-8)
      default_value_grad_err = gradient_checker.compute_gradient_error(
          default_value,
          default_value.shape.as_list(),
          sp_output.values, [8],
          delta=1e-8)
      self.assertGreater(default_value_grad_err, 0)
      self.assertLess(default_value_grad_err, 1e-8)

  def testFillString(self):
    with test_util.force_cpu():
      sp_input = self._SparseTensor_String5x6()
      sp_output, empty_row_indicator = (
          sparse_ops.sparse_fill_empty_rows(sp_input, ""))
      output, empty_row_indicator_out = self.evaluate(
          [sp_output, empty_row_indicator])
      self.assertAllEqual(
          output.indices,
          [[0, 0], [1, 0], [1, 3], [1, 4], [2, 0], [3, 2], [3, 3], [4, 0]])
      self.assertAllEqual(output.values,
                          [b"a", b"b", b"c", b"d", b"", b"e", b"f", b""])
      self.assertAllEqual(output.dense_shape, [5, 6])
      self.assertAllEqual(empty_row_indicator_out,
                          np.array([0, 0, 1, 0, 1]).astype(bool))

  def testNoEmptyRows(self):
    """A tensor without empty rows passes through unchanged."""
    with test_util.use_gpu():
      sp_input = self._SparseTensor_2x6()
      sp_output, empty_row_indicator = (
          sparse_ops.sparse_fill_empty_rows(sp_input, -1))
      output, empty_row_indicator_out = self.evaluate(
          [sp_output, empty_row_indicator])
      self.assertAllEqual(output.indices, [[0, 0], [1, 0], [1, 3], [1, 4]])
      self.assertAllEqual(output.values, [0, 10, 13, 14])
      self.assertAllEqual(output.dense_shape, [2, 6])
      self.assertAllEqual(empty_row_indicator_out, np.zeros(2).astype(bool))

  def testNoEmptyRowsAndUnordered(self):
    """Unordered input with no empty rows is returned in canonical order."""
    with test_util.use_gpu():
      sp_input = sparse_tensor.SparseTensor(
          indices=np.array([[1, 2], [1, 3], [0, 1], [0, 3]]),
          values=np.array([1, 3, 2, 4]),
          dense_shape=np.array([2, 5]))
      sp_output, empty_row_indicator = (
          sparse_ops.sparse_fill_empty_rows(sp_input, -1))
      output, empty_row_indicator_out = self.evaluate(
          [sp_output, empty_row_indicator])
      self.assertAllEqual(output.indices, [[0, 1], [0, 3], [1, 2], [1, 3]])
      self.assertAllEqual(output.values, [2, 4, 1, 3])
      self.assertAllEqual(output.dense_shape, [2, 5])
      self.assertAllEqual(empty_row_indicator_out, np.zeros(2).astype(bool))

  def testUnordered(self):
    """Unordered input with an empty middle row."""
    with test_util.use_gpu():
      sp_input = sparse_tensor.SparseTensor(
          indices=np.array([[2, 3], [2, 2], [0, 1], [0, 3]]),
          values=np.array([1, 3, 2, 4]),
          dense_shape=np.array([3, 5]))
      sp_output, empty_row_indicator = (
          sparse_ops.sparse_fill_empty_rows(sp_input, -1))
      output, empty_row_indicator_out = self.evaluate(
          [sp_output, empty_row_indicator])
      self.assertAllEqual(output.indices,
                          [[0, 1], [0, 3], [1, 0], [2, 3], [2, 2]])
      self.assertAllEqual(output.values, [2, 4, -1, 1, 3])
      self.assertAllEqual(output.dense_shape, [3, 5])
      self.assertAllEqual(empty_row_indicator_out, [False, True, False])

  def testEmptyIndicesTensor(self):
    """All rows empty: every row receives a default entry."""
    with test_util.use_gpu():
      sp_input = sparse_tensor.SparseTensor(
          indices=np.ones([0, 2]),
          values=np.ones([0]),
          dense_shape=np.array([2, 5]))
      sp_output, empty_row_indicator = (
          sparse_ops.sparse_fill_empty_rows(sp_input, -1))
      output, empty_row_indicator_out = self.evaluate(
          [sp_output, empty_row_indicator])
      self.assertAllEqual(output.indices, [[0, 0], [1, 0]])
      self.assertAllEqual(output.values, [-1, -1])
      self.assertAllEqual(output.dense_shape, [2, 5])
      self.assertAllEqual(empty_row_indicator_out, np.ones(2).astype(bool))

  def testEmptyOutput(self):
    """Zero rows in, zero rows out."""
    with test_util.use_gpu():
      sp_input = sparse_tensor.SparseTensor(
          indices=np.ones([0, 2]),
          values=np.ones([0]),
          dense_shape=np.array([0, 3]))
      sp_output, empty_row_indicator = (
          sparse_ops.sparse_fill_empty_rows(sp_input, -1))
      output, empty_row_indicator_out = self.evaluate(
          [sp_output, empty_row_indicator])
      self.assertAllEqual(output.indices, np.ones([0, 2]))
      self.assertAllEqual(output.values, np.ones([0]))
      self.assertAllEqual(output.dense_shape, [0, 3])
      self.assertAllEqual(empty_row_indicator_out, [])

  def testInvalidIndices(self):
    """Row indices outside dense_shape raise InvalidArgumentError."""
    with test_util.use_gpu():
      sp_input = sparse_tensor.SparseTensor(
          indices=np.array([[1, 2], [1, 3], [99, 1], [99, 3]]),
          values=np.array([1, 3, 2, 4]),
          dense_shape=np.array([2, 5]))
      with self.assertRaisesRegex(errors.InvalidArgumentError,
                                  r"indices\(2, 0\) is invalid"):
        self.evaluate(sparse_ops.sparse_fill_empty_rows(sp_input, -1))
class SparseAddTest(test_util.TensorFlowTestCase):
  """sparse_add must work when the values tensor is a tf.Variable."""

  def testValuesInVariable(self):
    var_values = variables.Variable([1], trainable=False, dtype=dtypes.float32)
    sp_input = sparse_tensor.SparseTensor(
        constant_op.constant([[1]], dtype=dtypes.int64),
        var_values,
        constant_op.constant([1], dtype=dtypes.int64))
    # Adding the tensor to itself doubles its values.
    sp_output = sparse_ops.sparse_add(sp_input, sp_input)
    with test_util.force_cpu():
      self.evaluate(variables.global_variables_initializer())
      result = self.evaluate(sp_output)
      self.assertAllEqual(result.values, [2])
class SparseReduceTest(test_util.TensorFlowTestCase):
  """Tests for sparse_reduce_sum/max and their sparse-output variants,
  compared against the equivalent NumPy reductions on the densified input."""

  # [[1, ?, 2]
  #  [?, 3, ?]]
  # where ? is implicitly-zero.
  ind = np.array([[0, 0], [0, 2], [1, 1]]).astype(np.int64)
  vals = np.array([1, 1, 1]).astype(np.int32)
  dense_shape = np.array([2, 3]).astype(np.int64)

  def _compare(self, sp_t, reduction_axes, ndims, keep_dims, do_sum):
    """Check one (axes, keep_dims, sum-vs-max) combination against NumPy."""
    densified = self.evaluate(sparse_ops.sparse_tensor_to_dense(sp_t))
    np_ans = densified
    if reduction_axes is None:
      if do_sum:
        np_ans = np.sum(np_ans, keepdims=keep_dims)
      else:
        np_ans = np.max(np_ans, keepdims=keep_dims)
    else:
      if not isinstance(reduction_axes, list):  # Single scalar.
        reduction_axes = [reduction_axes]
      reduction_axes = np.array(reduction_axes).astype(np.int32)
      # Handles negative axes.
      reduction_axes = (reduction_axes + ndims) % ndims
      # Loop below depends on sorted.
      reduction_axes.sort()
      # Reduce from the innermost axis outward so axis numbers stay valid.
      for ra in reduction_axes.ravel()[::-1]:
        if do_sum:
          np_ans = np.sum(np_ans, axis=ra, keepdims=keep_dims)
        else:
          np_ans = np.max(np_ans, axis=ra, keepdims=keep_dims)
    with self.cached_session():
      if do_sum:
        tf_dense_ans = sparse_ops.sparse_reduce_sum(sp_t, reduction_axes,
                                                    keep_dims)
      else:
        tf_dense_ans = sparse_ops.sparse_reduce_max(sp_t, reduction_axes,
                                                    keep_dims)
      out_dense = self.evaluate(tf_dense_ans)
      if do_sum:
        tf_sparse_ans = sparse_ops.sparse_reduce_sum_sparse(sp_t,
                                                            reduction_axes,
                                                            keep_dims)
      else:
        tf_sparse_ans = sparse_ops.sparse_reduce_max_sparse(sp_t,
                                                            reduction_axes,
                                                            keep_dims)
      # Convert to dense for comparison purposes.
      out_sparse = sparse_ops.sparse_tensor_to_dense(tf_sparse_ans)
    self.assertAllClose(np_ans, out_dense)
    self.assertAllClose(np_ans, out_sparse)

  def _compare_all(self, sp_t, reduction_axes, ndims):
    """Run _compare for all four (keep_dims, do_sum) combinations."""
    self._compare(sp_t, reduction_axes, ndims, False, False)
    self._compare(sp_t, reduction_axes, ndims, False, True)
    self._compare(sp_t, reduction_axes, ndims, True, False)
    self._compare(sp_t, reduction_axes, ndims, True, True)

  # (TODO:b/133851381): Re-enable this test.
  def disabledtestSimpleAndRandomInputs(self):
    if np.__version__ == "1.13.0":
      self.skipTest("numpy 1.13.0 bug")
    sp_t = sparse_tensor.SparseTensor(self.ind, self.vals, self.dense_shape)
    with test_util.force_cpu():
      self._compare_all(sp_t, None, ndims=2)
      self._compare_all(sp_t, 0, ndims=2)
      self._compare_all(sp_t, [1], ndims=2)
      self._compare_all(sp_t, [0, 1], ndims=2)
      self._compare_all(sp_t, [1, 0], ndims=2)
      self._compare_all(sp_t, [-1], ndims=2)
      self._compare_all(sp_t, [1, -2], ndims=2)
    np.random.seed(1618)
    test_dims = [(1618, 1, 11, 7, 1), (1,), (1, 1, 1)]
    with test_util.force_cpu():
      for dims in test_dims:
        sp_t, unused_nnz = _sparsify(np.random.randn(*dims))
        # reduce all using None
        self._compare_all(sp_t, None, ndims=len(dims))
        # reduce random axes from 1D to N-D
        for d in range(1, len(dims) + 1):
          axes = np.random.choice(len(dims), size=d, replace=False).tolist()
          self._compare_all(sp_t, axes, ndims=len(dims))

  def testInvalidAxes(self):
    """Axes outside [-rank, rank) must raise an op error."""
    sp_t = sparse_tensor.SparseTensor(self.ind, self.vals, self.dense_shape)
    with test_util.force_cpu():
      with self.assertRaisesOpError("Invalid reduction dimension -3"):
        self.evaluate(sparse_ops.sparse_reduce_sum(sp_t, -3))
      with self.assertRaisesOpError("Invalid reduction dimension 2"):
        self.evaluate(sparse_ops.sparse_reduce_sum(sp_t, 2))
      with self.assertRaisesOpError("Invalid reduction dimension -3"):
        self.evaluate(sparse_ops.sparse_reduce_max(sp_t, -3))
      with self.assertRaisesOpError("Invalid reduction dimension 2"):
        self.evaluate(sparse_ops.sparse_reduce_max(sp_t, 2))

  @test_util.run_deprecated_v1
  def testGradient(self):
    """Numeric gradient check for sparse_reduce_sum over random axes."""
    np.random.seed(8161)
    test_dims = [(11, 1, 5, 7, 1), (2, 2)]
    with self.session(use_gpu=False):
      for dims in test_dims:
        sp_t, nnz = _sparsify(np.random.randn(*dims))
        # reduce random axes from 1D to N-D
        for d in range(1, len(dims) + 1):
          axes = np.random.choice(len(dims), size=d, replace=False).tolist()
          reduced = sparse_ops.sparse_reduce_sum(sp_t, axes)
          err = gradient_checker.compute_gradient_error(
              sp_t.values, (nnz,), reduced,
              self.evaluate(reduced).shape)
          self.assertLess(err, 1e-3)
        # Tests for negative axes.
        reduced = sparse_ops.sparse_reduce_sum(sp_t, -1)
        err = gradient_checker.compute_gradient_error(
            sp_t.values, (nnz,), reduced,
            self.evaluate(reduced).shape)
        self.assertLess(err, 1e-3)

  def _testSparseReduceShape(self, sp_t, reduction_axes, ndims, keep_dims,
                             do_sum):
    """Check that the op's static shape matches NumPy's result shape."""
    densified = self.evaluate(sparse_ops.sparse_tensor_to_dense(sp_t))
    np_op = np.sum
    tf_op = sparse_ops.sparse_reduce_sum
    if not do_sum:
      np_op = np.max
      tf_op = sparse_ops.sparse_reduce_max
    np_ans = densified
    if reduction_axes is None:
      np_ans = np_op(np_ans, keepdims=keep_dims)
    else:
      if not isinstance(reduction_axes, list):  # Single scalar.
        reduction_axes = [reduction_axes]
      reduction_axes = np.array(reduction_axes).astype(np.int32)
      # Handles negative axes.
      reduction_axes = (reduction_axes + ndims) % ndims
      # Loop below depends on sorted.
      reduction_axes.sort()
      for ra in reduction_axes.ravel()[::-1]:
        np_ans = np_op(np_ans, axis=ra, keepdims=keep_dims)
    tf_ans = tf_op(sp_t, reduction_axes, keep_dims)
    self.assertAllEqual(np_ans.shape, tf_ans.get_shape().as_list())

  # (TODO:b/133851381): Re-enable this test
  def disabledtestSparseReduceSumOrMaxShape(self):
    sp_t = sparse_tensor.SparseTensor(self.ind, self.vals, self.dense_shape)
    with test_util.force_cpu():
      for do_sum in [True, False]:
        for keep_dims in [True, False]:
          self._testSparseReduceShape(sp_t, None, 2, keep_dims, do_sum)
          self._testSparseReduceShape(sp_t, 0, 2, keep_dims, do_sum)
          self._testSparseReduceShape(sp_t, [1], 2, keep_dims, do_sum)
          self._testSparseReduceShape(sp_t, [0, 1], 2, keep_dims, do_sum)
          self._testSparseReduceShape(sp_t, [1, 0], 2, keep_dims, do_sum)
          self._testSparseReduceShape(sp_t, [-1], 2, keep_dims, do_sum)
          self._testSparseReduceShape(sp_t, [1, -2], 2, keep_dims, do_sum)
class SparseMathOpsTest(test_util.TensorFlowTestCase):
  """Tests for element-wise sparse/dense arithmetic (mul, div, cwise add)."""

  def _check(self, result_tensor, result_np, input_sp_t):
    """Assert `result_tensor` is sparse, shares `input_sp_t`'s structure,
    and densifies to `result_np`."""
    self.assertTrue(isinstance(result_tensor, sparse_tensor.SparseTensor))
    self.assertTrue(isinstance(input_sp_t, sparse_tensor.SparseTensor))
    self.assertAllCloseAccordingToType(input_sp_t.indices,
                                       result_tensor.indices)
    self.assertAllCloseAccordingToType(input_sp_t.dense_shape,
                                       result_tensor.dense_shape)
    res_densified = sparse_ops.sparse_to_dense(
        result_tensor.indices, result_tensor.dense_shape, result_tensor.values)
    self.assertAllCloseAccordingToType(result_np, res_densified)

  @test_util.run_deprecated_v1
  def testCwiseShapeValidation(self):
    # Test case for GitHub 24072.
    with test_util.force_cpu():
      a = array_ops.ones([3, 4, 1], dtype=dtypes.int32)
      b = sparse_tensor.SparseTensor([[0, 0, 1, 0], [0, 0, 3, 0]], [10, 20],
                                     [1, 1, 4, 2])
      c = a * b
      with self.assertRaisesRegex(
          errors.InvalidArgumentError,
          "broadcasts dense to sparse only; got incompatible shapes"):
        self.evaluate(c)

  def testCwiseDivAndMul(self):
    """sparse * dense and sparse / dense across dtypes and broadcast shapes."""
    np.random.seed(1618)
    sp_shapes = [(10, 10, 10), (5, 5), (1618,), (3, 3, 7)]
    dense_shapes = [(10, 10, 1), (5, 5), (1,), (1, 7)]
    with test_util.force_cpu():
      for dtype in [np.float32, np.float64, np.int32, np.int64]:
        for sp_shape, dense_shape in zip(sp_shapes, dense_shapes):
          # +1 keeps values away from zero so division is well-defined.
          sp_vals_np = np.random.rand(*sp_shape).astype(dtype) + 1
          dense_vals_np = np.random.rand(*dense_shape).astype(dtype) + 1
          sp_t, unused_nnz = _sparsify(sp_vals_np, thresh=1.5)
          sp_t_densified = sparse_ops.sparse_tensor_to_dense(sp_t)
          dense_t = constant_op.constant(dense_vals_np)
          self._check(sp_t / dense_t, sp_t_densified / dense_vals_np, sp_t)
          # Check commutative.
          self._check(sp_t * dense_t, sp_t_densified * dense_vals_np, sp_t)
          self._check(dense_t * sp_t, sp_t_densified * dense_vals_np, sp_t)
          if dtype in [np.int32, np.int64]:
            res = sp_t / dense_t  # should invoke "__truediv__"
            self.assertEqual(res.values.dtype, np.float64)

  def testCwiseAdd(self):
    with test_util.force_cpu():
      # Identity(2) + AllOnes(2,2).  Should be equal to 2 * Identity(2).
      indices = [[0, 0], [1, 1]]
      vals = [1, 1]
      shape = (2, 2)
      sp_t = sparse_tensor.SparseTensor(indices, vals, shape)
      dense_t = array_ops.ones(shape, dtype=dtypes.int32)
      self._check(
          sparse_ops.sparse_dense_cwise_add(sp_t, dense_t),
          np.identity(2) * 2, sp_t)
      # Variant of above, but broadcasts the dense side.
      dense_t = array_ops.ones([1], dtype=dtypes.int32)
      self._check(
          sparse_ops.sparse_dense_cwise_add(sp_t, dense_t),
          np.identity(2) * 2, sp_t)

  @test_util.run_deprecated_v1
  def testGradients(self):
    """Numeric gradient checks for cwise mul and div."""
    np.random.seed(1618)
    sp_shapes = [(10, 10, 10), (5, 5), (1618,), (3, 3, 7)]
    dense_shapes = [(10, 10, 1), (5, 5), (1,), (1, 7)]
    with self.session(use_gpu=False):
      for dtype in [np.float32, np.float64]:
        for sp_shape, dense_shape in zip(sp_shapes, dense_shapes):
          sp_vals_np = np.random.rand(*sp_shape).astype(dtype) + 1
          dense_vals_np = np.random.rand(*dense_shape).astype(dtype) + 1
          sp_t, nnz = _sparsify(sp_vals_np, thresh=1.5)
          dense_t = constant_op.constant(dense_vals_np)
          cmul = sp_t * dense_t
          err = gradient_checker.compute_gradient_error([sp_t.values, dense_t],
                                                        [(nnz,), dense_shape],
                                                        cmul.values, (nnz,))
          self.assertLess(err, 1e-4)
          cdiv = sp_t / dense_t
          err = gradient_checker.compute_gradient_error(sp_t.values, (nnz,),
                                                        cdiv.values, (nnz,))
          self.assertLess(err, 1e-4)
          err = gradient_checker.compute_gradient_error(
              dense_t,
              dense_shape,
              cdiv.values, (nnz,),
              x_init_value=dense_vals_np)
          self.assertLess(err, 2e-4)
class SparseSoftmaxTest(test_util.TensorFlowTestCase):
  """Tests for sparse_ops.sparse_softmax."""

  @test_util.run_deprecated_v1
  def testEquivalentToDensified(self):
    np.random.seed(1618)
    # A single fully-dense (thresh=0) batch: sparse softmax must agree
    # exactly with the dense nn_ops.softmax on the same values.
    n, m = np.random.choice(20, size=2)
    for dtype in [np.float32, np.float64]:
      sp_vals_np = np.random.rand(n, m).astype(dtype)
      batched_sp_t, unused_nnz1 = _sparsify(
          sp_vals_np.reshape((1, n, m)), thresh=0.)  # No masking.
      with test_util.force_cpu():
        densified = constant_op.constant(sp_vals_np)
        sp_result = self.evaluate(
            sparse_ops.sparse_softmax(batched_sp_t)).values.reshape((n, m))
        dense_result = nn_ops.softmax(densified)
        self.assertAllClose(dense_result, sp_result)

  def testHigherRanks(self):
    # For the first shape:
    # First batch:
    #   [?   e.]
    #   [1.  ? ]
    # Second batch:
    #   [e   ? ]
    #   [e   e ]
    #
    # The softmax results should be:
    #   [?   1.]     [1    ?]
    #   [1.  ? ] and [.5  .5]
    # where ? means implicitly zero.
    #
    # The second shape: same input data, but with a higher-rank shape.
    shapes = [[2, 2, 2], [2, 1, 2, 2]]
    for shape in shapes:
      values = np.asarray(
          [0., np.e, 1., 0., np.e, 0., np.e, np.e]).reshape(shape)
      # thresh=1e-2 masks out the explicit zeros above.
      sp_t, unused_nnz = _sparsify(values, thresh=1e-2)
      expected_values = [1., 1., 1., .5, .5]

      with test_util.force_cpu():
        result = sparse_ops.sparse_softmax(sp_t)

        # Softmax must preserve the sparsity structure exactly.
        self.assertAllEqual(expected_values, result.values)
        self.assertAllEqual(sp_t.indices, result.indices)
        self.assertAllEqual(shape, result.dense_shape)

  @test_util.run_deprecated_v1
  def testGradient(self):
    x_shape = [2, 5, 10]
    with self.cached_session(use_gpu=False):
      for dtype in [np.float32, np.float64]:
        x_np = np.random.randn(*x_shape).astype(dtype)
        x_tf, nnz = _sparsify(x_np)
        y_tf = sparse_ops.sparse_softmax(x_tf)
        err = gradient_checker.compute_gradient_error(x_tf.values, (nnz,),
                                                      y_tf.values, (nnz,))
        self.assertLess(err, 1e-4)
class SparseMinimumMaximumTest(test_util.TensorFlowTestCase):
  """Tests for sparse_ops.sparse_minimum / sparse_maximum."""

  def _assertSparseTensorValueEqual(self, a, b):
    # Compare structure and values of two sparse tensors component-wise.
    self.assertAllEqual(a.indices, b.indices)
    self.assertAllEqual(a.values, b.values)
    self.assertAllEqual(a.dense_shape, b.dense_shape)

  def testBasic(self):
    with test_util.force_cpu():
      # 1-D, values at index 0.
      sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
      sp_one = sparse_tensor.SparseTensor([[0]], [1], [7])
      max_tf = sparse_ops.sparse_maximum(sp_zero, sp_one)
      min_tf = sparse_ops.sparse_minimum(sp_zero, sp_one)
      self._assertSparseTensorValueEqual(sp_one, max_tf)
      self._assertSparseTensorValueEqual(sp_zero, min_tf)

      # Values at different indices: the result's index set is the union
      # of both operands' indices (missing entries treated as zero).
      sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
      sp_zero_2 = sparse_tensor.SparseTensor([[1]], [0], [7])
      expected = sparse_tensor.SparseTensor([[0], [1]], [0, 0], [7])
      max_tf = sparse_ops.sparse_maximum(sp_zero, sp_zero_2)
      min_tf = sparse_ops.sparse_minimum(sp_zero, sp_zero_2)
      self._assertSparseTensorValueEqual(expected, max_tf)
      self._assertSparseTensorValueEqual(expected, min_tf)

  @test_util.run_deprecated_v1
  def testRandom(self):
    np.random.seed(1618)
    shapes = [(13,), (6, 8), (1, 7, 1)]
    for shape in shapes:
      for dtype in [np.int32, np.int64, np.float16, np.float32, np.float64]:
        a_np = np.random.randn(*shape).astype(dtype)
        b_np = np.random.randn(*shape).astype(dtype)
        sp_a, unused_a_nnz = _sparsify(a_np, thresh=-.5)
        sp_b, unused_b_nnz = _sparsify(b_np, thresh=-.5)

        with self.cached_session(use_gpu=False):
          maximum_tf = sparse_ops.sparse_maximum(sp_a, sp_b)
          maximum_tf_densified = sparse_ops.sparse_tensor_to_dense(
              maximum_tf).eval()
          minimum_tf = sparse_ops.sparse_minimum(sp_a, sp_b)
          minimum_tf_densified = sparse_ops.sparse_tensor_to_dense(
              minimum_tf).eval()

          a_densified = sparse_ops.sparse_tensor_to_dense(sp_a).eval()
          b_densified = sparse_ops.sparse_tensor_to_dense(sp_b).eval()

          # The densified sparse results must match numpy's element-wise
          # min/max on the densified operands.
          self.assertAllEqual(
              np.maximum(a_densified, b_densified), maximum_tf_densified)
          self.assertAllEqual(
              np.minimum(a_densified, b_densified), minimum_tf_densified)

  def testMismatchedShapes(self):
    with test_util.force_cpu():
      # Different ranks must be rejected.
      sp_zero = sparse_tensor.SparseTensor([[0, 0]], [0], [1, 1])
      sp_one = sparse_tensor.SparseTensor([[0]], [1], [2])
      with self.assertRaisesOpError("Operands do not have the same ranks"):
        self.evaluate(sparse_ops.sparse_maximum(sp_zero, sp_one))

      # Same rank but different shapes must also be rejected.
      sp_zero = sparse_tensor.SparseTensor([[0]], [0], [1])
      sp_one = sparse_tensor.SparseTensor([[0]], [1], [2])
      with self.assertRaisesOpError("Operands' shapes do not match"):
        self.evaluate(sparse_ops.sparse_maximum(sp_zero, sp_one))
class SparseTransposeTest(test.TestCase):
  """Checks sparse_transpose against dense array_ops.transpose."""

  def testTranspose(self):
    if np.__version__ == "1.13.0":
      self.skipTest("numpy 1.13.0 bug")

    with test_util.force_cpu():
      np.random.seed(1618)
      # One random shape per rank 1..5.
      shapes = [np.random.randint(1, 10, size=rank) for rank in range(1, 6)]
      for shape in shapes:
        for dtype in [np.int32, np.int64, np.float32, np.float64]:
          dn_input = np.random.randn(*shape).astype(dtype)
          rank = self.evaluate(array_ops.rank(dn_input))
          # Random permutation of the axes (sampled without replacement).
          perm = np.random.choice(rank, rank, False)
          sp_input, unused_a_nnz = _sparsify(dn_input)
          sp_trans = sparse_ops.sparse_transpose(sp_input, perm=perm)
          dn_trans = sparse_ops.sparse_tensor_to_dense(sp_trans)
          expected_trans = array_ops.transpose(dn_input, perm=perm)
          # Both static shape and densified values must match the dense path.
          self.assertAllEqual(expected_trans.shape, sp_trans.get_shape())
          self.assertAllEqual(dn_trans, expected_trans)
class SparsePlaceholderTest(test.TestCase):
  """Static-shape propagation of array_ops.sparse_placeholder."""

  @test_util.run_deprecated_v1
  def testPlaceholder(self):
    # Fully-known dense shape: propagated verbatim; indices stay (?, 2).
    sp_ph = array_ops.sparse_placeholder(dtypes.float32, shape=(10, 47))
    self.assertAllEqual([10, 47], sp_ph.get_shape())
    self.assertAllEqual([None, 2], sp_ph.indices.get_shape().as_list())

  @test_util.run_deprecated_v1
  def testPartialShapePlaceholder(self):
    # Partially-known dense shape: the unknown dim stays None.
    sp_ph = array_ops.sparse_placeholder(dtypes.float32, shape=(None, 47))
    self.assertAllEqual([None, 47], sp_ph.get_shape().as_list())
    self.assertAllEqual([None, 2], sp_ph.indices.get_shape().as_list())

  @test_util.run_deprecated_v1
  def testNoShapePlaceholder(self):
    # No shape at all: rank is unknown, so indices are (?, ?).
    sp_ph = array_ops.sparse_placeholder(dtypes.float32, shape=None)
    self.assertAllEqual(None, sp_ph.get_shape())
    self.assertAllEqual([None, None], sp_ph.indices.get_shape().as_list())
# Run all test cases in this module under the googletest runner.
if __name__ == "__main__":
  googletest.main()
|
neowinx/chow-chow | refs/heads/master | samples/prompting_combobox.py | 1 | import wx
class PromptingComboBox(wx.ComboBox):
    """Editable combo box that auto-completes typed text against its choices.

    As the user types, the first choice starting with the typed prefix is
    filled in, with the completed tail selected so further typing replaces it.
    """

    def __init__(self, parent, value, choices=None, style=0, **par):
        # Bug fix: the old default `choices=[]` was a shared mutable default
        # argument; every instance created without an explicit list aliased
        # the same object. `None` with a per-instance copy is backward
        # compatible.
        choices = list(choices) if choices else []
        wx.ComboBox.__init__(self, parent, wx.ID_ANY, value,
                             style=style | wx.CB_DROPDOWN, choices=choices,
                             **par)
        self.choices = choices
        self.Bind(wx.EVT_TEXT, self.EvtText)
        self.Bind(wx.EVT_CHAR, self.EvtChar)
        self.Bind(wx.EVT_COMBOBOX, self.EvtCombobox)
        # When True, the next EVT_TEXT event is ignored: it was triggered by
        # our own SetValue/selection, not by the user typing.
        self.ignoreEvtText = False

    def EvtCombobox(self, event):
        """Selecting a dropdown item fires EVT_TEXT too; ignore that echo."""
        self.ignoreEvtText = True
        event.Skip()

    def EvtChar(self, event):
        # Backspace (key code 8): don't re-complete text the user is deleting.
        if event.GetKeyCode() == 8:
            self.ignoreEvtText = True
        event.Skip()

    def EvtText(self, event):
        """Auto-complete the typed prefix to the first matching choice."""
        if self.ignoreEvtText:
            self.ignoreEvtText = False
            return
        currentText = event.GetString()
        for choice in self.choices:
            if choice.startswith(currentText):
                self.ignoreEvtText = True
                self.SetValue(choice)
                self.SetInsertionPoint(len(currentText))
                # Highlight the auto-completed tail so typing replaces it.
                self.SetMark(len(currentText), len(choice))
                break
        else:
            # No matching choice: let the event propagate normally.
            event.Skip()
class TrialPanel(wx.Panel):
    """Demo panel hosting a single PromptingComboBox full of relatives."""

    def __init__(self, parent):
        wx.Panel.__init__(self, parent, wx.ID_ANY)
        choices = ['grandmother', 'grandfather', 'cousin', 'aunt', 'uncle',
                   'grandson', 'granddaughter']
        immediate = ['mother', 'father', 'sister', 'brother', 'daughter',
                     'son']
        for relative in immediate:
            choices += self.derivedRelatives(relative)
        PromptingComboBox(self, "default value", choices, style=wx.CB_SORT)

    def derivedRelatives(self, relative):
        """Return the relative plus its 'step' and '-in-law' variants."""
        return [relative, 'step' + relative, relative + '-in-law']
# Standalone demo: show the prompting combo box in a small frame.
if __name__ == '__main__':
    app = wx.App()
    frame = wx.Frame(None, -1, 'Demo PromptingComboBox Control', size=(400, 50))
    TrialPanel(frame)
    frame.Show()
    app.MainLoop()
yuezhou/telephony | refs/heads/master | telephony/Classes/pjproject-2.2.1/tests/pjsua/scripts-pesq/200_codec_g722.py | 59 | # $Id: 200_codec_g722.py 2063 2008-06-26 18:52:16Z nanang $
#
from inc_cfg import *
ADD_PARAM = ""

# Fall back to the null audio device when the host has no sound device.
if (HAS_SND_DEV == 0):
    ADD_PARAM += "--null-audio"

# Call with G722 codec
# Two pjsua instances: UA1 plays a reference wav, UA2 auto-answers and
# records; PESQ later compares input.16.wav against tmp.16.wav.
test_param = TestParam(
    "PESQ codec G722",
    [
        InstanceParam("UA1", ADD_PARAM + " --max-calls=1 --add-codec g722 --clock-rate 16000 --play-file wavs/input.16.wav"),
        InstanceParam("UA2", "--null-audio --max-calls=1 --add-codec g722 --clock-rate 16000 --rec-file wavs/tmp.16.wav --auto-answer 200")
    ]
)

# Minimum acceptable PESQ score for this codec.
pesq_threshold = 3.7
|
talos/gitdict | refs/heads/master | test/helpers.py | 1 | import os
import unittest
import shutil
from gitdict import DictRepository
REPO_DIR = os.path.join('test', 'tmp')
class RepoTestCase(unittest.TestCase):
    """Base test case that gives each test a fresh DictRepository.

    The repository is created in REPO_DIR before every test and the whole
    directory is removed afterwards, so tests never see each other's state.
    """

    def setUp(self):
        """
        Build a new test dir for each run.
        """
        self.repo = DictRepository(REPO_DIR)

    def tearDown(self):
        """
        Kill the old test dir after each run.
        """
        shutil.rmtree(REPO_DIR)
|
mgagne/nova | refs/heads/master | nova/api/openstack/compute/contrib/server_start_stop.py | 51 | # Copyright 2012 Midokura Japan K.K.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.i18n import _
from nova import objects
class ServerStartStopActionController(wsgi.Controller):
    """WSGI controller adding 'os-start'/'os-stop' server actions."""

    def __init__(self, *args, **kwargs):
        super(ServerStartStopActionController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()

    def _get_instance(self, context, instance_uuid):
        """Load the instance or translate NotFound into HTTP 404."""
        try:
            attrs = ['system_metadata', 'metadata']
            return objects.Instance.get_by_uuid(context, instance_uuid,
                                                expected_attrs=attrs)
        except exception.NotFound:
            msg = _("Instance not found")
            raise webob.exc.HTTPNotFound(explanation=msg)

    @wsgi.action('os-start')
    def _start_server(self, req, id, body):
        """Start an instance."""
        context = req.environ['nova.context']
        instance = self._get_instance(context, id)
        extensions.check_compute_policy(context, 'start', instance)
        try:
            self.compute_api.start(context, instance)
        except exception.InstanceInvalidState as state_error:
            # Wrong VM state for a start maps to 409 with a descriptive body.
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                'start', id)
        except (exception.InstanceNotReady, exception.InstanceIsLocked) as e:
            raise webob.exc.HTTPConflict(explanation=e.format_message())
        # 202 Accepted: the start proceeds asynchronously.
        return webob.Response(status_int=202)

    @wsgi.action('os-stop')
    def _stop_server(self, req, id, body):
        """Stop an instance."""
        context = req.environ['nova.context']
        instance = self._get_instance(context, id)
        extensions.check_compute_policy(context, 'stop', instance)
        try:
            self.compute_api.stop(context, instance)
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                'stop', id)
        except (exception.InstanceNotReady, exception.InstanceIsLocked) as e:
            raise webob.exc.HTTPConflict(explanation=e.format_message())
        # 202 Accepted: the stop proceeds asynchronously.
        return webob.Response(status_int=202)
class Server_start_stop(extensions.ExtensionDescriptor):
    """Start/Stop instance compute API support."""

    name = "ServerStartStop"
    alias = "os-server-start-stop"
    namespace = "http://docs.openstack.org/compute/ext/servers/api/v1.1"
    updated = "2012-01-23T00:00:00Z"

    def get_controller_extensions(self):
        """Attach the start/stop action controller to the 'servers' resource."""
        action_controller = ServerStartStopActionController()
        return [
            extensions.ControllerExtension(self, 'servers', action_controller)
        ]
|
2014c2g12/c2g12 | refs/heads/master | wsgi/w2/static/Brython2.0.0-20140209-164925/Lib/pickle.py | 1265 | from json import * |
sloria/webargs | refs/heads/dev | src/webargs/fields.py | 1 | # -*- coding: utf-8 -*-
"""Field classes.
Includes all fields from `marshmallow.fields` in addition to a custom
`Nested` field and `DelimitedList`.
All fields can optionally take a special `location` keyword argument, which
tells webargs where to parse the request argument from.
.. code-block:: python
args = {
"active": fields.Bool(location='query'),
"content_type": fields.Str(data_key="Content-Type", location="headers"),
}
Note: `data_key` replaced `load_from` in marshmallow 3.
When using marshmallow 2, use `load_from`.
"""
import marshmallow as ma
# Expose all fields from marshmallow.fields.
from marshmallow.fields import * # noqa: F40
from webargs.compat import MARSHMALLOW_VERSION_INFO
from webargs.dict2schema import dict2schema
__all__ = ["DelimitedList"] + ma.fields.__all__
class Nested(ma.fields.Nested):
    """Same as `marshmallow.fields.Nested`, except can be passed a dictionary as
    the first argument, which will be converted to a `marshmallow.Schema`.

    .. note::

        The schema class here will always be `marshmallow.Schema`, regardless
        of whether a custom schema class is set on the parser. Pass an explicit schema
        class if necessary.
    """

    def __init__(self, nested, *args, **kwargs):
        # Promote a plain field dict to a generated Schema class before
        # delegating to marshmallow's own Nested field.
        if isinstance(nested, dict):
            nested = dict2schema(nested)
        super(Nested, self).__init__(nested, *args, **kwargs)
class DelimitedList(ma.fields.List):
    """Same as `marshmallow.fields.List`, except can load from either a list or
    a delimited string (e.g. "foo,bar,baz").

    :param Field cls_or_instance: A field class or instance.
    :param str delimiter: Delimiter between values.
    :param bool as_string: Dump values to string.
    """

    # Default delimiter, used when none is passed to the constructor.
    delimiter = ","

    def __init__(self, cls_or_instance, delimiter=None, as_string=False, **kwargs):
        self.delimiter = delimiter or self.delimiter
        self.as_string = as_string
        super(DelimitedList, self).__init__(cls_or_instance, **kwargs)

    def _serialize(self, value, attr, obj):
        ret = super(DelimitedList, self)._serialize(value, attr, obj)
        if self.as_string:
            # Join the serialized items back into one delimited string.
            return self.delimiter.join(format(each) for each in ret)
        return ret

    def _deserialize(self, value, attr, data, **kwargs):
        try:
            # Already a non-string iterable: use as-is; a string is split on
            # the delimiter.
            ret = (
                value
                if ma.utils.is_iterable_but_not_string(value)
                else value.split(self.delimiter)
            )
        except AttributeError:
            # `value` had no .split (e.g. an int): raise the field's standard
            # "invalid" error, via the API of the installed marshmallow major.
            if MARSHMALLOW_VERSION_INFO[0] < 3:
                self.fail("invalid")
            else:
                raise self.make_error("invalid")
        return super(DelimitedList, self)._deserialize(ret, attr, data, **kwargs)
|
ltilve/ChromiumGStreamerBackend | refs/heads/master | tools/telemetry/third_party/gsutilz/third_party/protorpc/protorpc/generate_python.py | 39 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import with_statement
__author__ = 'rafek@google.com (Rafe Kaplan)'
from . import descriptor
from . import generate
from . import message_types
from . import messages
from . import util
__all__ = ['format_python_file']
_MESSAGE_FIELD_MAP = {
message_types.DateTimeMessage.definition_name(): message_types.DateTimeField,
}
def _write_enums(enum_descriptors, out):
  """Write nested and non-nested Enum types.

  Args:
    enum_descriptors: List of EnumDescriptor objects from which to generate
      enums.
    out: Indent writer used for generating text.
  """
  # Write enums.
  for enum in enum_descriptors or []:
    out << ''
    out << ''
    out << 'class %s(messages.Enum):' % enum.name
    out << ''

    with out.indent():
      if not enum.values:
        # An enum with no values still needs a syntactically valid body.
        out << 'pass'
      else:
        for enum_value in enum.values:
          out << '%s = %s' % (enum_value.name, enum_value.number)
def _write_fields(field_descriptors, out):
  """Write fields for Message types.

  Args:
    field_descriptors: List of FieldDescriptor objects from which to generate
      fields.
    out: Indent writer used for generating text.
  """
  out << ''
  for field in field_descriptors or []:
    type_format = ''
    label_format = ''
    # Some well-known message types (e.g. DateTimeMessage) map to dedicated
    # field classes living in message_types rather than messages.
    message_field = _MESSAGE_FIELD_MAP.get(field.type_name)
    if message_field:
      module = 'message_types'
      field_type = message_field
    else:
      module = 'messages'
      field_type = messages.Field.lookup_field_type_by_variant(field.variant)

    if field_type in (messages.EnumField, messages.MessageField):
      # Delayed binding: reference the enum/message type by name so it can
      # be resolved after all types are defined.
      type_format = '\'%s\', ' % field.type_name

    if field.label == descriptor.FieldDescriptor.Label.REQUIRED:
      label_format = ', required=True'
    elif field.label == descriptor.FieldDescriptor.Label.REPEATED:
      label_format = ', repeated=True'

    # Only emit variant= when it differs from the field type's default.
    if field_type.DEFAULT_VARIANT != field.variant:
      variant_format = ', variant=messages.Variant.%s' % field.variant
    else:
      variant_format = ''

    if field.default_value:
      # String-ish defaults must be emitted as Python literals; enum
      # defaults prefer the numeric form when possible.
      if field_type in [messages.BytesField,
                        messages.StringField,
                       ]:
        default_value = repr(field.default_value)
      elif field_type is messages.EnumField:
        try:
          default_value = str(int(field.default_value))
        except ValueError:
          default_value = repr(field.default_value)
      else:
        default_value = field.default_value

      default_format = ', default=%s' % (default_value,)
    else:
      default_format = ''

    out << '%s = %s.%s(%s%s%s%s%s)' % (field.name,
                                       module,
                                       field_type.__name__,
                                       type_format,
                                       field.number,
                                       label_format,
                                       variant_format,
                                       default_format)
def _write_messages(message_descriptors, out):
  """Write nested and non-nested Message types.

  Args:
    message_descriptors: List of MessageDescriptor objects from which to
      generate messages.
    out: Indent writer used for generating text.
  """
  for message in message_descriptors or []:
    out << ''
    out << ''
    out << 'class %s(messages.Message):' % message.name

    with out.indent():
      if not (message.enum_types or message.message_types or message.fields):
        # Empty message: a bare class body still needs `pass`.
        out << ''
        out << 'pass'
      else:
        # Recursively emit nested enums and messages before the fields.
        _write_enums(message.enum_types, out)
        _write_messages(message.message_types, out)
        _write_fields(message.fields, out)
def _write_methods(method_descriptors, out):
  """Write methods of Service types.

  All service method implementations raise NotImplementedError.

  Args:
    method_descriptors: List of MethodDescriptor objects from which to
      generate methods.
    out: Indent writer used for generating text.
  """
  for method in method_descriptors:
    out << ''
    # Request/response types are referenced by name (delayed binding).
    out << "@remote.method('%s', '%s')" % (method.request_type,
                                           method.response_type)
    out << 'def %s(self, request):' % (method.name,)

    with out.indent():
      out << ('raise NotImplementedError'
              "('Method %s is not implemented')" % (method.name))
def _write_services(service_descriptors, out):
  """Write Service types.

  Args:
    service_descriptors: List of ServiceDescriptor instances from which to
      generate services.
    out: Indent writer used for generating text.
  """
  for service in service_descriptors or []:
    out << ''
    out << ''
    out << 'class %s(remote.Service):' % service.name

    with out.indent():
      if service.methods:
        _write_methods(service.methods, out)
      else:
        # A service with no methods still needs a valid class body.
        out << ''
        out << 'pass'
@util.positional(2)
def format_python_file(file_descriptor, output, indent_space=2):
  """Format FileDescriptor object as a single Python module.

  Services generated by this function will raise NotImplementedError.

  All Python classes generated by this function use delayed binding for all
  message fields, enum fields and method parameter types.  For example a
  service method might be generated like so:

    class MyService(remote.Service):

      @remote.method('my_package.MyRequestType',
                     'my_package.MyResponseType')
      def my_method(self, request):
        raise NotImplementedError('Method my_method is not implemented')

  Args:
    file_descriptor: FileDescriptor instance to format as python module.
    output: File-like object to write module source code to.
    indent_space: Number of spaces for each level of Python indentation.
  """
  out = generate.IndentWriter(output, indent_space=indent_space)

  # Module preamble: only import `remote` when services are present.
  out << 'from protorpc import message_types'
  out << 'from protorpc import messages'
  if file_descriptor.service_types:
    out << 'from protorpc import remote'

  if file_descriptor.package:
    out << "package = '%s'" % file_descriptor.package

  # Top-level definitions in dependency-friendly order.
  _write_enums(file_descriptor.enum_types, out)
  _write_messages(file_descriptor.message_types, out)
  _write_services(file_descriptor.service_types, out)
|
geng890518/editor-ui | refs/heads/master | node_modules/node-gyp/gyp/tools/pretty_sln.py | 1831 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints the information in a sln file in a diffable way.
It first outputs each projects in alphabetical order with their
dependencies.
Then it outputs a possible build order.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import re
import sys
import pretty_vcproj
def BuildProject(project, built, projects, deps):
  """Print *project* in build order: dependencies first, then the project.

  Args:
    project: name of the project to build.
    built: list of already-printed project names; appended to in place.
    projects: mapping of project name to project info (passed through to
      recursive calls; not read here).
    deps: mapping of project name to the list of project names it depends on.
  """
  # if all dependencies are done, we can build it, otherwise we try to build
  # the dependency.
  # This is not infinite-recursion proof.
  for dep in deps[project]:
    if dep not in built:
      BuildProject(dep, built, projects, deps)
  # Parenthesized single-argument form prints identically under Python 2 but
  # also parses under Python 3 (the rest of this file is Python-2-only).
  print(project)
  built.append(project)
def ParseSolution(solution_file):
  """Parse a Visual Studio .sln file.

  Args:
    solution_file: path of the .sln file to parse.

  Returns:
    A (projects, dependencies) pair. `projects` maps project name (with
    '_gyp' stripped) to [path-with-_gyp-stripped, clsid, original-path].
    `dependencies` maps project name to the sorted list of the names of the
    projects it depends on.
  """
  # All projects, their clsid and paths.
  projects = dict()

  # A list of dependencies associated with a project.
  dependencies = dict()

  # Regular expressions that matches the SLN format.
  # The first line of a project definition.
  begin_project = re.compile(r'^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
                             r'}"\) = "(.*)", "(.*)", "(.*)"$')
  # The last line of a project definition.
  end_project = re.compile('^EndProject$')
  # The first line of a dependency list.
  begin_dep = re.compile(
      r'ProjectSection\(ProjectDependencies\) = postProject$')
  # The last line of a dependency list.
  end_dep = re.compile('EndProjectSection$')
  # A line describing a dependency.
  dep_line = re.compile(' *({.*}) = ({.*})$')

  in_deps = False
  # Bug fix: initialize so a stray dependency line before any project
  # definition cannot raise NameError below.
  current_project = None
  # Bug fix: close the file deterministically (the old code leaked the
  # handle).
  with open(solution_file) as solution:
    for line in solution:
      results = begin_project.search(line)
      if results:
        # Hack to remove icu because the diff is too different.
        if results.group(1).find('icu') != -1:
          continue
        # We remove "_gyp" from the names because it helps to diff them.
        current_project = results.group(1).replace('_gyp', '')
        projects[current_project] = [results.group(2).replace('_gyp', ''),
                                     results.group(3),
                                     results.group(2)]
        dependencies[current_project] = []
        continue

      results = end_project.search(line)
      if results:
        current_project = None
        continue

      results = begin_dep.search(line)
      if results:
        in_deps = True
        continue

      results = end_dep.search(line)
      if results:
        in_deps = False
        continue

      results = dep_line.search(line)
      if results and in_deps and current_project:
        dependencies[current_project].append(results.group(1))
        continue

  # Change all dependencies clsid to name instead.
  for project in dependencies:
    # For each dependencies in this project
    new_dep_array = []
    for dep in dependencies[project]:
      # Look for the project name matching this cldis
      for project_info in projects:
        if projects[project_info][1] == dep:
          new_dep_array.append(project_info)
    dependencies[project] = sorted(new_dep_array)

  return (projects, dependencies)
def PrintDependencies(projects, deps):
  """Print each project's path and dependency list, sorted by name."""
  print "---------------------------------------"
  print "Dependencies for all projects"
  print "---------------------------------------"
  print "--                                   --"

  for (project, dep_list) in sorted(deps.items()):
    print "Project : %s" % project
    print "Path : %s" % projects[project][0]
    if dep_list:
      for dep in dep_list:
        print "  - %s" % dep
    print ""

  print "--                                   --"
def PrintBuildOrder(projects, deps):
  """Print one possible build order (dependencies before dependents)."""
  print "---------------------------------------"
  print "Build order                            "
  print "---------------------------------------"
  print "--                                   --"

  built = []
  # BuildProject recursively prints unbuilt dependencies first.
  for (project, _) in sorted(deps.items()):
    if project not in built:
      BuildProject(project, built, projects, deps)

  print "--                                   --"
def PrintVCProj(projects):
  """Pretty-print each project's vcproj file via the pretty_vcproj helper."""
  for project in projects:
    print "-------------------------------------"
    print "-------------------------------------"
    print project
    print project
    print project
    print "-------------------------------------"
    print "-------------------------------------"

    # The vcproj path stored in the solution is relative to the .sln file.
    project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
                                                projects[project][2]))

    pretty = pretty_vcproj
    # Substitute $(SolutionDir) and forward any extra command-line args.
    argv = [ '',
             project_path,
             '$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]),
           ]
    argv.extend(sys.argv[3:])
    pretty.main(argv)
def main():
  """Entry point: parse the .sln named on the command line and print
  its dependencies and a build order; returns a process exit code."""
  # check if we have exactly 1 parameter.
  if len(sys.argv) < 2:
    print 'Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0]
    return 1

  (projects, deps) = ParseSolution(sys.argv[1])
  PrintDependencies(projects, deps)
  PrintBuildOrder(projects, deps)

  # Optionally pretty-print every referenced vcproj as well.
  if '--recursive' in sys.argv:
    PrintVCProj(projects)
  return 0
# Propagate main()'s return value as the process exit status.
if __name__ == '__main__':
  sys.exit(main())
|
CivicTechTO/open-cabinet | refs/heads/master | openorders/alerts/emailtoken.py | 2 | from itsdangerous import URLSafeTimedSerializer
import ALERTCONFIG
def generate_confirmation_token(email):
    """Return a signed, timestamped token that embeds *email*.

    The token can later be validated and decoded by confirm_token().
    """
    serializer = URLSafeTimedSerializer(ALERTCONFIG.EMAIL_KEY)
    return serializer.dumps(email, salt=ALERTCONFIG.EMAIL_SALT)
def confirm_token(token, expiration=3600):
    """Return the email embedded in *token*, or False if invalid/expired.

    Args:
        token: signed token produced by generate_confirmation_token().
        expiration: maximum token age in seconds (default one hour).
    """
    serializer = URLSafeTimedSerializer(ALERTCONFIG.EMAIL_KEY)
    try:
        email = serializer.loads(
            token, salt=ALERTCONFIG.EMAIL_SALT, max_age=expiration)
    except Exception:
        # Bug fix: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt. Exception still covers itsdangerous'
        # BadSignature/SignatureExpired, preserving the False-on-failure
        # contract.
        return False
    return email
codenote/chromium-test | refs/heads/master | tools/telemetry/telemetry/core/chrome/inspector_page.py | 1 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
from telemetry.core import util
class InspectorPage(object):
  """Client for the DevTools 'Page' domain: navigation and cookie access."""

  def __init__(self, inspector_backend):
    self._inspector_backend = inspector_backend
    # Route 'Page' domain notifications/close events to this object.
    self._inspector_backend.RegisterDomain(
        'Page',
        self._OnNotification,
        self._OnClose)
    # True while a navigation started via PerformActionAndWaitForNavigate
    # has not yet produced a Page.frameNavigated event.
    self._navigation_pending = False

  def _OnNotification(self, msg):
    logging.debug('Notification: %s', json.dumps(msg, indent=2))
    if msg['method'] == 'Page.frameNavigated' and self._navigation_pending:
      url = msg['params']['frame']['url']
      # Ignore navigations to the new-tab page / blank page.
      if not url == 'chrome://newtab/' and not url == 'about:blank':
        # Marks the navigation as complete and unblocks the
        # PerformActionAndWaitForNavigate call.
        self._navigation_pending = False

  def _OnClose(self):
    # Nothing to clean up when the inspector connection closes.
    pass

  def PerformActionAndWaitForNavigate(self, action_function, timeout=60):
    """Executes action_function, and waits for the navigation to complete.

    action_function is expect to result in a navigation. This function returns
    when the navigation is complete or when the timeout has been exceeded.
    """
    # Turn on notifications. We need them to get the Page.frameNavigated event.
    request = {
        'method': 'Page.enable'
        }
    res = self._inspector_backend.SyncRequest(request, timeout)
    assert len(res['result'].keys()) == 0

    def DisablePageNotifications():
      # Symmetric teardown of the Page.enable above.
      request = {
          'method': 'Page.disable'
          }
      res = self._inspector_backend.SyncRequest(request, timeout)
      assert len(res['result'].keys()) == 0

    self._navigation_pending = True
    try:
      action_function()
    except:
      # Restore notification state, then re-raise the original failure.
      DisablePageNotifications()
      raise

    def IsNavigationDone(time_left):
      # Pump notifications until _OnNotification clears the pending flag.
      self._inspector_backend.DispatchNotifications(time_left)
      return not self._navigation_pending
    util.WaitFor(IsNavigationDone, timeout, pass_time_left_to_func=True)

    DisablePageNotifications()

  def Navigate(self, url, script_to_evaluate_on_commit=None, timeout=60):
    """Navigates to |url|.

    If |script_to_evaluate_on_commit| is given, the script source string will be
    evaluated when the navigation is committed. This is after the context of
    the page exists, but before any script on the page itself has executed.
    """

    def DoNavigate():
      # Navigate the page. However, there seems to be a bug in chrome devtools
      # protocol where the request id for this event gets held on the browser
      # side pretty much indefinitely.
      #
      # So, instead of waiting for the event to actually complete, wait for the
      # Page.frameNavigated event.
      request = {
          'method': 'Page.navigate',
          'params': {
              'url': url,
              }
          }
      self._inspector_backend.SendAndIgnoreResponse(request)
      if script_to_evaluate_on_commit:
        request = {
            'method': 'Page.addScriptToEvaluateOnLoad',
            'params': {
                'scriptSource': script_to_evaluate_on_commit,
                }
            }
        self._inspector_backend.SendAndIgnoreResponse(request)

    self.PerformActionAndWaitForNavigate(DoNavigate, timeout)

  def GetCookieByName(self, name, timeout=60):
    """Returns the value of the cookie by the given |name|."""
    request = {
        'method': 'Page.getCookies'
        }
    res = self._inspector_backend.SyncRequest(request, timeout)
    cookies = res['result']['cookies']
    for cookie in cookies:
      if cookie['name'] == name:
        return cookie['value']
    # No cookie with that name.
    return None
|
tangfeixiong/nova | refs/heads/stable/juno | nova/compute/resources/base.py | 95 | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
# six.add_metaclass keeps the ABC working on both Python 2 and 3.
@six.add_metaclass(abc.ABCMeta)
class Resource(object):
    """This base class defines the interface used for compute resource
    plugins. It is not necessary to use this base class, but all compute
    resource plugins must implement the abstract methods found here.

    An instance of the plugin object is instantiated when it is loaded
    by calling __init__() with no parameters.
    """

    @abc.abstractmethod
    def reset(self, resources, driver):
        """Set the resource to an initial state based on the resource
        view discovered from the hypervisor.

        :param resources: resource view reported by the hypervisor
        :param driver: virt driver that produced the resource view
        """
        pass

    @abc.abstractmethod
    def test(self, usage, limits):
        """Test to see if we have sufficient resources to allocate for
        an instance with the given resource usage.

        :param usage: the resource usage of the instances
        :param limits: limits to apply
        :returns: None if the test passes or a string describing the reason
                  why the test failed
        """
        pass

    @abc.abstractmethod
    def add_instance(self, usage):
        """Update resource information adding allocation according to the
        given resource usage.

        :param usage: the resource usage of the instance being added
        :returns: None
        """
        pass

    @abc.abstractmethod
    def remove_instance(self, usage):
        """Update resource information removing allocation according to the
        given resource usage.

        :param usage: the resource usage of the instance being removed
        :returns: None
        """
        pass

    @abc.abstractmethod
    def write(self, resources):
        """Write resource data to populate resources.

        :param resources: the resources data to be populated
        :returns: None
        """
        pass

    @abc.abstractmethod
    def report_free(self):
        """Log free resources.

        This method logs how much free resource is held by
        the resource plugin.

        :returns: None
        """
        pass
|
divio/django | refs/heads/master | tests/schema/tests.py | 14 | import datetime
import itertools
import unittest
from copy import copy
from django.db import (
DatabaseError, IntegrityError, OperationalError, connection,
)
from django.db.models import Model
from django.db.models.fields import (
AutoField, BigIntegerField, BinaryField, BooleanField, CharField,
DateField, DateTimeField, IntegerField, PositiveIntegerField, SlugField,
TextField, TimeField,
)
from django.db.models.fields.related import (
ForeignKey, ManyToManyField, OneToOneField,
)
from django.db.transaction import atomic
from django.test import TransactionTestCase, skipIfDBFeature
from .fields import (
CustomManyToManyField, InheritedManyToManyField, MediumBlobField,
)
from .models import (
Author, AuthorWithDefaultHeight, AuthorWithEvenLongerName, Book, BookWeak,
BookWithLongName, BookWithO2O, BookWithoutAuthor, BookWithSlug, IntegerPK,
Note, NoteRename, Tag, TagIndexed, TagM2MTest, TagUniqueRename, Thing,
UniqueTest, new_apps,
)
class SchemaTests(TransactionTestCase):
"""
Tests that the schema-alteration code works correctly.
Be aware that these tests are more liable than most to false results,
as sometimes the code to check if a test has worked is almost as complex
as the code it is testing.
"""
available_apps = []
models = [
Author, AuthorWithDefaultHeight, AuthorWithEvenLongerName, Book,
BookWeak, BookWithLongName, BookWithO2O, BookWithSlug, IntegerPK, Note,
Tag, TagIndexed, TagM2MTest, TagUniqueRename, Thing, UniqueTest,
]
# Utility functions
    def setUp(self):
        # local_models should contain test dependent model classes that will be
        # automatically removed from the app cache on test tear down.
        # Individual tests append their locally-defined models here.
        self.local_models = []
    def tearDown(self):
        # Delete any tables made for our models
        self.delete_tables()
        new_apps.clear_cache()
        for model in new_apps.get_models():
            model._meta._expire_cache()
        # Unregister models defined inside individual tests so they don't
        # leak into subsequent tests through the isolated app registry.
        if 'schema' in new_apps.all_models:
            for model in self.local_models:
                del new_apps.all_models['schema'][model._meta.model_name]
    def delete_tables(self):
        "Deletes all model tables for our models for a clean test environment"
        converter = connection.introspection.table_name_converter
        with connection.cursor() as cursor:
            # FK checks are disabled so tables can be dropped in any order.
            connection.disable_constraint_checking()
            table_names = connection.introspection.table_names(cursor)
            for model in itertools.chain(SchemaTests.models, self.local_models):
                # Remove any M2M tables first
                for field in model._meta.local_many_to_many:
                    # Each drop runs in its own atomic block so one failure
                    # doesn't poison the connection for the remaining drops.
                    with atomic():
                        tbl = converter(field.remote_field.through._meta.db_table)
                        if tbl in table_names:
                            cursor.execute(connection.schema_editor().sql_delete_table % {
                                "table": connection.ops.quote_name(tbl),
                            })
                            table_names.remove(tbl)
                # Then remove the main tables
                with atomic():
                    tbl = converter(model._meta.db_table)
                    if tbl in table_names:
                        cursor.execute(connection.schema_editor().sql_delete_table % {
                            "table": connection.ops.quote_name(tbl),
                        })
                        table_names.remove(tbl)
        connection.enable_constraint_checking()
def column_classes(self, model):
with connection.cursor() as cursor:
columns = {
d[0]: (connection.introspection.get_field_type(d[1], d), d)
for d in connection.introspection.get_table_description(
cursor,
model._meta.db_table,
)
}
# SQLite has a different format for field_type
for name, (type, desc) in columns.items():
if isinstance(type, tuple):
columns[name] = (type[0], desc)
# SQLite also doesn't error properly
if not columns:
raise DatabaseError("Table does not exist (empty pragma)")
return columns
def get_indexes(self, table):
"""
Get the indexes on the table using a new cursor.
"""
with connection.cursor() as cursor:
return connection.introspection.get_indexes(cursor, table)
def get_constraints(self, table):
"""
Get the constraints on a table using a new cursor.
"""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
# Tests
def test_creation_deletion(self):
"""
Tries creating a model's table, and then deleting it.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Check that it's there
list(Author.objects.all())
# Clean up that table
with connection.schema_editor() as editor:
editor.delete_model(Author)
# Check that it's gone
self.assertRaises(
DatabaseError,
lambda: list(Author.objects.all()),
)
    @unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
    def test_fk(self):
        "Tests that creating tables out of FK order, then repointing, works"
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Book)
            editor.create_model(Author)
            editor.create_model(Tag)
        # Check that initial tables are there
        list(Author.objects.all())
        list(Book.objects.all())
        # Make sure the FK constraint is present: inserting with a dangling
        # author_id must be rejected by the database.
        with self.assertRaises(IntegrityError):
            Book.objects.create(
                author_id=1,
                title="Much Ado About Foreign Keys",
                pub_date=datetime.datetime.now(),
            )
        # Repoint the FK constraint from Author to Tag
        old_field = Book._meta.get_field("author")
        new_field = ForeignKey(Tag)
        new_field.set_attributes_from_name("author")
        with connection.schema_editor() as editor:
            editor.alter_field(Book, old_field, new_field, strict=True)
        # Make sure the new FK constraint is present
        constraints = self.get_constraints(Book._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["author_id"] and details['foreign_key']:
                self.assertEqual(details['foreign_key'], ('schema_tag', 'id'))
                break
        else:
            self.fail("No FK constraint for author_id found")
    @unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
    def test_fk_db_constraint(self):
        "Tests that the db_constraint parameter is respected"
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
            editor.create_model(Author)
            editor.create_model(BookWeak)
        # Check that initial tables are there
        list(Author.objects.all())
        list(Tag.objects.all())
        list(BookWeak.objects.all())
        # Check that BookWeak doesn't have an FK constraint
        # (its author FK is declared with db_constraint=False)
        constraints = self.get_constraints(BookWeak._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["author_id"] and details['foreign_key']:
                self.fail("FK constraint for author_id found")
        # Make a db_constraint=False FK
        new_field = ForeignKey(Tag, db_constraint=False)
        new_field.set_attributes_from_name("tag")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Make sure no FK constraint is present
        constraints = self.get_constraints(Author._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["tag_id"] and details['foreign_key']:
                self.fail("FK constraint for tag_id found")
        # Alter to one with a constraint
        new_field2 = ForeignKey(Tag)
        new_field2.set_attributes_from_name("tag")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field, new_field2, strict=True)
        # Make sure the new FK constraint is present
        constraints = self.get_constraints(Author._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["tag_id"] and details['foreign_key']:
                self.assertEqual(details['foreign_key'], ('schema_tag', 'id'))
                break
        else:
            self.fail("No FK constraint for tag_id found")
        # Alter to one without a constraint again
        new_field2 = ForeignKey(Tag)
        new_field2.set_attributes_from_name("tag")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field2, new_field, strict=True)
        # Make sure no FK constraint is present
        constraints = self.get_constraints(Author._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["tag_id"] and details['foreign_key']:
                self.fail("FK constraint for tag_id found")
    def _test_m2m_db_constraint(self, M2MFieldClass):
        # Shared body for the test_m2m_db_constraint* variants: verifies
        # that db_constraint=False on an M2M field suppresses the FK
        # constraint on the auto-created through table.
        class LocalAuthorWithM2M(Model):
            name = CharField(max_length=255)

            class Meta:
                app_label = 'schema'
                apps = new_apps

        self.local_models = [LocalAuthorWithM2M]
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Tag)
            editor.create_model(LocalAuthorWithM2M)
        # Check that initial tables are there
        list(LocalAuthorWithM2M.objects.all())
        list(Tag.objects.all())
        # Make a db_constraint=False FK
        new_field = M2MFieldClass(Tag, related_name="authors", db_constraint=False)
        new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
        # Add the field
        with connection.schema_editor() as editor:
            editor.add_field(LocalAuthorWithM2M, new_field)
        # Make sure no FK constraint is present on the through table
        constraints = self.get_constraints(new_field.remote_field.through._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["tag_id"] and details['foreign_key']:
                self.fail("FK constraint for tag_id found")
    @unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
    def test_m2m_db_constraint(self):
        # Plain ManyToManyField variant of the shared db_constraint test.
        self._test_m2m_db_constraint(ManyToManyField)
    @unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
    def test_m2m_db_constraint_custom(self):
        # Custom M2M subclass variant of the shared db_constraint test.
        self._test_m2m_db_constraint(CustomManyToManyField)
    @unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
    def test_m2m_db_constraint_inherited(self):
        # Inherited M2M subclass variant of the shared db_constraint test.
        self._test_m2m_db_constraint(InheritedManyToManyField)
    def test_add_field(self):
        """
        Tests adding fields to models
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure there's no age field
        columns = self.column_classes(Author)
        self.assertNotIn("age", columns)
        # Add the new field
        new_field = IntegerField(null=True)
        new_field.set_attributes_from_name("age")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Ensure the field is right afterwards.
        # Index [1][6] is the null_ok flag of the introspection row.
        columns = self.column_classes(Author)
        self.assertEqual(columns['age'][0], "IntegerField")
        self.assertEqual(columns['age'][1][6], True)
    def test_add_field_temp_default(self):
        """
        Tests adding fields to models with a temporary default
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure there's no age field
        columns = self.column_classes(Author)
        self.assertNotIn("age", columns)
        # Add some rows of data so the NOT NULL column needs backfilling
        Author.objects.create(name="Andrew", height=30)
        Author.objects.create(name="Andrea")
        # Add a not-null field
        new_field = CharField(max_length=30, default="Godwin")
        new_field.set_attributes_from_name("surname")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Ensure the field is right afterwards ([1][6] is the null_ok flag;
        # Oracle-style backends treat '' as NULL, hence the feature check).
        columns = self.column_classes(Author)
        self.assertEqual(columns['surname'][0], "CharField")
        self.assertEqual(columns['surname'][1][6],
                         connection.features.interprets_empty_strings_as_nulls)
    def test_add_field_temp_default_boolean(self):
        """
        Tests adding fields to models with a temporary default where
        the default is False. (#21783)
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure there's no age field
        columns = self.column_classes(Author)
        self.assertNotIn("age", columns)
        # Add some rows of data so the NOT NULL column needs backfilling
        Author.objects.create(name="Andrew", height=30)
        Author.objects.create(name="Andrea")
        # Add a not-null field with a falsy default
        new_field = BooleanField(default=False)
        new_field.set_attributes_from_name("awesome")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Ensure the field is right afterwards
        columns = self.column_classes(Author)
        # BooleanField are stored as TINYINT(1) on MySQL.
        field_type = columns['awesome'][0]
        self.assertEqual(field_type, connection.features.introspected_boolean_field_type(new_field, created_separately=True))
    def test_add_field_default_transform(self):
        """
        Tests adding fields to models with a default that is not directly
        valid in the database (#22581)
        """
        class TestTransformField(IntegerField):
            # Weird field that saves the count of items in its value
            def get_default(self):
                return self.default

            def get_prep_value(self, value):
                # The dict default {1: 2} becomes len({1: 2}) == 1 in the DB.
                if value is None:
                    return 0
                return len(value)

        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Add some rows of data
        Author.objects.create(name="Andrew", height=30)
        Author.objects.create(name="Andrea")
        # Add the field with a default it needs to cast (to string in this case)
        new_field = TestTransformField(default={1: 2})
        new_field.set_attributes_from_name("thing")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Ensure the field is there
        columns = self.column_classes(Author)
        field_type, field_info = columns['thing']
        self.assertEqual(field_type, 'IntegerField')
        # Make sure the values were transformed correctly: both pre-existing
        # rows were backfilled with the prepped default (len == 1).
        self.assertEqual(Author.objects.extra(where=["thing = 1"]).count(), 2)
    def test_add_field_binary(self):
        """
        Tests binary fields get a sane default (#22851)
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Add the new field
        new_field = BinaryField(blank=True)
        new_field.set_attributes_from_name("bits")
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        # Ensure the field is right afterwards
        columns = self.column_classes(Author)
        # MySQL annoyingly uses the same backend, so it'll come back as one of
        # these two types.
        self.assertIn(columns['bits'][0], ("BinaryField", "TextField"))
    @unittest.skipUnless(connection.vendor == 'mysql', "MySQL specific")
    def test_add_binaryfield_mediumblob(self):
        """
        Test adding a custom-sized binary field on MySQL (#24846).
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Add the new field with default
        new_field = MediumBlobField(blank=True, default=b'123')
        new_field.set_attributes_from_name('bits')
        with connection.schema_editor() as editor:
            editor.add_field(Author, new_field)
        columns = self.column_classes(Author)
        # Introspection treats BLOBs as TextFields
        self.assertEqual(columns['bits'][0], "TextField")
    def test_alter(self):
        """
        Tests simple altering of fields
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure the field is right to begin with ([1][6] is the null_ok flag)
        columns = self.column_classes(Author)
        self.assertEqual(columns['name'][0], "CharField")
        self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls))
        # Alter the name field to a TextField
        old_field = Author._meta.get_field("name")
        new_field = TextField(null=True)
        new_field.set_attributes_from_name("name")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        # Ensure the field is right afterwards
        columns = self.column_classes(Author)
        self.assertEqual(columns['name'][0], "TextField")
        self.assertEqual(columns['name'][1][6], True)
        # Change nullability again
        new_field2 = TextField(null=False)
        new_field2.set_attributes_from_name("name")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, new_field, new_field2, strict=True)
        # Ensure the field is right afterwards
        columns = self.column_classes(Author)
        self.assertEqual(columns['name'][0], "TextField")
        self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls))
    def test_alter_text_field(self):
        # Regression for "BLOB/TEXT column 'info' can't have a default value")
        # on MySQL.
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        # Altering to another TextField must not emit a DEFAULT clause.
        old_field = Note._meta.get_field("info")
        new_field = TextField(blank=True)
        new_field.set_attributes_from_name("info")
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
    def test_alter_text_field_to_date_field(self):
        """
        #25002 - Test conversion of text field to date field.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        # Seed a row whose text value is parseable as a date.
        Note.objects.create(info='1988-05-05')
        old_field = Note._meta.get_field('info')
        new_field = DateField(blank=True)
        new_field.set_attributes_from_name('info')
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
        # Make sure the field isn't nullable ([1][6] is the null_ok flag)
        columns = self.column_classes(Note)
        self.assertFalse(columns['info'][1][6])
    def test_alter_text_field_to_datetime_field(self):
        """
        #25002 - Test conversion of text field to datetime field.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        # Seed a row whose text value is parseable as a datetime.
        Note.objects.create(info='1988-05-05 3:16:17.4567')
        old_field = Note._meta.get_field('info')
        new_field = DateTimeField(blank=True)
        new_field.set_attributes_from_name('info')
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
        # Make sure the field isn't nullable ([1][6] is the null_ok flag)
        columns = self.column_classes(Note)
        self.assertFalse(columns['info'][1][6])
    def test_alter_text_field_to_time_field(self):
        """
        #25002 - Test conversion of text field to time field.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        # Seed a row whose text value is parseable as a time.
        Note.objects.create(info='3:16:17.4567')
        old_field = Note._meta.get_field('info')
        new_field = TimeField(blank=True)
        new_field.set_attributes_from_name('info')
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
        # Make sure the field isn't nullable ([1][6] is the null_ok flag)
        columns = self.column_classes(Note)
        self.assertFalse(columns['info'][1][6])
    @skipIfDBFeature('interprets_empty_strings_as_nulls')
    def test_alter_textual_field_keep_null_status(self):
        """
        Changing a field type shouldn't affect the not null status.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        # NOT NULL is enforced before the alteration...
        with self.assertRaises(IntegrityError):
            Note.objects.create(info=None)
        old_field = Note._meta.get_field("info")
        new_field = CharField(max_length=50)
        new_field.set_attributes_from_name("info")
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
        # ...and must still be enforced after the TextField -> CharField change.
        with self.assertRaises(IntegrityError):
            Note.objects.create(info=None)
    def test_alter_numeric_field_keep_null_status(self):
        """
        Changing a field type shouldn't affect the not null status.
        """
        with connection.schema_editor() as editor:
            editor.create_model(UniqueTest)
        # NOT NULL is enforced before the alteration...
        with self.assertRaises(IntegrityError):
            UniqueTest.objects.create(year=None, slug='aaa')
        old_field = UniqueTest._meta.get_field("year")
        new_field = BigIntegerField()
        new_field.set_attributes_from_name("year")
        with connection.schema_editor() as editor:
            editor.alter_field(UniqueTest, old_field, new_field, strict=True)
        # ...and must still be enforced after Integer -> BigInteger.
        with self.assertRaises(IntegrityError):
            UniqueTest.objects.create(year=None, slug='bbb')
    def test_alter_null_to_not_null(self):
        """
        #23609 - Tests handling of default values when altering from NULL to NOT NULL.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure the field is right to begin with ([1][6] is the null_ok flag)
        columns = self.column_classes(Author)
        self.assertTrue(columns['height'][1][6])
        # Create some test data
        Author.objects.create(name='Not null author', height=12)
        Author.objects.create(name='Null author')
        # Verify null value
        self.assertEqual(Author.objects.get(name='Not null author').height, 12)
        self.assertIsNone(Author.objects.get(name='Null author').height)
        # Alter the height field to NOT NULL with default
        old_field = Author._meta.get_field("height")
        new_field = PositiveIntegerField(default=42)
        new_field.set_attributes_from_name("height")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field)
        # Ensure the field is right afterwards
        columns = self.column_classes(Author)
        self.assertFalse(columns['height'][1][6])
        # Existing non-null values are untouched; NULLs are backfilled with
        # the default.
        self.assertEqual(Author.objects.get(name='Not null author').height, 12)
        self.assertEqual(Author.objects.get(name='Null author').height, 42)
    def test_alter_charfield_to_null(self):
        """
        #24307 - Should skip an alter statement on databases with
        interprets_empty_strings_as_null when changing a CharField to null.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Change the CharField to null; only passes if no invalid SQL is
        # emitted (no assertion needed beyond not raising).
        old_field = Author._meta.get_field('name')
        new_field = copy(old_field)
        new_field.null = True
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field)
    def test_alter_textfield_to_null(self):
        """
        #24307 - Should skip an alter statement on databases with
        interprets_empty_strings_as_null when changing a TextField to null.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        # Change the TextField to null; only passes if no invalid SQL is
        # emitted (no assertion needed beyond not raising).
        old_field = Note._meta.get_field('info')
        new_field = copy(old_field)
        new_field.null = True
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field)
    @unittest.skipUnless(connection.features.supports_combined_alters, "No combined ALTER support")
    def test_alter_null_to_not_null_keeping_default(self):
        """
        #23738 - Can change a nullable field with default to non-nullable
        with the same default.
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(AuthorWithDefaultHeight)
        # Ensure the field is right to begin with ([1][6] is the null_ok flag)
        columns = self.column_classes(AuthorWithDefaultHeight)
        self.assertTrue(columns['height'][1][6])
        # Alter the height field to NOT NULL keeping the previous default
        old_field = AuthorWithDefaultHeight._meta.get_field("height")
        new_field = PositiveIntegerField(default=42)
        new_field.set_attributes_from_name("height")
        with connection.schema_editor() as editor:
            editor.alter_field(AuthorWithDefaultHeight, old_field, new_field)
        # Ensure the field is right afterwards
        columns = self.column_classes(AuthorWithDefaultHeight)
        self.assertFalse(columns['height'][1][6])
    @unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
    def test_alter_fk(self):
        """
        Tests altering of FKs
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        # Ensure the field is right to begin with
        columns = self.column_classes(Book)
        self.assertEqual(columns['author_id'][0], "IntegerField")
        # Make sure the FK constraint is present
        constraints = self.get_constraints(Book._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["author_id"] and details['foreign_key']:
                self.assertEqual(details['foreign_key'], ('schema_author', 'id'))
                break
        else:
            self.fail("No FK constraint for author_id found")
        # Alter the FK: same target, only editable changes, so the
        # constraint must survive the no-op-looking alteration.
        old_field = Book._meta.get_field("author")
        new_field = ForeignKey(Author, editable=False)
        new_field.set_attributes_from_name("author")
        with connection.schema_editor() as editor:
            editor.alter_field(Book, old_field, new_field, strict=True)
        # Ensure the field is right afterwards
        columns = self.column_classes(Book)
        self.assertEqual(columns['author_id'][0], "IntegerField")
        # Make sure the FK constraint is present
        constraints = self.get_constraints(Book._meta.db_table)
        for name, details in constraints.items():
            if details['columns'] == ["author_id"] and details['foreign_key']:
                self.assertEqual(details['foreign_key'], ('schema_author', 'id'))
                break
        else:
            self.fail("No FK constraint for author_id found")
    @unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
    def test_alter_to_fk(self):
        """
        #24447 - Tests adding a FK constraint for an existing column
        """
        class LocalBook(Model):
            author = IntegerField()  # plain integer, becomes an FK below
            title = CharField(max_length=100, db_index=True)
            pub_date = DateTimeField()

            class Meta:
                app_label = 'schema'
                apps = new_apps

        self.local_models = [LocalBook]
        # Create the tables
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(LocalBook)
        # Ensure no FK constraint exists
        constraints = self.get_constraints(LocalBook._meta.db_table)
        for name, details in constraints.items():
            if details['foreign_key']:
                self.fail('Found an unexpected FK constraint to %s' % details['columns'])
        # Alter the IntegerField into a real ForeignKey
        old_field = LocalBook._meta.get_field("author")
        new_field = ForeignKey(Author)
        new_field.set_attributes_from_name("author")
        with connection.schema_editor() as editor:
            editor.alter_field(LocalBook, old_field, new_field, strict=True)
        constraints = self.get_constraints(LocalBook._meta.db_table)
        # Ensure FK constraint exists
        for name, details in constraints.items():
            if details['foreign_key'] and details['columns'] == ["author_id"]:
                self.assertEqual(details['foreign_key'], ('schema_author', 'id'))
                break
        else:
            self.fail("No FK constraint for author_id found")
    @unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
    def test_alter_o2o_to_fk(self):
        """
        #24163 - Tests altering of OneToOneField to ForeignKey
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(BookWithO2O)
        # Ensure the field is right to begin with
        columns = self.column_classes(BookWithO2O)
        self.assertEqual(columns['author_id'][0], "IntegerField")
        # Ensure the field is unique (O2O implies a UNIQUE constraint)
        author = Author.objects.create(name="Joe")
        BookWithO2O.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
        with self.assertRaises(IntegrityError):
            BookWithO2O.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
        BookWithO2O.objects.all().delete()
        # Make sure the FK constraint is present
        constraints = self.get_constraints(BookWithO2O._meta.db_table)
        author_is_fk = False
        for name, details in constraints.items():
            if details['columns'] == ['author_id']:
                if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'):
                    author_is_fk = True
        self.assertTrue(author_is_fk, "No FK constraint for author_id found")
        # Alter the OneToOneField to ForeignKey
        old_field = BookWithO2O._meta.get_field("author")
        new_field = ForeignKey(Author)
        new_field.set_attributes_from_name("author")
        with connection.schema_editor() as editor:
            editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
        # Ensure the field is right afterwards (Book shares the same table
        # layout, so introspect/insert through it from here on).
        columns = self.column_classes(Book)
        self.assertEqual(columns['author_id'][0], "IntegerField")
        # Ensure the field is not unique anymore
        Book.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
        Book.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
        # Make sure the FK constraint is still present
        constraints = self.get_constraints(Book._meta.db_table)
        author_is_fk = False
        for name, details in constraints.items():
            if details['columns'] == ['author_id']:
                if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'):
                    author_is_fk = True
        self.assertTrue(author_is_fk, "No FK constraint for author_id found")
    @unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
    def test_alter_fk_to_o2o(self):
        """
        #24163 - Tests altering of ForeignKey to OneToOneField
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
            editor.create_model(Book)
        # Ensure the field is right to begin with
        columns = self.column_classes(Book)
        self.assertEqual(columns['author_id'][0], "IntegerField")
        # Ensure the field is not unique (plain FK allows duplicates)
        author = Author.objects.create(name="Joe")
        Book.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
        Book.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
        Book.objects.all().delete()
        # Make sure the FK constraint is present
        constraints = self.get_constraints(Book._meta.db_table)
        author_is_fk = False
        for name, details in constraints.items():
            if details['columns'] == ['author_id']:
                if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'):
                    author_is_fk = True
        self.assertTrue(author_is_fk, "No FK constraint for author_id found")
        # Alter the ForeignKey to OneToOneField
        old_field = Book._meta.get_field("author")
        new_field = OneToOneField(Author)
        new_field.set_attributes_from_name("author")
        with connection.schema_editor() as editor:
            editor.alter_field(Book, old_field, new_field, strict=True)
        # Ensure the field is right afterwards (BookWithO2O shares the same
        # table layout, so introspect/insert through it from here on).
        columns = self.column_classes(BookWithO2O)
        self.assertEqual(columns['author_id'][0], "IntegerField")
        # Ensure the field is unique now
        BookWithO2O.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
        with self.assertRaises(IntegrityError):
            BookWithO2O.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
        # Make sure the FK constraint is present
        constraints = self.get_constraints(BookWithO2O._meta.db_table)
        author_is_fk = False
        for name, details in constraints.items():
            if details['columns'] == ['author_id']:
                if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'):
                    author_is_fk = True
        self.assertTrue(author_is_fk, "No FK constraint for author_id found")
    def test_alter_implicit_id_to_explicit(self):
        """
        Should be able to convert an implicit "id" field to an explicit "id"
        primary key field.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        old_field = Author._meta.get_field("id")
        new_field = IntegerField(primary_key=True)
        new_field.set_attributes_from_name("id")
        new_field.model = Author
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        # This will fail if DROP DEFAULT is inadvertently executed on this
        # field which drops the id sequence, at least on PostgreSQL.
        Author.objects.create(name='Foo')
    def test_alter_int_pk_to_autofield_pk(self):
        """
        Should be able to rename an IntegerField(primary_key=True) to
        AutoField(primary_key=True).
        """
        with connection.schema_editor() as editor:
            editor.create_model(IntegerPK)
        old_field = IntegerPK._meta.get_field('i')
        new_field = AutoField(primary_key=True)
        new_field.model = IntegerPK
        new_field.set_attributes_from_name('i')
        # Only passes if the alteration emits valid SQL (no assertion needed
        # beyond not raising).
        with connection.schema_editor() as editor:
            editor.alter_field(IntegerPK, old_field, new_field, strict=True)
    def test_alter_int_pk_to_int_unique(self):
        """
        Should be able to rename an IntegerField(primary_key=True) to
        IntegerField(unique=True).
        """
        # IntegerUnique mirrors the final state of the IntegerPK table
        # (shared db_table) so the unique constraint can be exercised below.
        class IntegerUnique(Model):
            i = IntegerField(unique=True)
            j = IntegerField(primary_key=True)

            class Meta:
                app_label = 'schema'
                apps = new_apps
                db_table = 'INTEGERPK'

        with connection.schema_editor() as editor:
            editor.create_model(IntegerPK)
        # model requires a new PK
        old_field = IntegerPK._meta.get_field('j')
        new_field = IntegerField(primary_key=True)
        new_field.model = IntegerPK
        new_field.set_attributes_from_name('j')
        with connection.schema_editor() as editor:
            editor.alter_field(IntegerPK, old_field, new_field, strict=True)
        old_field = IntegerPK._meta.get_field('i')
        new_field = IntegerField(unique=True)
        new_field.model = IntegerPK
        new_field.set_attributes_from_name('i')
        with connection.schema_editor() as editor:
            editor.alter_field(IntegerPK, old_field, new_field, strict=True)
        # Ensure unique constraint works.
        IntegerUnique.objects.create(i=1, j=1)
        with self.assertRaises(IntegrityError):
            IntegerUnique.objects.create(i=1, j=2)
    def test_rename(self):
        """
        Tests simple renaming of fields (column rename via alter_field).
        """
        # Create the table
        with connection.schema_editor() as editor:
            editor.create_model(Author)
        # Ensure the field is right to begin with
        columns = self.column_classes(Author)
        self.assertEqual(columns['name'][0], "CharField")
        self.assertNotIn("display_name", columns)
        # Alter the name field's name
        old_field = Author._meta.get_field("name")
        new_field = CharField(max_length=254)
        new_field.set_attributes_from_name("display_name")
        with connection.schema_editor() as editor:
            editor.alter_field(Author, old_field, new_field, strict=True)
        # Ensure the field is right afterwards
        columns = self.column_classes(Author)
        self.assertEqual(columns['display_name'][0], "CharField")
        self.assertNotIn("name", columns)
    @skipIfDBFeature('interprets_empty_strings_as_nulls')
    def test_rename_keep_null_status(self):
        """
        Renaming a field shouldn't affect the not null status.
        """
        with connection.schema_editor() as editor:
            editor.create_model(Note)
        # NOT NULL is enforced before the rename...
        with self.assertRaises(IntegrityError):
            Note.objects.create(info=None)
        old_field = Note._meta.get_field("info")
        new_field = TextField()
        new_field.set_attributes_from_name("detail_info")
        with connection.schema_editor() as editor:
            editor.alter_field(Note, old_field, new_field, strict=True)
        columns = self.column_classes(Note)
        self.assertEqual(columns['detail_info'][0], "TextField")
        self.assertNotIn("info", columns)
        # ...and must still be enforced after it (NoteRename maps the same
        # table with the renamed column).
        with self.assertRaises(IntegrityError):
            NoteRename.objects.create(detail_info=None)
def _test_m2m_create(self, M2MFieldClass):
    """
    Tests M2M fields on models during creation
    """
    class LocalBookWithM2M(Model):
        author = ForeignKey(Author)
        title = CharField(max_length=100, db_index=True)
        pub_date = DateTimeField()
        tags = M2MFieldClass("TagM2MTest", related_name="books")

        class Meta:
            app_label = 'schema'
            apps = new_apps
    # Track the model and its auto-created through model (assumed used by
    # the harness teardown — not visible here).
    self.local_models = [
        LocalBookWithM2M,
        LocalBookWithM2M._meta.get_field('tags').remote_field.through,
    ]
    # Create the tables
    with connection.schema_editor() as editor:
        editor.create_model(Author)
        editor.create_model(TagM2MTest)
        editor.create_model(LocalBookWithM2M)
    # Ensure there is now an m2m table there
    columns = self.column_classes(LocalBookWithM2M._meta.get_field("tags").remote_field.through)
    self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField")

def test_m2m_create(self):
    self._test_m2m_create(ManyToManyField)

def test_m2m_create_custom(self):
    self._test_m2m_create(CustomManyToManyField)

def test_m2m_create_inherited(self):
    self._test_m2m_create(InheritedManyToManyField)
def _test_m2m_create_through(self, M2MFieldClass):
    """
    Tests M2M fields on models during creation with through models
    """
    class LocalTagThrough(Model):
        book = ForeignKey("schema.LocalBookWithM2MThrough")
        tag = ForeignKey("schema.TagM2MTest")

        class Meta:
            app_label = 'schema'
            apps = new_apps

    class LocalBookWithM2MThrough(Model):
        tags = M2MFieldClass("TagM2MTest", related_name="books", through=LocalTagThrough)

        class Meta:
            app_label = 'schema'
            apps = new_apps
    self.local_models = [LocalTagThrough, LocalBookWithM2MThrough]
    # Create the tables
    with connection.schema_editor() as editor:
        editor.create_model(LocalTagThrough)
        editor.create_model(TagM2MTest)
        editor.create_model(LocalBookWithM2MThrough)
    # Ensure there is now an m2m table there: the explicit through table
    # has id + the two FK columns.
    columns = self.column_classes(LocalTagThrough)
    self.assertEqual(columns['book_id'][0], "IntegerField")
    self.assertEqual(columns['tag_id'][0], "IntegerField")

def test_m2m_create_through(self):
    self._test_m2m_create_through(ManyToManyField)

def test_m2m_create_through_custom(self):
    self._test_m2m_create_through(CustomManyToManyField)

def test_m2m_create_through_inherited(self):
    self._test_m2m_create_through(InheritedManyToManyField)
def _test_m2m(self, M2MFieldClass):
    """
    Tests adding/removing M2M fields on models
    """
    class LocalAuthorWithM2M(Model):
        name = CharField(max_length=255)

        class Meta:
            app_label = 'schema'
            apps = new_apps
    self.local_models = [LocalAuthorWithM2M]
    # Create the tables
    with connection.schema_editor() as editor:
        editor.create_model(LocalAuthorWithM2M)
        editor.create_model(TagM2MTest)
    # Create an M2M field after the model's tables already exist.
    new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors")
    new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
    self.local_models += [new_field.remote_field.through]
    # Ensure there's no m2m table there
    self.assertRaises(DatabaseError, self.column_classes, new_field.remote_field.through)
    # Add the field
    with connection.schema_editor() as editor:
        editor.add_field(LocalAuthorWithM2M, new_field)
    # Ensure there is now an m2m table there
    columns = self.column_classes(new_field.remote_field.through)
    self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField")
    # "Alter" the field. This should not rename the DB table to itself.
    with connection.schema_editor() as editor:
        editor.alter_field(LocalAuthorWithM2M, new_field, new_field)
    # Remove the M2M table again
    with connection.schema_editor() as editor:
        editor.remove_field(LocalAuthorWithM2M, new_field)
    # Ensure there's no m2m table there
    self.assertRaises(DatabaseError, self.column_classes, new_field.remote_field.through)

def test_m2m(self):
    self._test_m2m(ManyToManyField)

def test_m2m_custom(self):
    self._test_m2m(CustomManyToManyField)

def test_m2m_inherited(self):
    self._test_m2m(InheritedManyToManyField)
def _test_m2m_through_alter(self, M2MFieldClass):
    """
    Tests altering M2Ms with explicit through models (should no-op)
    """
    class LocalAuthorTag(Model):
        author = ForeignKey("schema.LocalAuthorWithM2MThrough")
        tag = ForeignKey("schema.TagM2MTest")

        class Meta:
            app_label = 'schema'
            apps = new_apps

    class LocalAuthorWithM2MThrough(Model):
        name = CharField(max_length=255)
        tags = M2MFieldClass("schema.TagM2MTest", related_name="authors", through=LocalAuthorTag)

        class Meta:
            app_label = 'schema'
            apps = new_apps
    self.local_models = [LocalAuthorTag, LocalAuthorWithM2MThrough]
    # Create the tables
    with connection.schema_editor() as editor:
        editor.create_model(LocalAuthorTag)
        editor.create_model(LocalAuthorWithM2MThrough)
        editor.create_model(TagM2MTest)
    # Ensure the m2m table is there (id + 2 FK columns).
    self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)
    # "Alter" the field's blankness. This should not actually do anything.
    old_field = LocalAuthorWithM2MThrough._meta.get_field("tags")
    new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors", through=LocalAuthorTag)
    new_field.contribute_to_class(LocalAuthorWithM2MThrough, "tags")
    with connection.schema_editor() as editor:
        editor.alter_field(LocalAuthorWithM2MThrough, old_field, new_field)
    # Ensure the m2m table is still there
    self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)

def test_m2m_through_alter(self):
    self._test_m2m_through_alter(ManyToManyField)

def test_m2m_through_alter_custom(self):
    self._test_m2m_through_alter(CustomManyToManyField)

def test_m2m_through_alter_inherited(self):
    self._test_m2m_through_alter(InheritedManyToManyField)
def _test_m2m_repoint(self, M2MFieldClass):
    """
    Tests repointing M2M fields
    """
    class LocalBookWithM2M(Model):
        author = ForeignKey(Author)
        title = CharField(max_length=100, db_index=True)
        pub_date = DateTimeField()
        tags = M2MFieldClass("TagM2MTest", related_name="books")

        class Meta:
            app_label = 'schema'
            apps = new_apps
    self.local_models = [
        LocalBookWithM2M,
        LocalBookWithM2M._meta.get_field('tags').remote_field.through,
    ]
    # Create the tables
    with connection.schema_editor() as editor:
        editor.create_model(Author)
        editor.create_model(LocalBookWithM2M)
        editor.create_model(TagM2MTest)
        editor.create_model(UniqueTest)
    # Ensure the M2M exists and points to TagM2MTest
    constraints = self.get_constraints(LocalBookWithM2M._meta.get_field("tags").remote_field.through._meta.db_table)
    if connection.features.supports_foreign_keys:
        # for/else: fail only if no matching FK constraint was found.
        for name, details in constraints.items():
            if details['columns'] == ["tagm2mtest_id"] and details['foreign_key']:
                self.assertEqual(details['foreign_key'], ('schema_tagm2mtest', 'id'))
                break
        else:
            self.fail("No FK constraint for tagm2mtest_id found")
    # Repoint the M2M at a different target model.
    old_field = LocalBookWithM2M._meta.get_field("tags")
    new_field = M2MFieldClass(UniqueTest)
    new_field.contribute_to_class(LocalBookWithM2M, "uniques")
    self.local_models += [new_field.remote_field.through]
    with connection.schema_editor() as editor:
        editor.alter_field(LocalBookWithM2M, old_field, new_field)
    # Ensure old M2M is gone
    self.assertRaises(DatabaseError, self.column_classes, LocalBookWithM2M._meta.get_field("tags").remote_field.through)
    # Ensure the new M2M exists and points to UniqueTest
    constraints = self.get_constraints(new_field.remote_field.through._meta.db_table)
    if connection.features.supports_foreign_keys:
        for name, details in constraints.items():
            if details['columns'] == ["uniquetest_id"] and details['foreign_key']:
                self.assertEqual(details['foreign_key'], ('schema_uniquetest', 'id'))
                break
        else:
            self.fail("No FK constraint for uniquetest_id found")

def test_m2m_repoint(self):
    self._test_m2m_repoint(ManyToManyField)

def test_m2m_repoint_custom(self):
    self._test_m2m_repoint(CustomManyToManyField)

def test_m2m_repoint_inherited(self):
    self._test_m2m_repoint(InheritedManyToManyField)
@unittest.skipUnless(connection.features.supports_column_check_constraints, "No check constraints")
def test_check_constraints(self):
    """
    Tests creating/deleting CHECK constraints
    """
    # Create the tables
    with connection.schema_editor() as editor:
        editor.create_model(Author)
    # Ensure the constraint exists (Author.height presumably declares a
    # check, e.g. a PositiveIntegerField — model defined elsewhere).
    constraints = self.get_constraints(Author._meta.db_table)
    for name, details in constraints.items():
        if details['columns'] == ["height"] and details['check']:
            break
    else:
        self.fail("No check constraint for height found")
    # Alter the column to remove it
    old_field = Author._meta.get_field("height")
    new_field = IntegerField(null=True, blank=True)
    new_field.set_attributes_from_name("height")
    with connection.schema_editor() as editor:
        editor.alter_field(Author, old_field, new_field, strict=True)
    constraints = self.get_constraints(Author._meta.db_table)
    for name, details in constraints.items():
        if details['columns'] == ["height"] and details['check']:
            self.fail("Check constraint for height found")
    # Alter the column to re-add it (back to the original field).
    new_field2 = Author._meta.get_field("height")
    with connection.schema_editor() as editor:
        editor.alter_field(Author, new_field, new_field2, strict=True)
    constraints = self.get_constraints(Author._meta.db_table)
    for name, details in constraints.items():
        if details['columns'] == ["height"] and details['check']:
            break
    else:
        self.fail("No check constraint for height found")
def test_unique(self):
    """
    Tests removing and adding unique constraints to a single column.
    """
    # Create the table
    with connection.schema_editor() as editor:
        editor.create_model(Tag)
    # Ensure the field is unique to begin with
    Tag.objects.create(title="foo", slug="foo")
    self.assertRaises(IntegrityError, Tag.objects.create, title="bar", slug="foo")
    Tag.objects.all().delete()
    # Alter the slug field to be non-unique
    old_field = Tag._meta.get_field("slug")
    new_field = SlugField(unique=False)
    new_field.set_attributes_from_name("slug")
    with connection.schema_editor() as editor:
        editor.alter_field(Tag, old_field, new_field, strict=True)
    # Ensure the field is no longer unique
    Tag.objects.create(title="foo", slug="foo")
    Tag.objects.create(title="bar", slug="foo")
    Tag.objects.all().delete()
    # Alter the slug field to be unique
    new_field2 = SlugField(unique=True)
    new_field2.set_attributes_from_name("slug")
    with connection.schema_editor() as editor:
        editor.alter_field(Tag, new_field, new_field2, strict=True)
    # Ensure the field is unique again
    Tag.objects.create(title="foo", slug="foo")
    self.assertRaises(IntegrityError, Tag.objects.create, title="bar", slug="foo")
    Tag.objects.all().delete()
    # Rename the field while keeping unique=True.
    new_field3 = SlugField(unique=True)
    new_field3.set_attributes_from_name("slug2")
    with connection.schema_editor() as editor:
        editor.alter_field(Tag, new_field2, new_field3, strict=True)
    # Ensure the field is still unique (TagUniqueRename presumably maps
    # to the same table with the renamed column — model defined elsewhere).
    TagUniqueRename.objects.create(title="foo", slug2="foo")
    self.assertRaises(IntegrityError, TagUniqueRename.objects.create, title="bar", slug2="foo")
    Tag.objects.all().delete()
def test_unique_together(self):
    """
    Tests removing and adding unique_together constraints on a model.
    """
    # Create the table
    with connection.schema_editor() as editor:
        editor.create_model(UniqueTest)
    # Ensure the fields are unique together to begin with: each value may
    # repeat on its own, but the (year, slug) pair may not.
    UniqueTest.objects.create(year=2012, slug="foo")
    UniqueTest.objects.create(year=2011, slug="foo")
    UniqueTest.objects.create(year=2011, slug="bar")
    self.assertRaises(IntegrityError, UniqueTest.objects.create, year=2012, slug="foo")
    UniqueTest.objects.all().delete()
    # Alter the model to its non-unique-together companion
    with connection.schema_editor() as editor:
        editor.alter_unique_together(UniqueTest, UniqueTest._meta.unique_together, [])
    # Ensure the fields are no longer unique
    UniqueTest.objects.create(year=2012, slug="foo")
    UniqueTest.objects.create(year=2012, slug="foo")
    UniqueTest.objects.all().delete()
    # Alter it back. (A stray, unused SlugField used to be built here;
    # alter_unique_together only needs the old/new option values.)
    with connection.schema_editor() as editor:
        editor.alter_unique_together(UniqueTest, [], UniqueTest._meta.unique_together)
    # Ensure the fields are unique again
    UniqueTest.objects.create(year=2012, slug="foo")
    self.assertRaises(IntegrityError, UniqueTest.objects.create, year=2012, slug="foo")
    UniqueTest.objects.all().delete()
def test_unique_together_with_fk(self):
    """
    Tests removing and adding unique_together constraints that include
    a foreign key.
    """
    # Create the table
    with connection.schema_editor() as editor:
        editor.create_model(Author)
        editor.create_model(Book)
    # Ensure the fields are unique to begin with
    self.assertEqual(Book._meta.unique_together, ())
    # Add the unique_together constraint
    with connection.schema_editor() as editor:
        editor.alter_unique_together(Book, [], [['author', 'title']])
    # Alter it back
    with connection.schema_editor() as editor:
        editor.alter_unique_together(Book, [['author', 'title']], [])

def test_unique_together_with_fk_with_existing_index(self):
    """
    Tests removing and adding unique_together constraints that include
    a foreign key, where the foreign key is added after the model is
    created.
    """
    # Create the tables
    with connection.schema_editor() as editor:
        editor.create_model(Author)
        editor.create_model(BookWithoutAuthor)
        new_field = ForeignKey(Author)
        new_field.set_attributes_from_name('author')
        editor.add_field(BookWithoutAuthor, new_field)
    # Ensure the fields aren't unique to begin with. (Book is used from
    # here on — presumably it shares BookWithoutAuthor's table; confirm
    # against the model definitions.)
    self.assertEqual(Book._meta.unique_together, ())
    # Add the unique_together constraint
    with connection.schema_editor() as editor:
        editor.alter_unique_together(Book, [], [['author', 'title']])
    # Alter it back
    with connection.schema_editor() as editor:
        editor.alter_unique_together(Book, [['author', 'title']], [])
def test_index_together(self):
    """
    Tests removing and adding index_together constraints on a model.
    """
    # Create the table
    with connection.schema_editor() as editor:
        editor.create_model(Tag)
    # Ensure there's no index on the slug/title columns first
    self.assertEqual(
        False,
        any(
            c["index"]
            for c in self.get_constraints("schema_tag").values()
            if c['columns'] == ["slug", "title"]
        ),
    )
    # Alter the model to add an index
    with connection.schema_editor() as editor:
        editor.alter_index_together(Tag, [], [("slug", "title")])
    # Ensure there is now an index
    self.assertEqual(
        True,
        any(
            c["index"]
            for c in self.get_constraints("schema_tag").values()
            if c['columns'] == ["slug", "title"]
        ),
    )
    # Alter it back. (A stray, unused SlugField used to be built here;
    # alter_index_together only needs the old/new option values.)
    with connection.schema_editor() as editor:
        editor.alter_index_together(Tag, [("slug", "title")], [])
    # Ensure there's no index
    self.assertEqual(
        False,
        any(
            c["index"]
            for c in self.get_constraints("schema_tag").values()
            if c['columns'] == ["slug", "title"]
        ),
    )
def test_index_together_with_fk(self):
    """
    Tests removing and adding index_together constraints that include
    a foreign key.
    """
    # Create the table
    with connection.schema_editor() as editor:
        editor.create_model(Author)
        editor.create_model(Book)
    # Ensure there is no index_together to begin with
    self.assertEqual(Book._meta.index_together, ())
    # Add the index_together constraint
    with connection.schema_editor() as editor:
        editor.alter_index_together(Book, [], [['author', 'title']])
    # Alter it back
    with connection.schema_editor() as editor:
        editor.alter_index_together(Book, [['author', 'title']], [])

def test_create_index_together(self):
    """
    Tests creating models with index_together already defined
    """
    # Create the table
    with connection.schema_editor() as editor:
        editor.create_model(TagIndexed)
    # Ensure there is an index
    self.assertEqual(
        True,
        any(
            c["index"]
            for c in self.get_constraints("schema_tagindexed").values()
            if c['columns'] == ["slug", "title"]
        ),
    )
def test_db_table(self):
    """
    Tests renaming of the table
    """
    # Create the table
    with connection.schema_editor() as editor:
        editor.create_model(Author)
    # Ensure the table is there to begin with
    columns = self.column_classes(Author)
    self.assertEqual(columns['name'][0], "CharField")
    # Alter the table
    with connection.schema_editor() as editor:
        editor.alter_db_table(Author, "schema_author", "schema_otherauthor")
    # Point the model at the renamed table so introspection hits it.
    Author._meta.db_table = "schema_otherauthor"
    columns = self.column_classes(Author)
    self.assertEqual(columns['name'][0], "CharField")
    # Alter the table again (rename back)
    with connection.schema_editor() as editor:
        editor.alter_db_table(Author, "schema_otherauthor", "schema_author")
    # Ensure the table is still there
    Author._meta.db_table = "schema_author"
    columns = self.column_classes(Author)
    self.assertEqual(columns['name'][0], "CharField")
def test_indexes(self):
    """
    Tests creation/altering of indexes
    """
    # Create the table
    with connection.schema_editor() as editor:
        editor.create_model(Author)
        editor.create_model(Book)
    # Ensure the table is there and has the right index
    self.assertIn(
        "title",
        self.get_indexes(Book._meta.db_table),
    )
    # Alter to remove the index (db_index True -> False)
    old_field = Book._meta.get_field("title")
    new_field = CharField(max_length=100, db_index=False)
    new_field.set_attributes_from_name("title")
    with connection.schema_editor() as editor:
        editor.alter_field(Book, old_field, new_field, strict=True)
    # Ensure the table is there and has no index
    self.assertNotIn(
        "title",
        self.get_indexes(Book._meta.db_table),
    )
    # Alter to re-add the index (back to the original model field)
    new_field2 = Book._meta.get_field("title")
    with connection.schema_editor() as editor:
        editor.alter_field(Book, new_field, new_field2, strict=True)
    # Ensure the table is there and has the index again
    self.assertIn(
        "title",
        self.get_indexes(Book._meta.db_table),
    )
    # Add a unique column, verify that creates an implicit index
    new_field3 = BookWithSlug._meta.get_field("slug")
    with connection.schema_editor() as editor:
        editor.add_field(Book, new_field3)
    self.assertIn(
        "slug",
        self.get_indexes(Book._meta.db_table),
    )
    # Remove the unique, check the index goes with it
    new_field4 = CharField(max_length=20, unique=False)
    new_field4.set_attributes_from_name("slug")
    with connection.schema_editor() as editor:
        editor.alter_field(BookWithSlug, new_field3, new_field4, strict=True)
    self.assertNotIn(
        "slug",
        self.get_indexes(Book._meta.db_table),
    )
def test_primary_key(self):
    """
    Tests altering of the primary key
    """
    # Create the table
    with connection.schema_editor() as editor:
        editor.create_model(Tag)
    # Ensure the table is there and has the right PK
    self.assertTrue(
        self.get_indexes(Tag._meta.db_table)['id']['primary_key'],
    )
    # Alter to change the PK: drop the auto 'id' column, then promote
    # 'slug' to primary key.
    id_field = Tag._meta.get_field("id")
    old_field = Tag._meta.get_field("slug")
    new_field = SlugField(primary_key=True)
    new_field.set_attributes_from_name("slug")
    new_field.model = Tag
    with connection.schema_editor() as editor:
        editor.remove_field(Tag, id_field)
        editor.alter_field(Tag, old_field, new_field)
    # Ensure the PK changed
    self.assertNotIn(
        'id',
        self.get_indexes(Tag._meta.db_table),
    )
    self.assertTrue(
        self.get_indexes(Tag._meta.db_table)['slug']['primary_key'],
    )
def test_context_manager_exit(self):
    """
    Ensures transaction is correctly closed when an error occurs
    inside a SchemaEditor context.
    """
    class SomeError(Exception):
        pass
    try:
        with connection.schema_editor():
            raise SomeError
    except SomeError:
        # The editor's __exit__ must have closed its atomic block even
        # though the body raised.
        self.assertFalse(connection.in_atomic_block)
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_foreign_key_index_long_names_regression(self):
    """
    Regression test for #21497.
    Only affects databases that supports foreign keys.
    """
    # Create the table
    with connection.schema_editor() as editor:
        editor.create_model(AuthorWithEvenLongerName)
        editor.create_model(BookWithLongName)
    # Find the properly shortened column name
    column_name = connection.ops.quote_name("author_foreign_key_with_really_long_field_name_id")
    column_name = column_name[1:-1].lower()  # unquote, and, for Oracle, un-upcase
    # Ensure the table is there and has an index on the column
    self.assertIn(
        column_name,
        self.get_indexes(BookWithLongName._meta.db_table),
    )

@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_add_foreign_key_long_names(self):
    """
    Regression test for #23009.
    Only affects databases that supports foreign keys.
    """
    # Create the initial tables
    with connection.schema_editor() as editor:
        editor.create_model(AuthorWithEvenLongerName)
        editor.create_model(BookWithLongName)
    # Add a second FK, this would fail due to long ref name before the fix
    new_field = ForeignKey(AuthorWithEvenLongerName, related_name="something")
    new_field.set_attributes_from_name("author_other_really_long_named_i_mean_so_long_fk")
    with connection.schema_editor() as editor:
        editor.add_field(BookWithLongName, new_field)
def test_creation_deletion_reserved_names(self):
    """
    Tries creating a model's table, and then deleting it when it has a
    SQL reserved name.
    """
    # Create the table
    with connection.schema_editor() as editor:
        try:
            editor.create_model(Thing)
        except OperationalError as e:
            # Unquoted reserved words raise OperationalError on most backends.
            self.fail("Errors when applying initial migration for a model "
                      "with a table named after a SQL reserved word: %s" % e)
    # Check that it's there
    list(Thing.objects.all())
    # Clean up that table
    with connection.schema_editor() as editor:
        editor.delete_model(Thing)
    # Check that it's gone
    self.assertRaises(
        DatabaseError,
        lambda: list(Thing.objects.all()),
    )
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_remove_constraints_capital_letters(self):
    """
    #23065 - Constraint names must be quoted if they contain capital letters.
    """
    def get_field(*args, **kwargs):
        # Build a field on column "CamelCase"; field_class defaults to
        # IntegerField but can be overridden (e.g. ForeignKey below).
        kwargs['db_column'] = "CamelCase"
        field = kwargs.pop('field_class', IntegerField)(*args, **kwargs)
        field.set_attributes_from_name("CamelCase")
        return field
    model = Author
    field = get_field()
    table = model._meta.db_table
    column = field.column
    # Pattern: manually create a constraint with a CamelCase name via raw
    # SQL, then alter_field must drop it — which only works if the editor
    # quotes the constraint name.
    with connection.schema_editor() as editor:
        editor.create_model(model)
        editor.add_field(model, field)
        editor.execute(
            editor.sql_create_index % {
                "table": editor.quote_name(table),
                "name": editor.quote_name("CamelCaseIndex"),
                "columns": editor.quote_name(column),
                "extra": "",
            }
        )
        editor.alter_field(model, get_field(db_index=True), field)
        editor.execute(
            editor.sql_create_unique % {
                "table": editor.quote_name(table),
                "name": editor.quote_name("CamelCaseUniqConstraint"),
                "columns": editor.quote_name(field.column),
            }
        )
        editor.alter_field(model, get_field(unique=True), field)
        editor.execute(
            editor.sql_create_fk % {
                "table": editor.quote_name(table),
                "name": editor.quote_name("CamelCaseFKConstraint"),
                "column": editor.quote_name(column),
                "to_table": editor.quote_name(table),
                "to_column": editor.quote_name(model._meta.auto_field.column),
            }
        )
        editor.alter_field(model, get_field(Author, field_class=ForeignKey), field)
def test_add_field_use_effective_default(self):
    """
    #23987 - effective_default() should be used as the field default when
    adding a new field.
    """
    # Create the table
    with connection.schema_editor() as editor:
        editor.create_model(Author)
    # Ensure there's no surname field
    columns = self.column_classes(Author)
    self.assertNotIn("surname", columns)
    # Create a row BEFORE adding the column, so it must be backfilled.
    Author.objects.create(name='Anonymous1')
    # Add new CharField to ensure default will be used from effective_default
    new_field = CharField(max_length=15, blank=True)
    new_field.set_attributes_from_name("surname")
    with connection.schema_editor() as editor:
        editor.add_field(Author, new_field)
    # Ensure field was added with the right default: '' for CharField,
    # except on backends that store empty strings as NULL (Oracle).
    with connection.cursor() as cursor:
        cursor.execute("SELECT surname FROM schema_author;")
        item = cursor.fetchall()[0]
        self.assertEqual(item[0], None if connection.features.interprets_empty_strings_as_nulls else '')
def test_add_field_default_dropped(self):
    """add_field() backfills with the default but doesn't persist it in the DB."""
    # Create the table
    with connection.schema_editor() as editor:
        editor.create_model(Author)
    # Ensure there's no surname field
    columns = self.column_classes(Author)
    self.assertNotIn("surname", columns)
    # Create a row BEFORE adding the column, so it must be backfilled.
    Author.objects.create(name='Anonymous1')
    # Add new CharField with a default
    new_field = CharField(max_length=15, blank=True, default='surname default')
    new_field.set_attributes_from_name("surname")
    with connection.schema_editor() as editor:
        editor.add_field(Author, new_field)
    # Ensure field was added with the right default
    with connection.cursor() as cursor:
        cursor.execute("SELECT surname FROM schema_author;")
        item = cursor.fetchall()[0]
        self.assertEqual(item[0], 'surname default')
        # And that the default is no longer set in the database.
        field = next(
            f for f in connection.introspection.get_table_description(cursor, "schema_author")
            if f.name == "surname"
        )
        if connection.features.can_introspect_default:
            self.assertIsNone(field.default)
def test_alter_field_default_dropped(self):
    """alter_field() applies the new default to existing rows but drops it from the schema."""
    # Create the table
    with connection.schema_editor() as editor:
        editor.create_model(Author)
    # Create a row
    Author.objects.create(name='Anonymous1')
    self.assertEqual(Author.objects.get().height, None)
    old_field = Author._meta.get_field('height')
    # The default from the new field is used in updating existing rows.
    new_field = IntegerField(blank=True, default=42)
    new_field.set_attributes_from_name('height')
    with connection.schema_editor() as editor:
        editor.alter_field(Author, old_field, new_field)
    self.assertEqual(Author.objects.get().height, 42)
    # The database default should be removed.
    with connection.cursor() as cursor:
        field = next(
            f for f in connection.introspection.get_table_description(cursor, "schema_author")
            if f.name == "height"
        )
    if connection.features.can_introspect_default:
        self.assertIsNone(field.default)
|
hujiajie/chromium-crosswalk | refs/heads/master | build/android/gyp/create_device_library_links.py | 18 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates symlinks to native libraries for an APK.
The native libraries should have previously been pushed to the device (in
options.target_dir). This script then creates links in an apk's lib/ folder to
those native libraries.
"""
import optparse
import os
import sys
from util import build_device
from util import build_utils
BUILD_ANDROID_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(BUILD_ANDROID_DIR)
import devil_chromium
from devil.android import apk_helper
from pylib import constants
def RunShellCommand(device, cmd):
  """Runs |cmd| on |device| and raises if the command produces any output.

  The callers here expect their shell commands to be silent, so any output
  is treated as an error.
  """
  lines = device.RunShellCommand(cmd, check_return=True)
  if not lines:
    return
  message = 'Unexpected output running command: %s\n%s' % (cmd, '\n'.join(lines))
  raise Exception(message)
def CreateSymlinkScript(options):
  """Writes a shell script that (re)links each native library.

  The script removes any stale link under $APK_LIBRARIES_DIR and then
  symlinks the stripped library from $STRIPPED_LIBRARIES_DIR; both
  variables are exported by TriggerSymlinkScript before sourcing it.
  """
  link_cmd = (
      'rm $APK_LIBRARIES_DIR/%(lib_basename)s > /dev/null 2>&1 \n'
      'ln -s $STRIPPED_LIBRARIES_DIR/%(lib_basename)s '
      '$APK_LIBRARIES_DIR/%(lib_basename)s \n'
  )

  pieces = ['#!/bin/sh \n']
  for lib in build_utils.ParseGypList(options.libraries):
    pieces.append(link_cmd % {'lib_basename': lib})

  with open(options.script_host_path, 'w') as scriptfile:
    scriptfile.write(''.join(pieces))
def TriggerSymlinkScript(options):
  """Pushes the generated symlink script to the device and runs it there."""
  device = build_device.GetBuildDeviceFromPath(
      options.build_device_configuration)
  if not device:
    # No configured build device: nothing to do (best-effort behavior).
    return

  apk_package = apk_helper.GetPackageName(options.apk)
  apk_libraries_dir = '/data/data/%s/lib' % apk_package

  device_dir = os.path.dirname(options.script_device_path)
  mkdir_cmd = ('if [ ! -e %(dir)s ]; then mkdir -p %(dir)s; fi ' %
               { 'dir': device_dir })
  RunShellCommand(device, mkdir_cmd)
  device.PushChangedFiles([(os.path.abspath(options.script_host_path),
                            options.script_device_path)])

  # Export the directories the script expects, then source it.
  trigger_cmd = (
      'APK_LIBRARIES_DIR=%(apk_libraries_dir)s; '
      'STRIPPED_LIBRARIES_DIR=%(target_dir)s; '
      '. %(script_device_path)s'
  ) % {
      'apk_libraries_dir': apk_libraries_dir,
      'target_dir': options.target_dir,
      'script_device_path': options.script_device_path
  }
  RunShellCommand(device, trigger_cmd)
def main(args):
  """Parses options, writes the symlink script, and triggers it on-device."""
  args = build_utils.ExpandFileArgs(args)
  parser = optparse.OptionParser()
  parser.add_option('--apk', help='Path to the apk.')
  parser.add_option('--script-host-path',
      help='Path on the host for the symlink script.')
  parser.add_option('--script-device-path',
      help='Path on the device to push the created symlink script.')
  parser.add_option('--libraries',
      help='List of native libraries.')
  parser.add_option('--target-dir',
      help='Device directory that contains the target libraries for symlinks.')
  parser.add_option('--stamp', help='Path to touch on success.')
  parser.add_option('--build-device-configuration',
      help='Path to build device configuration.')
  parser.add_option('--configuration-name',
      help='The build CONFIGURATION_NAME')
  parser.add_option('--output-directory',
      help='The output directory')
  options, _ = parser.parse_args(args)

  required_options = ['apk', 'libraries', 'script_host_path',
      'script_device_path', 'target_dir', 'configuration_name']
  build_utils.CheckOptions(options, parser, required=required_options)
  constants.SetBuildType(options.configuration_name)

  devil_chromium.Initialize(
      output_directory=os.path.abspath(options.output_directory))

  CreateSymlinkScript(options)
  TriggerSymlinkScript(options)

  # Touch the stamp file so the build system can track success.
  if options.stamp:
    build_utils.Touch(options.stamp)


if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
|
gandarez/wakatime | refs/heads/master | wakatime/packages/pygments_py2/pygments/console.py | 135 | # -*- coding: utf-8 -*-
"""
pygments.console
~~~~~~~~~~~~~~~~
Format colored console output.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
esc = "\x1b["

# Named ANSI SGR escape sequences; "" maps to "" so empty attrs are no-ops.
codes = {
    "": "",
    "reset": esc + "39;49;00m",
    "bold": esc + "01m",
    "faint": esc + "02m",
    "standout": esc + "03m",
    "underline": esc + "04m",
    "blink": esc + "05m",
    "overline": esc + "06m",
}

dark_colors = ["black", "darkred", "darkgreen", "brown", "darkblue",
               "purple", "teal", "lightgray"]
light_colors = ["darkgray", "red", "green", "yellow", "blue",
                "fuchsia", "turquoise", "white"]

# SGR foreground colors run from 30; each light variant is the dark code
# plus the bold attribute.
for sgr, (dark, light) in enumerate(zip(dark_colors, light_colors), 30):
    codes[dark] = esc + "%im" % sgr
    codes[light] = esc + "%i;01m" % sgr

# Aliases (including the historical misspelling "fuscia"); plain "white"
# is rendered as bold.
codes["darkteal"] = codes["turquoise"]
codes["darkyellow"] = codes["brown"]
codes["fuscia"] = codes["fuchsia"]
codes["white"] = codes["bold"]


def reset_color():
    """Return the escape sequence that resets colors and attributes."""
    return codes["reset"]


def colorize(color_key, text):
    """Wrap ``text`` in the escape code named ``color_key``, then reset."""
    return codes[color_key] + text + codes["reset"]


def ansiformat(attr, text):
    """
    Format ``text`` with a color and/or some attributes::

        color       normal color
        *color*     bold color
        _color_     underlined color
        +color+     blinking color
    """
    pieces = []
    # Strip each surrounding marker pair, emitting its attribute code.
    for marker, attr_name in (('+', 'blink'), ('*', 'bold'), ('_', 'underline')):
        if attr[:1] == marker and attr[-1:] == marker:
            pieces.append(codes[attr_name])
            attr = attr[1:-1]
    pieces.append(codes[attr])
    pieces.append(text)
    pieces.append(codes['reset'])
    return ''.join(pieces)
|
dorotan/pythontraining | refs/heads/master | test/test_modify_group.py | 1 | __author__ = 'dorota'
# -*- coding: utf-8 -*-
from model.group import Group
from random import randrange
def test_modify_group_name(app, db):
    # Precondition: at least one group must exist to modify.
    # NOTE(review): create() is called without a Group argument here, unlike
    # the commented-out variants below — confirm the fixture default.
    if app.group.count() == 0:
        app.group.create()
    old_groups = db.get_group_list()
    # Pick a random existing group to modify.
    index = randrange(len(old_groups))
    group = Group(name="test")
    # Keep the id of the group being replaced so id-based comparison works.
    group.id = old_groups[index].id
    app.group.modify_group_by_index(index, group)
    # Modification must not change the number of groups.
    assert len(old_groups) == app.group.count()
    new_groups = db.get_group_list()
    old_groups[index] = group
    # Compare order-independently (sorted by id).
    assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
#def test_modify_group_header(app):
# old_groups = app.group.get_group_list()
# if app.group.count() == 0:
# app.group.create(Group(name = "test"))
# app.group.modify_first_group((Group(header="New header")))
# new_groups = app.group.get_group_list()
# assert len(old_groups) == len(new_groups)
#def test_modify_group_footer(app):
# old_groups = app.group.get_group_list()
# if app.group.count() == 0:
# app.group.create(Group(name = "test"))
# app.group.modify_first_group((Group(footer="New footer")))
# new_groups = app.group.get_group_list()
# assert len(old_groups) == len(new_groups) |
gauribhoite/personfinder | refs/heads/master | env/site-packages/django/core/handlers/base.py | 92 | from __future__ import unicode_literals
import logging
import sys
import types
from django import http
from django.conf import settings
from django.core import signals, urlresolvers
from django.core.exceptions import (
MiddlewareNotUsed, PermissionDenied, SuspiciousOperation,
)
from django.db import connections, transaction
from django.http.multipartparser import MultiPartParserError
from django.utils import six
from django.utils.encoding import force_text
from django.utils.module_loading import import_string
from django.views import debug
# Module-level logger shared by all request handlers below.
logger = logging.getLogger('django.request')
class BaseHandler(object):
    """Shared request-handling core: loads the middleware stack and turns an
    HttpRequest into an HttpResponse by resolving and executing a view."""
    # Changes that are always applied to a response (in this order).
    response_fixes = [
        http.fix_location_header,
        http.conditional_content_removal,
    ]
    def __init__(self):
        # All middleware lists start as None; load_middleware() populates
        # them, and _request_middleware doubles as the "initialized" flag.
        self._request_middleware = None
        self._view_middleware = None
        self._template_response_middleware = None
        self._response_middleware = None
        self._exception_middleware = None
    def load_middleware(self):
        """
        Populate middleware lists from settings.MIDDLEWARE_CLASSES.
        Must be called after the environment is fixed (see __call__ in subclasses).
        """
        self._view_middleware = []
        self._template_response_middleware = []
        self._response_middleware = []
        self._exception_middleware = []
        request_middleware = []
        for middleware_path in settings.MIDDLEWARE_CLASSES:
            mw_class = import_string(middleware_path)
            try:
                mw_instance = mw_class()
            except MiddlewareNotUsed as exc:
                # A middleware opts out of the stack by raising
                # MiddlewareNotUsed from its constructor.
                if settings.DEBUG:
                    if six.text_type(exc):
                        logger.debug('MiddlewareNotUsed(%r): %s', middleware_path, exc)
                    else:
                        logger.debug('MiddlewareNotUsed: %r', middleware_path)
                continue
            # Request/view hooks run in declaration order; response-side
            # hooks are inserted at the front so they run in reverse order.
            if hasattr(mw_instance, 'process_request'):
                request_middleware.append(mw_instance.process_request)
            if hasattr(mw_instance, 'process_view'):
                self._view_middleware.append(mw_instance.process_view)
            if hasattr(mw_instance, 'process_template_response'):
                self._template_response_middleware.insert(0, mw_instance.process_template_response)
            if hasattr(mw_instance, 'process_response'):
                self._response_middleware.insert(0, mw_instance.process_response)
            if hasattr(mw_instance, 'process_exception'):
                self._exception_middleware.insert(0, mw_instance.process_exception)
        # We only assign to this when initialization is complete as it is used
        # as a flag for initialization being complete.
        self._request_middleware = request_middleware
    def make_view_atomic(self, view):
        """Wrap *view* in transaction.atomic() for each database connection
        with ATOMIC_REQUESTS enabled, unless the view opted out for that
        alias via the non-atomic-requests decorator."""
        non_atomic_requests = getattr(view, '_non_atomic_requests', set())
        for db in connections.all():
            if (db.settings_dict['ATOMIC_REQUESTS']
                    and db.alias not in non_atomic_requests):
                view = transaction.atomic(using=db.alias)(view)
        return view
    def get_exception_response(self, request, resolver, status_code):
        """Resolve and invoke the error handler for *status_code*, falling
        back to the uncaught-exception path if the handler itself fails."""
        try:
            callback, param_dict = resolver.resolve_error_handler(status_code)
            response = callback(request, **param_dict)
        except:
            signals.got_request_exception.send(sender=self.__class__, request=request)
            response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
        return response
    def get_response(self, request):
        "Returns an HttpResponse object for the given HttpRequest"
        # Setup default url resolver for this thread, this code is outside
        # the try/except so we don't get a spurious "unbound local
        # variable" exception in the event an exception is raised before
        # resolver is set
        urlconf = settings.ROOT_URLCONF
        urlresolvers.set_urlconf(urlconf)
        resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
        try:
            response = None
            # Apply request middleware; the first one returning a response
            # short-circuits view resolution entirely.
            for middleware_method in self._request_middleware:
                response = middleware_method(request)
                if response:
                    break
            if response is None:
                if hasattr(request, 'urlconf'):
                    # Reset url resolver with a custom urlconf.
                    urlconf = request.urlconf
                    urlresolvers.set_urlconf(urlconf)
                    resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
                resolver_match = resolver.resolve(request.path_info)
                callback, callback_args, callback_kwargs = resolver_match
                request.resolver_match = resolver_match
                # Apply view middleware
                for middleware_method in self._view_middleware:
                    response = middleware_method(request, callback, callback_args, callback_kwargs)
                    if response:
                        break
            if response is None:
                wrapped_callback = self.make_view_atomic(callback)
                try:
                    response = wrapped_callback(request, *callback_args, **callback_kwargs)
                except Exception as e:
                    # If the view raised an exception, run it through exception
                    # middleware, and if the exception middleware returns a
                    # response, use that. Otherwise, reraise the exception.
                    for middleware_method in self._exception_middleware:
                        response = middleware_method(request, e)
                        if response:
                            break
                    if response is None:
                        raise
            # Complain if the view returned None (a common error).
            if response is None:
                if isinstance(callback, types.FunctionType):    # FBV
                    view_name = callback.__name__
                else:                                           # CBV
                    view_name = callback.__class__.__name__ + '.__call__'
                raise ValueError("The view %s.%s didn't return an HttpResponse object. It returned None instead."
                                 % (callback.__module__, view_name))
            # If the response supports deferred rendering, apply template
            # response middleware and then render the response
            if hasattr(response, 'render') and callable(response.render):
                for middleware_method in self._template_response_middleware:
                    response = middleware_method(request, response)
                    # Complain if the template response middleware returned None (a common error).
                    if response is None:
                        raise ValueError(
                            "%s.process_template_response didn't return an "
                            "HttpResponse object. It returned None instead."
                            % (middleware_method.__self__.__class__.__name__))
                response = response.render()
        except http.Http404 as e:
            logger.warning('Not Found: %s', request.path,
                           extra={
                               'status_code': 404,
                               'request': request
                           })
            if settings.DEBUG:
                response = debug.technical_404_response(request, e)
            else:
                response = self.get_exception_response(request, resolver, 404)
        except PermissionDenied:
            logger.warning(
                'Forbidden (Permission denied): %s', request.path,
                extra={
                    'status_code': 403,
                    'request': request
                })
            response = self.get_exception_response(request, resolver, 403)
        except MultiPartParserError:
            logger.warning(
                'Bad request (Unable to parse request body): %s', request.path,
                extra={
                    'status_code': 400,
                    'request': request
                })
            response = self.get_exception_response(request, resolver, 400)
        except SuspiciousOperation as e:
            # The request logger receives events for any problematic request
            # The security logger receives events for all SuspiciousOperations
            security_logger = logging.getLogger('django.security.%s' %
                    e.__class__.__name__)
            security_logger.error(
                force_text(e),
                extra={
                    'status_code': 400,
                    'request': request
                })
            if settings.DEBUG:
                return debug.technical_500_response(request, *sys.exc_info(), status_code=400)
            response = self.get_exception_response(request, resolver, 400)
        except SystemExit:
            # Allow sys.exit() to actually exit. See tickets #1023 and #4701
            raise
        except:  # Handle everything else.
            # Get the exception info now, in case another exception is thrown later.
            signals.got_request_exception.send(sender=self.__class__, request=request)
            response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
        try:
            # Apply response middleware, regardless of the response
            for middleware_method in self._response_middleware:
                response = middleware_method(request, response)
                # Complain if the response middleware returned None (a common error).
                if response is None:
                    raise ValueError(
                        "%s.process_response didn't return an "
                        "HttpResponse object. It returned None instead."
                        % (middleware_method.__self__.__class__.__name__))
            response = self.apply_response_fixes(request, response)
        except:  # Any exception should be gathered and handled
            signals.got_request_exception.send(sender=self.__class__, request=request)
            response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
        # The request is closed together with the response so late-bound
        # resources (e.g. uploaded files) are released after streaming.
        response._closable_objects.append(request)
        return response
    def handle_uncaught_exception(self, request, resolver, exc_info):
        """
        Processing for any otherwise uncaught exceptions (those that will
        generate HTTP 500 responses). Can be overridden by subclasses who want
        customised 500 handling.
        Be *very* careful when overriding this because the error could be
        caused by anything, so assuming something like the database is always
        available would be an error.
        """
        if settings.DEBUG_PROPAGATE_EXCEPTIONS:
            raise
        logger.error('Internal Server Error: %s', request.path,
            exc_info=exc_info,
            extra={
                'status_code': 500,
                'request': request
            }
        )
        if settings.DEBUG:
            return debug.technical_500_response(request, *exc_info)
        # If Http500 handler is not installed, re-raise last exception
        if resolver.urlconf_module is None:
            six.reraise(*exc_info)
        # Return an HttpResponse that displays a friendly error message.
        callback, param_dict = resolver.resolve_error_handler(500)
        return callback(request, **param_dict)
    def apply_response_fixes(self, request, response):
        """
        Applies each of the functions in self.response_fixes to the request and
        response, modifying the response in the process. Returns the new
        response.
        """
        for func in self.response_fixes:
            response = func(request, response)
        return response
|
da1z/intellij-community | refs/heads/master | python/testData/create_tests/create_tst.py | 27 | class Spam:
    def eggs(self):
        # Intentionally empty stub (test fixture data).
        pass
    def eggs_and_ham(self):
        # Intentionally empty stub (test fixture data).
        pass
mbrukman/mapnik | refs/heads/master | scons/scons-local-2.3.6/SCons/Tool/packaging/__init__.py | 4 | """SCons.Tool.Packaging
SCons Packaging Tool.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/packaging/__init__.py rel_2.3.5:3347:d31d5a4e74b6 2015/07/31 14:36:10 bdbaddog"
import SCons.Environment
from SCons.Variables import *
from SCons.Errors import *
from SCons.Util import is_List, make_path_relative
from SCons.Warnings import warn, Warning
import os, imp
import SCons.Defaults
# Names of the available packager sub-modules; these are also the values
# accepted for PACKAGETYPE.
__all__ = [ 'src_targz', 'src_tarbz2', 'src_zip', 'tarbz2', 'targz', 'zip', 'rpm', 'msi', 'ipk' ]
#
# Utility and Builder function
#
def Tag(env, target, source, *more_tags, **kw_tags):
    """ Tag a file with the given arguments, just sets the accordingly named
    attribute on the file object.
    TODO: FIXME
    """
    # NOTE(review): when no separate target is given, the file nodes arrive
    # in `source` and there is no leading tag name; otherwise `source` is
    # treated as the first tag name -- confirm against SCons call sites.
    if not target:
        target=source
        first_tag=None
    else:
        first_tag=source
    if first_tag:
        # A bare leading tag has no value; store it with an empty string.
        kw_tags[first_tag[0]] = ''
    if len(kw_tags) == 0 and len(more_tags) == 0:
        raise UserError("No tags given.")
    # XXX: sanity checks
    for x in more_tags:
        kw_tags[x] = ''
    if not SCons.Util.is_List(target):
        target=[target]
    else:
        # hmm, sometimes the target list, is a list of a list
        # make sure it is flattened prior to processing.
        # TODO: perhaps some bug ?!?
        target=env.Flatten(target)
    for t in target:
        for (k,v) in kw_tags.items():
            # all file tags have to start with PACKAGING_, so we can later
            # differentiate between "normal" object attributes and the
            # packaging attributes. As the user should not be bothered with
            # that, the prefix will be added here if missing.
            #if not k.startswith('PACKAGING_'):
            if k[:10] != 'PACKAGING_':
                k='PACKAGING_'+k
            setattr(t, k, v)
def Package(env, target=None, source=None, **kw):
    """ Entry point for the package tool.

    NOTE: this module uses Python 2 `except X, e` syntax throughout and must
    run under SCons' Python 2 interpreter.
    """
    # check if we need to find the source files ourself
    if not source:
        source = env.FindInstalledFiles()
    if len(source)==0:
        raise UserError("No source for Package() given")
    # decide which types of packages shall be built. Can be defined through
    # four mechanisms: command line argument, keyword argument,
    # environment argument and default selection( zip or tar.gz ) in that
    # order.
    try: kw['PACKAGETYPE']=env['PACKAGETYPE']
    except KeyError: pass
    if not kw.get('PACKAGETYPE'):
        from SCons.Script import GetOption
        kw['PACKAGETYPE'] = GetOption('package_type')
    if kw['PACKAGETYPE'] == None:
        if 'Tar' in env['BUILDERS']:
            kw['PACKAGETYPE']='targz'
        elif 'Zip' in env['BUILDERS']:
            kw['PACKAGETYPE']='zip'
        else:
            raise UserError("No type for Package() given")
    PACKAGETYPE=kw['PACKAGETYPE']
    if not is_List(PACKAGETYPE):
        PACKAGETYPE=PACKAGETYPE.split(',')
    # load the needed packagers.
    def load_packager(type):
        # Each packager is a sibling sub-module of this package (see __all__).
        try:
            file,path,desc=imp.find_module(type, __path__)
            return imp.load_module(type, file, path, desc)
        except ImportError, e:
            raise EnvironmentError("packager %s not available: %s"%(type,str(e)))
    packagers=list(map(load_packager, PACKAGETYPE))
    # set up targets and the PACKAGEROOT
    try:
        # fill up the target list with a default target name until the PACKAGETYPE
        # list is of the same size as the target list.
        if not target: target = []
        size_diff = len(PACKAGETYPE)-len(target)
        default_name = "%(NAME)s-%(VERSION)s"
        if size_diff>0:
            default_target = default_name%kw
            target.extend( [default_target]*size_diff )
        if 'PACKAGEROOT' not in kw:
            kw['PACKAGEROOT'] = default_name%kw
    except KeyError, e:
        # Raised by the %-interpolation when NAME or VERSION is missing.
        raise SCons.Errors.UserError( "Missing Packagetag '%s'"%e.args[0] )
    # setup the source files
    source=env.arg2nodes(source, env.fs.Entry)
    # call the packager to setup the dependencies.
    targets=[]
    try:
        for packager in packagers:
            t=[target.pop(0)]
            t=packager.package(env,t,source, **kw)
            targets.extend(t)
        assert( len(target) == 0 )
    except KeyError, e:
        raise SCons.Errors.UserError( "Missing Packagetag '%s' for %s packager"\
                                      % (e.args[0],packager.__name__) )
    except TypeError, e:
        # this exception means that a needed argument for the packager is
        # missing. As our packagers get their "tags" as named function
        # arguments we need to find out which one is missing.
        from inspect import getargspec
        args,varargs,varkw,defaults=getargspec(packager.package)
        if defaults!=None:
            args=args[:-len(defaults)] # throw away arguments with default values
        args.remove('env')
        args.remove('target')
        args.remove('source')
        # now remove any args for which we have a value in kw.
        args=[x for x in args if x not in kw]
        if len(args)==0:
            raise # must be a different error, so reraise
        elif len(args)==1:
            raise SCons.Errors.UserError( "Missing Packagetag '%s' for %s packager"\
                                          % (args[0],packager.__name__) )
        else:
            raise SCons.Errors.UserError( "Missing Packagetags '%s' for %s packager"\
                                          % (", ".join(args),packager.__name__) )
    target=env.arg2nodes(target, env.fs.Entry)
    targets.extend(env.Alias( 'package', targets ))
    return targets
#
# SCons tool initialization functions
#
# Module-level guard: ensures --package-type is registered only once no
# matter how many times generate() is called.
added = None
def generate(env):
    """Install the Package and Tag builders (and the --package-type
    command line option) into *env*."""
    from SCons.Script import AddOption
    global added
    if not added:
        added = 1
        AddOption('--package-type',
                  dest='package_type',
                  default=None,
                  type="string",
                  action="store",
                  help='The type of package to create.')
    try:
        # Probe for existing builders; KeyError means they are missing.
        env['BUILDERS']['Package']
        env['BUILDERS']['Tag']
    except KeyError:
        env['BUILDERS']['Package'] = Package
        env['BUILDERS']['Tag'] = Tag
def exists(env):
    """Always report the packaging tool as available."""
    return 1
# XXX
def options(opts):
    """Add the PACKAGETYPE variable (one of __all__) to the Variables
    collection *opts*."""
    opts.AddVariables(
        EnumVariable( 'PACKAGETYPE',
                      'the type of package to create.',
                      None, allowed_values=list(map( str, __all__ )),
                      ignorecase=2
                    )
    )
#
# Internal utility functions
#
def copy_attr(f1, f2):
    """ copies the special packaging file attributes from f1 to f2.

    Only attributes whose name starts with 'PACKAGING_' are considered,
    and attributes that f2 already has are left untouched.
    """
    # A comprehension with str.startswith replaces the previous opaque
    # lambda + filter() + manual slice ('x[:10] == ...') combination.
    pattrs = [x for x in dir(f1)
              if x.startswith('PACKAGING_') and not hasattr(f2, x)]
    for attr in pattrs:
        setattr(f2, attr, getattr(f1, attr))
def putintopackageroot(target, source, env, pkgroot, honor_install_location=1):
    """ Uses the CopyAs builder to copy all source files to the directory given
    in pkgroot.
    If honor_install_location is set and the copied source file has an
    PACKAGING_INSTALL_LOCATION attribute, the PACKAGING_INSTALL_LOCATION is
    used as the new name of the source file under pkgroot.
    The source file will not be copied if it is already under the pkgroot
    directory.
    All attributes of the source file will be copied to the new file.
    """
    # make sure the packageroot is a Dir object.
    if SCons.Util.is_String(pkgroot): pkgroot=env.Dir(pkgroot)
    if not SCons.Util.is_List(source): source=[source]
    new_source = []
    for file in source:
        if SCons.Util.is_String(file): file = env.File(file)
        if file.is_under(pkgroot):
            new_source.append(file)
        else:
            if hasattr(file, 'PACKAGING_INSTALL_LOCATION') and\
               honor_install_location:
                new_name=make_path_relative(file.PACKAGING_INSTALL_LOCATION)
            else:
                new_name=make_path_relative(file.get_path())
            new_file=pkgroot.File(new_name)
            new_file=env.CopyAs(new_file, file)[0]
            # Preserve the PACKAGING_* tags on the copied node.
            copy_attr(file, new_file)
            new_source.append(new_file)
    return (target, new_source)
def stripinstallbuilder(target, source, env):
    """ strips the install builder action from the source list and stores
    the final installation location as the "PACKAGING_INSTALL_LOCATION" of
    the source of the source file. This effectively removes the final installed
    files from the source list while remembering the installation location.
    It also warns about files which have no install builder attached.
    """
    def has_no_install_location(file):
        # True when the node was not produced by Install()/InstallAs().
        return not (file.has_builder() and\
            hasattr(file.builder, 'name') and\
            (file.builder.name=="InstallBuilder" or\
             file.builder.name=="InstallAsBuilder"))
    if len(list(filter(has_no_install_location, source))):
        warn(Warning, "there are files to package which have no\
 InstallBuilder attached, this might lead to irreproducible packages")
    n_source=[]
    for s in source:
        if has_no_install_location(s):
            n_source.append(s)
        else:
            # Replace the installed node with its build source(s), tagged
            # with where the install step would have put it.
            for ss in s.sources:
                n_source.append(ss)
                copy_attr(s, ss)
                setattr(ss, 'PACKAGING_INSTALL_LOCATION', s.get_path())
    return (target, n_source)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
bmander/dancecontraption | refs/heads/master | django/contrib/localflavor/it/forms.py | 273 | """
IT-specific Form helpers
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode
from django.contrib.localflavor.it.util import ssn_check_digit, vat_number_check_digit
import re
class ITZipCodeField(RegexField):
    """A RegexField accepting exactly five digits (an Italian ZIP code)."""
    default_error_messages = {
        'invalid': _('Enter a valid zip code.'),
    }
    def __init__(self, *args, **kwargs):
        zip_pattern = r'^\d{5}$'
        super(ITZipCodeField, self).__init__(zip_pattern,
            max_length=None, min_length=None, *args, **kwargs)
class ITRegionSelect(Select):
    """
    A Select widget that uses a list of IT regions as its choices.
    """
    def __init__(self, attrs=None):
        # Local import keeps the choices table out of module import time.
        from it_region import REGION_CHOICES
        super(ITRegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class ITProvinceSelect(Select):
    """
    A Select widget that uses a list of IT provinces as its choices.
    """
    def __init__(self, attrs=None):
        # Local import keeps the choices table out of module import time.
        from it_province import PROVINCE_CHOICES
        super(ITProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
class ITSocialSecurityNumberField(RegexField):
    """
    A form field that validates Italian Social Security numbers (codice fiscale).
    For reference see http://www.agenziaentrate.it/ and search for
    'Informazioni sulla codificazione delle persone fisiche'.
    """
    default_error_messages = {
        'invalid': _(u'Enter a valid Social Security number.'),
    }
    def __init__(self, *args, **kwargs):
        super(ITSocialSecurityNumberField, self).__init__(r'^\w{3}\s*\w{3}\s*\w{5}\s*\w{5}$',
            max_length=None, min_length=None, *args, **kwargs)
    def clean(self, value):
        """Normalize (strip whitespace, uppercase) and verify the check
        digit; returns u'' for empty values."""
        value = super(ITSocialSecurityNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        # Raw string for the regex: '\s' in a plain literal is an invalid
        # escape sequence (DeprecationWarning, later a SyntaxError).
        value = re.sub(r'\s', u'', value).upper()
        try:
            check_digit = ssn_check_digit(value)
        except ValueError:
            raise ValidationError(self.error_messages['invalid'])
        # The 16th character of the normalized code must be the check digit.
        if not value[15] == check_digit:
            raise ValidationError(self.error_messages['invalid'])
        return value
class ITVatNumberField(Field):
    """
    A form field that validates Italian VAT numbers (partita IVA).
    """
    default_error_messages = {
        'invalid': _(u'Enter a valid VAT number.'),
    }
    def clean(self, value):
        """Validate the VAT number's check digit; returns the zero-padded
        11-digit value as unicode, or u'' for empty input."""
        value = super(ITVatNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        try:
            vat_number = int(value)
        except ValueError:
            raise ValidationError(self.error_messages['invalid'])
        # Normalize to exactly 11 digits (leading zeros are significant).
        vat_number = str(vat_number).zfill(11)
        # The last digit must match the check digit of the first ten.
        check_digit = vat_number_check_digit(vat_number[0:10])
        if not vat_number[10] == check_digit:
            raise ValidationError(self.error_messages['invalid'])
        return smart_unicode(vat_number)
|
sencha/chromium-spacewalk | refs/heads/master | mojo/spy/ui/__init__.py | 42 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from ui import tvcm_stub
|
matthaywardwebdesign/rethinkdb | refs/heads/next | external/v8_3.30.33.16/build/gyp/test/configurations/x64/gyptest-x86.py | 340 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies build of an executable in three different configurations.
"""
import TestGyp
import sys
# msvs output is always checked; the ninja generator is added only on
# Windows, where the build can actually run.
formats = ['msvs']
if sys.platform == 'win32':
  formats += ['ninja']
test = TestGyp.TestGyp(formats=formats)
test.run_gyp('configurations.gyp')
test.set_configuration('Debug|Win32')
test.build('configurations.gyp', test.ALL)
# Each built executable must report the machine type implied by its suffix
# in the dumpbin /headers output.
for machine, suffix in [('14C machine (x86)', ''),
                        ('8664 machine (x64)', '64')]:
  output = test.run_dumpbin(
      '/headers', test.built_file_path('configurations%s.exe' % suffix))
  if machine not in output:
    test.fail_test()
test.pass_test()
|
FireballDWF/cloud-custodian | refs/heads/master | tools/c7n_azure/c7n_azure/resources/data_factory.py | 3 | # Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from c7n_azure.resources.arm import ArmResourceManager
from c7n_azure.provider import resources
@resources.register('datafactory')
class DataFactory(ArmResourceManager):
    """Data Factory Resource
    :example:
    This policy will find all Data Factories with 10 or more failures in pipeline
    runs over the last 72 hours
    .. code-block:: yaml
        policies:
          - name: datafactory-dropping-messages
            resource: azure.datafactory
            filters:
              - type: metric
                metric: PipelineFailedRuns
                op: ge
                aggregation: total
                threshold: 10
                timeframe: 72
    """
    class resource_type(ArmResourceManager.resource_type):
        # Declarative wiring for the ARM resource manager: which SDK client
        # to build and which enumeration call lists the resources.
        doc_groups = ['Analytics']
        service = 'azure.mgmt.datafactory'
        client = 'DataFactoryManagementClient'
        enum_spec = ('factories', 'list', None)
        # Columns shown by default in `custodian report`.
        default_report_fields = (
            'name',
            'location',
            'resourceGroup'
        )
        resource_type = 'Microsoft.DataFactory/factories'
|
HardlyHaki/crits | refs/heads/master | crits/events/urls.py | 17 | from django.conf.urls import patterns
# URL routes for the events app; each view name resolves within
# crits.events.views, and named regex groups become view kwargs.
urlpatterns = patterns('crits.events.views',
    (r'^details/(?P<eventid>\w+)/$', 'view_event'),
    (r'^add/$', 'add_event'),
    (r'^search/$', 'event_search'),
    (r'^upload/sample/(?P<event_id>\w+)/$', 'upload_sample'),
    (r'^remove/(?P<_id>[\S ]+)$', 'remove_event'),
    (r'^set_title/(?P<event_id>\w+)/$', 'set_event_title'),
    (r'^set_type/(?P<event_id>\w+)/$', 'set_event_type'),
    (r'^get_event_types/$', 'get_event_type_dropdown'),
    (r'^list/$', 'events_listing'),
    (r'^list/(?P<option>\S+)/$', 'events_listing'),
)
|
cflee/voc | refs/heads/master | tests/builtins/test_max.py | 3 | from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class MaxTests(TranspileTestCase):
    # Each test hands a snippet to assertCodeExecution; the snippet itself
    # is runtime data executed by the transpiler harness.
    def test_args(self):
        """max() with positional arguments: error cases, mixed numeric
        types, and strings."""
        self.assertCodeExecution("""
            try:
                max()
            except TypeError:
                print("Threw an error as expected")
            # Single integer
            try:
                max(4)
            except TypeError:
                print("Threw an error as expected")
            # Multiple integers
            print(max(1, 5, 2, 4))
            # Mixed types
            print(max(2, 5.0, True))
            print(max(5.0, 2, True))
            print(max(True, 5.0, 2))
            # String (an iterable)
            print(max("Hello World"))
            # Multiple strings
            print(max("Hello World", "Goodbye World"))
            """)
    def test_iterable(self):
        """max() over a single iterable argument (and several)."""
        self.assertCodeExecution("""
            # Empty iterable
            try:
                max([])
            except ValueError:
                print("Threw an error as expected")
            # Single iterable argument
            print(max([1, 5, 2, 4]))
            # Multiple iterables
            print(max([1, 6], [1, 5, 2, 4]))
            """)
    def test_default(self):
        """The default= keyword is only legal with a single iterable."""
        self.assertCodeExecution("""
            # Empty iterable
            print(max([], default=42))
            # Single iterable argument
            print(max([1, 5, 2, 4], default=42))
            # Multiple iterables
            try:
                print(max([1, 6], [1, 5, 2, 4], default=42))
            except TypeError:
                print("Threw an error as expected")
            """)
    def test_key(self):
        """The key= callable, applied over args and over an iterable."""
        self.assertCodeExecution("""
            # key applied over args
            print(max(51, 42, 33, 24, key=lambda v: v % 10))
            # key applied over iterable
            print(max([51, 42, 33, 24], key=lambda v: v % 10))
            """)
class BuiltinMaxFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
    # Builtin(s) exercised by the generic BuiltinFunctionTestCase machinery.
    functions = ["max"]
|
usc-isi/nova | refs/heads/hpc-trunk | nova/exception.py | 1 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Nova base exception handling.
Includes decorator for re-raising Nova-type exceptions.
SHOULD include dedicated exception logging.
"""
import functools
import itertools
import webob.exc
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
# Module-level logger for exception wrapping/formatting diagnostics.
LOG = logging.getLogger(__name__)
class ConvertedException(webob.exc.WSGIHTTPException):
    """WSGI HTTP exception carrying an explicit code, title and
    explanation."""
    def __init__(self, code=0, title="", explanation=""):
        self.code = code
        self.title = title
        self.explanation = explanation
        super(ConvertedException, self).__init__()
class ProcessExecutionError(IOError):
    """Raised when an external command fails.

    Carries the command, its exit code and captured stdout/stderr; the
    formatted summary becomes the IOError message.
    """
    def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
                 description=None):
        # NOTE: attributes keep the caller's raw values (possibly None);
        # only the formatted message substitutes the defaults below.
        self.exit_code = exit_code
        self.stderr = stderr
        self.stdout = stdout
        self.cmd = cmd
        self.description = description
        if description is None:
            description = _('Unexpected error while running command.')
        if exit_code is None:
            exit_code = '-'
        # Interpolate from an explicit mapping instead of locals() (hacking
        # rule H501): renaming a local can no longer silently break the
        # message, and the template's inputs are visible at a glance.
        message = _('%(description)s\nCommand: %(cmd)s\n'
                    'Exit code: %(exit_code)s\nStdout: %(stdout)r\n'
                    'Stderr: %(stderr)r') % {'description': description,
                                             'cmd': cmd,
                                             'exit_code': exit_code,
                                             'stdout': stdout,
                                             'stderr': stderr}
        IOError.__init__(self, message)
def wrap_db_error(f):
    """Decorator that rewraps database-layer failures: UnicodeEncodeError
    becomes InvalidUnicodeParameter, anything else becomes DBError."""
    def _wrap(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except UnicodeEncodeError:
            raise InvalidUnicodeParameter()
        except Exception, e:
            LOG.exception(_('DB exception wrapped.'))
            raise DBError(e)
    # Python 2 way of preserving the wrapped function's name.
    _wrap.func_name = f.func_name
    return _wrap
def wrap_exception(notifier=None, publisher_id=None, event_type=None,
                   level=None):
    """This decorator wraps a method to catch any exceptions that may
    get thrown. It logs the exception as well as optionally sending
    it to the notification system.
    """
    # TODO(sandy): Find a way to import nova.notifier.api so we don't have
    # to pass it in as a parameter. Otherwise we get a cyclic import of
    # nova.notifier.api -> nova.utils -> nova.exception :(
    # TODO(johannes): Also, it would be nice to use
    # utils.save_and_reraise_exception() without an import loop
    def inner(f):
        def wrapped(*args, **kw):
            try:
                return f(*args, **kw)
            except Exception, e:
                # save_and_reraise_exception re-raises on context exit, so
                # the notification below never swallows the error.
                with excutils.save_and_reraise_exception():
                    if notifier:
                        payload = dict(args=args, exception=e)
                        payload.update(kw)
                        # Use a temp vars so we don't shadow
                        # our outer definitions.
                        temp_level = level
                        if not temp_level:
                            temp_level = notifier.ERROR
                        temp_type = event_type
                        if not temp_type:
                            # If f has multiple decorators, they must use
                            # functools.wraps to ensure the name is
                            # propagated.
                            temp_type = f.__name__
                        # NOTE(review): helper defined elsewhere in this
                        # module; extracts the request context from f's args.
                        context = get_context_from_function_and_args(f,
                                                                     args,
                                                                     kw)
                        notifier.notify(context, publisher_id, temp_type,
                                        temp_level, payload)
        return functools.wraps(f)(wrapped)
    return inner
class NovaException(Exception):
    """Base Nova Exception
    To correctly use this class, inherit from it and define
    a 'message' property. That message will get printf'd
    with the keyword arguments provided to the constructor.
    """
    message = _("An unknown exception occurred.")
    # Default HTTP-style code and headers; subclasses override as needed.
    code = 500
    headers = {}
    safe = False
    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs
        if 'code' not in self.kwargs:
            try:
                self.kwargs['code'] = self.code
            except AttributeError:
                pass
        if not message:
            try:
                message = self.message % kwargs
            except Exception as e:
                # kwargs doesn't match a variable in the message
                # log the issue and the kwargs
                LOG.exception(_('Exception in string format operation'))
                for name, value in kwargs.iteritems():
                    LOG.error("%s: %s" % (name, value))
                # at least get the core message out if something happened
                message = self.message
        super(NovaException, self).__init__(message)
class EC2APIError(NovaException):
    """EC2 API failure, optionally tagged with an EC2 error code."""
    message = _("Unknown")
    def __init__(self, message=None, code=None):
        self.msg = message
        self.code = code
        # Prefix the message with the code when one was supplied.
        outstr = '%s: %s' % (code, message) if code else '%s' % message
        super(EC2APIError, self).__init__(outstr)
class DBError(NovaException):
    """Wraps an implementation specific exception."""
    def __init__(self, inner_exception=None):
        # Keep the original exception available for callers needing details.
        self.inner_exception = inner_exception
        super(DBError, self).__init__(str(inner_exception))
# Message-only subclasses: NovaException.__init__ interpolates each
# template with the caller's keyword arguments.
class DeprecatedConfig(NovaException):
    message = _("Fatal call to deprecated config %(msg)s")
class DecryptionFailure(NovaException):
    message = _("Failed to decrypt text")
class VirtualInterfaceCreateException(NovaException):
    message = _("Virtual Interface creation failed")
class VirtualInterfaceMacAddressException(NovaException):
    # Trailing space added: the two adjacent literals previously
    # concatenated into "...virtual interfacewith unique mac address...".
    message = _("5 attempts to create virtual interface "
                "with unique mac address failed")
class GlanceConnectionFailed(NovaException):
    message = _("Connection to glance host %(host)s:%(port)s failed: "
                "%(reason)s")
# Authorization failures map to HTTP 403 via the `code` attribute.
class NotAuthorized(NovaException):
    message = _("Not authorized.")
    code = 403
class AdminRequired(NotAuthorized):
    message = _("User does not have admin privileges")
class PolicyNotAuthorized(NotAuthorized):
    message = _("Policy doesn't allow %(action)s to be performed.")
class ImageNotActive(NovaException):
    message = _("Image %(image_id)s is not active.")
class ImageNotAuthorized(NovaException):
    message = _("Not authorized for image %(image_id)s.")
# Bad-request family: Invalid and its subclasses map to HTTP 400.
class Invalid(NovaException):
    message = _("Unacceptable parameters.")
    code = 400
class InvalidSnapshot(Invalid):
    message = _("Invalid snapshot") + ": %(reason)s"
class VolumeUnattached(Invalid):
    message = _("Volume %(volume_id)s is not attached to anything")
class VolumeAttached(Invalid):
    message = _("Volume %(volume_id)s is still attached, detach volume first.")
class InvalidKeypair(Invalid):
    message = _("Keypair data is invalid")
class SfJsonEncodeFailure(NovaException):
    message = _("Failed to load data into json format")
class InvalidRequest(Invalid):
    message = _("The request is invalid.")
class InvalidInput(Invalid):
    message = _("Invalid input received") + ": %(reason)s"
class InvalidVolumeType(Invalid):
    message = _("Invalid volume type") + ": %(reason)s"
class InvalidVolume(Invalid):
    message = _("Invalid volume") + ": %(reason)s"
class InvalidMetadata(Invalid):
    message = _("Invalid metadata") + ": %(reason)s"
class InvalidPortRange(Invalid):
    message = _("Invalid port range %(from_port)s:%(to_port)s. %(msg)s")
class InvalidIpProtocol(Invalid):
    message = _("Invalid IP protocol %(protocol)s.")
class InvalidContentType(Invalid):
    message = _("Invalid content type %(content_type)s.")
class InvalidCidr(Invalid):
    message = _("Invalid cidr %(cidr)s.")
class InvalidUnicodeParameter(Invalid):
    message = _("Invalid Parameter: "
                "Unicode is not supported by the current database.")
# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
    message = _("%(err)s")
class InvalidAggregateAction(Invalid):
    message = _("Cannot perform action '%(action)s' on aggregate "
                "%(aggregate_id)s. Reason: %(reason)s.")
class InvalidGroup(Invalid):
    message = _("Group not valid. Reason: %(reason)s")
class InvalidSortKey(Invalid):
    message = _("Sort key supplied was not valid.")
class InstanceInvalidState(Invalid):
    message = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot "
                "%(method)s while the instance is in this state.")
class InstanceNotRunning(Invalid):
    message = _("Instance %(instance_id)s is not running.")
class InstanceNotInRescueMode(Invalid):
    message = _("Instance %(instance_id)s is not in rescue mode")
class InstanceNotReady(Invalid):
    message = _("Instance %(instance_id)s is not ready")
class InstanceSuspendFailure(Invalid):
    message = _("Failed to suspend instance") + ": %(reason)s"
class InstanceResumeFailure(Invalid):
    message = _("Failed to resume server") + ": %(reason)s."
class InstanceRebootFailure(Invalid):
    message = _("Failed to reboot instance") + ": %(reason)s"
class InstanceTerminationFailure(Invalid):
    message = _("Failed to terminate instance") + ": %(reason)s"
class ServiceUnavailable(Invalid):
    message = _("Service is unavailable at this time.")
class ComputeResourcesUnavailable(ServiceUnavailable):
    message = _("Insufficient compute resources.")
class ComputeServiceUnavailable(ServiceUnavailable):
    message = _("Compute service is unavailable at this time.")
class UnableToMigrateToSelf(Invalid):
    message = _("Unable to migrate instance (%(instance_id)s) "
                "to current host (%(host)s).")
class InvalidHypervisorType(Invalid):
    # Bug fix: original message read "The supplied hypervisor type of is
    # invalid." (stray "of").
    message = _("The supplied hypervisor type is invalid.")
class DestinationHypervisorTooOld(Invalid):
    message = _("The instance requires a newer hypervisor version than "
                "has been provided.")
class DestinationDiskExists(Invalid):
    message = _("The supplied disk path (%(path)s) already exists, "
                "it is expected not to exist.")
class InvalidDevicePath(Invalid):
    message = _("The supplied device path (%(path)s) is invalid.")
class DevicePathInUse(Invalid):
    message = _("The supplied device path (%(path)s) is in use.")
class DeviceIsBusy(Invalid):
    message = _("The supplied device (%(device)s) is busy.")
class InvalidCPUInfo(Invalid):
    message = _("Unacceptable CPU info") + ": %(reason)s"
class InvalidIpAddressError(Invalid):
    message = _("%(address)s is not a valid IP v4/6 address.")
class InvalidVLANTag(Invalid):
    message = _("VLAN tag is not appropriate for the port group "
                "%(bridge)s. Expected VLAN tag is %(tag)s, "
                "but the one associated with the port group is %(pgroup)s.")
class InvalidVLANPortGroup(Invalid):
    message = _("vSwitch which contains the port group %(bridge)s is "
                "not associated with the desired physical adapter. "
                "Expected vSwitch is %(expected)s, but the one associated "
                "is %(actual)s.")
class InvalidDiskFormat(Invalid):
    message = _("Disk format %(disk_format)s is not acceptable")
class ImageUnacceptable(Invalid):
    message = _("Image %(image_id)s is unacceptable: %(reason)s")
class InstanceUnacceptable(Invalid):
    message = _("Instance %(instance_id)s is unacceptable: %(reason)s")
class InvalidEc2Id(Invalid):
    message = _("Ec2 id %(ec2_id)s is unacceptable.")
class InvalidUUID(Invalid):
    message = _("Expected a uuid but received %(uuid)s.")
class ConstraintNotMet(NovaException):
    message = _("Constraint not met.")
    code = 412
# Missing-resource family: NotFound subclasses map to HTTP 404.
class NotFound(NovaException):
    message = _("Resource could not be found.")
    code = 404
class VirtDriverNotFound(NotFound):
    message = _("Could not find driver for connection_type %(name)s")
class PersistentVolumeFileNotFound(NotFound):
    message = _("Volume %(volume_id)s persistence file could not be found.")
class VolumeNotFound(NotFound):
    message = _("Volume %(volume_id)s could not be found.")
class SfAccountNotFound(NotFound):
    message = _("Unable to locate account %(account_name)s on "
                "Solidfire device")
class VolumeMetadataNotFound(NotFound):
    message = _("Volume %(volume_id)s has no metadata with "
                "key %(metadata_key)s.")
class VolumeTypeNotFound(NotFound):
    message = _("Volume type %(volume_type_id)s could not be found.")
class VolumeTypeNotFoundByName(VolumeTypeNotFound):
    message = _("Volume type with name %(volume_type_name)s "
                "could not be found.")
class VolumeTypeExtraSpecsNotFound(NotFound):
    message = _("Volume Type %(volume_type_id)s has no extra specs with "
                "key %(extra_specs_key)s.")
class SnapshotNotFound(NotFound):
    message = _("Snapshot %(snapshot_id)s could not be found.")
class VolumeIsBusy(NovaException):
    message = _("deleting volume %(volume_name)s that has snapshot")
class SnapshotIsBusy(NovaException):
    message = _("deleting snapshot %(snapshot_name)s that has "
                "dependent volumes")
class ISCSITargetNotFoundForVolume(NotFound):
    message = _("No target id found for volume %(volume_id)s.")
class ISCSITargetCreateFailed(NovaException):
    message = _("Failed to create iscsi target for volume %(volume_id)s.")
class ISCSITargetRemoveFailed(NovaException):
    message = _("Failed to remove iscsi target for volume %(volume_id)s.")
class DiskNotFound(NotFound):
    message = _("No disk at %(location)s")
class VolumeDriverNotFound(NotFound):
    message = _("Could not find a handler for %(driver_type)s volume.")
class InvalidImageRef(Invalid):
    message = _("Invalid image href %(image_href)s.")
class ImageNotFound(NotFound):
    message = _("Image %(image_id)s could not be found.")
class ImageNotFoundEC2(ImageNotFound):
    message = _("Image %(image_id)s could not be found. The nova EC2 API "
                "assigns image ids dynamically when they are listed for the "
                "first time. Have you listed image ids since adding this "
                "image?")
class ProjectNotFound(NotFound):
    message = _("Project %(project_id)s could not be found.")
class StorageRepositoryNotFound(NotFound):
    message = _("Cannot find SR to read/write VDI.")
class NetworkInUse(NovaException):
    message = _("Network %(network_id)s is still in use.")
class NetworkNotCreated(NovaException):
    message = _("%(req)s is required to create a network.")
class NetworkNotFound(NotFound):
    message = _("Network %(network_id)s could not be found.")
class NetworkNotFoundForBridge(NetworkNotFound):
    message = _("Network could not be found for bridge %(bridge)s")
class NetworkNotFoundForUUID(NetworkNotFound):
    message = _("Network could not be found for uuid %(uuid)s")
class NetworkNotFoundForCidr(NetworkNotFound):
    message = _("Network could not be found with cidr %(cidr)s.")
class NetworkNotFoundForInstance(NetworkNotFound):
    message = _("Network could not be found for instance %(instance_id)s.")
class NoNetworksFound(NotFound):
    message = _("No networks defined.")
class NetworkNotFoundForProject(NotFound):
    message = _("Either Network uuid %(network_uuid)s is not present or "
                "is not assigned to the project %(project_id)s.")
class NetworkHostNotSet(NovaException):
    message = _("Host is not set to the network (%(network_id)s).")
class DatastoreNotFound(NotFound):
    message = _("Could not find the datastore reference(s) which the VM uses.")
class PortInUse(NovaException):
    message = _("Port %(port_id)s is still in use.")
class PortNotFound(NotFound):
    message = _("Port %(port_id)s could not be found.")
class FixedIpNotFound(NotFound):
    message = _("No fixed IP associated with id %(id)s.")
class FixedIpNotFoundForAddress(FixedIpNotFound):
    message = _("Fixed ip not found for address %(address)s.")
class FixedIpNotFoundForInstance(FixedIpNotFound):
    message = _("Instance %(instance_uuid)s has zero fixed ips.")
class FixedIpNotFoundForNetworkHost(FixedIpNotFound):
    message = _("Network host %(host)s has zero fixed ips "
                "in network %(network_id)s.")
class FixedIpNotFoundForSpecificInstance(FixedIpNotFound):
    message = _("Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'.")
class FixedIpNotFoundForNetwork(FixedIpNotFound):
    message = _("Fixed IP address (%(address)s) does not exist in "
                "network (%(network_uuid)s).")
class FixedIpAlreadyInUse(NovaException):
    message = _("Fixed IP address %(address)s is already in use on instance "
                "%(instance_uuid)s.")
class FixedIpAssociatedWithMultipleInstances(NovaException):
    message = _("More than one instance is associated with fixed ip address "
                "'%(address)s'.")
class FixedIpInvalid(Invalid):
    message = _("Fixed IP address %(address)s is invalid.")
class NoMoreFixedIps(NovaException):
    message = _("Zero fixed ips available.")
class NoFixedIpsDefined(NotFound):
    message = _("Zero fixed ips could be found.")
#TODO(bcwaldon): EOL this exception!
# Base for "resource already exists" errors; keeps NovaException's default
# message and behavior.
class Duplicate(NovaException):
    pass
class FloatingIpExists(Duplicate):
    message = _("Floating ip %(address)s already exists.")
class FloatingIpNotFound(NotFound):
    message = _("Floating ip not found for id %(id)s.")
class FloatingIpDNSExists(Invalid):
    message = _("The DNS entry %(name)s already exists in domain %(domain)s.")
class FloatingIpNotFoundForAddress(FloatingIpNotFound):
    message = _("Floating ip not found for address %(address)s.")
class FloatingIpNotFoundForHost(FloatingIpNotFound):
    message = _("Floating ip not found for host %(host)s.")
class NoMoreFloatingIps(FloatingIpNotFound):
    message = _("Zero floating ips available.")
    # safe=True: presumably marks the message as safe to expose to API
    # users -- confirm against NovaException's handling of `safe`.
    safe = True
class FloatingIpAssociated(NovaException):
    message = _("Floating ip %(address)s is associated.")
class FloatingIpNotAssociated(NovaException):
    message = _("Floating ip %(address)s is not associated.")
class NoFloatingIpsDefined(NotFound):
    message = _("Zero floating ips exist.")
class NoFloatingIpInterface(NotFound):
    message = _("Interface %(interface)s not found.")
class KeypairNotFound(NotFound):
    message = _("Keypair %(name)s not found for user %(user_id)s")
class CertificateNotFound(NotFound):
    message = _("Certificate %(certificate_id)s not found.")
class ServiceNotFound(NotFound):
    message = _("Service %(service_id)s could not be found.")
class HostNotFound(NotFound):
    message = _("Host %(host)s could not be found.")
class ComputeHostNotFound(HostNotFound):
    message = _("Compute host %(host)s could not be found.")
class HostBinaryNotFound(NotFound):
    message = _("Could not find binary %(binary)s on host %(host)s.")
class InvalidReservationExpiration(Invalid):
    message = _("Invalid reservation expiration %(expire)s.")
class InvalidQuotaValue(Invalid):
    message = _("Change would make usage less than 0 for the following "
                "resources: %(unders)s")
class QuotaNotFound(NotFound):
    message = _("Quota could not be found")
class QuotaResourceUnknown(QuotaNotFound):
    message = _("Unknown quota resources %(unknown)s.")
class ProjectQuotaNotFound(QuotaNotFound):
    message = _("Quota for project %(project_id)s could not be found.")
class QuotaClassNotFound(QuotaNotFound):
    message = _("Quota class %(class_name)s could not be found.")
class QuotaUsageNotFound(QuotaNotFound):
    message = _("Quota usage for project %(project_id)s could not be found.")
class ReservationNotFound(QuotaNotFound):
    message = _("Quota reservation %(uuid)s could not be found.")
class OverQuota(NovaException):
    message = _("Quota exceeded for resources: %(overs)s")
class SecurityGroupNotFound(NotFound):
    message = _("Security group %(security_group_id)s not found.")
class SecurityGroupNotFoundForProject(SecurityGroupNotFound):
    message = _("Security group %(security_group_id)s not found "
                "for project %(project_id)s.")
class SecurityGroupNotFoundForRule(SecurityGroupNotFound):
    message = _("Security group with rule %(rule_id)s not found.")
class SecurityGroupExistsForInstance(Invalid):
    message = _("Security group %(security_group_id)s is already associated"
                " with the instance %(instance_id)s")
class SecurityGroupNotExistsForInstance(Invalid):
    message = _("Security group %(security_group_id)s is not associated with"
                " the instance %(instance_id)s")
class MigrationNotFound(NotFound):
    message = _("Migration %(migration_id)s could not be found.")
class MigrationNotFoundByStatus(MigrationNotFound):
    message = _("Migration not found for instance %(instance_id)s "
                "with status %(status)s.")
class ConsolePoolNotFound(NotFound):
    message = _("Console pool %(pool_id)s could not be found.")
class ConsolePoolNotFoundForHostType(NotFound):
    message = _("Console pool of type %(console_type)s "
                "for compute host %(compute_host)s "
                "on proxy host %(host)s not found.")
class ConsoleNotFound(NotFound):
    message = _("Console %(console_id)s could not be found.")
class ConsoleNotFoundForInstance(ConsoleNotFound):
    message = _("Console for instance %(instance_uuid)s could not be found.")
class ConsoleNotFoundInPoolForInstance(ConsoleNotFound):
    message = _("Console for instance %(instance_uuid)s "
                "in pool %(pool_id)s could not be found.")
class ConsoleTypeInvalid(Invalid):
    message = _("Invalid console type %(console_type)s ")
class InstanceTypeNotFound(NotFound):
    message = _("Instance type %(instance_type_id)s could not be found.")
class InstanceTypeNotFoundByName(InstanceTypeNotFound):
    message = _("Instance type with name %(instance_type_name)s "
                "could not be found.")
class FlavorNotFound(NotFound):
    message = _("Flavor %(flavor_id)s could not be found.")
class FlavorAccessNotFound(NotFound):
    # Bug fix: the template had bare "%(flavor_id)" / "%(project_id)" mapping
    # keys with no conversion type, which makes '%' formatting raise
    # ValueError the moment this exception is instantiated.
    message = _("Flavor access not found for %(flavor_id)s / "
                "%(project_id)s combination.")
class SchedulerHostFilterNotFound(NotFound):
    message = _("Scheduler Host Filter %(filter_name)s could not be found.")
class SchedulerCostFunctionNotFound(NotFound):
    message = _("Scheduler cost function %(cost_fn_str)s could"
                " not be found.")
class SchedulerWeightFlagNotFound(NotFound):
    message = _("Scheduler weight flag not found: %(flag_name)s")
class InstanceMetadataNotFound(NotFound):
    message = _("Instance %(instance_uuid)s has no metadata with "
                "key %(metadata_key)s.")
class InstanceSystemMetadataNotFound(NotFound):
    message = _("Instance %(instance_uuid)s has no system metadata with "
                "key %(metadata_key)s.")
class InstanceTypeExtraSpecsNotFound(NotFound):
    message = _("Instance Type %(instance_type_id)s has no extra specs with "
                "key %(extra_specs_key)s.")
class FileNotFound(NotFound):
    message = _("File %(file_path)s could not be found.")
class NoFilesFound(NotFound):
    message = _("Zero files could be found.")
class SwitchNotFoundForNetworkAdapter(NotFound):
    message = _("Virtual switch associated with the "
                "network adapter %(adapter)s not found.")
class NetworkAdapterNotFound(NotFound):
    message = _("Network adapter %(adapter)s could not be found.")
class ClassNotFound(NotFound):
    message = _("Class %(class_name)s could not be found: %(exception)s")
class NotAllowed(NovaException):
    message = _("Action not allowed.")
class ImageRotationNotAllowed(NovaException):
    message = _("Rotation is not allowed for snapshots")
class RotationRequiredForBackup(NovaException):
    message = _("Rotation param is required for backup image_type")
# Already-exists family (Duplicate subclasses).
class KeyPairExists(Duplicate):
    message = _("Key pair %(key_name)s already exists.")
class InstanceExists(Duplicate):
    message = _("Instance %(name)s already exists.")
class InstanceTypeExists(Duplicate):
    message = _("Instance Type with name %(name)s already exists.")
class InstanceTypeIdExists(Duplicate):
    message = _("Instance Type with ID %(flavor_id)s already exists.")
class FlavorAccessExists(Duplicate):
    # Bug fix: user-facing typo "alreay" -> "already".
    message = _("Flavor access already exists for flavor %(flavor_id)s "
                "and project %(project_id)s combination.")
class VolumeTypeExists(Duplicate):
    message = _("Volume Type %(name)s already exists.")
class InvalidSharedStorage(NovaException):
    message = _("%(path)s is not on shared storage: %(reason)s")
class InvalidLocalStorage(NovaException):
    message = _("%(path)s is not on local storage: %(reason)s")
class MigrationError(NovaException):
    message = _("Migration error") + ": %(reason)s"
class MalformedRequestBody(NovaException):
    message = _("Malformed message body: %(reason)s")
# NOTE(johannes): NotFound should only be used when a 404 error is
# appropriate to be returned
class ConfigNotFound(NovaException):
    message = _("Could not find config at %(path)s")
class PasteAppNotFound(NovaException):
    message = _("Could not load paste app '%(name)s' from %(path)s")
class CannotResizeToSameFlavor(NovaException):
    message = _("When resizing, instances must change flavor!")
class ImageTooLarge(NovaException):
    message = _("Image is larger than instance type allows")
class InstanceTypeMemoryTooSmall(NovaException):
    message = _("Instance type's memory is too small for requested image.")
class InstanceTypeDiskTooSmall(NovaException):
    message = _("Instance type's disk is too small for requested image.")
class InsufficientFreeMemory(NovaException):
    message = _("Insufficient free memory on compute node to start %(uuid)s.")
class CouldNotFetchMetrics(NovaException):
    message = _("Could not fetch bandwidth/cpu/disk metrics for this host.")
class NoValidHost(NovaException):
    message = _("No valid host was found. %(reason)s")
class WillNotSchedule(NovaException):
    message = _("Host %(host)s is not up or doesn't exist.")
# Quota family: maps to HTTP 413 and advertises a Retry-After header.
class QuotaError(NovaException):
    message = _("Quota exceeded") + ": code=%(code)s"
    code = 413
    headers = {'Retry-After': 0}
    safe = True
class TooManyInstances(QuotaError):
    message = _("Quota exceeded for %(overs)s: Requested %(req)s,"
                " but already used %(used)d of %(allowed)d %(resource)s")
class VolumeSizeTooLarge(QuotaError):
    message = _("Maximum volume size exceeded")
class VolumeLimitExceeded(QuotaError):
    message = _("Maximum number of volumes allowed (%(allowed)d) exceeded")
class FloatingIpLimitExceeded(QuotaError):
    message = _("Maximum number of floating ips exceeded")
class MetadataLimitExceeded(QuotaError):
    message = _("Maximum number of metadata items exceeds %(allowed)d")
class OnsetFileLimitExceeded(QuotaError):
    message = _("Personality file limit exceeded")
class OnsetFilePathLimitExceeded(QuotaError):
    message = _("Personality file path too long")
class OnsetFileContentLimitExceeded(QuotaError):
    message = _("Personality file content too long")
class KeypairLimitExceeded(QuotaError):
    message = _("Maximum number of key pairs exceeded")
class SecurityGroupLimitExceeded(QuotaError):
    message = _("Maximum number of security groups or rules exceeded")
class AggregateError(NovaException):
    message = _("Aggregate %(aggregate_id)s: action '%(action)s' "
                "caused an error: %(reason)s.")
class AggregateNotFound(NotFound):
    message = _("Aggregate %(aggregate_id)s could not be found.")
class AggregateNameExists(Duplicate):
    message = _("Aggregate %(aggregate_name)s already exists.")
class AggregateHostNotFound(NotFound):
    message = _("Aggregate %(aggregate_id)s has no host %(host)s.")
class AggregateMetadataNotFound(NotFound):
    message = _("Aggregate %(aggregate_id)s has no metadata with "
                "key %(metadata_key)s.")
class AggregateHostExists(Duplicate):
    message = _("Aggregate %(aggregate_id)s already has host %(host)s.")
class DuplicateSfVolumeNames(Duplicate):
    message = _("Detected more than one volume with name %(vol_name)s")
class VolumeTypeCreateFailed(NovaException):
    message = _("Cannot create volume_type with "
                "name %(name)s and specs %(extra_specs)s")
class VolumeBackendAPIException(NovaException):
    message = _("Bad or unexpected response from the storage volume "
                "backend API: %(data)s")
class NfsException(NovaException):
    message = _("Unknown NFS exception")
class NfsNoSharesMounted(NotFound):
    message = _("No mounted NFS shares found")
class NfsNoSuitableShareFound(NotFound):
    message = _("There is no share which can host %(volume_size)sG")
class InstanceTypeCreateFailed(NovaException):
    message = _("Unable to create instance type")
class InstancePasswordSetFailed(NovaException):
    message = _("Failed to set admin password on %(instance)s "
                "because %(reason)s")
    safe = True
class SolidFireAPIException(NovaException):
    message = _("Bad response from SolidFire API")
class SolidFireAPIDataException(SolidFireAPIException):
    message = _("Error in SolidFire API response: data=%(data)s")
class DuplicateVlan(Duplicate):
    message = _("Detected existing vlan with id %(vlan)d")
class InstanceNotFound(NotFound):
    message = _("Instance %(instance_id)s could not be found.")
class MarkerNotFound(NotFound):
    message = _("Marker %(marker)s could not be found.")
class InvalidInstanceIDMalformed(Invalid):
    message = _("Invalid id: %(val)s (expecting \"i-...\").")
class CouldNotFetchImage(NovaException):
    message = _("Could not fetch image %(image_id)s")
class TaskAlreadyRunning(NovaException):
    message = _("Task %(task_name)s is already running on host %(host)s")
class TaskNotRunning(NovaException):
    message = _("Task %(task_name)s is not running on host %(host)s")
class InstanceIsLocked(InstanceInvalidState):
    message = _("Instance %(instance_uuid)s is locked")
class ConfigDriveMountFailed(NovaException):
    message = _("Could not mount vfat config drive. %(operation)s failed. "
                "Error: %(error)s")
class ConfigDriveUnknownFormat(NovaException):
    message = _("Unknown config drive format %(format)s. Select one of "
                "iso9660 or vfat.")
class InstanceUserDataTooLarge(NovaException):
    message = _("User data too large. User data must be no larger than "
                "%(maxsize)s bytes once base64 encoded. Your data is "
                "%(length)d bytes")
class InstanceUserDataMalformed(NovaException):
    message = _("User data needs to be valid base 64.")
class UnexpectedTaskStateError(NovaException):
    message = _("unexpected task state: expecting %(expected)s but "
                "the actual state is %(actual)s")
class CryptoCAFileNotFound(FileNotFound):
    message = _("The CA file for %(project)s could not be found")
class CryptoCRLFileNotFound(FileNotFound):
    message = _("The CRL file for %(project)s could not be found")
def get_context_from_function_and_args(function, args, kwargs):
    """Find an arg of type RequestContext and return it.

       This is useful in a couple of decorators where we don't
       know much about the function we're wrapping.
    """
    # import here to avoid circularity:
    from nova import context
    candidates = itertools.chain(kwargs.values(), args)
    return next((candidate for candidate in candidates
                 if isinstance(candidate, context.RequestContext)),
                None)
|
ntt-sic/nova | refs/heads/master | nova/api/openstack/compute/ips.py | 12 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
import nova
from nova.api.openstack import common
from nova.api.openstack.compute.views import addresses as view_addresses
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.openstack.common.gettextutils import _
def make_network(elem):
    """Attach the per-network attributes/sub-elements to a template element.

    NOTE(review): the integer selectors presumably address the
    (network id, ip list) item tuple produced by the view builder --
    selector=1 picks the list of ips; confirm against xmlutil.get_items.
    """
    elem.set('id', 0)
    ip = xmlutil.SubTemplateElement(elem, 'ip', selector=1)
    ip.set('version')
    ip.set('addr')
network_nsmap = {None: xmlutil.XMLNS_V11}
class NetworkTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        """Build the XML serializer template for a single network."""
        item_selector = xmlutil.Selector(xmlutil.get_items, 0)
        network_elem = xmlutil.TemplateElement('network',
                                               selector=item_selector)
        make_network(network_elem)
        return xmlutil.MasterTemplate(network_elem, 1, nsmap=network_nsmap)
class AddressesTemplate(xmlutil.TemplateBuilder):
    def construct(self):
        """Build the XML serializer template for the whole addresses dict."""
        addresses_elem = xmlutil.TemplateElement('addresses',
                                                 selector='addresses')
        network_elem = xmlutil.SubTemplateElement(
            addresses_elem, 'network', selector=xmlutil.get_items)
        make_network(network_elem)
        return xmlutil.MasterTemplate(addresses_elem, 1, nsmap=network_nsmap)
class Controller(wsgi.Controller):
    """The servers addresses API controller for the OpenStack API."""
    _view_builder_class = view_addresses.ViewBuilder
    def __init__(self, **kwargs):
        super(Controller, self).__init__(**kwargs)
        self._compute_api = nova.compute.API()
    def _get_instance(self, context, server_id):
        # Translate a missing instance into an HTTP 404 for the API layer.
        try:
            return self._compute_api.get(context, server_id)
        except nova.exception.NotFound:
            raise exc.HTTPNotFound(explanation=_("Instance does not exist"))
    def create(self, req, server_id, body):
        # Addresses are read-only through this controller.
        raise exc.HTTPNotImplemented()
    def delete(self, req, server_id, id):
        # Addresses are read-only through this controller.
        raise exc.HTTPNotImplemented()
    @wsgi.serializers(xml=AddressesTemplate)
    def index(self, req, server_id):
        """List every network (and its ips) attached to the server."""
        ctxt = req.environ["nova.context"]
        inst = self._get_instance(ctxt, server_id)
        nets = common.get_networks_for_instance(ctxt, inst)
        return self._view_builder.index(nets)
    @wsgi.serializers(xml=NetworkTemplate)
    def show(self, req, server_id, id):
        """Show the addresses of one named network of the server."""
        ctxt = req.environ["nova.context"]
        inst = self._get_instance(ctxt, server_id)
        nets = common.get_networks_for_instance(ctxt, inst)
        if id not in nets:
            msg = _("Instance is not a member of specified network")
            raise exc.HTTPNotFound(explanation=msg)
        return self._view_builder.show(nets[id], id)
def create_resource():
    """Return a WSGI resource wrapping a fresh addresses Controller."""
    return wsgi.Resource(Controller())
|
jazcollins/models | refs/heads/master | object_detection/utils/metrics.py | 17 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for computing metrics like precision, recall, CorLoc and etc."""
from __future__ import division
import numpy as np
from six import moves
def compute_precision_recall(scores, labels, num_gt):
  """Compute precision and recall.

  Args:
    scores: A float numpy array representing detection score
    labels: A boolean numpy array representing true/false positive labels
    num_gt: Number of ground truth instances

  Raises:
    ValueError: if the input is not of the correct format

  Returns:
    precision: Fraction of positive instances over detected ones. This value is
      None if no ground truth labels are present.
    recall: Fraction of detected positive instance over all positive instances.
      This value is None if no ground truth labels are present.
  """
  # Fix: `np.bool` was a deprecated alias of the builtin `bool` and was
  # removed in NumPy 1.24; comparing against `bool` matches the same dtype.
  if not isinstance(
      labels, np.ndarray) or labels.dtype != bool or len(labels.shape) != 1:
    raise ValueError("labels must be single dimension bool numpy array")
  if not isinstance(
      scores, np.ndarray) or len(scores.shape) != 1:
    raise ValueError("scores must be single dimension numpy array")
  if num_gt < np.sum(labels):
    raise ValueError("Number of true positives must be smaller than num_gt.")
  if len(scores) != len(labels):
    raise ValueError("scores and labels must be of the same size.")
  if num_gt == 0:
    return None, None

  # Rank detections by descending score, then accumulate TP/FP counts.
  sorted_indices = np.argsort(scores)[::-1]
  true_positive_labels = labels.astype(int)[sorted_indices]
  false_positive_labels = 1 - true_positive_labels
  cum_true_positives = np.cumsum(true_positive_labels)
  cum_false_positives = np.cumsum(false_positive_labels)
  precision = cum_true_positives.astype(float) / (
      cum_true_positives + cum_false_positives)
  recall = cum_true_positives.astype(float) / num_gt
  return precision, recall
def compute_average_precision(precision, recall):
  """Compute Average Precision according to the definition in VOCdevkit.

  Precision is modified to ensure that it does not decrease as recall
  decrease.

  Args:
    precision: A float [N, 1] numpy array of precisions
    recall: A float [N, 1] numpy array of recalls

  Raises:
    ValueError: if the input is not of the correct format

  Returns:
    average_precison: The area under the precision recall curve. NaN if
      precision and recall are None.
  """
  if precision is None:
    if recall is not None:
      raise ValueError("If precision is None, recall must also be None")
    # Fix: the `np.NAN` alias was removed in NumPy 2.0; `np.nan` is canonical.
    return np.nan
  if not isinstance(precision, np.ndarray) or not isinstance(recall,
                                                             np.ndarray):
    raise ValueError("precision and recall must be numpy array")
  # Fix: `np.float` (removed alias of builtin `float`) -> np.float64,
  # which is the dtype the old check actually accepted.
  if precision.dtype != np.float64 or recall.dtype != np.float64:
    raise ValueError("input must be float numpy array.")
  if len(precision) != len(recall):
    raise ValueError("precision and recall must be of the same size.")
  if not precision.size:
    return 0.0
  if np.amin(precision) < 0 or np.amax(precision) > 1:
    raise ValueError("Precision must be in the range of [0, 1].")
  if np.amin(recall) < 0 or np.amax(recall) > 1:
    raise ValueError("recall must be in the range of [0, 1].")
  # Fix: plain builtin `range` works on Python 2 and 3; the six.moves shim
  # (a third-party dependency) is unnecessary here.
  if not all(recall[i] <= recall[i + 1] for i in range(len(recall) - 1)):
    raise ValueError("recall must be a non-decreasing array")

  # Pad the curve so it spans recall [0, 1].
  recall = np.concatenate([[0], recall, [1]])
  precision = np.concatenate([[0], precision, [0]])

  # Preprocess precision to be a non-decreasing array
  for i in range(len(precision) - 2, -1, -1):
    precision[i] = np.maximum(precision[i], precision[i + 1])

  # Sum the area of the rectangles between distinct recall values.
  indices = np.where(recall[1:] != recall[:-1])[0] + 1
  average_precision = np.sum(
      (recall[indices] - recall[indices - 1]) * precision[indices])
  return average_precision
def compute_cor_loc(num_gt_imgs_per_class,
                    num_images_correctly_detected_per_class):
  """Compute CorLoc according to the definition in the following paper.

  https://www.robots.ox.ac.uk/~vgg/rg/papers/deselaers-eccv10.pdf

  Returns nans if there are no ground truth images for a class.

  Args:
    num_gt_imgs_per_class: 1D array, representing number of images containing
      at least one object instance of a particular class
    num_images_correctly_detected_per_class: 1D array, representing number of
      images that are correctly detected at least one object instance of a
      particular class

  Returns:
    corloc_per_class: A float numpy array represents the corloc score of each
      class
  """
  # np.where evaluates the division for *every* element, including the zero
  # denominators that are immediately replaced with NaN; silence the
  # resulting spurious divide-by-zero / invalid-value RuntimeWarnings.
  with np.errstate(divide='ignore', invalid='ignore'):
    return np.where(
        num_gt_imgs_per_class == 0,
        np.nan,
        num_images_correctly_detected_per_class / num_gt_imgs_per_class)
|
asacamano/keyczar | refs/heads/master | cpp/src/tools/scons/scons-local-1.2.0.d20090223/SCons/Tool/MSCommon/vs.py | 19 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/MSCommon/vs.py 4043 2009/02/23 09:06:45 scons"
__doc__ = """Module to detect Visual Studio and/or Visual C/C++
"""
import os
import SCons.Errors
import SCons.Util
from SCons.Tool.MSCommon.common import debug, \
read_reg, \
normalize_env, \
get_output, \
parse_output
class VisualStudio:
    """
    An abstract base class for trying to find installed versions of
    Visual Studio.

    Each instance describes one release: the registry keys, environment
    variable and relative paths needed to locate its command-line batch
    file and its IDE executable.  The per-release settings are supplied
    as keyword arguments and stored directly as attributes (see the
    SupportedVSList entries below for the full set).
    """
    def __init__(self, version, **kw):
        # version is the string used as the lookup key (e.g. '9.0', '8.0Exp').
        self.version = version
        # Per-release settings (hkey_root, batch_file, executable_path, ...)
        # become plain instance attributes.
        self.__dict__.update(kw)
        # Memoizes results of the expensive find_*() probes below.
        self._cache = {}

    # Find routines: these hit the registry and the file system on every
    # call.  Use the get_*() wrappers below for cached access.

    def find_batch_file(self):
        """Try to find the Visual Studio or Visual C/C++ batch file.
        Return None if failed or the batch file does not exist.
        """
        pdir = self.get_vc_product_dir()
        if not pdir:
            debug('find_batch_file(): no pdir')
            return None
        batch_file = os.path.normpath(os.path.join(pdir, self.batch_file))
        # NOTE: the second normpath is redundant but harmless; kept as-is.
        batch_file = os.path.normpath(batch_file)
        if not os.path.isfile(batch_file):
            debug('find_batch_file(): %s not on file system' % batch_file)
            return None
        return batch_file

    def find_executable(self):
        """Locate the IDE executable (devenv.com / VCExpress.exe / MSDEV.COM).
        Return None if the product dir or the executable itself is missing.
        """
        pdir = self.get_vc_product_dir()
        if not pdir:
            debug('find_executable(): no pdir')
            return None
        executable = os.path.join(pdir, self.executable_path)
        executable = os.path.normpath(executable)
        if not os.path.isfile(executable):
            debug('find_executable(): %s not on file system' % executable)
            return None
        return executable

    def find_vc_product_dir(self):
        """Find the Visual C++ product directory.

        Tries the registry first; on failure (or a stale registry entry
        pointing at a nonexistent directory) falls back to the release's
        "common tools" environment variable.  Returns None when neither
        source yields an existing directory.
        """
        if not SCons.Util.can_read_reg:
            debug('find_vc_product_dir(): can not read registry')
            return None
        key = self.hkey_root + '\\' + self.vc_product_dir_key
        try:
            comps = read_reg(key)
        except WindowsError, e:
            debug('find_vc_product_dir(): no registry key %s' % key)
        else:
            # Some releases record a directory that is only relative to
            # where the batch file lives; adjust before checking on disk.
            if self.batch_file_dir_reg_relpath:
                comps = os.path.join(comps, self.batch_file_dir_reg_relpath)
                comps = os.path.normpath(comps)
            if os.path.exists(comps):
                return comps
            else:
                debug('find_vc_product_dir(): %s not on file system' % comps)
        # Registry lookup failed or was stale: fall back to the
        # VSnnCOMNTOOLS environment variable for this release.
        d = os.environ.get(self.common_tools_var)
        if not d:
            msg = 'find_vc_product_dir(): no %s variable'
            debug(msg % self.common_tools_var)
            return None
        if not os.path.isdir(d):
            debug('find_vc_product_dir(): %s not on file system' % d)
            return None
        if self.batch_file_dir_env_relpath:
            d = os.path.join(d, self.batch_file_dir_env_relpath)
            d = os.path.normpath(d)
        return d

    # Get routines: cached wrappers around the find routines above.

    def get_batch_file(self):
        """Return the (cached) path to the batch file, or None."""
        try:
            return self._cache['batch_file']
        except KeyError:
            batch_file = self.find_batch_file()
            self._cache['batch_file'] = batch_file
            return batch_file

    def get_executable(self):
        """Return the (cached) path to the IDE executable, or None."""
        try:
            return self._cache['executable']
        except KeyError:
            executable = self.find_executable()
            self._cache['executable'] = executable
            return executable

    def get_supported_arch(self):
        """Return the (cached) list of supported target architectures."""
        try:
            return self._cache['supported_arch']
        except KeyError:
            # RDEVE: for the time being use hardcoded lists
            # supported_arch = self.find_supported_arch()
            self._cache['supported_arch'] = self.supported_arch
            return self.supported_arch

    def get_vc_product_dir(self):
        """Return the (cached) VC product directory, or None."""
        try:
            return self._cache['vc_product_dir']
        except KeyError:
            vc_product_dir = self.find_vc_product_dir()
            self._cache['vc_product_dir'] = vc_product_dir
            return vc_product_dir

    def reset(self):
        """Drop every cached result so the next get_*() call probes again."""
        self._cache = {}
# The list of supported Visual Studio versions we know how to detect.
#
# How to look for .bat file ?
# - VS 2008 Express (x86):
# * from registry key productdir, gives the full path to vsvarsall.bat. In
# HKEY_LOCAL_MACHINE):
# Software\Microsoft\VCExpress\9.0\Setup\VC\productdir
# * from environment variable VS90COMNTOOLS: the path is then ..\..\VC
# relatively to the path given by the variable.
#
# - VS 2008 Express (WoW6432: 32 bits on windows x64):
# Software\Wow6432Node\Microsoft\VCExpress\9.0\Setup\VC\productdir
#
# - VS 2005 Express (x86):
# * from registry key productdir, gives the full path to vsvarsall.bat. In
# HKEY_LOCAL_MACHINE):
# Software\Microsoft\VCExpress\8.0\Setup\VC\productdir
# * from environment variable VS80COMNTOOLS: the path is then ..\..\VC
# relatively to the path given by the variable.
#
# - VS 2005 Express (WoW6432: 32 bits on windows x64): does not seem to have a
# productdir ?
#
# - VS 2003 .Net (pro edition ? x86):
# * from registry key productdir. The path is then ..\Common7\Tools\
# relatively to the key. The key is in HKEY_LOCAL_MACHINE):
# Software\Microsoft\VisualStudio\7.1\Setup\VC\productdir
# * from environment variable VS71COMNTOOLS: the path is the full path to
# vsvars32.bat
#
# - VS 98 (VS 6):
# * from registry key productdir. The path is then Bin
# relatively to the key. The key is in HKEY_LOCAL_MACHINE):
# Software\Microsoft\VisualStudio\6.0\Setup\VC98\productdir
#
# The first version found in the list is the one used by default if
# there are multiple versions installed. Barring good reasons to
# the contrary, this means we should list versions from most recent
# to oldest. Pro versions get listed before Express versions on the
# assumption that, by default, you'd rather use the version you paid
# good money for in preference to whatever Microsoft makes available
# for free.
#
# If you update this list, update the documentation in Tool/msvs.xml.
# One VisualStudio entry per release we know how to detect, newest (and
# paid) editions first; see the ordering rationale in the comment above.
SupportedVSList = [
    # Visual Studio 2010
    # TODO: find the settings, perhaps from someone with a CTP copy?
    #VisualStudio('TBD',
    #             hkey_root=r'TBD',
    #             common_tools_var='TBD',
    #             batch_file='TBD',
    #             vc_product_dir_key=r'TBD',
    #             batch_file_dir_reg_relpath=None,
    #             batch_file_dir_env_relpath=r'TBD',
    #             executable_path=r'TBD',
    #             default_dirname='TBD',
    #),

    # Visual Studio 2008
    # The batch file we look for is in the VC directory,
    # so the devenv.com executable is up in ..\..\Common7\IDE.
    VisualStudio('9.0',
                 hkey_root=r'Software\Microsoft\VisualStudio\9.0',
                 common_tools_var='VS90COMNTOOLS',
                 batch_file='vcvarsall.bat',
                 vc_product_dir_key=r'Setup\VC\ProductDir',
                 batch_file_dir_reg_relpath=None,
                 batch_file_dir_env_relpath=r'..\..\VC',
                 executable_path=r'..\Common7\IDE\devenv.com',
                 default_dirname='Microsoft Visual Studio 9',
                 supported_arch=['x86', 'amd64'],
    ),

    # Visual C++ 2008 Express Edition
    # The batch file we look for is in the VC directory,
    # so the VCExpress.exe executable is up in ..\..\Common7\IDE.
    VisualStudio('9.0Exp',
                 hkey_root=r'Software\Microsoft\VisualStudio\9.0',
                 common_tools_var='VS90COMNTOOLS',
                 batch_file='vcvarsall.bat',
                 vc_product_dir_key=r'Setup\VC\ProductDir',
                 batch_file_dir_reg_relpath=None,
                 batch_file_dir_env_relpath=r'..\..\VC',
                 executable_path=r'..\Common7\IDE\VCExpress.exe',
                 default_dirname='Microsoft Visual Studio 9',
                 supported_arch=['x86'],
    ),

    # Visual Studio 2005
    # The batch file we look for is in the VC directory,
    # so the devenv.com executable is up in ..\..\Common7\IDE.
    VisualStudio('8.0',
                 hkey_root=r'Software\Microsoft\VisualStudio\8.0',
                 common_tools_var='VS80COMNTOOLS',
                 batch_file='vcvarsall.bat',
                 vc_product_dir_key=r'Setup\VC\ProductDir',
                 batch_file_dir_reg_relpath=None,
                 batch_file_dir_env_relpath=r'..\..\VC',
                 executable_path=r'..\Common7\IDE\devenv.com',
                 default_dirname='Microsoft Visual Studio 8',
                 supported_arch=['x86', 'amd64'],
    ),

    # Visual C++ 2005 Express Edition
    # The batch file we look for is in the VC directory,
    # so the VCExpress.exe executable is up in ..\..\Common7\IDE.
    VisualStudio('8.0Exp',
                 hkey_root=r'Software\Microsoft\VCExpress\8.0',
                 common_tools_var='VS80COMNTOOLS',
                 batch_file='vcvarsall.bat',
                 vc_product_dir_key=r'Setup\VC\ProductDir',
                 batch_file_dir_reg_relpath=None,
                 batch_file_dir_env_relpath=r'..\..\VC',
                 # The batch file is in the VC directory, so
                 # the VCExpress.exe executable is up in ..\Common7\IDE.
                 executable_path=r'..\Common7\IDE\VCExpress.exe',
                 default_dirname='Microsoft Visual Studio 8',
                 supported_arch=['x86'],
    ),

    # Visual Studio .NET 2003
    # The batch file we look for is in the Common7\Tools directory,
    # so the devenv.com executable is next door in ..\IDE.
    VisualStudio('7.1',
                 hkey_root=r'Software\Microsoft\VisualStudio\7.1',
                 common_tools_var='VS71COMNTOOLS',
                 batch_file='vsvars32.bat',
                 vc_product_dir_key=r'Setup\VC\ProductDir',
                 batch_file_dir_reg_relpath=r'..\Common7\Tools',
                 batch_file_dir_env_relpath=None,
                 executable_path=r'..\IDE\devenv.com',
                 default_dirname='Microsoft Visual Studio .NET',
                 supported_arch=['x86'],
    ),

    # Visual Studio .NET
    # The batch file we look for is in the Common7\Tools directory,
    # so the devenv.com executable is next door in ..\IDE.
    VisualStudio('7.0',
                 hkey_root=r'Software\Microsoft\VisualStudio\7.0',
                 common_tools_var='VS70COMNTOOLS',
                 batch_file='vsvars32.bat',
                 vc_product_dir_key=r'Setup\VC\ProductDir',
                 batch_file_dir_reg_relpath=r'..\Common7\Tools',
                 batch_file_dir_env_relpath=None,
                 executable_path=r'..\IDE\devenv.com',
                 default_dirname='Microsoft Visual Studio .NET',
                 supported_arch=['x86'],
    ),

    # Visual Studio 6.0
    VisualStudio('6.0',
                 hkey_root=r'Software\Microsoft\VisualStudio\6.0',
                 common_tools_var='VS60COMNTOOLS',
                 batch_file='vcvars32.bat',
                 # Raw string: the original non-raw literal relied on
                 # Python leaving the invalid escapes \M and \P intact,
                 # which newer Pythons warn about (and will reject).
                 vc_product_dir_key=r'Setup\Microsoft Visual C++\ProductDir',
                 batch_file_dir_reg_relpath='Bin',
                 batch_file_dir_env_relpath=None,
                 executable_path=r'Common\MSDev98\Bin\MSDEV.COM',
                 default_dirname='Microsoft Visual Studio',
                 supported_arch=['x86'],
    ),
]
# Map version string -> VisualStudio instance, for O(1) lookup by
# get_vs_by_version().
SupportedVSMap = {}
for vs in SupportedVSList:
    SupportedVSMap[vs.version] = vs

# Finding installed versions of Visual Studio isn't cheap, because it
# goes not only to the registry but also to the disk to sanity-check
# that there is, in fact, a Visual Studio directory there and that the
# registry entry isn't just stale. Find this information once, when
# requested, and cache it.  None means "not yet computed"; see
# get_installed_visual_studios() / reset_installed_visual_studios().
InstalledVSList = None
InstalledVSMap = None
def get_installed_visual_studios():
    """Return the list of VisualStudio objects actually present on this
    machine, probing (and caching) on first use.

    Also populates the module-level InstalledVSMap as a side effect.
    """
    global InstalledVSList
    global InstalledVSMap
    if InstalledVSList is not None:
        # Detection already ran; reuse the cached answer.
        return InstalledVSList
    found = []
    found_map = {}
    for candidate in SupportedVSList:
        debug('trying to find VS %s' % candidate.version)
        if candidate.get_executable():
            debug('found VS %s' % candidate.version)
            found.append(candidate)
            found_map[candidate.version] = candidate
    InstalledVSList = found
    InstalledVSMap = found_map
    return InstalledVSList
def reset_installed_visual_studios():
    """Forget every cached detection result, module-level and per-version,
    so the next get_installed_visual_studios() call probes from scratch.
    """
    global InstalledVSList
    global InstalledVSMap
    InstalledVSList = None
    InstalledVSMap = None
    # Also clear each instance's private cache of find_*() results.
    for vs in SupportedVSList:
        vs.reset()
# We may be asked to update multiple construction environments with
# SDK information. When doing this, we check on-disk for whether
# the SDK has 'mfc' and 'atl' subdirectories. Since going to disk
# is expensive, cache results by directory.
#SDKEnvironmentUpdates = {}
#
#def set_sdk_by_directory(env, sdk_dir):
# global SDKEnvironmentUpdates
# try:
# env_tuple_list = SDKEnvironmentUpdates[sdk_dir]
# except KeyError:
# env_tuple_list = []
# SDKEnvironmentUpdates[sdk_dir] = env_tuple_list
#
# include_path = os.path.join(sdk_dir, 'include')
# mfc_path = os.path.join(include_path, 'mfc')
# atl_path = os.path.join(include_path, 'atl')
#
# if os.path.exists(mfc_path):
# env_tuple_list.append(('INCLUDE', mfc_path))
# if os.path.exists(atl_path):
# env_tuple_list.append(('INCLUDE', atl_path))
# env_tuple_list.append(('INCLUDE', include_path))
#
# env_tuple_list.append(('LIB', os.path.join(sdk_dir, 'lib')))
# env_tuple_list.append(('LIBPATH', os.path.join(sdk_dir, 'lib')))
# env_tuple_list.append(('PATH', os.path.join(sdk_dir, 'bin')))
#
# for variable, directory in env_tuple_list:
# env.PrependENVPath(variable, directory)
def detect_msvs():
    """Return true if at least one supported Visual Studio is installed."""
    installed = get_installed_visual_studios()
    return len(installed) > 0
def get_vs_by_version(msvs):
    """Return the installed VisualStudio instance for version string msvs.

    Raises SCons.Errors.UserError when msvs is not a version we know how
    to detect at all.  For a known-but-not-installed version this
    returns None (see the commented-out check below for the rationale).
    """
    if not SupportedVSMap.has_key(msvs):
        msg = "Visual Studio version %s is not supported" % repr(msvs)
        raise SCons.Errors.UserError, msg
    # Ensure detection has run so InstalledVSMap is populated.
    get_installed_visual_studios()
    vs = InstalledVSMap.get(msvs)
    # Some check like this would let us provide a useful error message
    # if they try to set a Visual Studio version that's not installed.
    # However, we also want to be able to run tests (like the unit
    # tests) on systems that don't, or won't ever, have it installed.
    # It might be worth resurrecting this, with some configurable
    # setting that the tests can use to bypass the check.
    #if not vs:
    #    msg = "Visual Studio version %s is not installed" % repr(msvs)
    #    raise SCons.Errors.UserError, msg
    return vs
def get_default_version(env):
    """Returns the default version string to use for MSVS.

    If no version was requested by the user through the MSVS environment
    variable, query all the available visual studios through
    query_versions, and take the highest one.

    Also normalizes env['MSVS'] / env['MSVS_VERSION'] as a side effect.

    Return
    ------
    version: str
        the default version.
    """
    if not env.has_key('MSVS') or not SCons.Util.is_Dict(env['MSVS']):
        # TODO(1.5):
        #versions = [vs.version for vs in get_installed_visual_studios()]
        versions = map(lambda vs: vs.version, get_installed_visual_studios())
        env['MSVS'] = {'VERSIONS' : versions}
    else:
        versions = env['MSVS'].get('VERSIONS', [])
    if not env.has_key('MSVS_VERSION'):
        if versions:
            env['MSVS_VERSION'] = versions[0] #use highest version by default
        else:
            # Nothing installed: fall back to the newest supported release.
            env['MSVS_VERSION'] = SupportedVSList[0].version
    env['MSVS']['VERSION'] = env['MSVS_VERSION']
    return env['MSVS_VERSION']
def get_default_arch(env):
    """Return the default arch to use for MSVS

    if no version was requested by the user through the MSVS_ARCH environment
    variable, select x86

    Raises SCons.Errors.UserError when the requested arch is not
    supported by the selected Visual Studio version.

    Return
    ------
    arch: str
    """
    arch = env.get('MSVS_ARCH', 'x86')

    # NOTE(review): assumes get_default_version() already ran, so that
    # env['MSVS_VERSION'] exists and InstalledVSMap is populated —
    # confirm callers (see merge_default_version) preserve that order.
    msvs = InstalledVSMap.get(env['MSVS_VERSION'])

    if not msvs:
        # Version not actually installed: silently fall back to x86.
        arch = 'x86'
    elif not arch in msvs.get_supported_arch():
        fmt = "Visual Studio version %s does not support architecture %s"
        raise SCons.Errors.UserError, fmt % (env['MSVS_VERSION'], arch)

    return arch
def merge_default_version(env):
    """Run the default Visual Studio's setup batch file and merge the
    LIB/LIBPATH/PATH/INCLUDE values it produces into env['ENV'].

    Does nothing when the default version cannot be located.
    """
    version = get_default_version(env)
    arch = get_default_arch(env)

    msvs = get_vs_by_version(version)
    if msvs is None:
        return
    batfilename = msvs.get_batch_file()

    # XXX: I think this is broken. This will silently set a bogus tool instead
    # of failing, but there is no other way with the current scons tool
    # framework
    if batfilename is not None:
        vars = ('LIB', 'LIBPATH', 'PATH', 'INCLUDE')

        msvs_list = get_installed_visual_studios()
        # TODO(1.5):
        #vscommonvarnames = [ vs.common_tools_var for vs in msvs_list ]
        vscommonvarnames = map(lambda vs: vs.common_tools_var, msvs_list)
        # Run the batch file in a normalized environment that keeps
        # COMSPEC and every VSnnCOMNTOOLS variable visible to it.
        nenv = normalize_env(env['ENV'], vscommonvarnames + ['COMSPEC'])
        output = get_output(batfilename, arch, env=nenv)
        vars = parse_output(output, vars)

        for k, v in vars.items():
            env.PrependENVPath(k, v, delete_existing=1)
def query_versions():
    """Query the system to get available versions of VS. A version is
    considered when a batfile is found."""
    versions = []
    for msvs in get_installed_visual_studios():
        versions.append(msvs.version)
    return versions
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
richardcs/ansible | refs/heads/devel | lib/ansible/modules/storage/netapp/na_ontap_lun_map.py | 7 | #!/usr/bin/python
""" this is lun mapping module
(c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
# User-facing module documentation, parsed as YAML by Ansible tooling.
# Fixed the "LUN.." double-period typo in the path option description.
DOCUMENTATION = """

module: na_ontap_lun_map

short_description: NetApp ONTAP LUN maps
extends_documentation_fragment:
    - netapp.na_ontap
version_added: '2.6'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>

description:
- Map and unmap LUNs on NetApp ONTAP.

options:

  state:
    description:
    - Whether the specified LUN should exist or not.
    choices: ['present', 'absent']
    default: present

  initiator_group_name:
    description:
    - Initiator group to map to the given LUN.
    required: true

  path:
    description:
    - Path of the LUN.
    required: true

  vserver:
    required: true
    description:
    - The name of the vserver to use.

  lun_id:
    description:
    - LUN ID assigned for the map.

"""
EXAMPLES = """
- name: Create LUN mapping
na_ontap_lun_map:
state: present
initiator_group_name: ansibleIgroup3234
path: /vol/iscsi_path/iscsi_lun
vserver: ci_dev
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Unmap LUN
na_ontap_lun_map:
state: absent
initiator_group_name: ansibleIgroup3234
path: /vol/iscsi_path/iscsi_lun
vserver: ci_dev
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
lun_node:
description: NetApp controller that is hosting the LUN.
returned: success
type: string
sample: node01
lun_ostype:
description: Specifies the OS of the host accessing the LUN.
returned: success
type: string
sample: vmware
lun_serial:
description: A unique, 12-byte, ASCII string used to identify the LUN.
returned: success
type: string
sample: 80E7/]LZp1Tt
lun_naa_id:
description: The Network Address Authority (NAA) identifier for the LUN.
returned: success
type: string
sample: 600a0980383045372f5d4c5a70315474
lun_state:
description: Online or offline status of the LUN.
returned: success
type: string
sample: online
lun_size:
description: Size of the LUN in bytes.
returned: success
type: int
sample: 2199023255552
"""
import binascii
import traceback

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapLUNMap(object):
    """Map or unmap a LUN to/from an initiator group on ONTAP via ZAPI."""

    def __init__(self):
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            initiator_group_name=dict(required=True, type='str'),
            path=dict(required=True, type='str'),
            vserver=dict(required=True, type='str'),
            lun_id=dict(required=False, type='str', default=None),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                ('state', 'present', ['path'])
            ],
            supports_check_mode=True
        )

        self.result = dict(
            changed=False,
        )

        p = self.module.params

        # set up state variables
        self.state = p['state']
        self.initiator_group_name = p['initiator_group_name']
        self.path = p['path']
        self.vserver = p['vserver']
        self.lun_id = p['lun_id']

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            # All ZAPI calls below go through this vserver-scoped server.
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.vserver)

    def get_lun_map(self):
        """
        Return details about the LUN map

        :return: Details about the lun map, or None if self.path is not
                 mapped to self.initiator_group_name
        :rtype: dict
        """
        lun_info = netapp_utils.zapi.NaElement('lun-map-list-info')
        lun_info.add_new_child('path', self.path)
        result = self.server.invoke_successfully(lun_info, True)
        return_value = None
        igroups = result.get_child_by_name('initiator-groups')
        if igroups:
            for igroup_info in igroups.get_children():
                initiator_group_name = igroup_info.get_child_content('initiator-group-name')
                lun_id = igroup_info.get_child_content('lun-id')
                if initiator_group_name == self.initiator_group_name:
                    return_value = {
                        'lun_id': lun_id
                    }
                    break

        return return_value

    def get_lun(self):
        """
        Return details about the LUN

        :return: Details about the lun, or None if not found
        :rtype: dict
        """
        # build the lun query
        query_details = netapp_utils.zapi.NaElement('lun-info')
        query_details.add_new_child('path', self.path)

        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(query_details)

        lun_query = netapp_utils.zapi.NaElement('lun-get-iter')
        lun_query.add_child_elem(query)

        # find lun using query
        result = self.server.invoke_successfully(lun_query, True)
        return_value = None
        if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
            lun = result.get_child_by_name('attributes-list').get_child_by_name('lun-info')

            # The NAA ID is NetApp's OUI prefix plus the hex encoding of
            # the serial number.  The original used str.encode('hex'),
            # which only exists on Python 2 and raises LookupError on
            # Python 3; binascii.hexlify works on both.
            serial = lun.get_child_content('serial-number')
            naa_id = '600a0980' + binascii.hexlify(serial.encode('ascii')).decode('ascii')

            # extract and assign lun information to return value
            return_value = {
                'lun_node': lun.get_child_content('node'),
                'lun_ostype': lun.get_child_content('multiprotocol-type'),
                'lun_serial': serial,
                'lun_naa_id': naa_id,
                'lun_state': lun.get_child_content('state'),
                'lun_size': lun.get_child_content('size'),
            }

        return return_value

    def create_lun_map(self):
        """
        Create LUN map; fails the module on any ZAPI error.
        """
        options = {'path': self.path, 'initiator-group': self.initiator_group_name}
        if self.lun_id is not None:
            options['lun-id'] = self.lun_id
        lun_map_create = netapp_utils.zapi.NaElement.create_node_with_children('lun-map', **options)

        try:
            self.server.invoke_successfully(lun_map_create, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg="Error mapping lun %s of initiator_group_name %s: %s" %
                                  (self.path, self.initiator_group_name, to_native(e)),
                                  exception=traceback.format_exc())

    def delete_lun_map(self):
        """
        Unmap LUN map; fails the module on any ZAPI error.
        """
        lun_map_delete = netapp_utils.zapi.NaElement.create_node_with_children('lun-unmap', **{'path': self.path, 'initiator-group': self.initiator_group_name})

        try:
            self.server.invoke_successfully(lun_map_delete, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg="Error unmapping lun %s of initiator_group_name %s: %s" %
                                  (self.path, self.initiator_group_name, to_native(e)),
                                  exception=traceback.format_exc())

    def apply(self):
        """Drive the module: inspect current state, make the minimal change
        (honoring check mode), and exit with the result."""
        netapp_utils.ems_log_event("na_ontap_lun_map", self.server)
        lun_details = self.get_lun()
        lun_map_details = self.get_lun_map()

        # Surface LUN facts (node, serial, NAA id, ...) in the result.
        if self.state == 'present' and lun_details:
            self.result.update(lun_details)

        if self.state == 'present' and not lun_map_details:
            self.result['changed'] = True
            if not self.module.check_mode:
                self.create_lun_map()
        elif self.state == 'absent' and lun_map_details:
            self.result['changed'] = True
            if not self.module.check_mode:
                self.delete_lun_map()

        self.module.exit_json(**self.result)
def main():
    """Instantiate the module object and run the requested state change."""
    lun_map = NetAppOntapLUNMap()
    lun_map.apply()


if __name__ == '__main__':
    main()
|
Cubillosxy/MYO-PYTHON-BEBOP | refs/heads/master | core/remote_testing/test_allsettings.py | 5 | #!/usr/bin/python
"""
Test request of all drone settings.
status: TESTED, Charles/France 2015-04-20 ... integrated into Bebop initial config()
usage:
./test_allsettings.py <dummytask> [<metalog> [<F>]]
"""
import sys
import os
import inspect
BEBOP_ROOT = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if BEBOP_ROOT not in sys.path:
sys.path.insert(0, BEBOP_ROOT) # access to drone source without installation
from bebop import Bebop
from commands import requestAllSettingsCmd
from commands import requestAllStatesCmd
from apyros.metalog import MetaLog, disableAsserts
from apyros.manual import myKbhit, ManualControlException
def testAllSettings(drone):
    """Request every drone setting, then pump updates so replies arrive."""
    drone.update(cmd=requestAllSettingsCmd())
    remaining = 200
    while remaining > 0:
        sys.stderr.write('.')  # progress tick per update cycle
        drone.update(cmd=None)
        remaining -= 1
def testAllStates(drone):
    """Request every drone state, then pump updates so replies arrive."""
    drone.update(cmd=requestAllStatesCmd())
    remaining = 200
    while remaining > 0:
        sys.stderr.write('.')  # progress tick per update cycle
        drone.update(cmd=None)
        remaining -= 1
if __name__ == "__main__":
    # Usage: ./test_allsettings.py <dummytask> [<metalog> [<F>]]
    if len(sys.argv) < 2:
        print __doc__
        sys.exit(2)
    metalog=None
    if len(sys.argv) > 2:
        # Replay a previously recorded session instead of a live drone.
        metalog = MetaLog( filename=sys.argv[2] )
    if len(sys.argv) > 3 and sys.argv[3] == 'F':
        # 'F' turns asserts off (apyros.metalog helper).
        disableAsserts()

    drone = Bebop( metalog=metalog, onlyIFrames=True )
    testAllSettings( drone )
    testAllStates( drone )
    print "Battery:", drone.battery # this time you should not see None any more
|
ecliptik/ansible-modules-core | refs/heads/devel | cloud/rackspace/rax_dns.py | 132 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
# User-facing module documentation, parsed as YAML by Ansible tooling.
# Fixed the "desctiption" typo on the email option, which hid that
# option's description from the documentation parser.
DOCUMENTATION = '''
---
module: rax_dns
short_description: Manage domains on Rackspace Cloud DNS
description:
  - Manage domains on Rackspace Cloud DNS
version_added: 1.5
options:
  comment:
    description:
      - Brief description of the domain. Maximum length of 160 characters
  email:
    description:
      - Email address of the domain administrator
  name:
    description:
      - Domain name to create
  state:
    description:
      - Indicate desired state of the resource
    choices:
      - present
      - absent
    default: present
  ttl:
    description:
      - Time to live of domain in seconds
    default: 3600
notes:
  - "It is recommended that plays utilizing this module be run with
    C(serial: 1) to avoid exceeding the API request limit imposed by
    the Rackspace CloudDNS API"
author: "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: Create domain
hosts: all
gather_facts: False
tasks:
- name: Domain create request
local_action:
module: rax_dns
credentials: ~/.raxpub
name: example.org
email: admin@example.org
register: rax_dns
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def rax_dns(module, comment, email, name, state, ttl):
    """Ensure domain *name* exists (state='present') or is gone ('absent').

    Exits the module via exit_json/fail_json; never returns normally.
    """
    changed = False

    dns = pyrax.cloud_dns
    if not dns:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    if state == 'present':
        if not email:
            module.fail_json(msg='An "email" attribute is required for '
                                 'creating a domain')

        try:
            domain = dns.find(name=name)
        except pyrax.exceptions.NoUniqueMatch, e:
            module.fail_json(msg='%s' % e.message)
        except pyrax.exceptions.NotFound:
            # Domain does not exist yet: create it.
            try:
                domain = dns.create(name=name, emailAddress=email, ttl=ttl,
                                    comment=comment)
                changed = True
            except Exception, e:
                module.fail_json(msg='%s' % e.message)

        # Reconcile attributes of the (pre-existing or just created) domain.
        update = {}
        if comment != getattr(domain, 'comment', None):
            update['comment'] = comment
        if ttl != getattr(domain, 'ttl', None):
            update['ttl'] = ttl
        if email != getattr(domain, 'emailAddress', None):
            update['emailAddress'] = email

        if update:
            try:
                domain.update(**update)
                changed = True
                # Refresh local attributes from the API after the update.
                domain.get()
            except Exception, e:
                module.fail_json(msg='%s' % e.message)

    elif state == 'absent':
        try:
            domain = dns.find(name=name)
        except pyrax.exceptions.NotFound:
            # Already absent: fall through with a falsy placeholder.
            domain = {}
            pass
        except Exception, e:
            module.fail_json(msg='%s' % e.message)

        if domain:
            try:
                domain.delete()
                changed = True
            except Exception, e:
                module.fail_json(msg='%s' % e.message)

    module.exit_json(changed=changed, domain=rax_to_dict(domain))
def main():
    """Entry point: build the module argument spec and dispatch to rax_dns()."""
    argument_spec = rax_argument_spec()
    extra_args = dict(
        comment=dict(),
        email=dict(),
        name=dict(),
        state=dict(default='present', choices=['present', 'absent']),
        ttl=dict(type='int', default=3600),
    )
    argument_spec.update(extra_args)

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    params = module.params
    comment = params.get('comment')
    email = params.get('email')
    name = params.get('name')
    state = params.get('state')
    ttl = params.get('ttl')

    setup_rax_module(module, pyrax, False)

    rax_dns(module, comment, email, name, state, ttl)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
main()
|
shujaatak/UAV_MissionPlanner | refs/heads/master | Lib/lib2to3/fixes/fix_repr.py | 327 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that transforms `xyzzy` into repr(xyzzy)."""
# Local imports
from .. import fixer_base
from ..fixer_util import Call, Name, parenthesize
class FixRepr(fixer_base.BaseFix):
    """Fixer that rewrites backtick repr syntax, `x`, into repr(x)."""

    BM_compatible = True

    # Match an atom delimited by backticks, capturing the inner expression.
    PATTERN = """
              atom < '`' expr=any '`' >
              """

    def transform(self, node, results):
        expr = results["expr"].clone()

        if expr.type == self.syms.testlist1:
            # A bare tuple inside backticks, e.g. `1, 2`, needs parens so
            # the result is repr((1, 2)) rather than a two-argument call.
            expr = parenthesize(expr)
        return Call(Name(u"repr"), [expr], prefix=node.prefix)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.