gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
from six.moves import range
import json, subprocess, os
from semantic_version import Version
import frappe
from frappe.utils import cstr
import requests
from frappe import _
import git
def get_change_log(user=None):
    """Return change-log entries for apps upgraded since the user's stored
    ``last_known_versions`` snapshot.

    Each entry is a dict with ``title``, ``description``, ``version`` and
    ``change_log`` (the per-version notes). On the user's first visit the
    current versions are recorded as the baseline and an empty list is
    returned.
    """
    if not user: user = frappe.session.user

    last_known_versions = frappe._dict(json.loads(frappe.db.get_value("User",
        user, "last_known_versions") or "{}"))
    current_versions = get_versions()

    if not last_known_versions:
        # first call for this user: set the baseline, nothing to show yet
        update_last_known_versions()
        return []

    change_log = []

    def set_in_change_log(app, opts, change_log):
        # "0.0.1" is the assumed floor when no previous version was recorded
        from_version = last_known_versions.get(app, {}).get("version") or "0.0.1"
        to_version = opts["version"]

        if from_version != to_version:
            app_change_log = get_change_log_for_app(app, from_version=from_version, to_version=to_version)

            if app_change_log:
                change_log.append({
                    "title": opts["title"],
                    "description": opts["description"],
                    "version": to_version,
                    "change_log": app_change_log
                })

    # non-frappe apps first; frappe itself is appended last
    for app, opts in current_versions.items():
        if app != "frappe":
            set_in_change_log(app, opts, change_log)

    if "frappe" in current_versions:
        set_in_change_log("frappe", current_versions["frappe"], change_log)

    return change_log
def get_change_log_for_app(app, from_version, to_version):
    """Collect change-log entries for *app* between two versions.

    Scans ``<app>/change_log/v<major>/v<x_y_z>.<ext>`` files and returns a
    list of ``[version_string, file_content]`` pairs, newest first, for every
    version in the half-open range (from_version, to_version]. Returns None
    when the app has no ``change_log`` folder.
    """
    change_log_folder = os.path.join(frappe.get_app_path(app), "change_log")
    if not os.path.exists(change_log_folder):
        return

    from_version = Version(from_version)
    to_version = Version(to_version)
    # remove pre-release part so e.g. "5.0.0-beta" compares like "5.0.0"
    to_version.prerelease = None

    # candidate folders: one per major version in the upgrade range, inclusive
    major_version_folders = ["v{0}".format(i) for i in range(from_version.major, to_version.major + 1)]

    app_change_log = []

    for folder in os.listdir(change_log_folder):
        if folder in major_version_folders:
            for file in os.listdir(os.path.join(change_log_folder, folder)):
                # file name "v1_2_3.md" -> Version("1.2.3")
                version = Version(os.path.splitext(file)[0][1:].replace("_", "."))

                if from_version < version <= to_version:
                    file_path = os.path.join(change_log_folder, folder, file)
                    content = frappe.read_file(file_path)
                    app_change_log.append([version, content])

    app_change_log = sorted(app_change_log, key=lambda d: d[0], reverse=True)

    # convert version to string and send
    return [[cstr(d[0]), d[1]] for d in app_change_log]
@frappe.whitelist()
def update_last_known_versions():
    """Store the currently installed app versions on the session user so the
    next call to get_change_log() can diff against them."""
    frappe.db.set_value("User", frappe.session.user, "last_known_versions",
        json.dumps(get_versions()), update_modified=False)
@frappe.whitelist()
def get_versions():
    """Get versions of all installed apps.

    Example:

        {
            "frappe": {
                "title": "Frappe Framework",
                "version": "5.0.0"
            }
        }
    """
    versions = {}
    for app in frappe.get_installed_apps(sort=True):
        app_hooks = frappe.get_hooks(app_name=app)
        versions[app] = {
            "title": app_hooks.get("app_title")[0],
            "description": app_hooks.get("app_description")[0],
            "branch": get_app_branch(app)
        }

        if versions[app]['branch'] != 'master':
            try:
                # non-master install: derive "<tag>-<commits-ahead>" from
                # `git describe` on the app's repo
                app_repo = git.Repo(os.path.join('..', 'apps', app))
                branch_version = '-'.join(app_repo.git.describe().split('-')[:2])
                branch_version = [branch_version.strip('v')]
            except Exception:
                # was a bare `except:`; narrowed so KeyboardInterrupt /
                # SystemExit are no longer swallowed. Fall back to the
                # "<branch>_version" hook, if the app declares one.
                branch_version = app_hooks.get('{0}_version'.format(versions[app]['branch']))
            if branch_version:
                versions[app]['branch_version'] = branch_version[0] + ' ({0})'.format(get_app_last_commit_ref(app))

        try:
            versions[app]["version"] = frappe.get_attr(app + ".__version__")
        except AttributeError:
            # app module does not define __version__
            versions[app]["version"] = '0.0.1'

    return versions
def get_app_branch(app):
    '''Return the current git branch of an installed app, or '' on failure.

    Runs ``git rev-parse --abbrev-ref HEAD`` inside ``../apps/<app>``.
    '''
    try:
        # decode: check_output returns bytes on Python 3, which would never
        # compare equal to str literals such as 'master' in get_versions()
        out = subprocess.check_output('cd ../apps/{0} && git rev-parse --abbrev-ref HEAD'.format(app),
            shell=True)
        return out.decode('utf-8').strip()
    except Exception:
        # missing app folder, git not installed, etc. -- treat as "unknown"
        return ''
def get_app_last_commit_ref(app):
    '''Return the short (7-char) HEAD commit hash of an app, or '' on failure.'''
    try:
        # decode: check_output returns bytes on Python 3; slicing bytes and
        # later concatenating with str would fail in get_versions()
        out = subprocess.check_output('cd ../apps/{0} && git rev-parse HEAD'.format(app),
            shell=True)
        return out.decode('utf-8').strip()[:7]
    except Exception:
        return ''
def check_for_update():
    """Compare installed app versions against their latest GitHub releases
    and queue an update notification (via redis) for System Managers.
    """
    updates = frappe._dict(major=[], minor=[], patch=[])
    apps = get_versions()

    for app in apps:
        app_details = check_release_on_github(app)
        if not app_details: continue

        github_version, org_name = app_details
        # Get local instance's current version of the app
        # NOTE(review): get_versions() only sets 'branch_version' for
        # non-master branches -- this looks like it can raise KeyError
        # otherwise; confirm against callers.
        instance_version = Version(apps[app]['branch_version'].split(' ')[0])
        # Compare and popup update message
        # walk major -> minor -> patch; stop at the first decisive component
        for update_type in updates:
            if github_version.__dict__[update_type] > instance_version.__dict__[update_type]:
                updates[update_type].append(frappe._dict(
                    current_version = str(instance_version),
                    available_version = str(github_version),
                    org_name = org_name,
                    app_name = app,
                    title = apps[app]['title'],
                ))
                break
            # local is ahead of the release: nothing to report for this app
            if github_version.__dict__[update_type] < instance_version.__dict__[update_type]: break

    add_message_to_redis(updates)
def check_release_on_github(app):
    """Return ``(latest_release_version, org_name)`` for *app* from GitHub,
    or None when the app has no https GitHub remote or no releases.
    """
    # Check if repo remote is on github
    from subprocess import CalledProcessError
    try:
        remote_url = subprocess.check_output("cd ../apps/{} && git ls-remote --get-url".format(app), shell=True)
    except CalledProcessError:
        # Passing this since some apps may not have git initialized in them
        return None

    # NOTE(review): on Python 3 check_output returns bytes, so the `in`
    # checks below would raise TypeError against str literals -- presumably
    # this runs under Python 2; confirm or decode remote_url first.
    if "github.com" not in remote_url:
        return None

    # Get latest version from github
    if 'https' not in remote_url:
        return None

    # remote is https://github.com/<org>/<repo>; split index 3 is the org
    org_name = remote_url.split('/')[3]
    r = requests.get('https://api.github.com/repos/{}/{}/releases'.format(org_name, app))

    if r.status_code == 200 and r.json():
        # 0 => latest release
        return Version(r.json()[0]['tag_name'].strip('v')), org_name
    else:
        # In case of an improper response or if there are no releases
        return None
def add_message_to_redis(update_json):
    """Cache the available-updates payload and mark every enabled System
    Manager as a recipient of the update popup.
    """
    # "update-message" will store the update message string
    # "update-user-set" will be a set of users
    cache = frappe.cache()
    cache.set_value("update-info", json.dumps(update_json))
    user_list = [x.name for x in frappe.get_all("User", filters={"enabled": True})]
    system_managers = [user for user in user_list if 'System Manager' in frappe.get_roles(user)]
    # NOTE(review): sadd with an empty *system_managers would error --
    # presumably at least one enabled System Manager always exists; confirm.
    cache.sadd("update-user-set", *system_managers)
@frappe.whitelist()
def show_update_popup():
    """Show the queued "new releases available" message to the current user
    (if they are a pending recipient) and de-queue them afterwards.
    """
    cache = frappe.cache()
    user = frappe.session.user

    update_info = cache.get_value("update-info")
    if not update_info:
        return

    updates = json.loads(update_info)
    current_versions = get_versions()

    # Check if user is in the set of users to send update message to
    update_message = ""
    if cache.sismember("update-user-set", user):
        # build one HTML section per update type (major/minor/patch)
        for update_type in updates:
            release_links = ""
            for app in updates[update_type]:
                app = frappe._dict(app)
                release_links += "<a href='https://github.com/{org_name}/{app_name}/releases/tag/v{available_version}'><b>{title}</b>: v{available_version}</a><br>".format(
                    available_version = app.available_version,
                    org_name = app.org_name,
                    app_name = app.app_name,
                    title = app.title
                )
            if release_links:
                update_message += _("New {} releases for the following apps are available".format(update_type)) + ":<br><br>{}<hr>".format(release_links)

    if update_message:
        frappe.msgprint(update_message, title=_("New updates are available"), indicator='green')
        # shown once: remove the user so the popup does not repeat
        cache.srem("update-user-set", user)
| |
"""
Module for the management of upstart systems. The Upstart system only supports
service starting, stopping and restarting.
.. important::
If you feel that Salt should be using this module to manage services on a
minion, and it is using a different module (or gives an error similar to
*'service.start' is not available*), see :ref:`here
<module-provider-override>`.
Currently (as of Ubuntu 12.04) there is no tool available to disable
Upstart services (like update-rc.d). This[1] is the recommended way to
disable an Upstart service. So we assume that all Upstart services
that have not been disabled in this manner are enabled.
But this is broken because we do not check to see that the dependent
services are enabled. Otherwise we would have to do something like
parse the output of "initctl show-config" to determine if all service
dependencies are enabled to start on boot. For example, see the "start
on" condition for the lightdm service below[2]. And this would be too
hard. So we wait until the upstart developers have solved this
problem. :) This is to say that an Upstart service that is enabled may
not really be enabled.
Also, when an Upstart service is enabled, should the dependent
services be enabled too? Probably not. But there should be a notice
about this, at least.
[1] http://upstart.ubuntu.com/cookbook/#disabling-a-job-from-automatically-starting
[2] example upstart configuration file::
lightdm
emits login-session-start
emits desktop-session-start
emits desktop-shutdown
start on ((((filesystem and runlevel [!06]) and started dbus) and (drm-device-added card0 PRIMARY_DEVICE_FOR_DISPLAY=1 or stopped udev-fallback-graphics)) or runlevel PREVLEVEL=S)
stop on runlevel [016]
.. warning::
This module should not be used on Red Hat systems. For these,
the :mod:`rh_service <salt.modules.rh_service>` module should be
used, as it supports the hybrid upstart/sysvinit system used in
RHEL/CentOS 6.
"""
import fnmatch
import glob
import os
import re
import salt.modules.cmdmod
import salt.utils.files
import salt.utils.path
import salt.utils.systemd
__func_alias__ = {"reload_": "reload"}
# Define the module's virtual name
__virtualname__ = "service"
def __virtual__():
    """
    Only work on Ubuntu

    Load only when the minion is actually Upstart-based: not booted with
    systemd, and either an Ubuntu-family distro or a Debian-family distro
    whose ``initctl`` reports upstart.
    """
    # Disable on these platforms, specific service modules exist:
    if salt.utils.systemd.booted(__context__):
        return (
            False,
            "The upstart execution module failed to load: this system was booted with"
            " systemd.",
        )
    elif __grains__["os"] in ("Ubuntu", "Linaro", "elementary OS", "Mint"):
        return __virtualname__
    elif __grains__["os"] in ("Debian", "Raspbian"):
        # Debian may ship upstart as /sbin/initctl; probe its version string
        debian_initctl = "/sbin/initctl"
        if os.path.isfile(debian_initctl):
            initctl_version = salt.modules.cmdmod._run_quiet(
                debian_initctl + " version"
            )
            if "upstart" in initctl_version:
                return __virtualname__
    return (
        False,
        "The upstart execution module failed to load: "
        " the system must be Ubuntu-based, or Debian-based with upstart support.",
    )
def _find_utmp():
    """
    Figure out which utmp file to use when determining runlevel.
    Sometimes /var/run/utmp doesn't exist, /run/utmp is the new hotness.

    Returns the most recently modified candidate, or False when neither
    location exists.
    """
    candidates = {}
    # These are the likely locations for the file on Ubuntu
    for path in ("/var/run/utmp", "/run/utmp"):
        try:
            candidates[os.stat(path).st_mtime] = path
        except Exception:  # pylint: disable=broad-except
            pass
    if not candidates:
        return False
    # prefer the file touched most recently
    return candidates[max(candidates)]
def _default_runlevel():
    """
    Try to figure out the default runlevel. It is kept in
    /etc/init/rc-sysinit.conf, but can be overridden with entries
    in /etc/inittab, or via the kernel command-line at boot
    """
    # Start from "2" (the Debian/Ubuntu default). The original left
    # `runlevel` unassigned when rc-sysinit.conf existed but contained no
    # DEFAULT_RUNLEVEL line, which could raise NameError at the final return.
    runlevel = "2"

    # Try to get the "main" default. If this fails, throw up our
    # hands and just guess "2", because things are horribly broken
    try:
        with salt.utils.files.fopen("/etc/init/rc-sysinit.conf") as fp_:
            for line in fp_:
                line = salt.utils.stringutils.to_unicode(line)
                if line.startswith("env DEFAULT_RUNLEVEL"):
                    runlevel = line.split("=")[-1].strip()
    except Exception:  # pylint: disable=broad-except
        return "2"

    # Look for an optional "legacy" override in /etc/inittab
    try:
        with salt.utils.files.fopen("/etc/inittab") as fp_:
            for line in fp_:
                line = salt.utils.stringutils.to_unicode(line)
                if not line.startswith("#") and "initdefault" in line:
                    runlevel = line.split(":")[1]
    except Exception:  # pylint: disable=broad-except
        pass

    # The default runlevel can also be set via the kernel command-line.
    try:
        valid_strings = {"0", "1", "2", "3", "4", "5", "6", "s", "S", "-s", "single"}
        with salt.utils.files.fopen("/proc/cmdline") as fp_:
            for line in fp_:
                line = salt.utils.stringutils.to_unicode(line)
                for arg in line.strip().split():
                    if arg in valid_strings:
                        runlevel = arg
                        break
    except Exception:  # pylint: disable=broad-except
        pass

    return runlevel
def _runlevel():
    """
    Return the current runlevel

    The value is cached in ``__context__`` for the life of the run; falls
    back to the configured default when ``runlevel`` gives no usable output.
    """
    if "upstart._runlevel" in __context__:
        return __context__["upstart._runlevel"]
    ret = _default_runlevel()
    utmp = _find_utmp()
    if utmp:
        out = __salt__["cmd.run"](["runlevel", "{}".format(utmp)], python_shell=False)
        try:
            # "runlevel" prints "<previous> <current>"; take the current one
            ret = out.split()[1]
        except IndexError:
            pass
    __context__["upstart._runlevel"] = ret
    return ret
def _is_symlink(name):
return os.path.abspath(name) != os.path.realpath(name)
def _service_is_upstart(name):
"""
From "Writing Jobs" at
http://upstart.ubuntu.com/getting-started.html:
Jobs are defined in files placed in /etc/init, the name of the job
is the filename under this directory without the .conf extension.
"""
return os.access("/etc/init/{}.conf".format(name), os.R_OK)
def _upstart_is_disabled(name):
    """
    An Upstart service is assumed disabled if a manual stanza is
    placed in /etc/init/[name].override.

    NOTE: An Upstart service can also be disabled by placing "manual"
    in /etc/init/[name].conf.
    """
    candidates = (
        "/etc/init/{}.conf".format(name),
        "/etc/init/{}.override".format(name),
    )
    for path in candidates:
        if not os.path.isfile(path):
            continue
        with salt.utils.files.fopen(path) as handle:
            contents = salt.utils.stringutils.to_unicode(handle.read())
        # a line starting (possibly indented) with "manual" marks it disabled
        if re.search(r"^\s*manual", contents, re.MULTILINE):
            return True
    return False
def _upstart_is_enabled(name):
    """
    Assume an Upstart service is enabled unless it has been explicitly
    disabled with a "manual" stanza.
    """
    if _upstart_is_disabled(name):
        return False
    return True
def _service_is_sysv(name):
    """
    A System-V style service will have a control script in
    /etc/init.d. We make sure to skip over symbolic links that point
    to Upstart's /lib/init/upstart-job, and anything that isn't an
    executable, like README or skeleton.
    """
    initd_script = "/etc/init.d/{}".format(name)
    if _service_is_upstart(name):
        return False
    return os.access(initd_script, os.X_OK)
def _sysv_is_disabled(name):
    """
    A System-V style service is assumed disabled if there is no
    start-up link (starts with "S") to its script in /etc/init.d in
    the current runlevel.
    """
    pattern = "/etc/rc{}.d/S*{}".format(_runlevel(), name)
    return len(glob.glob(pattern)) == 0
def _sysv_is_enabled(name):
    """
    Assume a System-V style service is enabled unless it is disabled
    (i.e. it has a start-up link for the current runlevel).
    """
    return False if _sysv_is_disabled(name) else True
def _iter_service_names():
    """
    Detect all of the service names available to upstart via init configuration
    files and via classic sysv init scripts

    Yields each name once; sysv names found first shadow identically-named
    upstart jobs.
    """
    found = set()
    # classic sysv scripts: one service per file in /etc/init.d
    for line in glob.glob("/etc/init.d/*"):
        name = os.path.basename(line)
        found.add(name)
        yield name

    # This walk method supports nested services as per the init man page
    # definition 'For example a configuration file /etc/init/rc-sysinit.conf
    # is named rc-sysinit, while a configuration file /etc/init/net/apache.conf
    # is named net/apache'
    init_root = "/etc/init/"
    for root, dirnames, filenames in salt.utils.path.os_walk(init_root):
        relpath = os.path.relpath(root, init_root)
        for filename in fnmatch.filter(filenames, "*.conf"):
            if relpath == ".":
                # service is defined in the root, no need to append prefix.
                # strip the ".conf" extension (5 chars)
                name = filename[:-5]
            else:
                # service is nested, append its relative path prefix.
                name = os.path.join(relpath, filename[:-5])
            if name in found:
                continue
            yield name
def get_enabled():
    """
    Return the enabled services

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_enabled
    """
    enabled = set()
    for svc in _iter_service_names():
        if _service_is_upstart(svc):
            if _upstart_is_enabled(svc):
                enabled.add(svc)
        elif _service_is_sysv(svc) and _sysv_is_enabled(svc):
            enabled.add(svc)
    return sorted(enabled)
def get_disabled():
    """
    Return the disabled services

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_disabled
    """
    disabled = set()
    for svc in _iter_service_names():
        if _service_is_upstart(svc):
            if _upstart_is_disabled(svc):
                disabled.add(svc)
        elif _service_is_sysv(svc) and _sysv_is_disabled(svc):
            disabled.add(svc)
    return sorted(disabled)
def available(name):
    """
    Returns ``True`` if the specified service is available, otherwise returns
    ``False``.

    CLI Example:

    .. code-block:: bash

        salt '*' service.available sshd
    """
    return any(service == name for service in get_all())
def missing(name):
    """
    The inverse of service.available.
    Returns ``True`` if the specified service is not available, otherwise returns
    ``False``.

    CLI Example:

    .. code-block:: bash

        salt '*' service.missing sshd
    """
    return all(service != name for service in get_all())
def get_all():
    """
    Return all installed services

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_all
    """
    services = list(get_enabled())
    services.extend(get_disabled())
    services.sort()
    return services
def start(name):
    """
    Start the specified service

    CLI Example:

    .. code-block:: bash

        salt '*' service.start <service name>
    """
    # retcode 0 means the service command succeeded
    return __salt__["cmd.retcode"](["service", name, "start"], python_shell=False) == 0
def stop(name):
    """
    Stop the specified service

    CLI Example:

    .. code-block:: bash

        salt '*' service.stop <service name>
    """
    # retcode 0 means the service command succeeded
    return __salt__["cmd.retcode"](["service", name, "stop"], python_shell=False) == 0
def restart(name):
    """
    Restart the named service

    CLI Example:

    .. code-block:: bash

        salt '*' service.restart <service name>
    """
    # retcode 0 means the service command succeeded
    return __salt__["cmd.retcode"](["service", name, "restart"], python_shell=False) == 0
def full_restart(name):
    """
    Do a full restart (stop/start) of the named service

    CLI Example:

    .. code-block:: bash

        salt '*' service.full_restart <service name>
    """
    # retcode 0 means the service command succeeded
    return __salt__["cmd.retcode"](["service", name, "--full-restart"], python_shell=False) == 0
def reload_(name):
    """
    Reload the named service

    CLI Example:

    .. code-block:: bash

        salt '*' service.reload <service name>
    """
    # retcode 0 means the service command succeeded
    return __salt__["cmd.retcode"](["service", name, "reload"], python_shell=False) == 0
def force_reload(name):
    """
    Force-reload the named service

    CLI Example:

    .. code-block:: bash

        salt '*' service.force_reload <service name>
    """
    # retcode 0 means the service command succeeded
    return __salt__["cmd.retcode"](["service", name, "force-reload"], python_shell=False) == 0
def status(name, sig=None):
    """
    Return the status for a service.
    If the name contains globbing, a dict mapping service name to True/False
    values is returned.

    .. versionchanged:: 2018.3.0
        The service name can now be a glob (e.g. ``salt*``)

    Args:
        name (str): The name of the service to check
        sig (str): Signature to use to find the service via ps

    Returns:
        bool: True if running, False otherwise
        dict: Maps service name to True if running, False otherwise

    CLI Example:

    .. code-block:: bash

        salt '*' service.status <service name> [service signature]
    """
    if sig:
        # a process signature was supplied: just look for a matching PID
        return bool(__salt__["status.pid"](sig))

    contains_globbing = bool(re.search(r"\*|\?|\[.+\]", name))
    if contains_globbing:
        services = fnmatch.filter(get_all(), name)
    else:
        services = [name]
    results = {}
    for service in services:
        cmd = ["service", service, "status"]
        if _service_is_upstart(service):
            # decide result base on cmd output, thus ignore retcode,
            # which makes cmd output not at error lvl even when cmd fail.
            results[service] = "start/running" in __salt__["cmd.run"](
                cmd, python_shell=False, ignore_retcode=True
            )
        else:
            # decide result base on retcode, thus ignore output (set quite)
            # because there is no way to avoid logging at error lvl when
            # service is not running - retcode != 0 (which is totally relevant).
            # NOTE(review): "quite" below looks like a typo of "quiet" --
            # confirm which keyword cmd.retcode actually honors.
            results[service] = not bool(
                __salt__["cmd.retcode"](
                    cmd, python_shell=False, ignore_retcode=True, quite=True
                )
            )
    if contains_globbing:
        return results
    return results[name]
def _get_service_exec():
    """
    Debian uses update-rc.d to manage System-V style services.
    http://www.debian.org/doc/debian-policy/ch-opersys.html#s9.3.3

    Raises (via check_or_die) when the tool is not on the PATH.
    """
    tool = "update-rc.d"
    salt.utils.path.check_or_die(tool)
    return tool
def _upstart_disable(name):
    """
    Disable an Upstart service by appending a "manual" stanza to the job's
    override file. No-op when already disabled.
    """
    if not _upstart_is_disabled(name):
        override = "/etc/init/{}.override".format(name)
        with salt.utils.files.fopen(override, "a") as ofile:
            ofile.write(salt.utils.stringutils.to_str("manual\n"))
    return _upstart_is_disabled(name)
def _upstart_enable(name):
    """
    Enable an Upstart service.

    Strips any "manual" stanza from the job's .conf and .override files in
    place, then deletes the .override file if it ends up empty.
    """
    if _upstart_is_enabled(name):
        return _upstart_is_enabled(name)
    override = "/etc/init/{}.override".format(name)
    files = ["/etc/init/{}.conf".format(name), override]
    for file_name in filter(os.path.isfile, files):
        with salt.utils.files.fopen(file_name, "r+") as fp_:
            # drop every "manual" marker line (args: pattern, repl, string,
            # count=0 meaning all, flags)
            new_text = re.sub(
                r"^\s*manual\n?",
                "",
                salt.utils.stringutils.to_unicode(fp_.read()),
                0,
                re.MULTILINE,
            )
            # rewrite the file in place with the stanza removed
            fp_.seek(0)
            fp_.write(salt.utils.stringutils.to_str(new_text))
            fp_.truncate()
    # an empty override file serves no purpose; clean it up
    if os.access(override, os.R_OK) and os.path.getsize(override) == 0:
        os.unlink(override)
    return _upstart_is_enabled(name)
def enable(name, **kwargs):
    """
    Enable the named service to start at boot

    CLI Example:

    .. code-block:: bash

        salt '*' service.enable <service name>
    """
    if _service_is_upstart(name):
        return _upstart_enable(name)
    executable = _get_service_exec()
    # pass an argv list (like disable() does) instead of a formatted string:
    # with python_shell=False a list avoids any re-splitting of the command
    # and handles unusual service names safely
    cmd = [executable, "-f", name, "defaults"]
    return not __salt__["cmd.retcode"](cmd, python_shell=False)
def disable(name, **kwargs):
    """
    Disable the named service from starting on boot

    CLI Example:

    .. code-block:: bash

        salt '*' service.disable <service name>
    """
    if _service_is_upstart(name):
        return _upstart_disable(name)
    cmd = [_get_service_exec(), "-f", name, "remove"]
    return __salt__["cmd.retcode"](cmd, python_shell=False) == 0
def enabled(name, **kwargs):
    """
    Check to see if the named service is enabled to start on boot

    Returns None when the service is neither an Upstart job nor a sysv
    script.

    CLI Example:

    .. code-block:: bash

        salt '*' service.enabled <service name>
    """
    if _service_is_upstart(name):
        return _upstart_is_enabled(name)
    if _service_is_sysv(name):
        return _sysv_is_enabled(name)
    return None
def disabled(name):
    """
    Check to see if the named service is disabled to start on boot

    Returns None when the service is neither an Upstart job nor a sysv
    script.

    CLI Example:

    .. code-block:: bash

        salt '*' service.disabled <service name>
    """
    if _service_is_upstart(name):
        return _upstart_is_disabled(name)
    if _service_is_sysv(name):
        return _sysv_is_disabled(name)
    return None
| |
from StringIO import StringIO
from urlparse import urlparse, parse_qs
from urllib2 import URLError
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase as DjangoTestCase
from django.utils import simplejson
import mock
from social_auth.models import UserSocialAuth
from social_auth import version as VERSION
if VERSION[1] == 3:
DEFAULT_REDIRECT = getattr(settings, 'LOGIN_REDIRECT_URL', '')
LOGIN_ERROR_URL = getattr(settings, 'LOGIN_ERROR_URL', settings.LOGIN_URL)
NEW_USER_REDIRECT = DEFAULT_REDIRECT
BEGIN_URL_NAME = 'begin'
COMPLETE_URL_NAME = 'complete'
else:
DEFAULT_REDIRECT = getattr(settings, 'SOCIAL_AUTH_LOGIN_REDIRECT_URL', '') or getattr(settings, 'LOGIN_REDIRECT_URL', '')
LOGIN_ERROR_URL = getattr(settings, 'LOGIN_ERROR_URL', settings.LOGIN_URL)
NEW_USER_REDIRECT = getattr(settings, 'SOCIAL_AUTH_NEW_USER_REDIRECT_URL', '')
BEGIN_URL_NAME = 'socialauth_begin'
COMPLETE_URL_NAME = 'socialauth_complete'
def lastfm_user_response():
    """Return a canned Last.fm user.getinfo payload (user "RJ") for tests."""
    # NB: the medium/large image URLs deliberately keep the original
    # single-slash "http:/" form so fixtures stay byte-identical.
    images = [
        {"#text": "http://userserve-ak.last.fm/serve/34/8270359.jpg", "size": "small"},
        {"#text": "http:/userserve-ak.last.fm/serve/64/8270359.jpg", "size": "medium"},
        {"#text": "http:/userserve-ak.last.fm/serve/126/8270359.jpg", "size": "large"},
        {"#text": "http://userserve-ak.last.fm/serve/252/8270359.jpg", "size": "extralarge"},
    ]
    profile = {
        "name": "RJ",
        "realname": "Richard Jones",
        "image": images,
        "url": "http://www.last.fm/user/RJ",
        "id": "1000002",
        "country": "UK",
        "age": 29,
        "gender": "m",
        "subscriber": 1,
        "playcount": 61798,
        "playlists": 4,
        "bootstrap": "0",
        "registered": {"#text": "2002-11-20 11:50", "unixtime": "1037793040"},
    }
    return profile
class AuthStartTestCase(DjangoTestCase):
    """Test login via Lastfm."""

    def setUp(self):
        self.login_url = reverse(BEGIN_URL_NAME, kwargs={'backend': 'lastfm'})

    def test_redirect_url(self):
        """Check redirect to Last.fm."""
        response = self.client.get(self.login_url)
        # Don't use assertRedirect because we don't want to fetch the url.
        # Fixed: the original used assertTrue(status_code, 302), where 302
        # is just the failure *message* -- the assertion passed for any
        # truthy status code. assertEqual actually checks the value.
        self.assertEqual(response.status_code, 302)
        url = response['Location']
        scheme, netloc, path, params, query, fragment = urlparse(url)
        self.assertEqual('%s://%s%s' % (scheme, netloc, path), 'https://www.last.fm/api/auth/')
        query_data = parse_qs(query)
        self.assertEqual(query_data['api_key'][0], settings.LASTFM_API_KEY)

    def test_callback(self):
        """Check callback sent to Last.fm."""
        response = self.client.get(self.login_url)
        url = response['Location']
        scheme, netloc, path, params, query, fragment = urlparse(url)
        query_data = parse_qs(query)
        callback = reverse(COMPLETE_URL_NAME, kwargs={'backend': 'lastfm'})
        self.assertTrue(query_data['cb'][0].endswith(callback))
        self.assertTrue(query_data['cb'][0].startswith('http:'))

    def test_https_callback(self):
        """
        Convert https callbacks to http due to Last.fm bug.
        See http://www.last.fm/group/Last.fm+Web+Services/forum/21604/_/633280
        """
        response = self.client.get(self.login_url, **{'wsgi.url_scheme': 'https'})
        url = response['Location']
        scheme, netloc, path, params, query, fragment = urlparse(url)
        query_data = parse_qs(query)
        callback = reverse(COMPLETE_URL_NAME, kwargs={'backend': 'lastfm'})
        # 'https:' does not start with 'http:' so this catches https callbacks
        self.assertTrue(query_data['cb'][0].startswith('http:'))
class AuthCompleteTestCase(DjangoTestCase):
    """Complete login process from Last.fm."""

    def setUp(self):
        # patch out the network calls: access_token and user_data are
        # replaced with canned values for every test in this case
        self.complete_url = reverse(COMPLETE_URL_NAME, kwargs={'backend': 'lastfm'})
        self.access_token_patch = mock.patch('lastfm_auth.backend.LastfmAuth.access_token')
        self.access_token_mock = self.access_token_patch.start()
        self.access_token_mock.return_value = ('USERNAME', 'FAKETOKEN')
        self.user_data_patch = mock.patch('lastfm_auth.backend.LastfmAuth.user_data')
        self.user_data_mock = self.user_data_patch.start()
        fake_data = lastfm_user_response()
        self.user_data_mock.return_value = fake_data

    def tearDown(self):
        # patches started in setUp must be stopped explicitly
        self.access_token_patch.stop()
        self.user_data_patch.stop()

    def test_new_user(self):
        """Login for the first time via Last.fm."""
        data = {'token': 'FAKEKEY'}
        response = self.client.get(self.complete_url, data)
        self.assertRedirects(response, NEW_USER_REDIRECT)

    def test_new_user_name(self):
        """Check the name set on the newly created user."""
        data = {'token': 'FAKEKEY'}
        self.client.get(self.complete_url, data)
        new_user = User.objects.latest('id')
        # "Richard Jones" should be split into first/last name
        self.assertEqual(new_user.first_name, "Richard")
        self.assertEqual(new_user.last_name, "Jones")

    def test_single_name(self):
        """Process a user with a single word name."""
        fake_data = lastfm_user_response()
        fake_data['realname'] = "Cher"
        self.user_data_mock.return_value = fake_data
        data = {'token': 'FAKEKEY'}
        self.client.get(self.complete_url, data)
        new_user = User.objects.latest('id')
        self.assertEqual(new_user.first_name, "Cher")
        self.assertEqual(new_user.last_name, "")

    def test_existing_user(self):
        """Login with an existing user via Last.fm."""
        user = User.objects.create_user(username='test', password='test', email='')
        # uid matches the "id" in lastfm_user_response()
        social_user = UserSocialAuth.objects.create(
            user=user, provider='lastfm', uid='1000002'
        )
        data = {'token': 'FAKEKEY'}
        response = self.client.get(self.complete_url, data)
        self.assertRedirects(response, DEFAULT_REDIRECT)

    def test_failed_authentication(self):
        """Failed authentication. Bad data from Last.fm."""
        self.user_data_mock.return_value = None
        data = {'token': 'FAKEKEY'}
        response = self.client.get(self.complete_url, data)
        self.assertRedirects(response, LOGIN_ERROR_URL)

    def test_no_token(self):
        """Failed auth due to no token."""
        response = self.client.get(self.complete_url)
        self.assertRedirects(response, LOGIN_ERROR_URL)
class ContribAuthTestCase(DjangoTestCase):
    """Validate contrib.auth calls."""

    def test_has_get_user(self):
        """Authentication backend must define a get_user method."""
        from lastfm_auth.backend import LastfmBackend
        get_user = getattr(LastfmBackend, 'get_user', None)
        self.assertTrue(get_user, "Auth backend must define get_user")
        self.assertTrue(callable(get_user), "get_user should be a callable")

    def test_get_existing_user(self):
        """Get existing user by id."""
        from lastfm_auth.backend import LastfmBackend
        user = User.objects.create_user(username='test', password='test', email='')
        result = LastfmBackend().get_user(user.id)
        self.assertEqual(result, user)

    def test_get_non_existing_user(self):
        """User ids which don't exist should return none."""
        from lastfm_auth.backend import LastfmBackend
        result = LastfmBackend().get_user(100)
        self.assertEqual(result, None)

    def test_authenticate(self):
        """Authentication backend must define a authenticate method."""
        from lastfm_auth.backend import LastfmBackend
        authenticate = getattr(LastfmBackend, 'authenticate', None)
        self.assertTrue(authenticate, "Auth backend must define authenticate")
        self.assertTrue(callable(authenticate), "authenticate should be a callable")

    def test_authenticate_existing_user(self):
        """Authenticate an existing user."""
        from lastfm_auth.backend import LastfmBackend
        user = User.objects.create_user(username='test', password='test', email='')
        # uid matches the "id" in lastfm_user_response()
        social_user = UserSocialAuth.objects.create(
            user=user, provider='lastfm', uid='1000002'
        )
        response = lastfm_user_response()
        result = LastfmBackend().authenticate(response=response, lastfm=True)
        self.assertEqual(result, user)
        if hasattr(result, 'is_new'):
            self.assertFalse(result.is_new)

    def test_authenticate_non_existing_user(self):
        """Authenticate a new user creating that user."""
        from lastfm_auth.backend import LastfmBackend
        response = lastfm_user_response()
        result = LastfmBackend().authenticate(response=response, lastfm=True)
        self.assertTrue(result)
        if hasattr(result, 'is_new'):
            self.assertTrue(result.is_new)
class LastfmAPITestCase(DjangoTestCase):
    """Validate calls to the Last.fm API.

    All tests patch ``lastfm_auth.backend.urlopen`` so no network traffic
    occurs; StringIO stands in for the HTTP response body.
    """

    def test_access_token_url(self):
        """
        Check url contruction for requesting access/session token.
        See http://www.last.fm/api/show?service=125
        """
        from lastfm_auth.backend import LastfmAuth
        with mock.patch('lastfm_auth.backend.urlopen') as urlopen:
            urlopen.return_value = StringIO('')
            request = mock.MagicMock()
            redirect = 'http://example.com'
            access_token = LastfmAuth(request, redirect).access_token('REQUESTTOKEN')
            # inspect the URL that was actually requested
            args, kwargs = urlopen.call_args
            url = args[0]
            scheme, netloc, path, params, query, fragment = urlparse(url)
            self.assertEqual('%s://%s%s' % (scheme, netloc, path), 'https://ws.audioscrobbler.com/2.0/')
            query_data = parse_qs(query)
            self.assertEqual(query_data['api_key'][0], settings.LASTFM_API_KEY)
            self.assertEqual(query_data['token'][0], 'REQUESTTOKEN')
            self.assertEqual(query_data['method'][0], 'auth.getSession')
            self.assertEqual(query_data['format'][0], 'json')

    def test_access_token_value(self):
        """
        Check parsed access token value.
        See http://www.last.fm/api/show?service=125
        """
        from lastfm_auth.backend import LastfmAuth
        with mock.patch('lastfm_auth.backend.urlopen') as urlopen:
            return_data = {
                'session': {
                    'name': 'MyLastFMUsername',
                    'key': 'd580d57f32848f5dcf574d1ce18d78b2',
                    'subscriber': 0,
                }
            }
            urlopen.return_value = StringIO(simplejson.dumps(return_data))
            request = mock.MagicMock()
            redirect = 'http://example.com'
            username, access_token = LastfmAuth(request, redirect).access_token('REQUESTTOKEN')
            self.assertEqual(username, 'MyLastFMUsername')
            self.assertEqual(access_token, 'd580d57f32848f5dcf574d1ce18d78b2')

    def test_access_token_upstream_failure(self):
        """
        Check handling upstream failures from Last.fm.
        See http://www.last.fm/api/show?service=125
        """
        from lastfm_auth.backend import LastfmAuth
        with mock.patch('lastfm_auth.backend.urlopen') as urlopen:
            # simulate a network-level failure
            urlopen.side_effect = URLError('Fake URL error')
            request = mock.MagicMock()
            redirect = 'http://example.com'
            username, access_token = LastfmAuth(request, redirect).access_token('REQUESTTOKEN')
            self.assertFalse(username)
            self.assertFalse(access_token)

    def test_access_token_bad_data(self):
        """
        Handle bad data when requesting access/session token.
        See http://www.last.fm/api/show?service=125
        """
        from lastfm_auth.backend import LastfmAuth
        with mock.patch('lastfm_auth.backend.urlopen') as urlopen:
            # empty body: not valid JSON
            urlopen.return_value = StringIO('')
            request = mock.MagicMock()
            redirect = 'http://example.com'
            username, access_token = LastfmAuth(request, redirect).access_token('REQUESTTOKEN')
            self.assertFalse(username)
            self.assertFalse(access_token)

    def test_user_data_url(self):
        """
        Check url contruction for requesting user data.
        See http://www.last.fm/api/show?service=344
        """
        from lastfm_auth.backend import LastfmAuth
        with mock.patch('lastfm_auth.backend.urlopen') as urlopen:
            urlopen.return_value = StringIO('')
            request = mock.MagicMock()
            redirect = 'http://example.com'
            user_data = LastfmAuth(request, redirect).user_data('UserName')
            # inspect the URL that was actually requested
            args, kwargs = urlopen.call_args
            url = args[0]
            scheme, netloc, path, params, query, fragment = urlparse(url)
            self.assertEqual('%s://%s%s' % (scheme, netloc, path), 'https://ws.audioscrobbler.com/2.0/')
            query_data = parse_qs(query)
            self.assertEqual(query_data['api_key'][0], settings.LASTFM_API_KEY)
            self.assertEqual(query_data['method'][0], 'user.getinfo')
            self.assertEqual(query_data['format'][0], 'json')

    def test_user_data_value(self):
        """
        Check return value for requesting user data.
        See http://www.last.fm/api/show?service=344
        """
        from lastfm_auth.backend import LastfmAuth
        with mock.patch('lastfm_auth.backend.urlopen') as urlopen:
            return_data = {'user': lastfm_user_response()}
            urlopen.return_value = StringIO(simplejson.dumps(return_data))
            request = mock.MagicMock()
            redirect = 'http://example.com'
            user_data = LastfmAuth(request, redirect).user_data('UserName')
            # backend should unwrap the top-level "user" key
            self.assertEqual(user_data, lastfm_user_response())

    def test_user_data_upstream_failure(self):
        """
        Handle upstream errors when requesting user data.
        See http://www.last.fm/api/show?service=344
        """
        from lastfm_auth.backend import LastfmAuth
        with mock.patch('lastfm_auth.backend.urlopen') as urlopen:
            urlopen.side_effect = URLError('Fake URL error')
            request = mock.MagicMock()
            redirect = 'http://example.com'
            user_data = LastfmAuth(request, redirect).user_data('UserName')
            self.assertEqual(user_data, None)

    def test_user_data_bad_data(self):
        """
        Bad return data when requesting user data.
        See http://www.last.fm/api/show?service=344
        """
        from lastfm_auth.backend import LastfmAuth
        with mock.patch('lastfm_auth.backend.urlopen') as urlopen:
            # empty body: not valid JSON
            urlopen.return_value = StringIO('')
            request = mock.MagicMock()
            redirect = 'http://example.com'
            user_data = LastfmAuth(request, redirect).user_data('UserName')
            self.assertEqual(user_data, None)
| |
# -*- coding: utf-8 -*-
"""
pygments.lexers.data
~~~~~~~~~~~~~~~~~~~~
    Lexers for data file formats.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, ExtendedRegexLexer, LexerContext, \
include, bygroups, inherit
from pygments.token import Text, Comment, Keyword, Name, String, Number, \
Punctuation, Literal, Error
__all__ = ['YamlLexer', 'JsonLexer', 'JsonBareObjectLexer', 'JsonLdLexer']
class YamlLexerContext(LexerContext):
    """Lexer context that additionally tracks YAML indentation state."""
    def __init__(self, *args, **kwargs):
        super(YamlLexerContext, self).__init__(*args, **kwargs)
        # stack of enclosing indentation levels; -1 means "no indent yet"
        self.indent_stack = []
        self.indent = -1
        self.next_indent = 0
        # explicit indent for the current block scalar, if any
        self.block_scalar_indent = None
class YamlLexer(ExtendedRegexLexer):
    """
    Lexer for `YAML <http://yaml.org/>`_, a human-friendly data serialization
    language.
    .. versionadded:: 0.11
    """
    name = 'YAML'
    aliases = ['yaml']
    filenames = ['*.yaml', '*.yml']
    mimetypes = ['text/x-yaml']
    # The helpers below are callback factories: each closes over a token
    # class and returns an ExtendedRegexLexer callback that operates on a
    # YamlLexerContext (see above) and advances context.pos itself.
    def something(token_class):
        """Do not produce empty tokens."""
        def callback(lexer, match, context):
            text = match.group()
            if not text:
                return
            yield match.start(), token_class, text
            context.pos = match.end()
        return callback
    def reset_indent(token_class):
        """Reset the indentation levels."""
        def callback(lexer, match, context):
            text = match.group()
            context.indent_stack = []
            context.indent = -1
            context.next_indent = 0
            context.block_scalar_indent = None
            yield match.start(), token_class, text
            context.pos = match.end()
        return callback
    def save_indent(token_class, start=False):
        """Save a possible indentation level."""
        def callback(lexer, match, context):
            text = match.group()
            extra = ''
            if start:
                context.next_indent = len(text)
                if context.next_indent < context.indent:
                    # dedent: pop saved levels until one fits
                    while context.next_indent < context.indent:
                        context.indent = context.indent_stack.pop()
                    if context.next_indent > context.indent:
                        # dedent does not line up with any outer level
                        extra = text[context.indent:]
                        text = text[:context.indent]
            else:
                context.next_indent += len(text)
            if text:
                yield match.start(), token_class, text
            if extra:
                yield match.start()+len(text), token_class.Error, extra
            context.pos = match.end()
        return callback
    def set_indent(token_class, implicit=False):
        """Set the previously saved indentation level."""
        def callback(lexer, match, context):
            text = match.group()
            if context.indent < context.next_indent:
                context.indent_stack.append(context.indent)
                context.indent = context.next_indent
            if not implicit:
                context.next_indent += len(text)
            yield match.start(), token_class, text
            context.pos = match.end()
        return callback
    def set_block_scalar_indent(token_class):
        """Set an explicit indentation level for a block scalar."""
        def callback(lexer, match, context):
            text = match.group()
            context.block_scalar_indent = None
            if not text:
                return
            increment = match.group(1)
            if increment:
                current_indent = max(context.indent, 0)
                increment = int(increment)
                context.block_scalar_indent = current_indent + increment
            if text:
                yield match.start(), token_class, text
            context.pos = match.end()
        return callback
    def parse_block_scalar_empty_line(indent_token_class, content_token_class):
        """Process an empty line in a block scalar."""
        def callback(lexer, match, context):
            text = match.group()
            if (context.block_scalar_indent is None or
                    len(text) <= context.block_scalar_indent):
                if text:
                    yield match.start(), indent_token_class, text
            else:
                indentation = text[:context.block_scalar_indent]
                content = text[context.block_scalar_indent:]
                yield match.start(), indent_token_class, indentation
                yield (match.start()+context.block_scalar_indent,
                       content_token_class, content)
            context.pos = match.end()
        return callback
    def parse_block_scalar_indent(token_class):
        """Process indentation spaces in a block scalar."""
        def callback(lexer, match, context):
            text = match.group()
            if context.block_scalar_indent is None:
                if len(text) <= max(context.indent, 0):
                    context.stack.pop()
                    context.stack.pop()
                    return
                context.block_scalar_indent = len(text)
            else:
                if len(text) < context.block_scalar_indent:
                    context.stack.pop()
                    context.stack.pop()
                    return
            if text:
                yield match.start(), token_class, text
            context.pos = match.end()
        return callback
    def parse_plain_scalar_indent(token_class):
        """Process indentation spaces in a plain scalar."""
        def callback(lexer, match, context):
            text = match.group()
            if len(text) <= context.indent:
                context.stack.pop()
                context.stack.pop()
                return
            if text:
                yield match.start(), token_class, text
            context.pos = match.end()
        return callback
    tokens = {
        # the root rules
        'root': [
            # ignored whitespaces
            (r'[ ]+(?=#|$)', Text),
            # line breaks
            (r'\n+', Text),
            # a comment
            (r'#[^\n]*', Comment.Single),
            # the '%YAML' directive
            (r'^%YAML(?=[ ]|$)', reset_indent(Name.Tag), 'yaml-directive'),
            # the %TAG directive
            (r'^%TAG(?=[ ]|$)', reset_indent(Name.Tag), 'tag-directive'),
            # document start and document end indicators
            (r'^(?:---|\.\.\.)(?=[ ]|$)', reset_indent(Name.Namespace),
             'block-line'),
            # indentation spaces
            (r'[ ]*(?!\s|$)', save_indent(Text, start=True),
             ('block-line', 'indentation')),
        ],
        # trailing whitespaces after directives or a block scalar indicator
        'ignored-line': [
            # ignored whitespaces
            (r'[ ]+(?=#|$)', Text),
            # a comment
            (r'#[^\n]*', Comment.Single),
            # line break
            (r'\n', Text, '#pop:2'),
        ],
        # the %YAML directive
        'yaml-directive': [
            # the version number
            (r'([ ]+)([0-9]+\.[0-9]+)',
             bygroups(Text, Number), 'ignored-line'),
        ],
        # the %TAG directive
        'tag-directive': [
            # a tag handle and the corresponding prefix
            (r'([ ]+)(!|![\w-]*!)'
             r'([ ]+)(!|!?[\w;/?:@&=+$,.!~*\'()\[\]%-]+)',
             bygroups(Text, Keyword.Type, Text, Keyword.Type),
             'ignored-line'),
        ],
        # block scalar indicators and indentation spaces
        'indentation': [
            # trailing whitespaces are ignored
            (r'[ ]*$', something(Text), '#pop:2'),
            # whitespaces preceding block collection indicators
            (r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(Text)),
            # block collection indicators
            (r'[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)),
            # the beginning of a block line
            (r'[ ]*', save_indent(Text), '#pop'),
        ],
        # an indented line in the block context
        'block-line': [
            # the line end
            (r'[ ]*(?=#|$)', something(Text), '#pop'),
            # whitespaces separating tokens
            (r'[ ]+', Text),
            # tags, anchors and aliases,
            include('descriptors'),
            # block collections and scalars
            include('block-nodes'),
            # flow collections and quoted scalars
            include('flow-nodes'),
            # a plain scalar
            (r'(?=[^\s?:,\[\]{}#&*!|>\'"%@`-]|[?:-]\S)',
             something(Name.Variable),
             'plain-scalar-in-block-context'),
        ],
        # tags, anchors, aliases
        'descriptors': [
            # a full-form tag
            (r'!<[\w#;/?:@&=+$,.!~*\'()\[\]%-]+>', Keyword.Type),
            # a tag in the form '!', '!suffix' or '!handle!suffix'
            (r'!(?:[\w-]+!)?'
             r'[\w#;/?:@&=+$,.!~*\'()\[\]%-]+', Keyword.Type),
            # an anchor
            (r'&[\w-]+', Name.Label),
            # an alias
            (r'\*[\w-]+', Name.Variable),
        ],
        # block collections and scalars
        'block-nodes': [
            # implicit key
            (r':(?=[ ]|$)', set_indent(Punctuation.Indicator, implicit=True)),
            # literal and folded scalars
            (r'[|>]', Punctuation.Indicator,
             ('block-scalar-content', 'block-scalar-header')),
        ],
        # flow collections and quoted scalars
        'flow-nodes': [
            # a flow sequence
            (r'\[', Punctuation.Indicator, 'flow-sequence'),
            # a flow mapping
            (r'\{', Punctuation.Indicator, 'flow-mapping'),
            # a single-quoted scalar
            (r'\'', String, 'single-quoted-scalar'),
            # a double-quoted scalar
            (r'\"', String, 'double-quoted-scalar'),
        ],
        # the content of a flow collection
        'flow-collection': [
            # whitespaces
            (r'[ ]+', Text),
            # line breaks
            (r'\n+', Text),
            # a comment
            (r'#[^\n]*', Comment.Single),
            # simple indicators
            (r'[?:,]', Punctuation.Indicator),
            # tags, anchors and aliases
            include('descriptors'),
            # nested collections and quoted scalars
            include('flow-nodes'),
            # a plain scalar
            (r'(?=[^\s?:,\[\]{}#&*!|>\'"%@`])',
             something(Name.Variable),
             'plain-scalar-in-flow-context'),
        ],
        # a flow sequence indicated by '[' and ']'
        'flow-sequence': [
            # include flow collection rules
            include('flow-collection'),
            # the closing indicator
            (r'\]', Punctuation.Indicator, '#pop'),
        ],
        # a flow mapping indicated by '{' and '}'
        'flow-mapping': [
            # include flow collection rules
            include('flow-collection'),
            # the closing indicator
            (r'\}', Punctuation.Indicator, '#pop'),
        ],
        # block scalar lines
        'block-scalar-content': [
            # line break
            (r'\n', Text),
            # empty line
            (r'^[ ]+$',
             parse_block_scalar_empty_line(Text, Name.Constant)),
            # indentation spaces (we may leave the state here)
            (r'^[ ]*', parse_block_scalar_indent(Text)),
            # line content
            (r'[\S\t ]+', Name.Constant),
        ],
        # the content of a literal or folded scalar
        'block-scalar-header': [
            # indentation indicator followed by chomping flag
            (r'([1-9])?[+-]?(?=[ ]|$)',
             set_block_scalar_indent(Punctuation.Indicator),
             'ignored-line'),
            # chomping flag followed by indentation indicator
            (r'[+-]?([1-9])?(?=[ ]|$)',
             set_block_scalar_indent(Punctuation.Indicator),
             'ignored-line'),
        ],
        # ignored and regular whitespaces in quoted scalars
        'quoted-scalar-whitespaces': [
            # leading and trailing whitespaces are ignored
            (r'^[ ]+', Text),
            (r'[ ]+$', Text),
            # line breaks are ignored
            (r'\n+', Text),
            # other whitespaces are a part of the value
            (r'[ ]+', Name.Variable),
        ],
        # single-quoted scalars
        'single-quoted-scalar': [
            # include whitespace and line break rules
            include('quoted-scalar-whitespaces'),
            # escaping of the quote character
            (r'\'\'', String.Escape),
            # regular non-whitespace characters
            (r'[^\s\']+', String),
            # the closing quote
            (r'\'', String, '#pop'),
        ],
        # double-quoted scalars
        'double-quoted-scalar': [
            # include whitespace and line break rules
            include('quoted-scalar-whitespaces'),
            # escaping of special characters
            (r'\\[0abt\tn\nvfre "\\N_LP]', String),
            # escape codes
            (r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})',
             String.Escape),
            # regular non-whitespace characters
            (r'[^\s"\\]+', String),
            # the closing quote
            (r'"', String, '#pop'),
        ],
        # the beginning of a new line while scanning a plain scalar
        'plain-scalar-in-block-context-new-line': [
            # empty lines
            (r'^[ ]+$', Text),
            # line breaks
            (r'\n+', Text),
            # document start and document end indicators
            (r'^(?=---|\.\.\.)', something(Name.Namespace), '#pop:3'),
            # indentation spaces (we may leave the block line state here)
            (r'^[ ]*', parse_plain_scalar_indent(Text), '#pop'),
        ],
        # a plain scalar in the block context
        'plain-scalar-in-block-context': [
            # the scalar ends with the ':' indicator
            (r'[ ]*(?=:[ ]|:$)', something(Text), '#pop'),
            # the scalar ends with whitespaces followed by a comment
            (r'[ ]+(?=#)', Text, '#pop'),
            # trailing whitespaces are ignored
            (r'[ ]+$', Text),
            # line breaks are ignored
            (r'\n+', Text, 'plain-scalar-in-block-context-new-line'),
            # other whitespaces are a part of the value
            (r'[ ]+', Literal.Scalar.Plain),
            # regular non-whitespace characters
            (r'(?::(?!\s)|[^\s:])+', Literal.Scalar.Plain),
        ],
        # a plain scalar in the flow context
        'plain-scalar-in-flow-context': [
            # the scalar ends with an indicator character
            (r'[ ]*(?=[,:?\[\]{}])', something(Text), '#pop'),
            # the scalar ends with a comment
            (r'[ ]+(?=#)', Text, '#pop'),
            # leading and trailing whitespaces are ignored
            (r'^[ ]+', Text),
            (r'[ ]+$', Text),
            # line breaks are ignored
            (r'\n+', Text),
            # other whitespaces are a part of the value
            (r'[ ]+', Name.Variable),
            # regular non-whitespace characters
            (r'[^\s,:?\[\]{}]+', Name.Variable),
        ],
    }
    def get_tokens_unprocessed(self, text=None, context=None):
        # install our indentation-aware context if the caller gave none
        if context is None:
            context = YamlLexerContext(text, 0)
        return super(YamlLexer, self).get_tokens_unprocessed(text, context)
class JsonLexer(RegexLexer):
    """
    For JSON data structures.
    .. versionadded:: 1.5
    """
    name = 'JSON'
    aliases = ['json']
    filenames = ['*.json']
    mimetypes = ['application/json']
    flags = re.DOTALL
    # integer part of a number
    int_part = r'-?(0|[1-9]\d*)'
    # fractional part of a number
    frac_part = r'\.\d+'
    # exponential part of a number
    exp_part = r'[eE](\+|-)?\d+'
    tokens = {
        'whitespace': [
            (r'\s+', Text),
        ],
        # represents a simple terminal value
        'simplevalue': [
            (r'(true|false|null)\b', Keyword.Constant),
            # float: integer part plus a fraction and/or an exponent
            (('%(int_part)s(%(frac_part)s%(exp_part)s|'
              '%(exp_part)s|%(frac_part)s)') % vars(),
             Number.Float),
            (int_part, Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
        ],
        # the right hand side of an object, after the attribute name
        'objectattribute': [
            include('value'),
            (r':', Punctuation),
            # comma terminates the attribute but expects more
            (r',', Punctuation, '#pop'),
            # a closing bracket terminates the entire object, so pop twice
            (r'\}', Punctuation, '#pop:2'),
        ],
        # a json object - { attr, attr, ... }
        'objectvalue': [
            include('whitespace'),
            (r'"(\\\\|\\"|[^"])*"', Name.Tag, 'objectattribute'),
            (r'\}', Punctuation, '#pop'),
        ],
        # json array - [ value, value, ... ]
        'arrayvalue': [
            include('whitespace'),
            include('value'),
            (r',', Punctuation),
            (r'\]', Punctuation, '#pop'),
        ],
        # a json value - either a simple value or a complex value (object or array)
        'value': [
            include('whitespace'),
            include('simplevalue'),
            (r'\{', Punctuation, 'objectvalue'),
            (r'\[', Punctuation, 'arrayvalue'),
        ],
        # the root of a json document should be a value
        'root': [
            include('value'),
        ],
    }
class JsonBareObjectLexer(JsonLexer):
    """
    For JSON data structures (with missing object curly braces).
    .. versionadded:: 2.2
    """
    name = 'JSONBareObject'
    aliases = ['json-object']
    filenames = []
    mimetypes = ['application/json-object']
    tokens = {
        'root': [
            # the surrounding braces are implicit, so an explicit closing
            # brace at the top level is an error
            (r'\}', Error),
            include('objectvalue'),
        ],
        'objectattribute': [
            # likewise, the implicit top-level object is never closed
            (r'\}', Error),
            inherit,
        ],
    }
class JsonLdLexer(JsonLexer):
    """
    For `JSON-LD <http://json-ld.org/>`_ linked data.
    .. versionadded:: 2.0
    """
    name = 'JSON-LD'
    aliases = ['jsonld', 'json-ld']
    filenames = ['*.jsonld']
    mimetypes = ['application/ld+json']
    tokens = {
        'objectvalue': [
            # JSON-LD keywords ("@context", "@id", ...) are highlighted as
            # decorators; all other keys fall through to the JsonLexer rules
            (r'"@(context|id|value|language|type|container|list|set|'
             r'reverse|index|base|vocab|graph)"', Name.Decorator,
             'objectattribute'),
            inherit,
        ],
    }
| |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import uuid
from oslo_config import cfg
from six.moves import http_client
import webob
from keystone.common import authorization
from keystone.common import tokenless_auth
from keystone.contrib.federation import constants as federation_constants
from keystone import exception
from keystone import middleware
from keystone.tests import unit
from keystone.tests.unit import mapping_fixtures
from keystone.tests.unit import test_backend_sql
CONF = cfg.CONF
def make_request(**kwargs):
    """Build a blank webob request at '/', honoring optional overrides.

    ``accept``, ``method`` and ``body`` are popped from *kwargs*; everything
    else is forwarded to ``webob.Request.blank``.
    """
    accept_header = kwargs.pop('accept', None)
    http_method = kwargs.pop('method', 'GET')
    payload = kwargs.pop('body', None)
    request = webob.Request.blank('/', **kwargs)
    request.method = http_method
    if payload is not None:
        request.body = payload
    if accept_header is not None:
        request.accept = accept_header
    return request
def make_response(**kwargs):
    """Build a webob response with an optional ``body``; other kwargs are ignored."""
    return webob.Response(kwargs.pop('body', None))
class TokenAuthMiddlewareTest(unit.TestCase):
    """Verify the auth token header is copied into the request context."""

    def test_request(self):
        request = make_request()
        request.headers[middleware.AUTH_TOKEN_HEADER] = 'MAGIC'
        middleware.TokenAuthMiddleware(None).process_request(request)
        self.assertEqual(
            'MAGIC', request.environ[middleware.CONTEXT_ENV]['token_id'])
class AdminTokenAuthMiddlewareTest(unit.TestCase):
    """Verify is_admin is derived from the configured admin token."""

    def _context_for(self, token):
        # run the middleware against a request carrying the given token
        request = make_request()
        request.headers[middleware.AUTH_TOKEN_HEADER] = token
        middleware.AdminTokenAuthMiddleware(None).process_request(request)
        return request.environ[middleware.CONTEXT_ENV]

    def test_request_admin(self):
        self.assertTrue(self._context_for(CONF.admin_token)['is_admin'])

    def test_request_non_admin(self):
        self.assertFalse(self._context_for('NOT-ADMIN')['is_admin'])
class PostParamsMiddlewareTest(unit.TestCase):
    """Verify POST form parameters are parsed into the params env."""

    def test_request_with_params(self):
        request = make_request(body="arg1=one", method='POST')
        middleware.PostParamsMiddleware(None).process_request(request)
        self.assertEqual({"arg1": "one"},
                         request.environ[middleware.PARAMS_ENV])
class JsonBodyMiddlewareTest(unit.TestCase):
    """Tests for JSON request-body parsing in JsonBodyMiddleware."""

    def test_request_with_params(self):
        request = make_request(body='{"arg1": "one", "arg2": ["a"]}',
                               content_type='application/json',
                               method='POST')
        middleware.JsonBodyMiddleware(None).process_request(request)
        self.assertEqual({"arg1": "one", "arg2": ["a"]},
                         request.environ[middleware.PARAMS_ENV])

    def test_malformed_json(self):
        request = make_request(body='{"arg1": "on',
                               content_type='application/json',
                               method='POST')
        response = middleware.JsonBodyMiddleware(None).process_request(request)
        self.assertEqual(http_client.BAD_REQUEST, response.status_int)

    def test_not_dict_body(self):
        request = make_request(body='42',
                               content_type='application/json',
                               method='POST')
        response = middleware.JsonBodyMiddleware(None).process_request(request)
        self.assertEqual(http_client.BAD_REQUEST, response.status_int)
        self.assertIn('valid JSON object', response.json['error']['message'])

    def test_no_content_type(self):
        # a missing content type defaults to JSON handling
        request = make_request(body='{"arg1": "one", "arg2": ["a"]}',
                               method='POST')
        middleware.JsonBodyMiddleware(None).process_request(request)
        self.assertEqual({"arg1": "one", "arg2": ["a"]},
                         request.environ[middleware.PARAMS_ENV])

    def test_unrecognized_content_type(self):
        request = make_request(body='{"arg1": "one", "arg2": ["a"]}',
                               content_type='text/plain',
                               method='POST')
        response = middleware.JsonBodyMiddleware(None).process_request(request)
        self.assertEqual(http_client.BAD_REQUEST, response.status_int)

    def test_unrecognized_content_type_without_body(self):
        request = make_request(content_type='text/plain', method='GET')
        middleware.JsonBodyMiddleware(None).process_request(request)
        self.assertEqual({},
                         request.environ.get(middleware.PARAMS_ENV, {}))
class AuthContextMiddlewareTest(test_backend_sql.SqlTests):
def setUp(self):
super(AuthContextMiddlewareTest, self).setUp()
self.client_issuer = uuid.uuid4().hex
self.untrusted_client_issuer = uuid.uuid4().hex
self.trusted_issuer = self.client_issuer
self.config_fixture.config(group='tokenless_auth',
trusted_issuer=[self.trusted_issuer])
# This idp_id is calculated based on
# sha256(self.client_issuer)
hashed_idp = hashlib.sha256(self.client_issuer)
self.idp_id = hashed_idp.hexdigest()
self._load_sample_data()
    def _load_sample_data(self):
        """Create domain, project, user, IdP, role and group fixtures.

        Creation order matters: the domain must exist before the project,
        the project before the user, and the role/group before the role
        assignments made at the end.
        """
        self.domain_id = uuid.uuid4().hex
        self.domain_name = uuid.uuid4().hex
        self.project_id = uuid.uuid4().hex
        self.project_name = uuid.uuid4().hex
        self.user_name = uuid.uuid4().hex
        self.user_password = uuid.uuid4().hex
        self.user_email = uuid.uuid4().hex
        # x509 matches the protocol id the tokenless-auth mapping expects
        self.protocol_id = 'x509'
        self.role_id = uuid.uuid4().hex
        self.role_name = uuid.uuid4().hex
        # for ephemeral user
        self.group_name = uuid.uuid4().hex
        # 1) Create a domain for the user.
        self.domain = {
            'description': uuid.uuid4().hex,
            'enabled': True,
            'id': self.domain_id,
            'name': self.domain_name,
        }
        self.resource_api.create_domain(self.domain_id, self.domain)
        # 2) Create a project for the user.
        self.project = {
            'description': uuid.uuid4().hex,
            'domain_id': self.domain_id,
            'enabled': True,
            'id': self.project_id,
            'name': self.project_name,
        }
        self.resource_api.create_project(self.project_id, self.project)
        # 3) Create a user in new domain.
        self.user = {
            'name': self.user_name,
            'domain_id': self.domain_id,
            'project_id': self.project_id,
            'password': self.user_password,
            'email': self.user_email,
        }
        # create_user returns the stored ref (with generated id)
        self.user = self.identity_api.create_user(self.user)
        # Add IDP, keyed by the sha256 of the client issuer (see setUp)
        self.idp = self._idp_ref(id=self.idp_id)
        self.federation_api.create_idp(self.idp['id'],
                                       self.idp)
        # Add a role
        self.role = {
            'id': self.role_id,
            'name': self.role_name,
        }
        self.role_api.create_role(self.role_id, self.role)
        # Add a group (used for the ephemeral-user tests)
        self.group = {
            'name': self.group_name,
            'domain_id': self.domain_id,
        }
        self.group = self.identity_api.create_group(self.group)
        # Assign a role to the user on a project
        self.assignment_api.add_role_to_user_and_project(
            user_id=self.user['id'],
            tenant_id=self.project_id,
            role_id=self.role_id)
        # Assign a role to the group on a project
        self.assignment_api.create_grant(
            role_id=self.role_id,
            group_id=self.group['id'],
            project_id=self.project_id)
    def _load_mapping_rules(self, rules):
        """Store *rules* as a federation mapping and bind the x509 protocol.

        :param rules: mapping rules (federation JSON structure) to install
        """
        # Add a mapping
        self.mapping = self._mapping_ref(rules=rules)
        self.federation_api.create_mapping(self.mapping['id'],
                                           self.mapping)
        # Add protocols; the protocol id is forced to 'x509' so the
        # tokenless-auth middleware can find it
        self.proto_x509 = self._proto_ref(mapping_id=self.mapping['id'])
        self.proto_x509['id'] = self.protocol_id
        self.federation_api.create_protocol(self.idp['id'],
                                            self.proto_x509['id'],
                                            self.proto_x509)
def _idp_ref(self, id=None):
idp = {
'id': id or uuid.uuid4().hex,
'enabled': True,
'description': uuid.uuid4().hex
}
return idp
def _proto_ref(self, mapping_id=None):
proto = {
'id': uuid.uuid4().hex,
'mapping_id': mapping_id or uuid.uuid4().hex
}
return proto
def _mapping_ref(self, rules=None):
if rules is None:
mapped_rules = {}
else:
mapped_rules = rules.get('rules', {})
return {
'id': uuid.uuid4().hex,
'rules': mapped_rules
}
    def _assert_tokenless_auth_context(self, context, ephemeral_user=False):
        """Assert the auth context carries the expected scope and identity.

        :param context: the auth context built by the middleware
        :param ephemeral_user: when True, expect a federated (group-based)
            identity instead of the concrete local user
        """
        self.assertIsNotNone(context)
        self.assertEqual(self.project_id, context['project_id'])
        self.assertIn(self.role_name, context['roles'])
        if ephemeral_user:
            # federated identity: first group, 'ephemeral' protocol, our IdP
            self.assertEqual(self.group['id'], context['group_ids'][0])
            self.assertEqual('ephemeral',
                             context[federation_constants.PROTOCOL])
            self.assertEqual(self.idp_id,
                             context[federation_constants.IDENTITY_PROVIDER])
        else:
            self.assertEqual(self.user['id'], context['user_id'])
    def _create_context(self, request, mapping_ref=None,
                        exception_expected=False):
        """Build the auth context from the given arguments.

        The auth context will be returned from the AuthContextMiddleware
        based on what is being passed in the given request and what mapping
        is being set up in the backend DB.

        :param request: HTTP request
        :param mapping_ref: a mapping in JSON structure to be set up in the
            backend DB for mapping a user or a group
        :param exception_expected: set to True when an exception is expected
            to be raised based on the given arguments; in that case the
            request is NOT processed and the middleware object itself is
            returned, so the caller can assert on ``process_request``
        :returns: an auth context containing user and role information (or
            the middleware object when *exception_expected* is True)
        :rtype: dict
        """
        if mapping_ref:
            self._load_mapping_rules(mapping_ref)
        if not exception_expected:
            (middleware.AuthContextMiddleware('Tokenless_auth_test').
                process_request(request))
            context = request.environ.get(authorization.AUTH_CONTEXT_ENV)
        else:
            context = middleware.AuthContextMiddleware('Tokenless_auth_test')
        return context
def test_context_already_exists(self):
req = make_request()
token_id = uuid.uuid4().hex
req.environ[authorization.AUTH_CONTEXT_ENV] = {'token_id': token_id}
context = self._create_context(request=req)
self.assertEqual(token_id, context['token_id'])
def test_not_applicable_to_token_request(self):
env = {}
env['PATH_INFO'] = '/auth/tokens'
env['REQUEST_METHOD'] = 'POST'
req = make_request(environ=env)
context = self._create_context(request=req)
self.assertIsNone(context)
def test_no_tokenless_attributes_request(self):
req = make_request()
context = self._create_context(request=req)
self.assertIsNone(context)
def test_no_issuer_attribute_request(self):
env = {}
env['HTTP_X_PROJECT_ID'] = uuid.uuid4().hex
req = make_request(environ=env)
context = self._create_context(request=req)
self.assertIsNone(context)
def test_has_only_issuer_and_project_name_request(self):
env = {}
# SSL_CLIENT_I_DN is the attribute name that wsgi env
# references to issuer of the client certificate.
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_NAME'] = uuid.uuid4().hex
req = make_request(environ=env)
context = self._create_context(request=req,
exception_expected=True)
self.assertRaises(exception.ValidationError,
context.process_request,
req)
def test_has_only_issuer_and_project_domain_name_request(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_DOMAIN_NAME'] = uuid.uuid4().hex
req = make_request(environ=env)
context = self._create_context(request=req,
exception_expected=True)
self.assertRaises(exception.ValidationError,
context.process_request,
req)
def test_has_only_issuer_and_project_domain_id_request(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_DOMAIN_ID'] = uuid.uuid4().hex
req = make_request(environ=env)
context = self._create_context(request=req,
exception_expected=True)
self.assertRaises(exception.ValidationError,
context.process_request,
req)
def test_missing_both_domain_and_project_request(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
req = make_request(environ=env)
context = self._create_context(request=req,
exception_expected=True)
self.assertRaises(exception.ValidationError,
context.process_request,
req)
def test_empty_trusted_issuer_list(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_ID'] = uuid.uuid4().hex
req = make_request(environ=env)
self.config_fixture.config(group='tokenless_auth',
trusted_issuer=[])
context = self._create_context(request=req)
self.assertIsNone(context)
def test_client_issuer_not_trusted(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.untrusted_client_issuer
env['HTTP_X_PROJECT_ID'] = uuid.uuid4().hex
req = make_request(environ=env)
context = self._create_context(request=req)
self.assertIsNone(context)
def test_proj_scope_with_proj_id_and_proj_dom_id_success(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_ID'] = self.project_id
env['HTTP_X_PROJECT_DOMAIN_ID'] = self.domain_id
# SSL_CLIENT_USER_NAME and SSL_CLIENT_DOMAIN_NAME are the types
# defined in the mapping that will map to the user name and
# domain name
env['SSL_CLIENT_USER_NAME'] = self.user_name
env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name
req = make_request(environ=env)
context = self._create_context(
request=req,
mapping_ref=mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME)
self._assert_tokenless_auth_context(context)
def test_proj_scope_with_proj_id_only_success(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_ID'] = self.project_id
env['SSL_CLIENT_USER_NAME'] = self.user_name
env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name
req = make_request(environ=env)
context = self._create_context(
request=req,
mapping_ref=mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME)
self._assert_tokenless_auth_context(context)
def test_proj_scope_with_proj_name_and_proj_dom_id_success(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_NAME'] = self.project_name
env['HTTP_X_PROJECT_DOMAIN_ID'] = self.domain_id
env['SSL_CLIENT_USER_NAME'] = self.user_name
env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name
req = make_request(environ=env)
context = self._create_context(
request=req,
mapping_ref=mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME)
self._assert_tokenless_auth_context(context)
def test_proj_scope_with_proj_name_and_proj_dom_name_success(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_NAME'] = self.project_name
env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name
env['SSL_CLIENT_USER_NAME'] = self.user_name
env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name
req = make_request(environ=env)
context = self._create_context(
request=req,
mapping_ref=mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME)
self._assert_tokenless_auth_context(context)
def test_proj_scope_with_proj_name_only_fail(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_NAME'] = self.project_id
env['SSL_CLIENT_USER_NAME'] = self.user_name
env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name
req = make_request(environ=env)
context = self._create_context(
request=req,
mapping_ref=mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME,
exception_expected=True)
self.assertRaises(exception.ValidationError,
context.process_request,
req)
def test_mapping_with_userid_and_domainid_success(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_NAME'] = self.project_name
env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name
env['SSL_CLIENT_USER_ID'] = self.user['id']
env['SSL_CLIENT_DOMAIN_ID'] = self.domain_id
req = make_request(environ=env)
context = self._create_context(
request=req,
mapping_ref=mapping_fixtures.MAPPING_WITH_USERID_AND_DOMAINID)
self._assert_tokenless_auth_context(context)
def test_mapping_with_userid_and_domainname_success(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_NAME'] = self.project_name
env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name
env['SSL_CLIENT_USER_ID'] = self.user['id']
env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name
req = make_request(environ=env)
context = self._create_context(
request=req,
mapping_ref=mapping_fixtures.MAPPING_WITH_USERID_AND_DOMAINNAME)
self._assert_tokenless_auth_context(context)
def test_mapping_with_username_and_domainid_success(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_NAME'] = self.project_name
env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name
env['SSL_CLIENT_USER_NAME'] = self.user_name
env['SSL_CLIENT_DOMAIN_ID'] = self.domain_id
req = make_request(environ=env)
context = self._create_context(
request=req,
mapping_ref=mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINID)
self._assert_tokenless_auth_context(context)
def test_only_domain_name_fail(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_ID'] = self.project_id
env['HTTP_X_PROJECT_DOMAIN_ID'] = self.domain_id
env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name
req = make_request(environ=env)
context = self._create_context(
request=req,
mapping_ref=mapping_fixtures.MAPPING_WITH_DOMAINNAME_ONLY,
exception_expected=True)
self.assertRaises(exception.ValidationError,
context.process_request,
req)
def test_only_domain_id_fail(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_ID'] = self.project_id
env['HTTP_X_PROJECT_DOMAIN_ID'] = self.domain_id
env['SSL_CLIENT_DOMAIN_ID'] = self.domain_id
req = make_request(environ=env)
context = self._create_context(
request=req,
mapping_ref=mapping_fixtures.MAPPING_WITH_DOMAINID_ONLY,
exception_expected=True)
self.assertRaises(exception.ValidationError,
context.process_request,
req)
def test_missing_domain_data_fail(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_ID'] = self.project_id
env['HTTP_X_PROJECT_DOMAIN_ID'] = self.domain_id
env['SSL_CLIENT_USER_NAME'] = self.user_name
req = make_request(environ=env)
context = self._create_context(
request=req,
mapping_ref=mapping_fixtures.MAPPING_WITH_USERNAME_ONLY,
exception_expected=True)
self.assertRaises(exception.ValidationError,
context.process_request,
req)
def test_userid_success(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_ID'] = self.project_id
env['HTTP_X_PROJECT_DOMAIN_ID'] = self.domain_id
env['SSL_CLIENT_USER_ID'] = self.user['id']
req = make_request(environ=env)
context = self._create_context(
request=req,
mapping_ref=mapping_fixtures.MAPPING_WITH_USERID_ONLY)
self._assert_tokenless_auth_context(context)
def test_domain_disable_fail(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_NAME'] = self.project_name
env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name
env['SSL_CLIENT_USER_NAME'] = self.user_name
env['SSL_CLIENT_DOMAIN_ID'] = self.domain_id
req = make_request(environ=env)
self.domain['enabled'] = False
self.domain = self.resource_api.update_domain(
self.domain['id'], self.domain)
context = self._create_context(
request=req,
mapping_ref=mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINID,
exception_expected=True)
self.assertRaises(exception.Unauthorized,
context.process_request,
req)
def test_user_disable_fail(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_NAME'] = self.project_name
env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name
env['SSL_CLIENT_USER_NAME'] = self.user_name
env['SSL_CLIENT_DOMAIN_ID'] = self.domain_id
req = make_request(environ=env)
self.user['enabled'] = False
self.user = self.identity_api.update_user(self.user['id'], self.user)
context = self._create_context(
request=req,
mapping_ref=mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINID,
exception_expected=True)
self.assertRaises(AssertionError,
context.process_request,
req)
def test_invalid_user_fail(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_ID'] = self.project_id
env['HTTP_X_PROJECT_DOMAIN_ID'] = self.domain_id
env['SSL_CLIENT_USER_NAME'] = uuid.uuid4().hex
env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name
req = make_request(environ=env)
context = self._create_context(
request=req,
mapping_ref=mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME,
exception_expected=True)
self.assertRaises(exception.UserNotFound,
context.process_request,
req)
def test_ephemeral_success(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_NAME'] = self.project_name
env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name
env['SSL_CLIENT_USER_NAME'] = self.user_name
req = make_request(environ=env)
self.config_fixture.config(group='tokenless_auth',
protocol='ephemeral')
self.protocol_id = 'ephemeral'
mapping = mapping_fixtures.MAPPING_FOR_EPHEMERAL_USER.copy()
mapping['rules'][0]['local'][0]['group']['id'] = self.group['id']
context = self._create_context(
request=req,
mapping_ref=mapping)
self._assert_tokenless_auth_context(context, ephemeral_user=True)
def test_ephemeral_with_default_user_type_success(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_NAME'] = self.project_name
env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name
env['SSL_CLIENT_USER_NAME'] = self.user_name
req = make_request(environ=env)
self.config_fixture.config(group='tokenless_auth',
protocol='ephemeral')
self.protocol_id = 'ephemeral'
# this mapping does not have the user type defined
# and it should defaults to 'ephemeral' which is
# the expected type for the test case.
mapping = mapping_fixtures.MAPPING_FOR_DEFAULT_EPHEMERAL_USER.copy()
mapping['rules'][0]['local'][0]['group']['id'] = self.group['id']
context = self._create_context(
request=req,
mapping_ref=mapping)
self._assert_tokenless_auth_context(context, ephemeral_user=True)
def test_ephemeral_any_user_success(self):
"""Ephemeral user does not need a specified user
Keystone is not looking to match the user, but a corresponding group.
"""
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_NAME'] = self.project_name
env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name
env['SSL_CLIENT_USER_NAME'] = uuid.uuid4().hex
req = make_request(environ=env)
self.config_fixture.config(group='tokenless_auth',
protocol='ephemeral')
self.protocol_id = 'ephemeral'
mapping = mapping_fixtures.MAPPING_FOR_EPHEMERAL_USER.copy()
mapping['rules'][0]['local'][0]['group']['id'] = self.group['id']
context = self._create_context(
request=req,
mapping_ref=mapping)
self._assert_tokenless_auth_context(context, ephemeral_user=True)
def test_ephemeral_invalid_scope_fail(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_NAME'] = uuid.uuid4().hex
env['HTTP_X_PROJECT_DOMAIN_NAME'] = uuid.uuid4().hex
env['SSL_CLIENT_USER_NAME'] = self.user_name
req = make_request(environ=env)
self.config_fixture.config(group='tokenless_auth',
protocol='ephemeral')
self.protocol_id = 'ephemeral'
mapping = mapping_fixtures.MAPPING_FOR_EPHEMERAL_USER.copy()
mapping['rules'][0]['local'][0]['group']['id'] = self.group['id']
context = self._create_context(
request=req,
mapping_ref=mapping,
exception_expected=True)
self.assertRaises(exception.Unauthorized,
context.process_request,
req)
def test_ephemeral_no_group_found_fail(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_NAME'] = self.project_name
env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name
env['SSL_CLIENT_USER_NAME'] = self.user_name
req = make_request(environ=env)
self.config_fixture.config(group='tokenless_auth',
protocol='ephemeral')
self.protocol_id = 'ephemeral'
mapping = mapping_fixtures.MAPPING_FOR_EPHEMERAL_USER.copy()
mapping['rules'][0]['local'][0]['group']['id'] = uuid.uuid4().hex
context = self._create_context(
request=req,
mapping_ref=mapping,
exception_expected=True)
self.assertRaises(exception.MappedGroupNotFound,
context.process_request,
req)
def test_ephemeral_incorrect_mapping_fail(self):
"""Ephemeral user picks up the non-ephemeral user mapping.
Looking up the mapping with protocol Id 'x509' will load up
the non-ephemeral user mapping, results unauthenticated.
"""
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
env['HTTP_X_PROJECT_NAME'] = self.project_name
env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name
env['SSL_CLIENT_USER_NAME'] = self.user_name
req = make_request(environ=env)
# This will pick up the incorrect mapping
self.config_fixture.config(group='tokenless_auth',
protocol='x509')
self.protocol_id = 'x509'
mapping = mapping_fixtures.MAPPING_FOR_EPHEMERAL_USER.copy()
mapping['rules'][0]['local'][0]['group']['id'] = uuid.uuid4().hex
context = self._create_context(
request=req,
mapping_ref=mapping,
exception_expected=True)
self.assertRaises(exception.MappedGroupNotFound,
context.process_request,
req)
def test_create_idp_id_success(self):
env = {}
env['SSL_CLIENT_I_DN'] = self.client_issuer
auth = tokenless_auth.TokenlessAuthHelper(env)
idp_id = auth._build_idp_id()
self.assertEqual(self.idp_id, idp_id)
def test_create_idp_id_attri_not_found_fail(self):
env = {}
env[uuid.uuid4().hex] = self.client_issuer
auth = tokenless_auth.TokenlessAuthHelper(env)
expected_msg = ('Could not determine Identity Provider ID. The '
'configuration option %s was not found in the '
'request environment.' %
CONF.tokenless_auth.issuer_attribute)
# Check the content of the exception message as well
self.assertRaisesRegexp(exception.TokenlessAuthConfigError,
expected_msg,
auth._build_idp_id)
| |
from mapbox_vector_tile.Mapbox.vector_tile_pb2 import tile
from collections import namedtuple
class StringTableOptimiser(object):
    """
    Optimises the order of keys and values in the MVT layer string table.

    Counts the number of times an entry in the MVT string table (both keys
    and values) is used. Then reorders the string table to have the most
    commonly used entries first and updates the features to use the
    replacement locations in the table. This can save several percent in a
    tile with large numbers of features.
    """

    def __init__(self):
        # table index -> number of uses, kept separately for keys and values
        self.key_counts = {}
        self.val_counts = {}

    def add_tags(self, feature_tags):
        """Accumulate usage counts from a feature's [k, v, k, v, ...] tags."""
        itr = iter(feature_tags)
        for k, v in zip(itr, itr):
            self.key_counts[k] = self.key_counts.get(k, 0) + 1
            self.val_counts[v] = self.val_counts.get(v, 0) + 1

    def _update_table(self, counts, table):
        # sort string table by usage, so most commonly-used values are
        # assigned the smallest indices. since indices are encoded as
        # varints, this should make best use of the space.
        # NOTE: dict.items() (not Python-2-only iteritems()) so this works
        # on both Python 2 and 3.
        sort = sorted(((c, k) for k, c in counts.items()), reverse=True)

        # construct the re-ordered string table
        new_table = [table[old_index] for _, old_index in sort]
        # every table entry is expected to be referenced at least once
        assert len(new_table) == len(table)

        # delete table in place and replace with the new table -- the table
        # may be a protobuf repeated field, so mutate, don't rebind
        del table[:]
        table.extend(new_table)

        # construct a lookup table from the old to the new indices.
        return dict((old, new) for new, (_, old) in enumerate(sort))

    def update_string_table(self, layer):
        """Reorder ``layer``'s key/value tables and remap feature tags."""
        new_key = self._update_table(self.key_counts, layer.keys)
        new_val = self._update_table(self.val_counts, layer.values)

        for feature in layer.features:
            # range(), not Python-2-only xrange(); tags alternate key, value
            for i in range(0, len(feature.tags), 2):
                feature.tags[i] = new_key[feature.tags[i]]
                feature.tags[i + 1] = new_val[feature.tags[i + 1]]
# return the signed integer corresponding to the "zig-zag" encoded unsigned
# input integer. this encoding is used for MVT geometry deltas.
def unzigzag(n):
    # even codes decode to n // 2, odd codes to -(n // 2) - 1
    if n & 1:
        return -(n >> 1) - 1
    return n >> 1
# return the "zig-zag" encoded unsigned integer corresponding to the signed
# input integer. this encoding is used for MVT geometry deltas.
def zigzag(n):
    """Zig-zag encode *n*: 0, -1, 1, -2, 2, ... -> 0, 1, 2, 3, 4, ...

    The previous form ``(n << 1) ^ (n >> 31)`` only sign-extends correctly
    for values that fit in 32 bits; ``-(n < 0)`` is 0 or -1 for any Python
    int, so this works for arbitrary magnitudes while producing identical
    results in the 32-bit range.
    """
    return (n << 1) ^ -(n < 0)
# we assume that every linestring consists of a single MoveTo command followed
# by some number of LineTo commands, and we encode this as a Line object.
#
# normally, MVT linestrings are encoded relative to the preceding linestring
# (if any) in the geometry. however that's awkward for reodering, so we
# construct an absolute MoveTo for each Line. we also derive a corresponding
# EndsAt location, which isn't used in the encoding, but simplifies analysis.

# absolute start position of a linestring
MoveTo = namedtuple('MoveTo', 'x y')
# absolute end position of a linestring (analysis only, never re-encoded)
EndsAt = namedtuple('EndsAt', 'x y')
# moveto: MoveTo, endsat: EndsAt, cmds: raw LineTo command stream
Line = namedtuple('Line', 'moveto endsat cmds')
def _decode_lines(geom):
    """
    Decode a linear MVT geometry into a list of Lines.

    Each individual linestring in the MVT is extracted to a separate entry in
    the list of lines.
    """
    lines = []
    current_line = []
    current_moveto = None

    # to keep track of the position. we'll adapt the move-to commands to all
    # be relative to 0,0 at the beginning of each linestring.
    x = 0
    y = 0

    end = len(geom)
    i = 0
    while i < end:
        # command headers pack the command id in the low 3 bits and the run
        # length in the remaining bits.
        header = geom[i]
        cmd = header & 7
        run_length = header // 8

        if cmd == 1:  # move to
            # flush previous line.
            if current_moveto:
                lines.append(Line(current_moveto, EndsAt(x, y), current_line))
                current_line = []

            # a longer run would indicate a multipoint, which this code
            # does not handle.
            assert run_length == 1
            x += unzigzag(geom[i + 1])
            y += unzigzag(geom[i + 2])
            i += 3
            current_moveto = MoveTo(x, y)

        elif cmd == 2:  # line to
            assert current_moveto

            # we just copy this run, since its encoding isn't going to change
            next_i = i + 1 + run_length * 2
            current_line.extend(geom[i:next_i])

            # but we still need to decode it to figure out where each move-to
            # command is in absolute space. (range, not Python-2-only xrange,
            # so this works on Python 3 as well)
            for j in range(0, run_length):
                dx = unzigzag(geom[i + 1 + 2 * j])
                dy = unzigzag(geom[i + 2 + 2 * j])
                x += dx
                y += dy
            i = next_i

        else:
            raise ValueError('Unhandled command: %d' % cmd)

    # flush the final line.
    if current_line:
        assert current_moveto
        lines.append(Line(current_moveto, EndsAt(x, y), current_line))

    return lines
def _reorder_lines(lines):
"""
Reorder lines so that the distance from the end of one to the beginning of
the next is minimised.
"""
x = 0
y = 0
new_lines = []
# treat the list of lines as a stack, off which we keep popping the best
# one to add next.
while lines:
# looping over all the lines like this isn't terribly efficient, but
# in local tests seems to handle a few thousand lines without a
# problem.
min_dist = None
min_i = None
for i, line in enumerate(lines):
moveto, _, _ = line
dist = abs(moveto.x - x) + abs(moveto.y - y)
if min_dist is None or dist < min_dist:
min_dist = dist
min_i = i
assert min_i is not None
line = lines.pop(min_i)
_, endsat, _ = line
x = endsat.x
y = endsat.y
new_lines.append(line)
return new_lines
def _rewrite_geometry(geom, new_lines):
    """
    Re-encode a list of Lines with absolute MoveTos as a continuous stream of
    MVT geometry commands, each relative to the last. Replace geom with that
    stream.
    """
    encoded = []
    prev_x = 0
    prev_y = 0

    for moveto, endsat, lineto_cmds in new_lines:
        # header 9 == command id 1 (move to) with run length 1
        encoded.append(9)
        encoded.append(zigzag(moveto.x - prev_x))
        encoded.append(zigzag(moveto.y - prev_y))
        encoded.extend(lineto_cmds)
        prev_x = endsat.x
        prev_y = endsat.y

    # write the lines back out to geom, mutating it in place (it may be a
    # protobuf repeated field)
    del geom[:]
    geom.extend(encoded)
def optimise_multilinestring(geom):
    """Reorder the linestrings of a (multi)linestring geometry in place."""
    # split the geometry into multiple lists, each starting with a move-to
    # command and consisting otherwise of line-to commands.
    decoded = _decode_lines(geom)

    # reordering is only meaningful when there are at least two lines.
    if len(decoded) <= 1:
        return

    _rewrite_geometry(geom, _reorder_lines(decoded))
def optimise_tile(tile_bytes):
    """
    Decode a sequence of bytes as an MVT tile and reorder the string table of
    its layers and the order of its multilinestrings to save a few bytes.
    """
    mvt = tile()
    mvt.ParseFromString(tile_bytes)

    for layer in mvt.layers:
        optimiser = StringTableOptimiser()
        for feature in layer.features:
            # geometry type 2 is (multi)linestring; only those are reordered
            if feature.type == 2:
                optimise_multilinestring(feature.geometry)
            optimiser.add_tags(feature.tags)
        optimiser.update_string_table(layer)

    return mvt.SerializeToString()
if __name__ == '__main__':
    import argparse
    import sys

    parser = argparse.ArgumentParser()
    # MVT tiles are protobuf (binary) data: the files must be opened in
    # binary mode. Text mode ('r'/'w') corrupts the stream on Windows and
    # fails to decode at all under Python 3.
    parser.add_argument('input_file', help='Input MVT file',
                        type=argparse.FileType('rb'))
    parser.add_argument('--output-file', help='Output file, default is stdout',
                        type=argparse.FileType('wb'),
                        # sys.stdout.buffer is the binary-safe stream on
                        # Python 3; fall back to sys.stdout on Python 2.
                        default=getattr(sys.stdout, 'buffer', sys.stdout))
    args = parser.parse_args()

    output_bytes = optimise_tile(args.input_file.read())
    args.output_file.write(output_bytes)
| |
# -*- coding: utf-8 -*-
'''
IPS pkg support for Solaris
.. important::
If you feel that Salt should be using this module to manage packages on a
minion, and it is using a different module (or gives an error similar to
*'pkg.install' is not available*), see :ref:`here
<module-provider-override>`.
This module provides support for Solaris 11 new package management - IPS (Image Packaging System).
This is the default pkg module for Solaris 11 (and later).
If you want to use also other packaging module (e.g. pkgutil) together with IPS, you need to override the ``pkg`` provider
in sls for each package:
.. code-block:: yaml
mypackage:
pkg.installed:
- provider: pkgutil
Or you can override it globally by setting the :conf_minion:`providers` parameter in your Minion config file like this:

.. code-block:: yaml

    providers:
      pkg: pkgutil
'''
# Import python libs
from __future__ import print_function
from __future__ import absolute_import
import copy
import logging
# Import salt libs
import salt.utils
from salt.exceptions import CommandExecutionError
# Define the module's virtual name
__virtualname__ = 'pkg'
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Set the virtual pkg module if the os is Solaris 11 (or later).
    '''
    def _is_ips_release(kernelrelease):
        # Compare the version components numerically. The previous
        # ``float(kernelrelease) > 5.10`` check mis-ordered releases:
        # float("5.9") == 5.9 compares greater than float("5.10") == 5.1,
        # which would wrongly enable this module on Solaris 9.
        try:
            parts = tuple(int(p) for p in str(kernelrelease).split('.')[:2])
        except ValueError:
            return False
        return parts > (5, 10)

    if __grains__['os'] == 'Solaris' \
            and _is_ips_release(__grains__['kernelrelease']) \
            and salt.utils.which('pkg'):
        return __virtualname__
    return (False,
            'The solarisips execution module failed to load: only available '
            'on Solaris >= 11.')
# pkg(1) exit codes mapped to human-readable explanations; used to enrich
# CommandExecutionError info when an IPS command fails.
ips_pkg_return_values = {
    0: 'Command succeeded.',
    1: 'An error occurred.',
    2: 'Invalid command line options were specified.',
    3: 'Multiple operations were requested, but only some of them succeeded.',
    4: 'No changes were made - nothing to do.',
    5: 'The requested operation cannot be performed on a live image.',
    6: 'The requested operation cannot be completed because the licenses for '
       'the packages being installed or updated have not been accepted.',
    7: 'The image is currently in use by another process and cannot be '
       'modified.'
}
def _ips_get_pkgname(line):
'''
Extracts package name from "pkg list -v" output.
Input: one line of the command output
Output: pkg name (e.g.: "pkg://solaris/x11/library/toolkit/libxt")
Example use:
line = "pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--"
name = _ips_get_pkgname(line)
'''
return line.split()[0].split('@')[0].strip()
def _ips_get_pkgversion(line):
'''
Extracts package version from "pkg list -v" output.
Input: one line of the command output
Output: package version (e.g.: "1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z")
Example use:
line = "pkg://solaris/x11/library/toolkit/libxt@1.1.3,5.11-0.175.1.0.0.24.1317:20120904T180030Z i--"
name = _ips_get_pkgversion(line)
'''
return line.split()[0].split('@')[1].strip()
def refresh_db(full=False):
    '''
    Updates the remote repos database.

    full : False
        Set to ``True`` to force a refresh of the pkg DB from all publishers,
        regardless of the last refresh time.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.refresh_db
        salt '*' pkg.refresh_db full=True
    '''
    cmd = '/bin/pkg refresh --full' if full else '/bin/pkg refresh'
    return __salt__['cmd.retcode'](cmd) == 0
def upgrade_available(name):
    '''
    Check if there is an upgrade available for a certain package
    Accepts full or partial FMRI. Returns all matches found.

    Returns a dict mapping package name to the newest available version,
    or {} when no upgrade is available.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.upgrade_available apache-22
    '''
    # (removed a dead ``version = None`` local that was never used)
    cmd = 'pkg list -Huv {0}'.format(name)
    lines = __salt__['cmd.run_stdout'](cmd).splitlines()
    if not lines:
        return {}
    ret = {}
    for line in lines:
        ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line)
    return ret
def list_upgrades(refresh=False, **kwargs):  # pylint: disable=W0613
    '''
    Lists all packages available for update.

    When run in global zone, it reports only upgradable packages for the global
    zone.

    When run in non-global zone, it can report more upgradable packages than
    ``pkg update -vn``, because ``pkg update`` hides packages that require
    newer version of ``pkg://solaris/entire`` (which means that they can be
    upgraded only from the global zone). If ``pkg://solaris/entire`` is found
    in the list of upgrades, then the global zone should be updated to get all
    possible updates. Use ``refresh=True`` to refresh the package database.

    refresh : False
        Set to ``True`` to force a full pkg DB refresh before listing

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.list_upgrades
        salt '*' pkg.list_upgrades refresh=True
    '''
    if salt.utils.is_true(refresh):
        refresh_db(full=True)

    # awk is in core-os package so we can use it without checking
    out = __salt__['cmd.run_stdout']("/bin/pkg list -Huv")
    return dict((_ips_get_pkgname(line), _ips_get_pkgversion(line))
                for line in out.splitlines())
def upgrade(refresh=False, **kwargs):
    '''
    Upgrade all packages to the latest possible version.
    When run in global zone, it updates also all non-global zones.
    In non-global zones upgrade is limited by dependency constrains linked to
    the version of pkg://solaris/entire.

    Returns a dict of changes (old/new versions per package); raises
    CommandExecutionError with the changes and pkg exit-code explanation
    when ``pkg update`` fails.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.upgrade
    '''
    # (removed a dead initial ``ret`` dict that was unconditionally
    # overwritten by the compare_dicts() result below)
    if salt.utils.is_true(refresh):
        refresh_db()

    # Get a list of the packages before install so we can diff after to see
    # what got installed.
    old = list_pkgs()

    # Install or upgrade the package
    # If package is already installed
    cmd = ['pkg', 'update', '-v', '--accept']
    out = __salt__['cmd.run_all'](cmd,
                                  output_loglevel='trace',
                                  python_shell=False)

    # invalidate the cached package list before re-querying
    __context__.pop('pkg.list_pkgs', None)
    new = list_pkgs()
    ret = salt.utils.compare_dicts(old, new)

    if out['retcode'] != 0:
        raise CommandExecutionError(
            'Error occurred updating package(s)',
            info={
                'changes': ret,
                'retcode': ips_pkg_return_values[out['retcode']],
                'errors': [out['stderr']]
            }
        )

    return ret
def list_pkgs(versions_as_list=False, **kwargs):
    '''
    List the currently installed packages as a dict::

        {'<package_name>': '<version>'}

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.list_pkgs
    '''
    # 'removed' and 'purge_desired' are not yet implemented / not applicable
    # to IPS. (any() over a generator instead of building a throwaway list.)
    if any(salt.utils.is_true(kwargs.get(x))
           for x in ('removed', 'purge_desired')):
        return {}

    # serve from the per-run cache when we already queried pkg once
    if 'pkg.list_pkgs' in __context__:
        if versions_as_list:
            return __context__['pkg.list_pkgs']
        else:
            ret = copy.deepcopy(__context__['pkg.list_pkgs'])
            __salt__['pkg_resource.stringify'](ret)
            return ret

    ret = {}
    cmd = '/bin/pkg list -Hv'
    lines = __salt__['cmd.run_stdout'](cmd).splitlines()
    # column 1 is full FMRI name in form pkg://publisher/class/name@version
    for line in lines:
        name = _ips_get_pkgname(line)
        version = _ips_get_pkgversion(line)
        __salt__['pkg_resource.add_pkg'](ret, name, version)

    __salt__['pkg_resource.sort_pkglist'](ret)
    __context__['pkg.list_pkgs'] = copy.deepcopy(ret)
    if not versions_as_list:
        __salt__['pkg_resource.stringify'](ret)
    return ret
def version(*names, **kwargs):
    '''
    Common interface for obtaining the version of installed packages.
    Accepts full or partial FMRI. If called using pkg_resource, full FMRI is required.

    Returns a dict mapping matched package name to installed version, or ''
    when nothing matched (historical behavior, preserved for callers).

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.version vim
        salt '*' pkg.version foo bar baz
        salt '*' pkg_resource.version pkg://solaris/entire
    '''
    # join the names instead of building the list by repeated concatenation
    namelist = ' '.join(names)
    cmd = '/bin/pkg list -Hv {0}'.format(namelist)
    ret = {}
    for line in __salt__['cmd.run_stdout'](cmd).splitlines():
        ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line)
    return ret or ''
def latest_version(name, **kwargs):
    '''
    The available version of the package in the repository.
    In case of multiple matches, it returns list of all matched packages.
    Accepts full or partial FMRI.
    Please use pkg.latest_version as pkg.available_version is being deprecated.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.latest_version pkg://solaris/entire
    '''
    cmd = '/bin/pkg list -Hnv {0}'.format(name)
    matches = {}
    for line in __salt__['cmd.run_stdout'](cmd).splitlines():
        matches[_ips_get_pkgname(line)] = _ips_get_pkgversion(line)
    # empty string means "nothing found" to the callers
    return matches if matches else ''
# available_version is being deprecated in favor of latest_version; keep the
# old name as an alias so existing states and CLI calls continue to work.
available_version = salt.utils.alias_function(latest_version, 'available_version')
def get_fmri(name, **kwargs):
    '''
    Returns FMRI from partial name. Returns empty string ('') if not found.
    In case of multiple match, the function returns list of all matched packages.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.get_fmri bash
    '''
    if name.startswith('pkg://'):
        # nothing to resolve, already a full FMRI
        return name

    # a partial name can match several packages
    cmd = '/bin/pkg list -aHv {0}'.format(name)
    lines = __salt__['cmd.run_stdout'](cmd).splitlines()
    if not lines:
        # empty string = package not found
        return ''
    return [_ips_get_pkgname(line) for line in lines]
def normalize_name(name, **kwargs):
    '''
    Internal function. Normalizes pkg name to full FMRI before running
    pkg.install. In case of multiple matches or no match, it returns the name
    without modifications.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.normalize_name vim
    '''
    if name.startswith('pkg://'):
        # nothing to do, already a full FMRI
        return name

    # a partial name can match several packages
    matches = __salt__['cmd.run_stdout'](
        '/bin/pkg list -aHv {0}'.format(name)).splitlines()

    # zero lines: pkg unknown; several lines: the name is ambiguous.
    # in both cases it's safer to return the original (unmodified) name
    # and let "pkg install" deal with it.
    if len(matches) != 1:
        return name

    return _ips_get_pkgname(matches[0])
def is_installed(name, **kwargs):
    '''
    Returns True if the package is installed. Otherwise returns False.
    Name can be full or partial FMRI.
    In case of multiple match from partial FMRI name, it returns True.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.is_installed bash
    '''
    # "pkg list" exits 0 only when at least one installed package matches
    retcode = __salt__['cmd.retcode']('/bin/pkg list -Hv {0}'.format(name))
    return retcode == 0
def search(name, versions_as_list=False, **kwargs):
    '''
    Searches the repository for given pkg name.
    The name can be full or partial FMRI. All matches are printed. Globs are
    also supported.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.search bash
    '''
    out = __salt__['cmd.run_all']('/bin/pkg list -aHv {0}'.format(name))
    if out['retcode'] != 0:
        # a non-zero exit code means nothing was found
        return {}

    # no error, processing pkg listing
    # column 1 is full FMRI name in form pkg://publisher/pkg/name@version
    ret = {}
    for line in out['stdout'].splitlines():
        __salt__['pkg_resource.add_pkg'](
            ret, _ips_get_pkgname(line), _ips_get_pkgversion(line))

    if not versions_as_list:
        __salt__['pkg_resource.stringify'](ret)
    return ret
def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwargs):
    '''
    Install the named package using the IPS pkg command.
    Accepts full or partial FMRI.

    Returns a dict containing the new package names and versions::

        {'<package>': {'old': '<old-version>',
                       'new': '<new-version>'}}

    (Historical quirks preserved for callers: returns the string
    'Package already installed.' when a single named package is already
    present, and 'Test succeeded.' when ``test=True``.)

    Multiple Package Installation Options:

    pkgs
        A list of packages to install. Must be passed as a python list.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.install vim
        salt '*' pkg.install pkg://solaris/editor/vim
        salt '*' pkg.install pkg://solaris/editor/vim refresh=True
        salt '*' pkg.install pkgs='["foo", "bar"]'
    '''
    if not pkgs:
        if is_installed(name):
            return 'Package already installed.'

    if refresh:
        refresh_db(full=True)

    # build the space-separated list of FMRIs (with optional versions)
    if pkgs:  # multiple packages specified
        targets = []
        for pkg in pkgs:
            # each entry is a single-item {name: version-or-None} dict
            pkgname, pkgversion = list(pkg.items())[0]
            if pkgversion:  # version specified
                targets.append('{0}@{1}'.format(pkgname, pkgversion))
            else:
                targets.append('{0}'.format(pkgname))
        pkg2inst = ' '.join(targets)
        # lazy %-style args avoid the format cost when debug logging is off
        log.debug('Installing these packages instead of %s: %s',
                  name, pkg2inst)
    else:  # install single package
        if version:
            pkg2inst = "{0}@{1}".format(name, version)
        else:
            pkg2inst = "{0}".format(name)

    cmd = 'pkg install -v --accept '
    if test:
        cmd += '-n '

    # Get a list of the packages before install so we can diff after to see
    # what got installed.
    old = list_pkgs()

    # Install or upgrade the package
    # If package is already installed
    cmd += '{0}'.format(pkg2inst)
    out = __salt__['cmd.run_all'](cmd, output_loglevel='trace')

    # Get a list of the packages again, including newly installed ones.
    __context__.pop('pkg.list_pkgs', None)
    new = list_pkgs()
    ret = salt.utils.compare_dicts(old, new)

    if out['retcode'] != 0:
        raise CommandExecutionError(
            'Error occurred installing package(s)',
            info={
                'changes': ret,
                'retcode': ips_pkg_return_values[out['retcode']],
                'errors': [out['stderr']]
            }
        )

    # No error occurred
    if test:
        return 'Test succeeded.'

    return ret
def remove(name=None, pkgs=None, **kwargs):
    '''
    Remove specified package. Accepts full or partial FMRI.
    In case of multiple match, the command fails and won't modify the OS.

    name
        The name of the package to be deleted.

    Multiple Package Options:

    pkgs
        A list of packages to delete. Must be passed as a python list. The
        ``name`` parameter will be ignored if this option is passed.

    Returns a dict of changes (old/new versions per removed package).

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.remove <package name>
        salt '*' pkg.remove tcsh
        salt '*' pkg.remove pkg://solaris/shell/tcsh
        salt '*' pkg.remove pkgs='["foo", "bar"]'
    '''
    if pkgs:  # multiple packages specified
        pkg2rm = ' '.join('{0}'.format(pkg) for pkg in pkgs)
        # fixed copy-paste bug: this used to log "Installing these packages"
        # even though this function removes them; also lazy %-formatting
        log.debug('Removing these packages instead of %s: %s', name, pkg2rm)
    else:  # remove single package
        pkg2rm = '{0}'.format(name)

    # Get a list of the currently installed pkgs.
    old = list_pkgs()

    # Remove the package(s)
    cmd = '/bin/pkg uninstall -v {0}'.format(pkg2rm)
    out = __salt__['cmd.run_all'](cmd, output_loglevel='trace')

    # Get a list of the packages after the uninstall
    __context__.pop('pkg.list_pkgs', None)
    new = list_pkgs()
    ret = salt.utils.compare_dicts(old, new)

    if out['retcode'] != 0:
        raise CommandExecutionError(
            'Error occurred removing package(s)',
            info={
                'changes': ret,
                'retcode': ips_pkg_return_values[out['retcode']],
                'errors': [out['stderr']]
            }
        )

    return ret
def purge(name, **kwargs):
    '''
    Remove specified package. Accepts full or partial FMRI.
    Returns a list containing the removed packages.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.purge <package name>
    '''
    # IPS has no separate "purge" concept (no config files left behind),
    # so this simply delegates to remove().
    return remove(name, **kwargs)
| |
# -*- coding: utf-8 -*-
"""
Code for interfacing with the Exoplanet Archive catalogs.
"""
from __future__ import division, print_function
import os
import logging
from pkg_resources import resource_filename
import pandas as pd
from six.moves import urllib
from .settings import PEERLESS_DATA_DIR
__all__ = [
"KOICatalog", "KICatalog", "EBCatalog", "BlacklistCatalog",
"TargetCatalog", "DatasetsCatalog", "CumulativeCatalog", "UeharaCatalog",
"WangCatalog",
]
def download():
    # Fetch the KOI and KIC catalogs from the Exoplanet Archive.
    # NOTE(review): KOICatalog/KICatalog are rebound to ``singleton``
    # wrappers at the bottom of this module, so ``c.cls`` is the wrapped
    # class — this function presumably runs only after module import.
    for c in (KOICatalog, KICatalog):
        print("Downloading {0}...".format(c.cls.__name__))
        c().fetch(clobber=True)
class Catalog(object):
    # Base class for remote catalogs cached locally as HDF5 files.
    # Subclasses must provide ``url`` and ``name``.
    url = None
    name = None
    ext = ".h5"

    def __init__(self, data_root=None):
        # data_root: base directory for cached catalogs (defaults to
        # PEERLESS_DATA_DIR).
        self.data_root = PEERLESS_DATA_DIR if data_root is None else data_root
        self._df = None        # lazily-loaded DataFrame cache
        self._spatial = None   # NOTE(review): set here but unused in this class

    @property
    def filename(self):
        """Full path of the local cache file for this catalog."""
        if self.name is None:
            raise NotImplementedError("subclasses must provide a name")
        return os.path.join(self.data_root, "catalogs", self.name + self.ext)

    def fetch(self, clobber=False):
        """Download the catalog to its local cache location.

        Set ``clobber=True`` to re-download even if a local file exists.
        Raises CatalogDownloadError when the HTTP response is not 200.
        """
        # Check for a local file first.
        fn = self.filename
        if os.path.exists(fn) and not clobber:
            logging.info("Found local file: '{0}'".format(fn))
            return

        # Fetch the remote file.
        if self.url is None:
            raise NotImplementedError("subclasses must provide a URL")
        url = self.url
        logging.info("Downloading file from: '{0}'".format(url))
        r = urllib.request.Request(url)
        handler = urllib.request.urlopen(r)
        code = handler.getcode()
        if int(code) != 200:
            raise CatalogDownloadError(code, url, "")

        # Make sure that the root directory exists.
        try:
            os.makedirs(os.path.split(fn)[0])
        except os.error:
            # directory already exists (or cannot be created; a real failure
            # will surface when the file is written)
            pass

        self._save_fetched_file(handler)

    def _save_fetched_file(self, file_handle):
        """Persist the fetched data; subclasses define the file format."""
        raise NotImplementedError("subclasses must implement this method")

    @property
    def df(self):
        """The catalog as a pandas DataFrame, fetched on first access."""
        if self._df is None:
            if not os.path.exists(self.filename):
                self.fetch()
            self._df = pd.read_hdf(self.filename, self.name)
        return self._df
class ExoplanetArchiveCatalog(Catalog):
    """Remote catalog fetched from the NASA Exoplanet Archive table API."""

    @property
    def url(self):
        """Build the nstedAPI query URL for this catalog's table."""
        if self.name is None:
            raise NotImplementedError("subclasses must provide a name")
        base = ("http://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/"
                "nph-nstedAPI?table={0}&select=*")
        return base.format(self.name)

    def _save_fetched_file(self, file_handle):
        """Parse the fetched CSV stream and store it as an HDF5 table."""
        frame = pd.read_csv(file_handle)
        frame.to_hdf(self.filename, self.name, format="t")
class KOICatalog(ExoplanetArchiveCatalog):
    """Kepler Objects of Interest table (Q1-Q17 DR24)."""
    name = "q1_q17_dr24_koi"

    def join_stars(self, df=None):
        """Merge *df* (default: this catalog) with the stellar catalog on kepid."""
        frame = self.df if df is None else df
        kic = KICatalog(data_root=self.data_root)
        return pd.merge(frame, kic.df, on="kepid")
class KICatalog(ExoplanetArchiveCatalog):
    # Q1-Q17 DR24 stellar-parameters table from the Exoplanet Archive.
    name = "q1_q17_dr24_stellar"
class CumulativeCatalog(ExoplanetArchiveCatalog):
    # The Exoplanet Archive's cumulative KOI table.
    name = "cumulative"
class CatalogDownloadError(Exception):
    """
    Raised when a catalog download request fails.

    :param code:
        The HTTP status code that caused the failure.

    :param url:
        The endpoint (with parameters) of the request.

    :param txt:
        A human readable description of the error.
    """

    def __init__(self, code, url, txt):
        message = (
            "The download returned code {0} for URL: '{1}' with message:\n{2}"
            .format(code, url, txt))
        super(CatalogDownloadError, self).__init__(message)
        self.code = code
        self.txt = txt
        self.url = url
class LocalCatalog(object):
    """A catalog shipped with the package as a CSV data file."""
    filename = None
    args = dict()  # extra keyword arguments forwarded to pd.read_csv

    def __init__(self):
        self._df = None

    @property
    def df(self):
        """Lazily load and cache the catalog as a DataFrame."""
        if self._df is None:
            path = os.path.join("data", self.filename)
            self._df = pd.read_csv(resource_filename(__name__, path),
                                   **(self.args))
        return self._df
class EBCatalog(LocalCatalog):
    # Bundled catalog (presumably eclipsing binaries — TODO confirm);
    # the first 7 rows of the CSV are a header preamble.
    filename = "ebs.csv"
    args = dict(skiprows=7)
class LongPeriodEBCatalog(LocalCatalog):
    # Whitespace-delimited file with no header row; column names supplied here.
    filename = "lpebs.csv"
    args = dict(delim_whitespace=True,
                names=["kicid", "period", "width", "t0", "ra", "dec"])
class BlacklistCatalog(LocalCatalog):
    # Bundled list of excluded targets.
    filename = "blacklist.csv"
class UeharaCatalog(LocalCatalog):
    """Packaged whitespace-delimited catalog read from "uehara.csv"."""
    filename = "uehara.csv"
    args = dict(delim_whitespace=True)
class WangCatalog(LocalCatalog):
    """Packaged catalog read from "wang.csv"."""
    filename = "wang.csv"
class TargetCatalog(LocalCatalog):
    """Catalog of targets, read from a CSV under PEERLESS_DATA_DIR."""
    filename = "targets.csv"

    @property
    def df(self):
        """Lazily-loaded target table; raises if the file is missing."""
        if self._df is not None:
            return self._df
        path = os.path.join(PEERLESS_DATA_DIR, "catalogs", self.filename)
        try:
            self._df = pd.read_csv(path, **(self.args))
        except OSError:
            # The file must be generated by an external command first.
            print("The target catalog doesn't exist. "
                  "You need to run 'peerless-targets'")
            raise
        return self._df
class DatasetsCatalog(LocalCatalog):
    """Catalog of datasets, read from an HDF5 file under PEERLESS_DATA_DIR."""
    filename = "datasets.h5"

    @property
    def df(self):
        """Lazily-loaded datasets table; raises if the file is missing."""
        if self._df is not None:
            return self._df
        path = os.path.join(PEERLESS_DATA_DIR, "catalogs", self.filename)
        try:
            self._df = pd.read_hdf(path, "datasets", **(self.args))
        except OSError:
            # The file must be generated by an external command first.
            print("The datasets catalog doesn't exist. "
                  "You need to run 'peerless-datasets'")
            raise
        return self._df
class singleton(object):
    """Class decorator that memoizes the first instance of *cls*.

    Every call after the first returns the originally constructed
    instance; later constructor arguments are ignored.
    """

    def __init__(self, cls):
        self.cls = cls
        self.inst = None

    def __call__(self, *args, **kwargs):
        instance = self.inst
        if instance is None:
            instance = self.cls(*args, **kwargs)
            self.inst = instance
        return instance
# Set all the catalogs to be singletons so that the data are shared across
# instances.
# NOTE(review): LongPeriodEBCatalog, UeharaCatalog, WangCatalog and
# CumulativeCatalog are not wrapped here -- confirm that is intentional.
KOICatalog = singleton(KOICatalog)
KICatalog = singleton(KICatalog)
EBCatalog = singleton(EBCatalog)
BlacklistCatalog = singleton(BlacklistCatalog)
TargetCatalog = singleton(TargetCatalog)
DatasetsCatalog = singleton(DatasetsCatalog)
# --------------------------------------------------------------------------
# (extraction artifact: boundary between two unrelated source files)
# --------------------------------------------------------------------------
# Copyright (c) 2009-2010, Cloud Matrix Pty. Ltd.
# All rights reserved; available under the terms of the BSD License.
"""
Esky - keep frozen apps fresh
==============================
Esky is an auto-update framework for frozen Python applications. It provides
a simple API through which apps can find, fetch and install updates, and a
bootstrapping mechanism that keeps the app safe in the face of failed or
partial updates.
Esky is currently capable of freezing apps with py2exe, py2app and cxfreeze.
Adding support for other freezer programs should be straightforward;
patches will be gratefully accepted.
See https://github.com/cloudmatrix/esky/ for more information:
"""
from __future__ import with_statement
from __future__ import absolute_import
__ver_major__ = 0
__ver_minor__ = 9
__ver_patch__ = 9
__ver_sub__ = "dev"
__ver_tuple__ = (__ver_major__, __ver_minor__, __ver_patch__, __ver_sub__)
__version__ = "%d.%d.%d%s" % __ver_tuple__
import sys
import errno
if sys.platform != "win32":
import fcntl
from esky.errors import *
from esky.sudo import SudoProxy, has_root, allow_from_sudo
from esky.util import (split_app_version, join_app_version,
is_version_dir, is_uninstalled_version_dir,
parse_version, get_best_version, appdir_from_executable,
copy_ownership_info, lock_version_dir, ESKY_CONTROL_DIR,
files_differ, lazy_import, ESKY_APPDATA_DIR,
get_all_versions, is_locked_version_dir,
is_installed_version_dir, really_rmtree, really_rename)
# Since all frozen apps are required to import this module and call the
# run_startup_hooks() function, we use a simple lazy import mechanism to
# make the initial import of this module as fast as possible.
@lazy_import
def os():
    # Deferred stdlib import; resolved on first attribute access.
    import os
    return os
@lazy_import
def socket():
    # Deferred stdlib import; resolved on first attribute access.
    import socket
    return socket
@lazy_import
def time():
    # Deferred stdlib import; resolved on first attribute access.
    import time
    return time
@lazy_import
def subprocess():
    # Deferred stdlib import; resolved on first attribute access.
    import subprocess
    return subprocess
@lazy_import
def atexit():
    # Deferred stdlib import; resolved on first attribute access.
    import atexit
    return atexit
@lazy_import
def base64():
    # Deferred stdlib import; resolved on first attribute access.
    import base64
    return base64
@lazy_import
def pickle():
    # Prefer the C implementation (cPickle) available on Python 2.
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    return pickle
@lazy_import
def threading():
    # threading may be unavailable on minimal builds; fall back to None
    # (callers test truthiness before using it).
    try:
        import threading
    except ImportError:
        threading = None
    return threading
@lazy_import
def esky():
    # Deferred import of the esky package and its submodules; winres is
    # only importable/needed on Windows.
    import esky
    import esky.finder
    import esky.fstransact
    if sys.platform == "win32":
        import esky.winres
    return esky
class Esky(object):
"""Class representing an updatable frozen app.
Instances of this class point to a directory containing a frozen app in
the esky format. Through such an instance the app can be updated to a
new version in-place. Typical use of this class might be:
if hasattr(sys,"frozen"):
app = esky.Esky(sys.executable,"http://example.com/downloads/")
app.auto_update()
The first argument must be either the top-level application directory,
or the path of an executable from that application. The second argument
is a VersionFinder object that will be used to search for updates. If
a string it passed, it is assumed to be a URL and is passed to a new
DefaultVersionFinder instance.
"""
# Age after which another process may break a stale appdir lock.
lock_timeout = 60*60  # 1 hour timeout on appdir locks
def __init__(self, appdir_or_exe, version_finder=None):
    # Derive name/platform/active_version from the given path.
    self._init_from_appdir(appdir_or_exe)
    self._lock_count = 0  # re-entrant depth for lock()/unlock()
    self.sudo_proxy = None
    self.keep_sudo_proxy_alive = False
    self._old_sudo_proxies = []
    # Property setter: a plain URL string becomes a DefaultVersionFinder.
    self.version_finder = version_finder
    self.reinitialize()
def _init_from_appdir(self, appdir_or_exe):
    """Extension point to override the initial logic of Esky initialisation.

    This method is expected to interrogate the given appdir and set up the
    basic properties of the esky (e.g. name, platform) in response.  It is
    split into its own method to make it easier to override in subclasses.
    """
    if os.path.isfile(appdir_or_exe):
        # Given an executable: the first path component below the
        # versions dir names the currently-running version.
        self.appdir = appdir_from_executable(appdir_or_exe)
        vsdir = self._get_versions_dir()
        vdir = appdir_or_exe[len(vsdir):].split(os.sep)[1]
        details = split_app_version(vdir)
        self.name, self.active_version, self.platform = details
    else:
        # Given a directory: no specific version is "active".
        self.active_version = None
        self.appdir = appdir_or_exe
    self.appdir = os.path.abspath(self.appdir)
def _get_version_finder(self):
    return self.__version_finder

def _set_version_finder(self, version_finder):
    # A plain string is treated as a download URL and wrapped in a
    # DefaultVersionFinder (Py2 basestring covers str and unicode).
    if version_finder is not None:
        if isinstance(version_finder, basestring):
            kwds = {"download_url": version_finder}
            version_finder = esky.finder.DefaultVersionFinder(**kwds)
    self.__version_finder = version_finder

# version_finder accepts a VersionFinder instance, a URL string, or None.
version_finder = property(_get_version_finder, _set_version_finder)
def _get_update_dir(self):
    """Get the directory path in which self.version_finder can work."""
    return os.path.join(self._get_versions_dir(), "updates")
def _get_versions_dir(self):
    """Get the directory path containing individual version dirs.

    Prefers the ESKY_APPDATA_DIR subdirectory when it contains at least
    one installed version; otherwise falls back to the appdir itself.
    """
    if not ESKY_APPDATA_DIR:
        return self.appdir
    # TODO: remove compatability hooks for ESKY_APPDATA_DIR=""
    try:
        for nm in os.listdir(os.path.join(self.appdir, ESKY_APPDATA_DIR)):
            fullnm = os.path.join(self.appdir, ESKY_APPDATA_DIR, nm)
            if is_version_dir(fullnm) and is_installed_version_dir(fullnm):
                return os.path.join(self.appdir, ESKY_APPDATA_DIR)
    except EnvironmentError:
        # Missing/unreadable appdata dir: fall back to the appdir.
        pass
    return self.appdir
def get_abspath(self, relpath):
    """Get the absolute path of a file within the current version.

    Uses the running (active) version when known, otherwise the best
    installed version.
    """
    if self.active_version:
        v = join_app_version(self.name, self.active_version, self.platform)
    else:
        v = join_app_version(self.name, self.version, self.platform)
    # TODO: remove compatability hooks for ESKY_APPDATA_DIR=""
    if os.path.exists(os.path.join(self._get_versions_dir(), v)):
        return os.path.join(self._get_versions_dir(), v, relpath)
    return os.path.join(self.appdir, v, relpath)
def reinitialize(self):
    """Reinitialize internal state by poking around in the app directory.

    If the app directory is found to be in an inconsistent state, a
    EskyBrokenError will be raised.  This should never happen unless
    another process has been messing with the files.
    """
    best_version = get_best_version(self._get_versions_dir())
    if best_version is None:
        raise EskyBrokenError("no frozen versions found")
    details = split_app_version(best_version)
    self.name, self.version, self.platform = details
@allow_from_sudo()
def lock(self, num_retries=0):
    """Lock the application directory for exclusive write access.

    If the appdir is already locked by another process/thread then
    EskyLockedError is raised.  There is no way to perform a blocking
    lock on an appdir.

    Locking is achieved by creating a "locked" directory and writing the
    current process/thread ID into it.  os.mkdir is atomic on all platforms
    that we care about.

    This also has the side-effect of failing early if the user does not
    have permission to modify the application directory.
    """
    if self.sudo_proxy is not None:
        return self.sudo_proxy.lock()
    # Give up after a handful of break-the-stale-lock retries.
    if num_retries > 5:
        raise EskyLockedError
    if threading:
        curthread = threading.currentThread()
        try:
            threadid = curthread.ident
        except AttributeError:
            # Older Pythons without Thread.ident; fall back to the name.
            threadid = curthread.getName()
    else:
        threadid = "0"
    myid = "%s-%s-%s" % (socket.gethostname(), os.getpid(), threadid)
    lockdir = os.path.join(self.appdir, "locked")
    # Do I already own the lock?
    if os.path.exists(os.path.join(lockdir, myid)):
        # Update file mtime to keep it safe from breakers
        os.utime(os.path.join(lockdir, myid), None)
        self._lock_count += 1
        return True
    # Try to make the "locked" directory.
    try:
        os.mkdir(lockdir)
    except OSError, e:
        if e.errno != errno.EEXIST:
            raise
        # Is it stale?  If so, break it and try again.
        try:
            newest_mtime = os.path.getmtime(lockdir)
            for nm in os.listdir(lockdir):
                mtime = os.path.getmtime(os.path.join(lockdir, nm))
                if mtime > newest_mtime:
                    newest_mtime = mtime
            if newest_mtime + self.lock_timeout < time.time():
                really_rmtree(lockdir)
                return self.lock(num_retries+1)
            else:
                raise EskyLockedError
        except OSError, e:
            # The lockdir vanished while we looked: retry from scratch.
            if e.errno not in (errno.ENOENT, errno.ENOTDIR,):
                raise
            return self.lock(num_retries+1)
    else:
        # Success!  Record my ownership
        open(os.path.join(lockdir, myid), "wb").close()
        self._lock_count = 1
        return True
@allow_from_sudo()
def unlock(self):
    """Unlock the application directory for exclusive write access."""
    if self.sudo_proxy is not None:
        return self.sudo_proxy.unlock()
    # NOTE(review): there is no guard against unlock() without a matching
    # lock(); the counter can go negative and the unlink below would fail.
    self._lock_count -= 1
    if self._lock_count == 0:
        # Recompute the same owner id that lock() wrote.
        if threading:
            curthread = threading.currentThread()
            try:
                threadid = curthread.ident
            except AttributeError:
                threadid = curthread.getName()
        else:
            threadid = "0"
        myid = "%s-%s-%s" % (socket.gethostname(), os.getpid(), threadid)
        lockdir = os.path.join(self.appdir, "locked")
        os.unlink(os.path.join(lockdir, myid))
        os.rmdir(lockdir)
@allow_from_sudo()
def has_root(self):
    """Check whether the user currently has root/administrator access."""
    if self.sudo_proxy is not None:
        return self.sudo_proxy.has_root()
    return has_root()
def get_root(self):
    """Attempt to gain root/administrator access by spawning helper app.

    Raises OSError if the helper fails to obtain elevated privileges.
    """
    if self.has_root():
        return True
    self.sudo_proxy = SudoProxy(self)
    self.sudo_proxy.start()
    if not self.sudo_proxy.has_root():
        raise OSError(None, "could not escalate to root privileges")
def drop_root(self):
    """Drop root privileges by killing the helper app."""
    if self.sudo_proxy is not None:
        self.sudo_proxy.close()
        if self.keep_sudo_proxy_alive:
            # cleanup_at_exit() needs the proxy to survive; park it.
            self._old_sudo_proxies.append(self.sudo_proxy)
        else:
            self.sudo_proxy.terminate()
        self.sudo_proxy = None
@allow_from_sudo()
def cleanup(self):
    """Perform cleanup tasks in the app directory.

    This includes removing older versions of the app and completing any
    failed update attempts.  Such maintenance is not done automatically
    since it can take a non-negligible amount of time.

    If the cleanup proceeds sucessfully this method will return True; it
    there is work that cannot currently be completed, it returns False.
    """
    if self.sudo_proxy is not None:
        return self.sudo_proxy.cleanup()
    if not self.needs_cleanup():
        return True
    self.lock()
    try:
        # This is a little coroutine trampoline that executes each
        # action yielded from self._cleanup_actions().  Any exceptions
        # that the action raises are thrown back into the generator.
        # The result of each is and-ed into the success code.
        #
        # If you're looking for the actual logic of the cleanup process,
        # it's all in the _cleanup_actions() method.
        success = True
        actions = self._cleanup_actions()
        try:
            act = lambda: True
            while True:
                try:
                    # An action is a bare callable or a (func, args[, kwds])
                    # tuple of varying length.
                    if callable(act):
                        res = act()
                    elif len(act) == 1:
                        res = act[0]()
                    elif len(act) == 2:
                        res = act[0](*act[1])
                    else:
                        res = act[0](*act[1], **act[2])
                    if res is not None:
                        success &= res
                except Exception:
                    act = actions.throw(*sys.exc_info())
                else:
                    act = actions.next()
        except StopIteration:
            return success
    finally:
        self.unlock()
def needs_cleanup(self):
    """Check whether a call to cleanup() is necessary.

    This method checks whether a call to the cleanup() method will have
    any work to do, without obtaining a lock on the esky's appdir.  You
    might like to use this to avoid locking the appdir (which may require
    elevating to root) when there's nothing to do.
    """
    # The action generator is lazy; producing even a single action means
    # there is cleanup work pending.
    sentinel = object()
    return next(iter(self._cleanup_actions()), sentinel) is not sentinel
def _cleanup_actions(self):
    """Iterator giving (func,args,kwds) tuples of cleanup actions.

    This encapsulates the logic of the "cleanup" method without actually
    performing any of the actions, making it easy to check whether cleanup
    is required without duplicating the logic.
    """
    appdir = self.appdir
    vsdir = self._get_versions_dir()
    best_version = get_best_version(vsdir)
    new_version = get_best_version(vsdir, include_partial_installs=True)
    # If there's a partial install we must complete it, since it
    # could have left exes in the bootstrap env and we don't want
    # to accidentally delete their dependencies.
    if best_version != new_version:
        (_, v, _) = split_app_version(new_version)
        yield (self.install_version, (v,))
        best_version = new_version
    # TODO: remove compatability hooks for ESKY_APPDATA_DIR=""
    if vsdir == appdir and ESKY_APPDATA_DIR:
        appdatadir = os.path.join(appdir, ESKY_APPDATA_DIR)
        if os.path.isdir(appdatadir) and os.listdir(appdatadir):
            new_version = get_best_version(appdatadir,
                                           include_partial_installs=True)
            if best_version != new_version:
                (_, v, _) = split_app_version(new_version)
                yield (self.install_version, (v,))
                best_version = new_version
    # Now we can safely remove all the old versions.
    # We except the currently-executing version, and silently
    # ignore any locked versions.
    manifest = self._version_manifest(best_version)
    manifest.add("updates")
    manifest.add("locked")
    manifest.add(best_version)
    if self.active_version:
        # An out-of-date version is still running: report "not done".
        if self.active_version != split_app_version(best_version)[1]:
            yield lambda: False
        manifest.add(self.active_version)
    # TODO: remove compatability hooks for ESKY_APPDATA_DIR=""
    for tdir in (appdir, vsdir):
        for nm in os.listdir(tdir):
            if nm not in manifest:
                fullnm = os.path.join(tdir, nm)
                if ".old." in nm or nm.endswith(".old"):
                    # It's a temporary backup file; remove it.
                    yield (self._try_remove, (tdir, nm, manifest,))
                elif not os.path.isdir(fullnm):
                    # It's an unaccounted-for file in the bootstrap env.
                    # Leave it alone.
                    pass
                elif is_version_dir(fullnm):
                    # It's an installed-but-obsolete version.  Properly
                    # uninstall it so it will clean up the bootstrap env.
                    (_, v, _) = split_app_version(nm)
                    # Exceptions raised while running the yielded action
                    # are thrown back in here by cleanup()'s trampoline.
                    try:
                        yield (self.uninstall_version, (v,))
                    except VersionLockedError:
                        yield lambda: False
                    else:
                        yield (self._try_remove, (tdir, nm, manifest,))
                elif is_uninstalled_version_dir(fullnm):
                    # It's a partially-removed version; finish removing it.
                    yield (self._try_remove, (tdir, nm, manifest,))
                else:
                    for (_, _, filenms) in os.walk(fullnm):
                        if filenms:
                            # It contains unaccounted-for files in the
                            # bootstrap env.  Can't prove it's safe to
                            # remove, so leave it alone.
                            break
                    else:
                        # It's an empty directory structure, remove it.
                        yield (self._try_remove, (tdir, nm, manifest,))
    # If there are pending overwrites, try to do them.
    ovrdir = os.path.join(vsdir, best_version, ESKY_CONTROL_DIR,
                          "overwrite")
    if os.path.exists(ovrdir):
        try:
            for (dirnm, _, filenms) in os.walk(ovrdir, topdown=False):
                for nm in filenms:
                    ovrsrc = os.path.join(dirnm, nm)
                    ovrdst = os.path.join(appdir, ovrsrc[len(ovrdir)+1:])
                    yield (self._overwrite, (ovrsrc, ovrdst,))
                    yield (os.unlink, (ovrsrc,))
                yield (os.rmdir, (dirnm,))
        except EnvironmentError:
            yield lambda: False
    # Get the VersionFinder to clean up after itself
    if self.version_finder is not None:
        if self.version_finder.needs_cleanup(self):
            yield (self.version_finder.cleanup, (self,))
def _overwrite(self, src, dst):
    """Directly overwrite file 'dst' with the contents of file 'src'."""
    # NOTE(review): dst is opened in append mode ("ab") and then seek(0)
    # is called; on POSIX, O_APPEND forces every write to the end of the
    # file regardless of the seek, so this may append rather than
    # overwrite -- confirm the intended semantics before changing.
    with open(src, "rb") as fIn:
        with open(dst, "ab") as fOut:
            fOut.seek(0)
            chunk = fIn.read(512*16)
            while chunk:
                fOut.write(chunk)
                chunk = fIn.read(512*16)
@allow_from_sudo()
def cleanup_at_exit(self):
    """Arrange for cleanup to occur after application exit.

    This operates by using the atexit module to spawn a new instance of
    this app, with appropriate flags that cause it to launch directly into
    the cleanup process.

    Recall that sys.executable points to a specific version dir, so this
    new process will not hold any filesystem locks in the main app dir.
    """
    if self.sudo_proxy is not None:
        self.keep_sudo_proxy_alive = True
        return self.sudo_proxy.cleanup_at_exit()
    if not getattr(sys, "frozen", False):
        # Not frozen: run the cleanup hook through the interpreter.
        exe = [sys.executable, "-c",
               "import esky; esky.run_startup_hooks()",
               "--esky-spawn-cleanup"]
    else:
        exe = sys.executable
        # Try to re-launch the best available version, so that
        # the currently in-use version can be cleaned up.
        if self.active_version is not None:
            vsdir = self._get_versions_dir()
            bestver = get_best_version(vsdir,
                                       include_partial_installs=True)
            if bestver is not None:
                (_, version, _) = split_app_version(bestver)
                if self.active_version != version:
                    if self.active_version in exe:
                        exe = exe.replace(self.active_version, version)
                        if not os.path.isfile(exe):
                            exe = sys.executable
        if os.path.basename(exe).lower() in ("python", "pythonw"):
            exe = [exe, "-c", "import esky; esky.run_startup_hooks()",
                   "--esky-spawn-cleanup"]
        else:
            if not _startup_hooks_were_run:
                raise OSError(None,
                              "unable to cleanup: startup hooks not run")
            exe = [exe, "--esky-spawn-cleanup"]
    # The esky state rides along as a base64-encoded pickle argument,
    # decoded again by run_startup_hooks() in the child process.
    appdata = pickle.dumps(self, pickle.HIGHEST_PROTOCOL)
    exe = exe + [base64.b64encode(appdata).decode("ascii")]
    @atexit.register
    def spawn_cleanup():
        # Detach the child from our stdio so it outlives us cleanly.
        rnul = open(os.devnull, "r")
        wnul = open(os.devnull, "w")
        if sys.platform == "win32":
            if sys.hexversion >= 0x02060000:
                kwds = dict(close_fds=True)
            else:
                kwds = {}
        else:
            kwds = dict(stdin=rnul, stdout=wnul, stderr=wnul,
                        close_fds=True)
        subprocess.Popen(exe, **kwds)
def _try_remove(self, tdir, path, manifest=[]):
    """Try to remove the file/directory at given path in the target dir.

    This method attempts to remove the file or directory at the given path,
    but will fail silently under a number of conditions:

        * if a file is locked or permission is denied
        * if a directory cannot be emptied of all contents
        * if the path appears on sys.path
        * if the path appears in the given manifest
    """
    # NOTE: the mutable default for *manifest* is only read (membership
    # tests below), never mutated, so the shared-default pitfall does not
    # apply here.
    fullpath = os.path.join(tdir, path)
    if fullpath in sys.path:
        return False
    if path in manifest:
        return False
    try:
        if os.path.isdir(fullpath):
            # Remove paths starting with "esky" last, since we use
            # these to maintain state information.
            esky_paths = []
            success = True
            for nm in os.listdir(fullpath):
                if nm == "esky" or nm.startswith("esky-"):
                    esky_paths.append(nm)
                else:
                    subdir = os.path.join(path, nm)
                    success &= self._try_remove(tdir, subdir, manifest)
            if not success:
                return False
            for nm in sorted(esky_paths):
                self._try_remove(tdir, os.path.join(path, nm), manifest)
            os.rmdir(fullpath)
        else:
            os.unlink(fullpath)
    except EnvironmentError, e:
        if e.errno not in self._errors_to_ignore:
            raise
        return False
    else:
        return True

# Errors treated as "couldn't remove, carry on" by _try_remove().
_errors_to_ignore = (errno.ENOENT, errno.EPERM, errno.EACCES,
                     errno.ENOTDIR, errno.EISDIR, errno.EINVAL,
                     errno.ENOTEMPTY,)
def auto_update(self, callback=None):
    """Automatically install the latest version of the app.

    This method automatically performs the following sequence of actions,
    escalating to root privileges if a permission error is encountered:

        * find the latest version [self.find_update()]
        * fetch the new version [self.fetch_version()]
        * install the new version [self.install_version()]
        * attempt to uninstall the old version [self.uninstall_version()]
        * reinitialize internal state [self.reinitialize()]
        * clean up the appdir [self.cleanup()]

    This method is mostly here to help you get started.  For an app of
    any serious complexity, you will probably want to build your own
    variant that e.g. operates in a background thread, prompts the user
    for confirmation, etc.

    *callback*, if given, receives progress dicts with a "status" key.
    """
    if self.version_finder is None:
        raise NoVersionFinderError
    if callback is None:
        callback = lambda *args: True
    got_root = False
    cleaned = False
    try:
        callback({"status": "searching"})
        version = self.find_update()
        if version is not None:
            callback({"status": "found", "new_version": version})
            # Try to install the new version.  If it fails with
            # a permission error, escalate to root and try again.
            try:
                self._do_auto_update(version, callback)
            except EnvironmentError:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                if exc_value.errno != errno.EACCES or self.has_root():
                    raise
                try:
                    self.get_root()
                except Exception, e:
                    # Escalation failed: re-raise the original error.
                    raise exc_type, exc_value, exc_traceback
                else:
                    got_root = True
                    self._do_auto_update(version, callback)
            self.reinitialize()
            # Try to clean up the app dir.  If it fails with a
            # permission error, escalate to root and try again.
            try:
                callback({"status": "cleaning up"})
                cleaned = self.cleanup()
            except EnvironmentError:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                if exc_value.errno != errno.EACCES or self.has_root():
                    raise
                try:
                    self.get_root()
                except Exception, e:
                    raise exc_type, exc_value, exc_traceback
                else:
                    got_root = True
                    callback({"status": "cleaning up"})
                    cleaned = self.cleanup()
    except Exception, e:
        callback({"status": "error", "exception": e})
        raise
    else:
        callback({"status": "done"})
    finally:
        # Drop root privileges as soon as possible.
        if not cleaned and self.needs_cleanup():
            self.cleanup_at_exit()
        if got_root:
            self.drop_root()
def _do_auto_update(self, version, callback):
    """Actual sequence of operations for auto-update.

    This is a separate method so it can easily be retried after gaining
    root privileges.
    """
    self.fetch_version(version, callback)
    callback({"status": "installing", "new_version": version})
    self.install_version(version)
    try:
        self.uninstall_version(self.version)
    except VersionLockedError:
        # The old version is still running; cleanup() handles it later.
        pass
def find_update(self):
    """Check for an available update to this app.

    This method returns either None, or a string giving the version of
    the newest available update.
    """
    if self.version_finder is None:
        raise NoVersionFinderError
    # Track the newest candidate strictly greater than the current version.
    newest = None
    newest_parsed = parse_version(self.version)
    for candidate in self.version_finder.find_versions(self):
        candidate_parsed = parse_version(candidate)
        if candidate_parsed > newest_parsed:
            newest, newest_parsed = candidate, candidate_parsed
    return newest
def fetch_version(self, version, callback=None):
    """Fetch the specified updated version of the app."""
    if self.sudo_proxy is not None:
        # Delegate to the root helper, relaying progress to *callback*.
        for status in self.sudo_proxy.fetch_version_iter(version):
            if callback is not None:
                callback(status)
        return self.version_finder.has_version(self, version)
    if self.version_finder is None:
        raise NoVersionFinderError
    # Guard against malicious input (might be called with root privs)
    vsdir = self._get_versions_dir()
    target = join_app_version(self.name, version, self.platform)
    target = os.path.join(vsdir, target)
    assert os.path.dirname(target) == vsdir
    # Get the new version using the VersionFinder
    loc = self.version_finder.has_version(self, version)
    if not loc:
        loc = self.version_finder.fetch_version(self, version, callback)
    # Adjust permissions to match the current version
    vdir = join_app_version(self.name, self.version, self.platform)
    copy_ownership_info(os.path.join(vsdir, vdir), loc)
    return loc
@allow_from_sudo(str, iterator=True)
def fetch_version_iter(self, version):
    """Fetch specified version of the app, with iterator control flow.

    Yields progress dicts; the final one has status "ready" and a "path".
    """
    if self.sudo_proxy is not None:
        for status in self.sudo_proxy.fetch_version_iter(version):
            yield status
        return
    if self.version_finder is None:
        raise NoVersionFinderError
    # Guard against malicious input (might be called with root privs)
    vsdir = self._get_versions_dir()
    target = join_app_version(self.name, version, self.platform)
    target = os.path.join(vsdir, target)
    assert os.path.dirname(target) == vsdir
    # Get the new version using the VersionFinder
    loc = self.version_finder.has_version(self, version)
    if not loc:
        for status in self.version_finder.fetch_version_iter(self,
                                                             version):
            if status["status"] != "ready":
                yield status
            else:
                loc = status["path"]
    # Adjust permissions to match the current version
    vdir = join_app_version(self.name, self.version, self.platform)
    copy_ownership_info(os.path.join(vsdir, vdir), loc)
    yield {"status": "ready", "path": loc}
@allow_from_sudo(str)
def install_version(self, version):
    """Install the specified version of the app.

    This fetches the specified version if necessary, then makes it
    available as a version directory inside the app directory.  It
    does not modify any other installed versions.
    """
    if self.sudo_proxy is not None:
        return self.sudo_proxy.install_version(version)
    # Extract update then rename into position in main app directory
    vsdir = self._get_versions_dir()
    target = join_app_version(self.name, version, self.platform)
    target = os.path.join(vsdir, target)
    # Guard against malicious input (might be called with root privs)
    assert os.path.dirname(target) == vsdir
    if not os.path.exists(target):
        self.fetch_version(version)
        source = self.version_finder.has_version(self, version)
    # TODO: remove compatability hooks for ESKY_APPDATA_DIR="".
    # This is our chance to migrate to the new appdata dir layout,
    # by installing into it.
    if vsdir == self.appdir and ESKY_APPDATA_DIR:
        vsdir = os.path.join(self.appdir, ESKY_APPDATA_DIR)
        try:
            os.mkdir(vsdir)
        except EnvironmentError, e:
            if e.errno not in (errno.EEXIST,):
                raise
        else:
            copy_ownership_info(self.appdir, vsdir)
        target = os.path.join(vsdir, os.path.basename(target))
    self.lock()
    try:
        if not os.path.exists(target):
            really_rename(source, target)
        # Unpack the bootstrap env atomically via a filesystem transaction.
        trn = esky.fstransact.FSTransaction(self.appdir)
        try:
            self._unpack_bootstrap_env(target, trn)
        except Exception:
            trn.abort()
            raise
        else:
            trn.commit()
    finally:
        self.unlock()
def _unpack_bootstrap_env(self, target, trn):
    """Unpack the bootstrap env from the given target directory."""
    vdir = os.path.basename(target)
    # Move new bootrapping environment into main app dir.
    # Be sure to move dependencies before executables.
    bootstrap = os.path.join(target, ESKY_CONTROL_DIR, "bootstrap")
    for nm in self._version_manifest(vdir):
        bssrc = os.path.join(bootstrap, nm)
        bsdst = os.path.join(self.appdir, nm)
        if os.path.exists(bssrc):
            # On windows we can't atomically replace files.
            # If they differ in a "safe" way we put them aside
            # to overwrite at a later time.
            if sys.platform == "win32" and os.path.exists(bsdst):
                if not files_differ(bssrc, bsdst):
                    trn.remove(bssrc)
                elif esky.winres.is_safe_to_overwrite(bssrc, bsdst):
                    ovrdir = os.path.join(target, ESKY_CONTROL_DIR)
                    ovrdir = os.path.join(ovrdir, "overwrite")
                    if not os.path.exists(ovrdir):
                        os.mkdir(ovrdir)
                    trn.move(bssrc, os.path.join(ovrdir, nm))
                else:
                    trn.move(bssrc, bsdst)
            else:
                trn.move(bssrc, bsdst)
            # Prune now-empty source directories as we go.
            if os.path.isdir(os.path.dirname(bssrc)):
                if not os.listdir(os.path.dirname(bssrc)):
                    trn.remove(os.path.dirname(bssrc))
    # Remove the bootstrap dir; the new version is now installed
    trn.remove(bootstrap)
@allow_from_sudo(str)
def uninstall_version(self, version):
    """Uninstall the specified version of the app.

    Raises VersionLockedError if the version is currently in use.
    """
    if self.sudo_proxy is not None:
        return self.sudo_proxy.uninstall_version(version)
    vsdir = self._get_versions_dir()
    target_name = join_app_version(self.name, version, self.platform)
    target = os.path.join(vsdir, target_name)
    # Guard against malicious input (might be called with root privs)
    assert os.path.dirname(target) == vsdir
    # TODO: remove compatability hooks for ESKY_APPDATA_DIR="".
    if ESKY_APPDATA_DIR and not os.path.exists(target):
        if vsdir == self.appdir:
            target = os.path.join(self.appdir, ESKY_APPDATA_DIR,
                                  target_name)
        else:
            target = os.path.join(self.appdir, target_name)
    lockfile = os.path.join(target, ESKY_CONTROL_DIR, "lockfile.txt")
    bsfile = os.path.join(target, ESKY_CONTROL_DIR,
                          "bootstrap-manifest.txt")
    bsfile_old = os.path.join(target, ESKY_CONTROL_DIR,
                              "bootstrap-manifest-old.txt")
    self.lock()
    try:
        if not os.path.exists(target):
            return
        # Clean up the bootstrapping environment in a transaction.
        # This might fail on windows if the version is locked.
        try:
            trn = esky.fstransact.FSTransaction(self.appdir)
            try:
                self._cleanup_bootstrap_env(version, trn)
            except Exception:
                trn.abort()
                raise
            else:
                trn.commit()
        except EnvironmentError:
            if is_locked_version_dir(target):
                raise VersionLockedError("version in use: %s" % (version,))
            raise
        # Disable the version by renaming its bootstrap-manifest.txt file.
        # To avoid clobbering in-use version, respect locks on this file.
        if sys.platform == "win32":
            # On windows the rename itself fails if the version is locked.
            try:
                really_rename(bsfile, bsfile_old)
            except EnvironmentError:
                raise VersionLockedError("version in use: %s" % (version,))
        else:
            try:
                f = open(lockfile, "r")
            except EnvironmentError, e:
                # No lockfile at all: nothing holds the version open.
                if e.errno != errno.ENOENT:
                    raise
            else:
                try:
                    # A held flock means a running process owns this version.
                    fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
                except EnvironmentError, e:
                    if e.errno not in (errno.EACCES, errno.EAGAIN,):
                        raise
                    msg = "version in use: %s" % (version,)
                    raise VersionLockedError(msg)
                else:
                    really_rename(bsfile, bsfile_old)
                finally:
                    f.close()
    finally:
        self.unlock()
def _cleanup_bootstrap_env(self, version, trn):
    """Cleanup the bootstrap env populated by the given version."""
    target_name = join_app_version(self.name, version, self.platform)
    # Get set of all files that must stay in the main appdir
    to_keep = set()
    for vname in os.listdir(self._get_versions_dir()):
        if vname == target_name:
            continue
        details = split_app_version(vname)
        if details[0] != self.name:
            continue
        # NOTE(review): versions older than the one being removed are
        # skipped, so only same-or-newer versions pin bootstrap files --
        # confirm this is the intended retention rule.
        if parse_version(details[1]) < parse_version(version):
            continue
        to_keep.update(self._version_manifest(vname))
    # Remove files used only by the version being removed
    to_rem = self._version_manifest(target_name) - to_keep
    for nm in to_rem:
        fullnm = os.path.join(self.appdir, nm)
        if os.path.exists(fullnm):
            trn.remove(fullnm)
        # Prune the parent directory if it is now empty.
        if os.path.isdir(os.path.dirname(fullnm)):
            if not os.listdir(os.path.dirname(fullnm)):
                trn.remove(os.path.dirname(fullnm))
def _version_manifest(self, vdir):
    """Get the bootstrap manifest for the given version directory.

    This is the set of files/directories that the given version expects
    to be in the main app directory.  A missing manifest file yields an
    empty set.
    """
    vsdir = self._get_versions_dir()
    mpath = os.path.join(vsdir, vdir, ESKY_CONTROL_DIR)
    mpath = os.path.join(mpath, "bootstrap-manifest.txt")
    # TODO: remove compatability hooks for ESKY_APPDATA_DIR="".
    if not os.path.exists(mpath):
        if vsdir == self.appdir:
            mpath = os.path.join(self.appdir, ESKY_APPDATA_DIR, vdir,
                                 ESKY_CONTROL_DIR)
        else:
            mpath = os.path.join(self.appdir, vdir, ESKY_CONTROL_DIR)
        mpath = os.path.join(mpath, "bootstrap-manifest.txt")
    manifest = set()
    try:
        with open(mpath, "rt") as mf:
            for ln in mf:
                # Guard against malicious input, since we might try
                # to manipulate these files with root privs.
                nm = os.path.normpath(ln.strip())
                assert not os.path.isabs(nm)
                assert not nm.startswith("..")
                manifest.add(nm)
    except IOError:
        # Unreadable/missing manifest => empty manifest.
        pass
    return manifest
# Set True by run_startup_hooks(); checked by Esky.cleanup_at_exit().
_startup_hooks_were_run = False
def run_startup_hooks():
    """Esky startup hooks; every frozen app must call this at startup.

    Locks the running version directory, handles the internal
    "--esky-spawn-cleanup" invocation, and runs sub-module hooks.
    """
    global _startup_hooks_were_run
    _startup_hooks_were_run = True
    # Lock the version dir while we're executing, so other instances don't
    # delete files out from under us.
    if getattr(sys, "frozen", False):
        appdir = appdir_from_executable(sys.executable)
        # TODO: remove ESKY_APPDATA_DIR="" compatability hooks
        if ESKY_APPDATA_DIR:
            vdir = os.sep.join(sys.executable[len(appdir):].split(os.sep)[1:3])
            vdir = os.path.join(appdir, vdir)
            if not is_version_dir(vdir):
                vdir = sys.executable[len(appdir):].split(os.sep)[1]
                vdir = os.path.join(appdir, vdir)
        else:
            vdir = sys.executable[len(appdir):].split(os.sep)[1]
            vdir = os.path.join(appdir, vdir)
            if not is_version_dir(vdir):
                vdir = os.sep.join(sys.executable[len(appdir):].split(os.sep)[1:3])
                vdir = os.path.join(appdir, vdir)
        lock_version_dir(vdir)
    # Run the "spawn-cleanup" hook if given.
    if len(sys.argv) > 1 and sys.argv[1] == "--esky-spawn-cleanup":
        # NOTE(review): unpickling command-line data is only acceptable
        # because the parent process produced it (see cleanup_at_exit);
        # do not extend this to untrusted input.
        app = pickle.loads(base64.b64decode(sys.argv[2].encode("ascii")))
        time.sleep(1)
        app.cleanup()
        sys.exit(0)
    # Let esky.slaveproc run its hooks.
    import esky.slaveproc
    esky.slaveproc.run_startup_hooks()
    # Let esky.sudo run its hooks.
    import esky.sudo
    esky.sudo.run_startup_hooks()
| |
"""
tmdb.py --- Jen Plugin for accessing tmdb data
Copyright (C) 2017, Midraal
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Usage Examples:
<dir>
<title>TMDB Popular</title>
<tmdb>movies/popular</tmdb>
</dir>
<dir>
<title>TMDB Now Playing</title>
<tmdb>movies/now_playing</tmdb>
</dir>
<dir>
<title>TMDB Top Rated</title>
<tmdb>movies/top_rated</tmdb>
</dir>
<dir>
<title>TMDB Action Movies</title>
<tmdb>genre/movies/28</tmdb>
</dir>
<dir>
<title>TMDB Star Wars Collection</title>
<tmdb>collection/10</tmdb>
</dir>
<dir>
<title>TMDB Popular</title>
<tmdb>tv/popular</tmdb>
</dir>
<dir>
<title>TMDB Top Rated</title>
<tmdb>tv/top_rated</tmdb>
</dir>
<dir>
<title>TMDB Animation Shows</title>
<tmdb>genre/shows/16</tmdb>
</dir>
<dir>
<title>TMDB List: Animal Kingdom</title>
<tmdb>list/13488</tmdb>
</dir>
<dir>
<title>Bryan Cranston Shows TMDB</title>
<tmdb>person/shows/17419</tmdb>
</dir>
<dir>
<title>Bryan Cranston Movies TMDB</title>
<tmdb>person/movies/17419</tmdb>
</dir>
"""
import pickle
import time
import koding
import resources.lib.external.tmdbsimple as tmdbsimple
import xbmcaddon
from koding import route
from resources.lib.plugin import Plugin
from resources.lib.util.context import get_context_items
from resources.lib.util.xml import JenItem, JenList, display_list
from unidecode import unidecode
CACHE_TIME = 3600  # change to wanted cache time in seconds
# Add-on-wide default artwork, used when an XML item supplies none.
addon_fanart = xbmcaddon.Addon().getAddonInfo('fanart')
addon_icon = xbmcaddon.Addon().getAddonInfo('icon')
class TMDB(Plugin):
    """Jen plugin that turns <tmdb>/tmdb_tv_show/tmdb_season XML into items."""
    name = "tmdb"

    def process_item(self, item_xml):
        """Convert one raw Jen XML fragment into a Kodi list-item dict.

        Recognizes three markers in ``item_xml``: a ``<tmdb>`` tag (browse
        directory), a ``tmdb_tv_show(...)`` link and a ``tmdb_season(...)``
        link.  Returns ``None`` for anything else.
        """
        if "<tmdb>" in item_xml:
            item = JenItem(item_xml)
            entry = {
                'label': item["title"],
                'icon': item.get("thumbnail", addon_icon),
                'fanart': item.get("fanart", addon_fanart),
                'mode': "tmdb",
                'url': item.get("tmdb", ""),
                'folder': True,
                'imdb': "0",
                'content': "files",
                'season': "0",
                'episode': "0",
                'info': {},
                'year': "0",
                'context': get_context_items(item),
                "summary": item.get("summary", None),
            }
            return self._with_fanart_extras(entry)
        if "tmdb_tv_show" in item_xml:
            item = JenItem(item_xml)
            payload = self._link_payload(item, "tmdb_tv_show")
            entry = {
                'label': item["title"],
                'icon': item["thumbnail"],
                'fanart': item.get("fanart", addon_fanart),
                'mode': "tmdb_tv_show",
                'url': "tmdb_id" + payload,
                'folder': True,
                'content': "tvshows",
                'season': "0",
                'episode': "0",
                'info': {},
                'year': item.get("year", ""),
                'context': get_context_items(item),
                "summary": item.get("summary", None),
            }
            return self._with_fanart_extras(entry)
        if "tmdb_season(" in item_xml:
            item = JenItem(item_xml)
            payload = self._link_payload(item, "tmdb_season")
            entry = {
                'label': item["title"],
                'icon': item["thumbnail"],
                'fanart': item.get("fanart", addon_fanart),
                'mode': "tmdb_season",
                'url': "tmdb_id" + payload,
                'folder': True,
                'content': "seasons",
                # second comma-field of the payload is the season number
                'season': str(payload.split(",")[1]),
                'episode': "0",
                'info': {},
                'year': item.get("year", ""),
                'context': {},
                "summary": item.get("summary", None),
            }
            return self._with_fanart_extras(entry)
        return None

    @staticmethod
    def _link_payload(item, marker):
        """Strip ``marker(`` and the trailing ``)`` from the item's link."""
        return item.get("link", ")").replace(marker + "(", "")[:-1]

    @staticmethod
    def _with_fanart_extras(entry):
        """Mirror the fanart into the extra keys Jen/Kodi expect."""
        entry["properties"] = {'fanart_image': entry["fanart"]}
        entry['fanart_small'] = entry["fanart"]
        return entry
@route(mode='tmdb', args=["url"])
def tmdb(url):
    """Browse a TMDB endpoint described by *url* and display the results.

    *url* is a slash-separated route such as ``movies/popular``,
    ``tv/top_rated/2``, ``genre/movies/28``, ``list/13488``,
    ``person/shows/17419`` or ``collection/10``; a trailing integer is the
    page number.  Responses are served from the local cache when present,
    otherwise fetched via tmdbsimple and cached afterwards.
    """
    xml = ""
    page = 1
    # Cached API response for this exact route, or a falsy miss value.
    response = fetch_from_db(url)
    if url.startswith("movies"):
        if url.startswith("movies/popular"):
            last = url.split("/")[-1]
            if last.isdigit():
                page = int(last)
            if not response:
                response = tmdbsimple.Movies().popular(page=page)
        if url.startswith("movies/now_playing"):
            last = url.split("/")[-1]
            if last.isdigit():
                page = int(last)
            if not response:
                response = tmdbsimple.Movies().now_playing(page=page)
        if url.startswith("movies/top_rated"):
            last = url.split("/")[-1]
            if last.isdigit():
                page = int(last)
            if not response:
                response = tmdbsimple.Movies().top_rated(page=page)
        for item in response["results"]:
            xml += get_movie_xml(item)
    elif url.startswith("tv"):
        if url.startswith("tv/popular"):
            last = url.split("/")[-1]
            if last.isdigit():
                page = int(last)
            if not response:
                response = tmdbsimple.TV().popular(page=page)
        elif url.startswith("tv/top_rated"):
            last = url.split("/")[-1]
            if last.isdigit():
                page = int(last)
            if not response:
                response = tmdbsimple.TV().top_rated(page=page)
        elif url.startswith("tv/today"):
            last = url.split("/")[-1]
            if last.isdigit():
                page = int(last)
            if not response:
                response = tmdbsimple.TV().airing_today(page=page)
        for item in response["results"]:
            xml += get_show_xml(item)
    elif url.startswith("list"):
        list_id = url.split("/")[-1]
        if not response:
            response = tmdbsimple.Lists(list_id).info()
        # List entries mix movies and shows; movies carry "title",
        # shows carry "name".
        for item in response["items"]:
            if "title" in item:
                xml += get_movie_xml(item)
            elif "name" in item:
                xml += get_show_xml(item)
    elif url.startswith("person"):
        split_url = url.split("/")
        person_id = split_url[-1]
        media = split_url[-2]
        if media == "movies":
            if not response:
                response = tmdbsimple.People(person_id).movie_credits()
        elif media == "shows":
            if not response:
                response = tmdbsimple.People(person_id).tv_credits()
        # Credits are grouped by job ("cast", "crew", ...); the extra
        # "id" key holds the person id, not a credit list.
        for job in response:
            if job == "id":
                continue
            for item in response[job]:
                if media == "movies":
                    xml += get_movie_xml(item)
                elif media == "shows":
                    xml += get_show_xml(item)
    elif url.startswith("genre"):
        split_url = url.split("/")
        if len(split_url) == 3:
            # No explicit page: default to page 1 (both in the url used as
            # cache key and in the parsed components).
            url += "/1"
            split_url.append(1)
        page = int(split_url[-1])
        genre_id = split_url[-2]
        media = split_url[-3]
        if media == "movies":
            if not response:
                response = tmdbsimple.Discover().movie(with_genres=genre_id,
                                                       page=page)
        elif media == "shows":
            if not response:
                response = tmdbsimple.Discover().tv(with_genres=genre_id,
                                                    page=page)
        for item in response["results"]:
            if media == "movies":
                xml += get_movie_xml(item)
            elif media == "shows":
                xml += get_show_xml(item)
    elif url.startswith("collection"):
        split_url = url.split("/")
        collection_id = split_url[-1]
        if not response:
            response = tmdbsimple.Collections(collection_id).info()
        for item in response["parts"]:
            xml += get_movie_xml(item)
    save_to_db(response, url)
    # Paged endpoints report "total_pages"; append a Next Page entry that
    # re-enters this route with the page number bumped by one.
    if page < response.get("total_pages", 0):
        base = url.split("/")
        if base[-1].isdigit():
            base = base[:-1]
        next_url = "/".join(base) + "/" + str(page + 1)
        xml += "<dir>"\
               "<title>Next Page >></title>"\
               "<tmdb>%s</tmdb>"\
               "<summary>Go To Page %s</summary>"\
               "</dir>" % (next_url, page + 1)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def get_movie_xml(item):
    """Build a Jen ``<item>`` XML fragment for one TMDB movie dict.

    ``item`` is a raw TMDB API result.  Person-credit results may omit
    ``release_date``/``poster_path`` entirely (the API can also return
    them as null), so missing fields degrade to empty strings instead of
    raising ``KeyError``/``TypeError``.
    """
    title = remove_non_ascii(item["title"])
    # .get(...) or "" guards both a missing key and an explicit null.
    year = (item.get("release_date") or "").split("-")[0]
    summary = item.get("overview", "")
    if summary:
        summary = remove_non_ascii(summary)
    if item.get("poster_path"):
        thumbnail = "https://image.tmdb.org/t/p/w1280/" + item["poster_path"]
    else:
        thumbnail = ""
    if item.get("backdrop_path", ""):
        fanart = "https://image.tmdb.org/t/p/w1280/" + item["backdrop_path"]
    else:
        fanart = ""
    xml = "<item>" \
          "<title>%s</title>" \
          "<meta>" \
          "<content>movie</content>" \
          "<title>%s</title>" \
          "<year>%s</year>" \
          "</meta>" \
          "<link>" \
          "<sublink>search</sublink>" \
          "<sublink>searchsd</sublink>" \
          "</link>" \
          "<thumbnail>%s</thumbnail>" \
          "<fanart>%s</fanart>" \
          "<summary>%s</summary>"\
          "</item>" % (title, title, year, thumbnail, fanart, summary)
    return xml
def get_show_xml(item):
    """Build a Jen ``<dir>`` XML fragment for one TMDB TV-show dict.

    ``item`` is a raw TMDB API result.  Person TV-credit results may omit
    ``first_air_date``/``overview`` (or return them as null), so missing
    fields degrade to empty strings instead of raising.
    """
    title = remove_non_ascii(item["name"])
    # .get(...) or "" guards both a missing key and an explicit null.
    year = (item.get("first_air_date") or "").split("-")[0]
    tmdb_id = item["id"]
    summary = remove_non_ascii(item.get("overview") or "")
    if item.get("poster_path"):
        thumbnail = "https://image.tmdb.org/t/p/w1280/" + item["poster_path"]
    else:
        thumbnail = ""
    if item.get("backdrop_path", ""):
        fanart = "https://image.tmdb.org/t/p/w1280/" + item["backdrop_path"]
    else:
        fanart = ""
    xml = "<dir>"\
          "<title>%s</title>"\
          "<meta>"\
          "<content>tvshow</content>"\
          "<tvshowtitle>%s</tvshowtitle>"\
          "<year>%s</year>"\
          "</meta>"\
          "<link>tmdb_tv_show(%s, %s, %s)</link>"\
          "<thumbnail>%s</thumbnail>" \
          "<fanart>%s</fanart>"\
          "<summary>%s</summary>"\
          "</dir>" % (title, title, year, tmdb_id, year, title,
                      thumbnail, fanart, summary)
    return xml
def get_season_xml(item, tmdb_id, year, tvtitle):
    """Render one TMDB season dict as a Jen ``<dir>`` fragment.

    The embedded ``tmdb_season(...)`` link re-enters this plugin with the
    show id, season number, year and show title.
    """
    image_base = "https://image.tmdb.org/t/p/w1280/"
    season = item["season_number"]
    thumbnail = image_base + item["poster_path"] if item["poster_path"] else ""
    backdrop = item.get("backdrop_path", "")
    fanart = image_base + item["backdrop_path"] if backdrop else ""
    link = "tmdb_season(%s,%s, %s, %s)" % (tmdb_id, season, year, tvtitle)
    fragments = (
        "<dir>",
        "<title>Season %s</title>" % season,
        "<meta>",
        "<content>season</content>",
        "<season>%s</season>" % season,
        "</meta>",
        "<thumbnail>%s</thumbnail>" % thumbnail,
        "<fanart>%s</fanart>" % fanart,
        "<link>%s</link>" % link,
        "</dir>",
    )
    return "".join(fragments)
def get_episode_xml(item, tmdb_id, year, tvtitle):
    """Render one TMDB episode dict as a Jen ``<item>`` fragment."""
    image_base = "https://image.tmdb.org/t/p/w1280/"
    title = remove_non_ascii(item["name"])
    season = item["season_number"]
    episode = item["episode_number"]
    premiered = item["air_date"]
    thumbnail = image_base + item["still_path"] if item["still_path"] else ""
    backdrop = item.get("backdrop_path", "")
    fanart = image_base + item["backdrop_path"] if backdrop else ""
    summary = remove_non_ascii(item["overview"])
    fragments = (
        "<item>",
        "<title>%s</title>" % title,
        "<meta>",
        "<content>episode</content>",
        "<tvshowtitle>%s</tvshowtitle>" % tvtitle,
        "<year>%s</year>" % year,
        "<title>%s</title>" % title,
        "<premiered>%s</premiered>" % premiered,
        "<season>%s</season>" % season,
        "<episode>%s</episode>" % episode,
        "</meta>",
        "<link>",
        "<sublink>search</sublink>",
        "<sublink>searchsd</sublink>",
        "</link>",
        "<thumbnail>%s</thumbnail>" % thumbnail,
        "<fanart>%s</fanart>" % fanart,
        "<summary>%s</summary>" % summary,
        "</item>",
    )
    return "".join(fragments)
@route(mode='tmdb_tv_show', args=["url"])
def tmdb_tv_show(url):
    """List a show's seasons.  *url* is "tmdb_id<id>,<year>,<title>"."""
    response = fetch_from_db(url)
    tmdb_id, year, tvtitle = url.replace("tmdb_id", "").split(",")
    if not response:
        response = tmdbsimple.TV(tmdb_id).info()
        save_to_db(response, url)
    seasons = response["seasons"]
    xml = ""
    for season in seasons:
        xml += get_season_xml(season, tmdb_id, year, tvtitle)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
@route(mode='tmdb_season', args=["url"])
def tmdb_season(url):
    """List a season's episodes.  *url* is "tmdb_id<id>,<season>,<year>,<title>"."""
    response = fetch_from_db(url)
    tmdb_id, season, year, tvtitle = url.replace("tmdb_id", "").split(",")
    if not response:
        response = tmdbsimple.TV_Seasons(tmdb_id, season).info()
        save_to_db(response, url)
    episodes = response["episodes"]
    xml = ""
    for episode in episodes:
        xml += get_episode_xml(episode, tmdb_id, year, tvtitle)
    jenlist = JenList(xml)
    display_list(jenlist.get_list(), jenlist.get_content_type())
def remove_non_ascii(text):
    """Transliterate *text* into its closest plain-ASCII equivalent."""
    ascii_text = unidecode(text)
    return ascii_text
def save_to_db(item, url):
    """Cache a (pickled) API response under *url*, replacing any old row."""
    # Delete-then-insert because the table has a unique constraint on url.
    koding.Remove_From_Table(
        "tmdb_plugin",
        {
            "url": url
        })
    koding.Add_To_Table("tmdb_plugin",
                        {
                            "url": url,
                            "item": pickle.dumps(item),
                            "created": time.time()
                        })
def fetch_from_db(url):
    """Return the cached TMDB response for *url*.

    Returns the unpickled response when a fresh cache row exists,
    ``None`` when a row exists but holds no data, and ``[]`` (a falsy
    miss) when there is no row or the row is older than ``CACHE_TIME``
    seconds.  Creates the backing table on first use.
    """
    tmdb_plugin_spec = {
        "columns": {
            "url": "TEXT",
            "item": "TEXT",
            "created": "TEXT"
        },
        "constraints": {
            "unique": "url"
        }
    }
    koding.Create_Table("tmdb_plugin", tmdb_plugin_spec)
    match = koding.Get_From_Table(
        "tmdb_plugin", {"url": url})
    if not match:
        return []
    match = match[0]
    if not match["item"]:
        return None
    created_time = match["created"]
    # A row is fresh only if it was written within the last CACHE_TIME
    # seconds.  (The previous check, `created <= time.time() + CACHE_TIME`,
    # was true for every past timestamp, so stale rows were served forever.)
    if created_time and time.time() - float(created_time) <= CACHE_TIME:
        match_item = match["item"].replace("'", "\"")
        try:
            match_item = match_item.encode('ascii', 'ignore')
        except:
            match_item = match_item.decode('utf-8').encode('ascii', 'ignore')
        return pickle.loads(match_item)
    return []
| |
"""
Imports the SpatialRefSys and GeometryColumns models dependent on the
spatial database backend.
"""
import re
from django.conf import settings
# Checking for the presence of GDAL (needed for the SpatialReference object)
from django.contrib.gis.gdal import HAS_GDAL, PYTHON23
if HAS_GDAL:
from django.contrib.gis.gdal import SpatialReference
class SpatialRefSysMixin(object):
    """
    The SpatialRefSysMixin is a class used by the database-dependent
    SpatialRefSys objects to reduce redundant code.
    """
    # For pulling out the spheroid from the spatial reference string. This
    # regular expression is used only if the user does not have GDAL installed.
    # TODO: Flattening not used in all ellipsoids, could also be a minor axis,
    # or 'b' parameter.
    spheroid_regex = re.compile(r'.+SPHEROID\[\"(?P<name>.+)\",(?P<major>\d+(\.\d+)?),(?P<flattening>\d{3}\.\d+),')
    # For pulling out the units on platforms w/o GDAL installed.
    # TODO: Figure out how to pull out angular units of projected coordinate system and
    # fix for LOCAL_CS types. GDAL should be highly recommended for performing
    # distance queries.
    units_regex = re.compile(r'.+UNIT ?\["(?P<unit_name>[\w \'\(\)]+)", ?(?P<unit>[\d\.]+)(,AUTHORITY\["(?P<unit_auth_name>[\w \'\(\)]+)","(?P<unit_auth_val>\d+)"\])?\]([\w ]+)?(,AUTHORITY\["(?P<auth_name>[\w \'\(\)]+)","(?P<auth_val>\d+)"\])?\]$')
    def srs(self):
        """
        Returns a GDAL SpatialReference object, if GDAL is installed.
        """
        if HAS_GDAL:
            # TODO: Is caching really necessary here? Is complexity worth it?
            if hasattr(self, '_srs'):
                # Returning a clone of the cached SpatialReference object.
                return self._srs.clone()
            else:
                # Attempting to cache a SpatialReference object.
                # Trying to get from WKT first.
                try:
                    self._srs = SpatialReference(self.wkt)
                    return self.srs
                except Exception, msg:
                    pass
                # Fall back to the PROJ.4 representation.
                try:
                    self._srs = SpatialReference(self.proj4text)
                    return self.srs
                except Exception, msg:
                    pass
                # NOTE: in Python 2 the except-clause name leaks into this
                # scope, so `msg` here is the error from the proj4text attempt.
                raise Exception('Could not get OSR SpatialReference from WKT: %s\nError:\n%s' % (self.wkt, msg))
        else:
            raise Exception('GDAL is not installed.')
    srs = property(srs)
    def ellipsoid(self):
        """
        Returns a tuple of the ellipsoid parameters:
        (semimajor axis, semiminor axis, and inverse flattening).
        """
        if HAS_GDAL:
            return self.srs.ellipsoid
        else:
            # NOTE(review): the regex fallback yields only (major, flattening),
            # not the 3-tuple the docstring promises; get_spheroid() below
            # handles both shapes.
            m = self.spheroid_regex.match(self.wkt)
            if m: return (float(m.group('major')), float(m.group('flattening')))
            else: return None
    ellipsoid = property(ellipsoid)
    def name(self):
        "Returns the projection name."
        return self.srs.name
    name = property(name)
    def spheroid(self):
        "Returns the spheroid name for this spatial reference."
        return self.srs['spheroid']
    spheroid = property(spheroid)
    def datum(self):
        "Returns the datum for this spatial reference."
        return self.srs['datum']
    datum = property(datum)
    def projected(self):
        "Is this Spatial Reference projected?"
        if HAS_GDAL:
            return self.srs.projected
        else:
            return self.wkt.startswith('PROJCS')
    projected = property(projected)
    def local(self):
        "Is this Spatial Reference local?"
        if HAS_GDAL:
            return self.srs.local
        else:
            return self.wkt.startswith('LOCAL_CS')
    local = property(local)
    def geographic(self):
        "Is this Spatial Reference geographic?"
        if HAS_GDAL:
            return self.srs.geographic
        else:
            return self.wkt.startswith('GEOGCS')
    geographic = property(geographic)
    def linear_name(self):
        "Returns the linear units name."
        if HAS_GDAL:
            return self.srs.linear_name
        elif self.geographic:
            # Geographic systems have angular, not linear, units.
            return None
        else:
            m = self.units_regex.match(self.wkt)
            return m.group('unit_name')
    linear_name = property(linear_name)
    def linear_units(self):
        "Returns the linear units."
        if HAS_GDAL:
            return self.srs.linear_units
        elif self.geographic:
            return None
        else:
            m = self.units_regex.match(self.wkt)
            return m.group('unit')
    linear_units = property(linear_units)
    def angular_name(self):
        "Returns the name of the angular units."
        if HAS_GDAL:
            return self.srs.angular_name
        elif self.projected:
            # Projected systems have linear, not angular, units.
            return None
        else:
            m = self.units_regex.match(self.wkt)
            return m.group('unit_name')
    angular_name = property(angular_name)
    def angular_units(self):
        "Returns the angular units."
        if HAS_GDAL:
            return self.srs.angular_units
        elif self.projected:
            return None
        else:
            m = self.units_regex.match(self.wkt)
            return m.group('unit')
    angular_units = property(angular_units)
    def units(self):
        "Returns a tuple of the units and the name."
        if self.projected or self.local:
            return (self.linear_units, self.linear_name)
        elif self.geographic:
            return (self.angular_units, self.angular_name)
        else:
            return (None, None)
    units = property(units)
    def get_units(cls, wkt):
        """
        Class method used by GeometryField on initialization to
        retrieve the units on the given WKT, without having to use
        any of the database fields.
        """
        if HAS_GDAL:
            return SpatialReference(wkt).units
        else:
            m = cls.units_regex.match(wkt)
            return m.group('unit'), m.group('unit_name')
    get_units = classmethod(get_units)
    def get_spheroid(cls, wkt, string=True):
        """
        Class method used by GeometryField on initialization to
        retrieve the `SPHEROID[..]` parameters from the given WKT.
        """
        if HAS_GDAL:
            sphere_params = SpatialReference(wkt).ellipsoid
            sphere_name = SpatialReference(wkt)['spheroid']
        else:
            m = cls.spheroid_regex.match(wkt)
            if m:
                sphere_params = (float(m.group('major')), float(m.group('flattening')))
                sphere_name = m.group('name')
            else:
                return None
        if not string:
            return sphere_name, sphere_params
        else:
            # `string` parameter used to place in format acceptable by PostGIS
            # GDAL's ellipsoid is (major, minor, flattening); the regex
            # fallback is (major, flattening) -- handle both shapes.
            if len(sphere_params) == 3:
                radius, flattening = sphere_params[0], sphere_params[2]
            else:
                radius, flattening = sphere_params
            return 'SPHEROID["%s",%s,%s]' % (sphere_name, radius, flattening)
    get_spheroid = classmethod(get_spheroid)
    def __unicode__(self):
        """
        Returns the string representation. If GDAL is installed,
        it will be 'pretty' OGC WKT.
        """
        try:
            return unicode(self.srs)
        except:
            return unicode(self.wkt)
# Django test suite on 2.3 platforms will choke on code inside this
# conditional.
if not PYTHON23:
    try:
        # try/except'ing the importation of SpatialBackend. Have to fail
        # silently because this module may be inadvertently invoked by
        # non-GeoDjango users (e.g., when the Django test suite executes
        # the models.py of all contrib apps).
        from django.contrib.gis.db.backend import SpatialBackend
        # MySQL's backend provides no spatial metadata models, so bail out
        # of the definitions below.
        if SpatialBackend.mysql: raise Exception
        # Exposing the SpatialRefSys and GeometryColumns models.
        class SpatialRefSys(SpatialBackend.SpatialRefSys, SpatialRefSysMixin):
            pass
        GeometryColumns = SpatialBackend.GeometryColumns
    except:
        pass
| |
# (c) Copyright 2014 Brocade Communications Systems Inc.
# All Rights Reserved.
#
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Script to push the zone configuration to brocade SAN switches.
"""
import random
import re
from eventlet import greenthread
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _, _LE
from cinder import ssh_utils
from cinder import utils
import cinder.zonemanager.drivers.brocade.fc_zone_constants as ZoneConstant
LOG = logging.getLogger(__name__)
class BrcdFCZoneClientCLI(object):
switch_ip = None
switch_port = '22'
switch_user = 'admin'
switch_pwd = 'none'
patrn = re.compile('[;\s]+')
def __init__(self, ipaddress, username, password, port):
"""initializing the client."""
self.switch_ip = ipaddress
self.switch_port = port
self.switch_user = username
self.switch_pwd = password
self.sshpool = None
def get_active_zone_set(self):
"""Return the active zone configuration.
Return active zoneset from fabric. When none of the configurations
are active then it will return empty map.
:returns: Map -- active zone set map in the following format
{
'zones':
{'openstack50060b0000c26604201900051ee8e329':
['50060b0000c26604', '201900051ee8e329']
},
'active_zone_config': 'OpenStack_Cfg'
}
"""
zone_set = {}
zone = {}
zone_member = None
zone_name = None
switch_data = None
zone_set_name = None
try:
switch_data = self._get_switch_info(
[ZoneConstant.GET_ACTIVE_ZONE_CFG])
except exception.BrocadeZoningCliException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed getting active zone set "
"from fabric %s"), self.switch_ip)
try:
for line in switch_data:
line_split = re.split('\\t', line)
if len(line_split) > 2:
line_split = [x.replace(
'\n', '') for x in line_split]
line_split = [x.replace(
' ',
'') for x in line_split]
if ZoneConstant.CFG_ZONESET in line_split:
zone_set_name = line_split[1]
continue
if line_split[1]:
zone_name = line_split[1]
zone[zone_name] = list()
if line_split[2]:
zone_member = line_split[2]
zone_member_list = zone.get(zone_name)
zone_member_list.append(zone_member)
zone_set[ZoneConstant.CFG_ZONES] = zone
zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG] = zone_set_name
except Exception:
# Incase of parsing error here, it should be malformed cli output.
msg = _("Malformed zone configuration: (switch=%(switch)s "
"zone_config=%(zone_config)s)."
) % {'switch': self.switch_ip,
'zone_config': switch_data}
LOG.exception(msg)
raise exception.FCZoneDriverException(reason=msg)
switch_data = None
return zone_set
def add_zones(self, zones, activate, active_zone_set=None):
"""Add zone configuration.
This method will add the zone configuration passed by user.
input params:
zones - zone names mapped to members.
zone members are colon separated but case-insensitive
{ zonename1:[zonememeber1,zonemember2,...],
zonename2:[zonemember1, zonemember2,...]...}
e.g: {'openstack50060b0000c26604201900051ee8e329':
['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29']
}
activate - True/False
active_zone_set - active zone set dict retrieved from
get_active_zone_set method
"""
LOG.debug("Add Zones - Zones passed: %s", zones)
cfg_name = None
iterator_count = 0
zone_with_sep = ''
if not active_zone_set:
active_zone_set = self.get_active_zone_set()
LOG.debug("Active zone set: %s", active_zone_set)
zone_list = active_zone_set[ZoneConstant.CFG_ZONES]
LOG.debug("zone list: %s", zone_list)
for zone in zones.keys():
# if zone exists, its an update. Delete & insert
# TODO(skolathur): This can be optimized to an update call later
if (zone in zone_list):
try:
self.delete_zones(zone, activate, active_zone_set)
except exception.BrocadeZoningCliException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Deleting zone failed %s"), zone)
LOG.debug("Deleted Zone before insert : %s", zone)
zone_members_with_sep = ';'.join(str(member) for
member in zones[zone])
LOG.debug("Forming command for add zone")
cmd = 'zonecreate "%(zone)s", "%(zone_members_with_sep)s"' % {
'zone': zone,
'zone_members_with_sep': zone_members_with_sep}
LOG.debug("Adding zone, cmd to run %s", cmd)
self.apply_zone_change(cmd.split())
LOG.debug("Created zones on the switch")
if(iterator_count > 0):
zone_with_sep += ';'
iterator_count += 1
zone_with_sep += zone
try:
cfg_name = active_zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG]
cmd = None
if not cfg_name:
cfg_name = ZoneConstant.OPENSTACK_CFG_NAME
cmd = 'cfgcreate "%(zoneset)s", "%(zones)s"' \
% {'zoneset': cfg_name, 'zones': zone_with_sep}
else:
cmd = 'cfgadd "%(zoneset)s", "%(zones)s"' \
% {'zoneset': cfg_name, 'zones': zone_with_sep}
LOG.debug("New zone %s", cmd)
self.apply_zone_change(cmd.split())
if activate:
self.activate_zoneset(cfg_name)
else:
self._cfg_save()
except Exception as e:
self._cfg_trans_abort()
msg = _("Creating and activating zone set failed: "
"(Zone set=%(cfg_name)s error=%(err)s)."
) % {'cfg_name': cfg_name, 'err': six.text_type(e)}
LOG.error(msg)
raise exception.BrocadeZoningCliException(reason=msg)
def activate_zoneset(self, cfgname):
"""Method to Activate the zone config. Param cfgname - ZonesetName."""
cmd_list = [ZoneConstant.ACTIVATE_ZONESET, cfgname]
return self._ssh_execute(cmd_list, True, 1)
def deactivate_zoneset(self):
"""Method to deActivate the zone config."""
return self._ssh_execute([ZoneConstant.DEACTIVATE_ZONESET], True, 1)
def delete_zones(self, zone_names, activate, active_zone_set=None):
"""Delete zones from fabric.
Method to delete the active zone config zones
params zone_names: zoneNames separated by semicolon
params activate: True/False
params active_zone_set: the active zone set dict retrieved
from get_active_zone_set method
"""
active_zoneset_name = None
zone_list = []
if not active_zone_set:
active_zone_set = self.get_active_zone_set()
active_zoneset_name = active_zone_set[
ZoneConstant.ACTIVE_ZONE_CONFIG]
zone_list = active_zone_set[ZoneConstant.CFG_ZONES]
zones = self.patrn.split(''.join(zone_names))
cmd = None
try:
if len(zones) == len(zone_list):
self.deactivate_zoneset()
cmd = 'cfgdelete "%(active_zoneset_name)s"' \
% {'active_zoneset_name': active_zoneset_name}
# Active zoneset is being deleted, hence reset activate flag
activate = False
else:
cmd = 'cfgremove "%(active_zoneset_name)s", "%(zone_names)s"' \
% {'active_zoneset_name': active_zoneset_name,
'zone_names': zone_names
}
LOG.debug("Delete zones: Config cmd to run: %s", cmd)
self.apply_zone_change(cmd.split())
for zone in zones:
self._zone_delete(zone)
if activate:
self.activate_zoneset(active_zoneset_name)
else:
self._cfg_save()
except Exception as e:
msg = _("Deleting zones failed: (command=%(cmd)s error=%(err)s)."
) % {'cmd': cmd, 'err': six.text_type(e)}
LOG.error(msg)
self._cfg_trans_abort()
raise exception.BrocadeZoningCliException(reason=msg)
def get_nameserver_info(self):
"""Get name server data from fabric.
This method will return the connected node port wwn list(local
and remote) for the given switch fabric
"""
cli_output = None
return_list = []
try:
cmd = '%(nsshow)s;%(nscamshow)s' % {
'nsshow': ZoneConstant.NS_SHOW,
'nscamshow': ZoneConstant.NS_CAM_SHOW}
cli_output = self._get_switch_info([cmd])
except exception.BrocadeZoningCliException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed collecting nsshow "
"info for fabric %s"), self.switch_ip)
if (cli_output):
return_list = self._parse_ns_output(cli_output)
cli_output = None
return return_list
def _cfg_save(self):
self._ssh_execute([ZoneConstant.CFG_SAVE], True, 1)
def _zone_delete(self, zone_name):
cmd = 'zonedelete "%(zone_name)s"' % {'zone_name': zone_name}
self.apply_zone_change(cmd.split())
def _cfg_trans_abort(self):
is_abortable = self._is_trans_abortable()
if(is_abortable):
self.apply_zone_change([ZoneConstant.CFG_ZONE_TRANS_ABORT])
def _is_trans_abortable(self):
is_abortable = False
stdout, stderr = None, None
stdout, stderr = self._run_ssh(
[ZoneConstant.CFG_SHOW_TRANS], True, 1)
output = stdout.splitlines()
is_abortable = False
for line in output:
if(ZoneConstant.TRANS_ABORTABLE in line):
is_abortable = True
break
if stderr:
msg = _("Error while checking transaction status: %s") % stderr
raise exception.BrocadeZoningCliException(reason=msg)
else:
return is_abortable
def apply_zone_change(self, cmd_list):
"""Execute zoning cli with no status update.
Executes CLI commands such as addZone where status return is
not expected.
"""
stdout, stderr = None, None
LOG.debug("Executing command via ssh: %s", cmd_list)
stdout, stderr = self._run_ssh(cmd_list, True, 1)
# no output expected, so output means there is an error
if stdout:
msg = _("Error while running zoning CLI: (command=%(cmd)s "
"error=%(err)s).") % {'cmd': cmd_list, 'err': stdout}
LOG.error(msg)
self._cfg_trans_abort()
raise exception.BrocadeZoningCliException(reason=msg)
def is_supported_firmware(self):
"""Check firmware version is v6.4 or higher.
This API checks if the firmware version per the plug-in support level.
This only checks major and minor version.
"""
cmd = ['version']
firmware = 0
try:
stdout, stderr = self._execute_shell_cmd(cmd)
if (stdout):
for line in stdout:
if 'Fabric OS: v' in line:
LOG.debug("Firmware version string: %s", line)
ver = line.split('Fabric OS: v')[1].split('.')
if (ver):
firmware = int(ver[0] + ver[1])
return firmware > 63
else:
LOG.error(_LE("No CLI output for firmware version check"))
return False
except processutils.ProcessExecutionError as e:
msg = _("Error while getting data via ssh: (command=%(cmd)s "
"error=%(err)s).") % {'cmd': cmd, 'err': six.text_type(e)}
LOG.error(msg)
raise exception.BrocadeZoningCliException(reason=msg)
def _get_switch_info(self, cmd_list):
stdout, stderr, sw_data = None, None, None
try:
stdout, stderr = self._run_ssh(cmd_list, True, 1)
if (stdout):
sw_data = stdout.splitlines()
return sw_data
except processutils.ProcessExecutionError as e:
msg = _("Error while getting data via ssh: (command=%(cmd)s "
"error=%(err)s).") % {'cmd': cmd_list,
'err': six.text_type(e)}
LOG.error(msg)
raise exception.BrocadeZoningCliException(reason=msg)
def _parse_ns_output(self, switch_data):
"""Parses name server data.
Parses nameserver raw data and adds the device port wwns to the list
:returns: List -- list of device port wwn from ns info
"""
return_list = []
for line in switch_data:
if not(" NL " in line or " N " in line):
continue
linesplit = line.split(';')
if len(linesplit) > 2:
node_port_wwn = linesplit[2]
return_list.append(node_port_wwn)
else:
msg = _("Malformed nameserver string: %s") % line
LOG.error(msg)
raise exception.InvalidParameterValue(err=msg)
return return_list
def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1):
# TODO(skolathur): Need to implement ssh_injection check
# currently, the check will fail for zonecreate command
# as zone members are separated by ';'which is a danger char
command = ' '. join(cmd_list)
if not self.sshpool:
self.sshpool = ssh_utils.SSHPool(self.switch_ip,
self.switch_port,
None,
self.switch_user,
self.switch_pwd,
min_size=1,
max_size=5)
last_exception = None
try:
with self.sshpool.item() as ssh:
while attempts > 0:
attempts -= 1
try:
return processutils.ssh_execute(
ssh,
command,
check_exit_code=check_exit_code)
except Exception as e:
LOG.exception(_LE('Error executing SSH command.'))
last_exception = e
greenthread.sleep(random.randint(20, 500) / 100.0)
try:
raise processutils.ProcessExecutionError(
exit_code=last_exception.exit_code,
stdout=last_exception.stdout,
stderr=last_exception.stderr,
cmd=last_exception.cmd)
except AttributeError:
raise processutils.ProcessExecutionError(
exit_code=-1,
stdout="",
stderr="Error running SSH command",
cmd=command)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error running SSH command: %s"), command)
def _ssh_execute(self, cmd_list, check_exit_code=True, attempts=1):
"""Execute cli with status update.
Executes CLI commands such as cfgsave where status return is expected.
"""
utils.check_ssh_injection(cmd_list)
command = ' '. join(cmd_list)
if not self.sshpool:
self.sshpool = ssh_utils.SSHPool(self.switch_ip,
self.switch_port,
None,
self.switch_user,
self.switch_pwd,
min_size=1,
max_size=5)
stdin, stdout, stderr = None, None, None
LOG.debug("Executing command via ssh: %s", command)
last_exception = None
try:
with self.sshpool.item() as ssh:
while attempts > 0:
attempts -= 1
try:
stdin, stdout, stderr = ssh.exec_command(command)
stdin.write("%s\n" % ZoneConstant.YES)
channel = stdout.channel
exit_status = channel.recv_exit_status()
LOG.debug("Exit Status from ssh: %s", exit_status)
# exit_status == -1 if no exit code was returned
if exit_status != -1:
LOG.debug('Result was %s', exit_status)
if check_exit_code and exit_status != 0:
raise processutils.ProcessExecutionError(
exit_code=exit_status,
stdout=stdout,
stderr=stderr,
cmd=command)
else:
return True
else:
return True
except Exception as e:
LOG.exception(_LE('Error executing SSH command.'))
last_exception = e
greenthread.sleep(random.randint(20, 500) / 100.0)
LOG.debug("Handling error case after "
"SSH: %s", last_exception)
try:
raise processutils.ProcessExecutionError(
exit_code=last_exception.exit_code,
stdout=last_exception.stdout,
stderr=last_exception.stderr,
cmd=last_exception.cmd)
except AttributeError:
raise processutils.ProcessExecutionError(
exit_code=-1,
stdout="",
stderr="Error running SSH command",
cmd=command)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error executing command via ssh: %s"), e)
finally:
if stdin:
stdin.flush()
stdin.close()
if stdout:
stdout.close()
if stderr:
stderr.close()
def _execute_shell_cmd(self, cmd):
    """Run command over shell for older firmware versions.

    We invoke shell and issue the command and return the output.
    This is primarily used for issuing read commands when we are not sure
    if the firmware supports exec_command.

    :param cmd: command as a list of argument strings
    :returns: tuple (stdout_lines, stderr_lines) as read from the shell
    :raises processutils.ProcessExecutionError: on non-zero exit status
    """
    utils.check_ssh_injection(cmd)
    command = ' '. join(cmd)
    stdout, stderr = None, None
    if not self.sshpool:
        # Lazily build the SSH connection pool on first use.
        self.sshpool = ssh_utils.SSHPool(self.switch_ip,
                                         self.switch_port,
                                         None,
                                         self.switch_user,
                                         self.switch_pwd,
                                         min_size=1,
                                         max_size=5)
    with self.sshpool.item() as ssh:
        LOG.debug('Running cmd (SSH): %s', command)
        channel = ssh.invoke_shell()
        stdin_stream = channel.makefile('wb')
        stdout_stream = channel.makefile('rb')
        stderr_stream = channel.makefile('rb')
        # Send the command followed by "exit" so the remote shell
        # terminates and the readlines() calls below do not block forever.
        stdin_stream.write('''%s
exit
''' % command)
        stdin_stream.flush()
        stdout = stdout_stream.readlines()
        stderr = stderr_stream.readlines()
        stdin_stream.close()
        stdout_stream.close()
        stderr_stream.close()
        exit_status = channel.recv_exit_status()
        # exit_status == -1 if no exit code was returned
        if exit_status != -1:
            LOG.debug('Result was %s', exit_status)
            if exit_status != 0:
                LOG.debug("command %s failed", command)
                raise processutils.ProcessExecutionError(
                    exit_code=exit_status,
                    stdout=stdout,
                    stderr=stderr,
                    cmd=command)
        # Best-effort channel close; a failure here is only logged.
        try:
            channel.close()
        except Exception:
            LOG.exception(_LE('Error closing channel.'))
        LOG.debug("_execute_cmd: stderr to return: %s", stderr)
        return (stdout, stderr)
def cleanup(self):
    """Drop the cached SSH connection pool so it can be garbage collected."""
    self.sshpool = None
| |
from django.core.mail import EmailMessage
from django.core.cache import cache
from django.template.loader import render_to_string
from django.conf import settings
from django.contrib.admin.models import LogEntry, CHANGE, ADDITION
from django.contrib.contenttypes.models import ContentType
from sponsors.models import Sponsorship, Contract, BenefitFeature
class BaseEmailSponsorshipNotification:
    """Base class for e-mail notifications triggered by sponsorship events.

    Subclasses declare the subject/body templates, the context keys they
    consume and who receives the message.
    """

    subject_template = None
    message_template = None
    email_context_keys = None

    def get_subject(self, context):
        """Render the subject template, stripped of surrounding whitespace."""
        return render_to_string(self.subject_template, context).strip()

    def get_message(self, context):
        """Render the message body template, stripped of whitespace."""
        return render_to_string(self.message_template, context).strip()

    def get_recipient_list(self, context):
        """Subclasses must return the list of recipient addresses."""
        raise NotImplementedError

    def get_attachments(self, context):
        """
        Returns list with attachments tuples (filename, content, mime type)
        """
        return []

    def get_email_context(self, **kwargs):
        """Keep only the kwargs this notification declares as relevant."""
        context = {}
        for key in self.email_context_keys:
            context[key] = kwargs.get(key)
        return context

    def notify(self, **kwargs):
        """Build the e-mail, add any attachments and send it."""
        context = self.get_email_context(**kwargs)
        message = EmailMessage(
            subject=self.get_subject(context),
            body=self.get_message(context),
            to=self.get_recipient_list(context),
            from_email=settings.SPONSORSHIP_NOTIFICATION_FROM_EMAIL,
        )
        for attachment in self.get_attachments(context):
            message.attach(*attachment)
        message.send()
class AppliedSponsorshipNotificationToPSF(BaseEmailSponsorshipNotification):
    """Tell PSF staff that a new sponsorship application arrived."""

    subject_template = "sponsors/email/psf_new_application_subject.txt"
    message_template = "sponsors/email/psf_new_application.txt"
    email_context_keys = ["request", "sponsorship"]

    def get_recipient_list(self, context):
        # Staff notifications always go to the configured PSF address.
        return [settings.SPONSORSHIP_NOTIFICATION_TO_EMAIL]
class AppliedSponsorshipNotificationToSponsors(BaseEmailSponsorshipNotification):
    """Acknowledge a new application to the sponsor's verified contacts."""

    subject_template = "sponsors/email/sponsor_new_application_subject.txt"
    message_template = "sponsors/email/sponsor_new_application.txt"
    email_context_keys = ["sponsorship", "request"]

    def get_recipient_list(self, context):
        sponsorship = context["sponsorship"]
        return sponsorship.verified_emails

    def get_email_context(self, **kwargs):
        context = super().get_email_context(**kwargs)
        # List the assets the sponsor still has to provide.
        features = BenefitFeature.objects.from_sponsorship(context["sponsorship"])
        context["required_assets"] = features.required_assets()
        return context
class RejectedSponsorshipNotificationToPSF(BaseEmailSponsorshipNotification):
    """Tell PSF staff that a sponsorship application was rejected."""

    subject_template = "sponsors/email/psf_rejected_sponsorship_subject.txt"
    message_template = "sponsors/email/psf_rejected_sponsorship.txt"
    email_context_keys = ["sponsorship"]

    def get_recipient_list(self, context):
        # Staff notifications always go to the configured PSF address.
        return [settings.SPONSORSHIP_NOTIFICATION_TO_EMAIL]
class RejectedSponsorshipNotificationToSponsors(BaseEmailSponsorshipNotification):
    """Inform the sponsor's verified contacts of a rejected application."""

    subject_template = "sponsors/email/sponsor_rejected_sponsorship_subject.txt"
    message_template = "sponsors/email/sponsor_rejected_sponsorship.txt"
    email_context_keys = ["sponsorship"]

    def get_recipient_list(self, context):
        sponsorship = context["sponsorship"]
        return sponsorship.verified_emails
class ContractNotificationToPSF(BaseEmailSponsorshipNotification):
    """Send the generated contract PDF to PSF staff."""

    subject_template = "sponsors/email/psf_contract_subject.txt"
    message_template = "sponsors/email/psf_contract.txt"
    email_context_keys = ["contract"]

    def get_recipient_list(self, context):
        return [settings.SPONSORSHIP_NOTIFICATION_TO_EMAIL]

    def get_attachments(self, context):
        """Return the contract PDF as a single (name, content, mime) tuple."""
        pdf_document = context["contract"].document
        with pdf_document.open("rb") as fd:
            pdf_bytes = fd.read()
        return [("Contract.pdf", pdf_bytes, "application/pdf")]
class ContractNotificationToSponsors(BaseEmailSponsorshipNotification):
    """Send the contract document to the sponsorship's verified contacts."""

    subject_template = "sponsors/email/sponsor_contract_subject.txt"
    message_template = "sponsors/email/sponsor_contract.txt"
    email_context_keys = ["contract"]

    def get_recipient_list(self, context):
        return context["contract"].sponsorship.verified_emails

    def get_attachments(self, context):
        """Attach the DOCX contract when available, else the PDF fallback.

        Returns a list with one (filename, content, mime type) tuple.
        """
        contract = context["contract"]
        if contract.document_docx:
            document = contract.document_docx
            ext, app_type = "docx", "msword"
        else:  # fallback to PDF for existing contracts
            document = contract.document
            ext, app_type = "pdf", "pdf"
        # Bug fix: `document` was previously reset to contract.document here
        # unconditionally, which discarded the DOCX selection above and sent
        # PDF bytes under a .docx filename.
        with document.open("rb") as fd:
            content = fd.read()
        return [(f"Contract.{ext}", content, f"application/{app_type}")]
class SponsorshipApprovalLogger():
    """Write admin LogEntry rows when a sponsorship gets approved."""

    def notify(self, request, sponsorship, contract, **kwargs):
        sponsorship_ct = ContentType.objects.get_for_model(Sponsorship)
        contract_ct = ContentType.objects.get_for_model(Contract)
        # One entry for the approved sponsorship...
        LogEntry.objects.log_action(
            user_id=request.user.id,
            content_type_id=sponsorship_ct.pk,
            object_id=sponsorship.pk,
            object_repr=str(sponsorship),
            action_flag=CHANGE,
            change_message="Sponsorship Approval",
        )
        # ...and one for the contract created by the approval.
        LogEntry.objects.log_action(
            user_id=request.user.id,
            content_type_id=contract_ct.pk,
            object_id=contract.pk,
            object_repr=str(contract),
            action_flag=ADDITION,
            change_message="Created After Sponsorship Approval",
        )
class SentContractLogger():
    """Write an admin LogEntry row when a contract is sent."""

    def notify(self, request, contract, **kwargs):
        contract_ct = ContentType.objects.get_for_model(Contract)
        LogEntry.objects.log_action(
            user_id=request.user.id,
            content_type_id=contract_ct.pk,
            object_id=contract.pk,
            object_repr=str(contract),
            action_flag=CHANGE,
            change_message="Contract Sent",
        )
class ExecutedContractLogger():
    """Write an admin LogEntry row when a contract is executed."""

    def notify(self, request, contract, **kwargs):
        contract_ct = ContentType.objects.get_for_model(Contract)
        LogEntry.objects.log_action(
            user_id=request.user.id,
            content_type_id=contract_ct.pk,
            object_id=contract.pk,
            object_repr=str(contract),
            action_flag=CHANGE,
            change_message="Contract Executed",
        )
class ExecutedExistingContractLogger():
    """Write an admin LogEntry row when an existing contract is uploaded
    and marked executed."""

    def notify(self, request, contract, **kwargs):
        contract_ct = ContentType.objects.get_for_model(Contract)
        LogEntry.objects.log_action(
            user_id=request.user.id,
            content_type_id=contract_ct.pk,
            object_id=contract.pk,
            object_repr=str(contract),
            action_flag=CHANGE,
            change_message="Existing Contract Uploaded and Executed",
        )
class NullifiedContractLogger():
    """Write an admin LogEntry row when a contract is nullified."""

    def notify(self, request, contract, **kwargs):
        contract_ct = ContentType.objects.get_for_model(Contract)
        LogEntry.objects.log_action(
            user_id=request.user.id,
            content_type_id=contract_ct.pk,
            object_id=contract.pk,
            object_repr=str(contract),
            action_flag=CHANGE,
            change_message="Contract Nullified",
        )
class SendSponsorNotificationLogger():
    """Record in the admin log which contacts received a notification."""

    def notify(self, notification, sponsorship, contact_types, request, **kwargs):
        contacts = ", ".join(contact_types)
        msg = f"Notification '{notification.internal_name}' was sent to contacts: {contacts}"
        sponsorship_ct = ContentType.objects.get_for_model(Sponsorship)
        LogEntry.objects.log_action(
            user_id=request.user.id,
            content_type_id=sponsorship_ct.pk,
            object_id=sponsorship.pk,
            object_repr=str(sponsorship),
            action_flag=CHANGE,
            change_message=msg,
        )
class RefreshSponsorshipsCache:
    """Invalidate the cached sponsor list whenever sponsorships change."""

    def notify(self, *args, **kwargs):
        # Drop the cache entry used by "sponsors/partials/sponsors-list.html";
        # it gets rebuilt on the next render.
        cache.delete("CACHED_SPONSORS_LIST")
class AssetCloseToDueDateNotificationToSponsors(BaseEmailSponsorshipNotification):
    """Warn sponsors about required assets approaching their due date."""

    subject_template = "sponsors/email/sponsor_expiring_assets_subject.txt"
    message_template = "sponsors/email/sponsor_expiring_assets.txt"
    email_context_keys = ["sponsorship", "required_assets", "due_date", "days"]

    def get_recipient_list(self, context):
        sponsorship = context["sponsorship"]
        return sponsorship.verified_emails

    def get_email_context(self, **kwargs):
        context = super().get_email_context(**kwargs)
        # Always recompute the outstanding assets from the sponsorship itself,
        # overriding whatever the caller passed in.
        features = BenefitFeature.objects.from_sponsorship(context["sponsorship"])
        context["required_assets"] = features.required_assets()
        return context
| |
import unittest
import numpy as np
import pysal
#from pysal.spreg.twosls_sp import BaseGM_Lag, GM_Lag
import pysal.spreg.diagnostics as D
from scipy import sparse as SP
from functools import partial
from pysal.contrib.handler import Model
# Route GM_Lag / BaseGM_Lag through the pysal.contrib handler's Model facade
# (dispatch by mtype) instead of importing pysal.spreg.twosls_sp directly.
GM_Lag = partial(Model, mtype='GM_Lag')
BaseGM_Lag = partial(Model, mtype='BaseGM_Lag')
class TestBaseGMLag(unittest.TestCase):
    """Regression tests for BaseGM_Lag (spatial two-stage least squares)
    against precomputed reference results on the columbus example data."""

    def setUp(self):
        # Row-standardized rook contiguity weights; y = housing values (HOVAL).
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'
        self.db = pysal.open(pysal.examples.get_path("columbus.dbf"), 'r')
        y = np.array(self.db.by_col("HOVAL"))
        self.y = np.reshape(y, (49,1))

    def test___init__(self):
        # Full attribute check of the fitted model with default settings.
        X = []
        X.append(self.db.by_col("INC"))
        X.append(self.db.by_col("CRIME"))
        self.X = np.array(X).T
        yd2, q2 = pysal.spreg.utils.set_endog(self.y, self.X, self.w, None, None, 2, True)
        self.X = np.hstack((np.ones(self.y.shape),self.X))
        self.X = SP.csr_matrix(self.X)
        reg = BaseGM_Lag(self.y, self.X, yend=yd2, q=q2, w=self.w, w_lags=2)
        betas = np.array([[ 4.53017056e+01], [ 6.20888617e-01], [ -4.80723451e-01], [ 2.83622122e-02]])
        np.testing.assert_array_almost_equal(reg.betas, betas, 7)
        h_0 = np.array([ 1. , 19.531 , 15.72598 , 18.594 ,
                         24.7142675 , 13.72216667, 27.82929567])
        np.testing.assert_array_almost_equal(reg.h.toarray()[0], h_0)
        hth = np.array([ 49. , 704.371999 , 1721.312371 , 724.7435916 ,
                         1707.35412945, 711.31248483, 1729.63201243])
        np.testing.assert_array_almost_equal(reg.hth[0], hth, 7)
        hthi = np.array([ 7.33701328e+00, 2.27764882e-02, 2.18153588e-02,
                         -5.11035447e-02, 1.22515181e-03, -2.38079378e-01,
                         -1.20149133e-01])
        np.testing.assert_array_almost_equal(reg.hthi[0], hthi, 7)
        self.assertEqual(reg.k, 4)
        self.assertEqual(reg.kstar, 1)
        self.assertAlmostEqual(reg.mean_y, 38.436224469387746, 7)
        self.assertEqual(reg.n, 49)
        pfora1a2 = np.array([ 80.5588479 , -1.06625281, -0.61703759, -1.10071931])
        np.testing.assert_array_almost_equal(reg.pfora1a2[0], pfora1a2, 7)
        predy_5 = np.array([[ 50.87411532],[ 50.76969931],[ 41.77223722],[ 33.44262382],[ 28.77418036]])
        np.testing.assert_array_almost_equal(reg.predy[0:5], predy_5, 7)
        q_5 = np.array([ 18.594 , 24.7142675 , 13.72216667, 27.82929567])
        np.testing.assert_array_almost_equal(reg.q[0], q_5)
        self.assertAlmostEqual(reg.sig2n_k, 234.54258763039289, 7)
        self.assertAlmostEqual(reg.sig2n, 215.39625394627919, 7)
        self.assertAlmostEqual(reg.sig2, 215.39625394627919, 7)
        self.assertAlmostEqual(reg.std_y, 18.466069465206047, 7)
        u_5 = np.array( [[ 29.59288768], [ -6.20269831], [-15.42223722], [ -0.24262282], [ -5.54918036]])
        np.testing.assert_array_almost_equal(reg.u[0:5], u_5, 7)
        self.assertAlmostEqual(reg.utu, 10554.41644336768, 7)
        varb = np.array( [[ 1.48966377e+00, -2.28698061e-02, -1.20217386e-02, -1.85763498e-02],
                          [ -2.28698061e-02, 1.27893998e-03, 2.74600023e-04, -1.33497705e-04],
                          [ -1.20217386e-02, 2.74600023e-04, 1.54257766e-04, 6.86851184e-05],
                          [ -1.85763498e-02, -1.33497705e-04, 6.86851184e-05, 4.67711582e-04]])
        np.testing.assert_array_almost_equal(reg.varb, varb, 7)
        vm = np.array([[ 3.20867996e+02, -4.92607057e+00, -2.58943746e+00, -4.00127615e+00],
                       [ -4.92607057e+00, 2.75478880e-01, 5.91478163e-02, -2.87549056e-02],
                       [ -2.58943746e+00, 5.91478163e-02, 3.32265449e-02, 1.47945172e-02],
                       [ -4.00127615e+00, -2.87549056e-02, 1.47945172e-02, 1.00743323e-01]])
        np.testing.assert_array_almost_equal(reg.vm, vm, 6)
        x_0 = np.array([ 1. , 19.531 , 15.72598])
        np.testing.assert_array_almost_equal(reg.x.toarray()[0], x_0, 7)
        y_5 = np.array( [[ 80.467003], [ 44.567001], [ 26.35 ], [ 33.200001], [ 23.225 ]])
        np.testing.assert_array_almost_equal(reg.y[0:5], y_5, 7)
        yend_5 = np.array( [[ 35.4585005 ], [ 46.67233467], [ 45.36475125], [ 32.81675025], [ 30.81785714]])
        np.testing.assert_array_almost_equal(reg.yend[0:5], yend_5, 7)
        z_0 = np.array([ 1. , 19.531 , 15.72598 , 35.4585005])
        np.testing.assert_array_almost_equal(reg.z.toarray()[0], z_0, 7)
        zthhthi = np.array( [[ 1.00000000e+00, -2.22044605e-16, -2.22044605e-16 , 2.22044605e-16,
                               4.44089210e-16, 0.00000000e+00, -8.88178420e-16],
                             [ 0.00000000e+00, 1.00000000e+00, -3.55271368e-15 , 3.55271368e-15,
                               -7.10542736e-15, 7.10542736e-14, 0.00000000e+00],
                             [ 1.81898940e-12, 2.84217094e-14, 1.00000000e+00 , 0.00000000e+00,
                               -2.84217094e-14, 5.68434189e-14, 5.68434189e-14],
                             [ -8.31133940e+00, -3.76104678e-01, -2.07028208e-01 , 1.32618931e+00,
                               -8.04284562e-01, 1.30527047e+00, 1.39136816e+00]])
        np.testing.assert_array_almost_equal(reg.zthhthi, zthhthi, 7)

    def test_init_white_(self):
        # White heteroskedasticity-robust standard errors.
        X = []
        X.append(self.db.by_col("INC"))
        X.append(self.db.by_col("CRIME"))
        self.X = np.array(X).T
        yd2, q2 = pysal.spreg.utils.set_endog(self.y, self.X, self.w, None, None, 2, True)
        self.X = np.hstack((np.ones(self.y.shape),self.X))
        self.X = SP.csr_matrix(self.X)
        base_gm_lag = BaseGM_Lag(self.y, self.X, yend=yd2, q=q2, w=self.w, w_lags=2, robust='white')
        tbetas = np.array([[ 4.53017056e+01], [ 6.20888617e-01], [ -4.80723451e-01], [ 2.83622122e-02]])
        np.testing.assert_array_almost_equal(base_gm_lag.betas, tbetas)
        dbetas = D.se_betas(base_gm_lag)
        se_betas = np.array([ 20.47077481, 0.50613931, 0.20138425, 0.38028295 ])
        np.testing.assert_array_almost_equal(dbetas, se_betas)

    def test_init_hac_(self):
        # HAC-robust standard errors with a triangular kernel weights matrix.
        X = []
        X.append(self.db.by_col("INC"))
        X.append(self.db.by_col("CRIME"))
        self.X = np.array(X).T
        yd2, q2 = pysal.spreg.utils.set_endog(self.y, self.X, self.w, None, None, 2, True)
        self.X = np.hstack((np.ones(self.y.shape),self.X))
        self.X = SP.csr_matrix(self.X)
        gwk = pysal.kernelW_from_shapefile(pysal.examples.get_path('columbus.shp'),k=15,function='triangular', fixed=False)
        base_gm_lag = BaseGM_Lag(self.y, self.X, yend=yd2, q=q2, w=self.w, w_lags=2, robust='hac', gwk=gwk)
        tbetas = np.array([[ 4.53017056e+01], [ 6.20888617e-01], [ -4.80723451e-01], [ 2.83622122e-02]])
        np.testing.assert_array_almost_equal(base_gm_lag.betas, tbetas)
        dbetas = D.se_betas(base_gm_lag)
        se_betas = np.array([ 19.08513569, 0.51769543, 0.18244862, 0.35460553])
        np.testing.assert_array_almost_equal(dbetas, se_betas)

    def test_init_discbd(self):
        # User-supplied endogenous variable (CRIME) with instrument DISCBD.
        X = np.array(self.db.by_col("INC"))
        self.X = np.reshape(X, (49,1))
        yd = np.array(self.db.by_col("CRIME"))
        yd = np.reshape(yd, (49,1))
        q = np.array(self.db.by_col("DISCBD"))
        q = np.reshape(q, (49,1))
        yd2, q2 = pysal.spreg.utils.set_endog(self.y, self.X, self.w, yd, q, 2, True)
        self.X = np.hstack((np.ones(self.y.shape),self.X))
        self.X = SP.csr_matrix(self.X)
        reg = BaseGM_Lag(self.y, self.X, yend=yd2, q=q2, w=self.w, w_lags=2)
        tbetas = np.array([[ 100.79359082], [ -0.50215501], [ -1.14881711], [ -0.38235022]])
        np.testing.assert_array_almost_equal(tbetas, reg.betas)
        dbetas = D.se_betas(reg)
        se_betas = np.array([ 53.0829123 , 1.02511494, 0.57589064, 0.59891744 ])
        np.testing.assert_array_almost_equal(dbetas, se_betas)

    def test_n_k(self):
        # sig2n_k=True: variance uses the n - k degrees-of-freedom correction.
        X = []
        X.append(self.db.by_col("INC"))
        X.append(self.db.by_col("CRIME"))
        self.X = np.array(X).T
        yd2, q2 = pysal.spreg.utils.set_endog(self.y, self.X, self.w, None, None, 2, True)
        self.X = np.hstack((np.ones(self.y.shape),self.X))
        self.X = SP.csr_matrix(self.X)
        reg = BaseGM_Lag(self.y, self.X, yend=yd2, q=q2, w=self.w, w_lags=2, sig2n_k=True)
        betas = np. array([[ 4.53017056e+01], [ 6.20888617e-01], [ -4.80723451e-01], [ 2.83622122e-02]])
        np.testing.assert_array_almost_equal(reg.betas, betas, 7)
        vm = np.array( [[ 3.49389596e+02, -5.36394351e+00, -2.81960968e+00, -4.35694515e+00],
                        [ -5.36394351e+00, 2.99965892e-01, 6.44054000e-02, -3.13108972e-02],
                        [ -2.81960968e+00, 6.44054000e-02, 3.61800155e-02, 1.61095854e-02],
                        [ -4.35694515e+00, -3.13108972e-02, 1.61095854e-02, 1.09698285e-01]])
        np.testing.assert_array_almost_equal(reg.vm, vm, 7)

    def test_lag_q(self):
        # lag_q=False: instruments are not spatially lagged.
        X = np.array(self.db.by_col("INC"))
        self.X = np.reshape(X, (49,1))
        yd = np.array(self.db.by_col("CRIME"))
        yd = np.reshape(yd, (49,1))
        q = np.array(self.db.by_col("DISCBD"))
        q = np.reshape(q, (49,1))
        yd2, q2 = pysal.spreg.utils.set_endog(self.y, self.X, self.w, yd, q, 2, False)
        self.X = np.hstack((np.ones(self.y.shape),self.X))
        self.X = SP.csr_matrix(self.X)
        reg = BaseGM_Lag(self.y, self.X, yend=yd2, q=q2, w=self.w, w_lags=2, lag_q=False)
        tbetas = np.array( [[ 108.83261383], [ -0.48041099], [ -1.18950006], [ -0.56140186]])
        np.testing.assert_array_almost_equal(tbetas, reg.betas)
        dbetas = D.se_betas(reg)
        se_betas = np.array([ 58.33203837, 1.09100446, 0.62315167, 0.68088777])
        np.testing.assert_array_almost_equal(dbetas, se_betas)
class TestGMLag(unittest.TestCase):
    """Regression tests for the user-facing GM_Lag wrapper (which builds
    its own instruments) against precomputed results on the columbus data."""

    def setUp(self):
        # Row-standardized rook contiguity weights; y = housing values (HOVAL).
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'
        self.db = pysal.open(pysal.examples.get_path("columbus.dbf"), 'r')
        y = np.array(self.db.by_col("HOVAL"))
        self.y = np.reshape(y, (49,1))

    def test___init__(self):
        # Full attribute check, including prediction-based attributes
        # (e_pred, predy_e, pr2, pr2_e) not exposed by BaseGM_Lag.
        X = []
        X.append(self.db.by_col("INC"))
        X.append(self.db.by_col("CRIME"))
        self.X = np.array(X).T
        self.X = SP.csr_matrix(self.X)
        reg = GM_Lag(self.y, self.X, w=self.w, w_lags=2)
        betas = np.array([[ 4.53017056e+01], [ 6.20888617e-01], [ -4.80723451e-01], [ 2.83622122e-02]])
        np.testing.assert_array_almost_equal(reg.betas, betas, 7)
        e_5 = np.array( [[ 29.28976367], [ -6.07439501], [-15.30080685], [ -0.41773375], [ -5.67197968]])
        np.testing.assert_array_almost_equal(reg.e_pred[0:5], e_5, 7)
        h_0 = np.array([ 1. , 19.531 , 15.72598 , 18.594 ,
                         24.7142675 , 13.72216667, 27.82929567])
        np.testing.assert_array_almost_equal(reg.h.toarray()[0], h_0)
        hth = np. array([ 49. , 704.371999 , 1721.312371 , 724.7435916 ,
                          1707.35412945, 711.31248483, 1729.63201243])
        np.testing.assert_array_almost_equal(reg.hth[0], hth, 7)
        hthi = np.array([ 7.33701328e+00, 2.27764882e-02, 2.18153588e-02,
                         -5.11035447e-02, 1.22515181e-03, -2.38079378e-01,
                         -1.20149133e-01])
        np.testing.assert_array_almost_equal(reg.hthi[0], hthi, 7)
        self.assertEqual(reg.k, 4)
        self.assertEqual(reg.kstar, 1)
        self.assertAlmostEqual(reg.mean_y, 38.436224469387746, 7)
        self.assertEqual(reg.n, 49)
        pfora1a2 = np.array([ 80.5588479 , -1.06625281, -0.61703759, -1.10071931])
        self.assertAlmostEqual(reg.pr2, 0.3551928222612527, 7)
        self.assertAlmostEqual(reg.pr2_e, 0.34763857386174174, 7)
        np.testing.assert_array_almost_equal(reg.pfora1a2[0], pfora1a2, 7)
        predy_5 = np.array([[ 50.87411532],[ 50.76969931],[ 41.77223722],[ 33.44262382],[ 28.77418036]])
        np.testing.assert_array_almost_equal(reg.predy[0:5], predy_5, 7)
        predy_e_5 = np.array( [[ 51.17723933], [ 50.64139601], [ 41.65080685], [ 33.61773475], [ 28.89697968]])
        np.testing.assert_array_almost_equal(reg.predy_e[0:5], predy_e_5, 7)
        q_5 = np.array([ 18.594 , 24.7142675 , 13.72216667, 27.82929567])
        np.testing.assert_array_almost_equal(reg.q.toarray()[0], q_5)
        self.assertEqual(reg.robust, 'unadjusted')
        self.assertAlmostEqual(reg.sig2n_k, 234.54258763039289, 7)
        self.assertAlmostEqual(reg.sig2n, 215.39625394627919, 7)
        self.assertAlmostEqual(reg.sig2, 215.39625394627919, 7)
        self.assertAlmostEqual(reg.std_y, 18.466069465206047, 7)
        u_5 = np.array( [[ 29.59288768], [ -6.20269831], [-15.42223722], [ -0.24262282], [ -5.54918036]])
        np.testing.assert_array_almost_equal(reg.u[0:5], u_5, 7)
        self.assertAlmostEqual(reg.utu, 10554.41644336768, 7)
        varb = np.array( [[ 1.48966377e+00, -2.28698061e-02, -1.20217386e-02, -1.85763498e-02],
                          [ -2.28698061e-02, 1.27893998e-03, 2.74600023e-04, -1.33497705e-04],
                          [ -1.20217386e-02, 2.74600023e-04, 1.54257766e-04, 6.86851184e-05],
                          [ -1.85763498e-02, -1.33497705e-04, 6.86851184e-05, 4.67711582e-04]])
        np.testing.assert_array_almost_equal(reg.varb, varb, 7)
        vm = np.array([[ 3.20867996e+02, -4.92607057e+00, -2.58943746e+00, -4.00127615e+00],
                       [ -4.92607057e+00, 2.75478880e-01, 5.91478163e-02, -2.87549056e-02],
                       [ -2.58943746e+00, 5.91478163e-02, 3.32265449e-02, 1.47945172e-02],
                       [ -4.00127615e+00, -2.87549056e-02, 1.47945172e-02, 1.00743323e-01]])
        np.testing.assert_array_almost_equal(reg.vm, vm, 6)
        x_0 = np.array([ 1. , 19.531 , 15.72598])
        np.testing.assert_array_almost_equal(reg.x.toarray()[0], x_0, 7)
        y_5 = np.array( [[ 80.467003], [ 44.567001], [ 26.35 ], [ 33.200001], [ 23.225 ]])
        np.testing.assert_array_almost_equal(reg.y[0:5], y_5, 7)
        yend_5 = np.array( [[ 35.4585005 ], [ 46.67233467], [ 45.36475125], [ 32.81675025], [ 30.81785714]])
        np.testing.assert_array_almost_equal(reg.yend[0:5], yend_5, 7)
        z_0 = np.array([ 1. , 19.531 , 15.72598 , 35.4585005])
        np.testing.assert_array_almost_equal(reg.z.toarray()[0], z_0, 7)
        zthhthi = np.array( [[ 1.00000000e+00, -2.22044605e-16, -2.22044605e-16 , 2.22044605e-16,
                               4.44089210e-16, 0.00000000e+00, -8.88178420e-16],
                             [ 0.00000000e+00, 1.00000000e+00, -3.55271368e-15 , 3.55271368e-15,
                               -7.10542736e-15, 7.10542736e-14, 0.00000000e+00],
                             [ 1.81898940e-12, 2.84217094e-14, 1.00000000e+00 , 0.00000000e+00,
                               -2.84217094e-14, 5.68434189e-14, 5.68434189e-14],
                             [ -8.31133940e+00, -3.76104678e-01, -2.07028208e-01 , 1.32618931e+00,
                               -8.04284562e-01, 1.30527047e+00, 1.39136816e+00]])
        np.testing.assert_array_almost_equal(reg.zthhthi, zthhthi, 7)

    def test_init_white_(self):
        # White heteroskedasticity-robust standard errors.
        X = []
        X.append(self.db.by_col("INC"))
        X.append(self.db.by_col("CRIME"))
        self.X = np.array(X).T
        self.X = SP.csr_matrix(self.X)
        base_gm_lag = GM_Lag(self.y, self.X, w=self.w, w_lags=2, robust='white')
        tbetas = np.array([[ 4.53017056e+01], [ 6.20888617e-01], [ -4.80723451e-01], [ 2.83622122e-02]])
        np.testing.assert_array_almost_equal(base_gm_lag.betas, tbetas)
        dbetas = D.se_betas(base_gm_lag)
        se_betas = np.array([ 20.47077481, 0.50613931, 0.20138425, 0.38028295 ])
        np.testing.assert_array_almost_equal(dbetas, se_betas)

    def test_init_hac_(self):
        # HAC-robust standard errors with a triangular kernel weights matrix.
        X = []
        X.append(self.db.by_col("INC"))
        X.append(self.db.by_col("CRIME"))
        self.X = np.array(X).T
        self.X = SP.csr_matrix(self.X)
        gwk = pysal.kernelW_from_shapefile(pysal.examples.get_path('columbus.shp'),k=15,function='triangular', fixed=False)
        base_gm_lag = GM_Lag(self.y, self.X, w=self.w, w_lags=2, robust='hac', gwk=gwk)
        tbetas = np.array([[ 4.53017056e+01], [ 6.20888617e-01], [ -4.80723451e-01], [ 2.83622122e-02]])
        np.testing.assert_array_almost_equal(base_gm_lag.betas, tbetas)
        dbetas = D.se_betas(base_gm_lag)
        se_betas = np.array([ 19.08513569, 0.51769543, 0.18244862, 0.35460553])
        np.testing.assert_array_almost_equal(dbetas, se_betas)

    def test_init_discbd(self):
        # User-supplied endogenous variable (CRIME) with instrument DISCBD.
        X = np.array(self.db.by_col("INC"))
        X = np.reshape(X, (49,1))
        X = SP.csr_matrix(X)
        yd = np.array(self.db.by_col("CRIME"))
        yd = np.reshape(yd, (49,1))
        q = np.array(self.db.by_col("DISCBD"))
        q = np.reshape(q, (49,1))
        reg = GM_Lag(self.y, X, w=self.w, yend=yd, q=q, w_lags=2)
        tbetas = np.array([[ 100.79359082], [ -0.50215501], [ -1.14881711], [ -0.38235022]])
        np.testing.assert_array_almost_equal(tbetas, reg.betas)
        dbetas = D.se_betas(reg)
        se_betas = np.array([ 53.0829123 , 1.02511494, 0.57589064, 0.59891744 ])
        np.testing.assert_array_almost_equal(dbetas, se_betas)

    def test_n_k(self):
        # sig2n_k=True: variance uses the n - k degrees-of-freedom correction.
        X = []
        X.append(self.db.by_col("INC"))
        X.append(self.db.by_col("CRIME"))
        self.X = np.array(X).T
        self.X = SP.csr_matrix(self.X)
        reg = GM_Lag(self.y, self.X, w=self.w, w_lags=2, sig2n_k=True)
        betas = np. array([[ 4.53017056e+01], [ 6.20888617e-01], [ -4.80723451e-01], [ 2.83622122e-02]])
        np.testing.assert_array_almost_equal(reg.betas, betas, 7)
        vm = np.array( [[ 3.49389596e+02, -5.36394351e+00, -2.81960968e+00, -4.35694515e+00],
                        [ -5.36394351e+00, 2.99965892e-01, 6.44054000e-02, -3.13108972e-02],
                        [ -2.81960968e+00, 6.44054000e-02, 3.61800155e-02, 1.61095854e-02],
                        [ -4.35694515e+00, -3.13108972e-02, 1.61095854e-02, 1.09698285e-01]])
        np.testing.assert_array_almost_equal(reg.vm, vm, 7)

    def test_lag_q(self):
        # lag_q=False: instruments are not spatially lagged.
        X = np.array(self.db.by_col("INC"))
        X = np.reshape(X, (49,1))
        X = SP.csr_matrix(X)
        yd = np.array(self.db.by_col("CRIME"))
        yd = np.reshape(yd, (49,1))
        q = np.array(self.db.by_col("DISCBD"))
        q = np.reshape(q, (49,1))
        reg = GM_Lag(self.y, X, w=self.w, yend=yd, q=q, w_lags=2, lag_q=False)
        tbetas = np.array( [[ 108.83261383], [ -0.48041099], [ -1.18950006], [ -0.56140186]])
        np.testing.assert_array_almost_equal(tbetas, reg.betas)
        dbetas = D.se_betas(reg)
        se_betas = np.array([ 58.33203837, 1.09100446, 0.62315167, 0.68088777])
        np.testing.assert_array_almost_equal(dbetas, se_betas)

    def test_spatial(self):
        # Queen weights plus spatial diagnostics (Anselin-Kelejian test).
        X = np.array(self.db.by_col("INC"))
        X = np.reshape(X, (49,1))
        X = SP.csr_matrix(X)
        yd = np.array(self.db.by_col("CRIME"))
        yd = np.reshape(yd, (49,1))
        q = np.array(self.db.by_col("DISCBD"))
        q = np.reshape(q, (49,1))
        w = pysal.queen_from_shapefile(pysal.examples.get_path('columbus.shp'))
        reg = GM_Lag(self.y, X, yd, q, spat_diag=True, w=w)
        betas = np.array([[ 5.46344924e+01], [ 4.13301682e-01], [ -5.92637442e-01], [ -7.40490883e-03]])
        np.testing.assert_array_almost_equal(reg.betas, betas, 7)
        vm = np.array( [[ 4.45202654e+02, -1.50290275e+01, -6.36557072e+00, -5.71403440e-03],
                        [ -1.50290275e+01, 5.93124683e-01, 2.19169508e-01, -6.70675916e-03],
                        [ -6.36557072e+00, 2.19169508e-01, 1.06577542e-01, -2.96533875e-03],
                        [ -5.71403440e-03, -6.70675916e-03, -2.96533875e-03, 1.15655425e-03]])
        np.testing.assert_array_almost_equal(reg.vm, vm, 6)
        ak_test = np.array([ 2.52597326, 0.11198567])
        np.testing.assert_array_almost_equal(reg.ak_test, ak_test, 7)

    def test_names(self):
        # Verify that user-supplied variable/dataset names propagate, with
        # the constant and spatially-lagged names added automatically.
        X = np.array(self.db.by_col("INC"))
        X = np.reshape(X, (49,1))
        X = SP.csr_matrix(X)
        yd = np.array(self.db.by_col("CRIME"))
        yd = np.reshape(yd, (49,1))
        q = np.array(self.db.by_col("DISCBD"))
        q = np.reshape(q, (49,1))
        w = pysal.queen_from_shapefile(pysal.examples.get_path('columbus.shp'))
        gwk = pysal.kernelW_from_shapefile(pysal.examples.get_path('columbus.shp'),k=5,function='triangular', fixed=False)
        name_x = ['inc']
        name_y = 'crime'
        name_yend = ['crime']
        name_q = ['discbd']
        name_w = 'queen'
        name_gwk = 'k=5'
        name_ds = 'columbus'
        reg = GM_Lag(self.y, X, yd, q,
                     spat_diag=True, w=w, robust='hac', gwk=gwk,
                     name_x=name_x, name_y=name_y, name_q=name_q, name_w=name_w,
                     name_yend=name_yend, name_gwk=name_gwk, name_ds=name_ds)
        betas = np.array([[ 5.46344924e+01], [ 4.13301682e-01], [ -5.92637442e-01], [ -7.40490883e-03]])
        np.testing.assert_array_almost_equal(reg.betas, betas, 7)
        vm = np.array( [[ 5.70817052e+02, -1.83655385e+01, -8.36602575e+00, 2.37538877e-02],
                        [ -1.85224661e+01, 6.53311383e-01, 2.84209566e-01, -6.47694160e-03],
                        [ -8.31105622e+00, 2.78772694e-01, 1.38144928e-01, -3.98175246e-03],
                        [ 2.66662466e-02, -6.23783104e-03, -4.11092891e-03, 1.10936528e-03]])
        np.testing.assert_array_almost_equal(reg.vm, vm, 6)
        self.assertListEqual(reg.name_x, ['CONSTANT']+name_x)
        name_yend.append('W_crime')
        self.assertListEqual(reg.name_yend, name_yend)
        name_q.extend(['W_inc', 'W_discbd'])
        self.assertListEqual(reg.name_q, name_q)
        self.assertEqual(reg.name_y, name_y)
        self.assertEqual(reg.name_w, name_w)
        self.assertEqual(reg.name_gwk, name_gwk)
        self.assertEqual(reg.name_ds, name_ds)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| |
import mutagen
import collections
import uuid
from .format import Vorbis, AAC, ID3
from . import Exceptions, Tag
from . import Utilities
from audio_pipeline import Constants
class BaseAudioFile:
    """Wrapper around a mutagen audio file that exposes normalized release-
    and track-level tag objects independent of the underlying tag format
    (Vorbis, ID3 or AAC/MP4)."""

    audiofile_type = "BaseAudioFile"

    # Custom tags whose on-disk names may be renamed; maps the internal
    # attribute name to the default tag name written into the file.
    renameable_tags = {"item_code": "ITEMCODE",
                       "barcode": "Barcode",
                       "catalog_num": "Catalog #",
                       "file_under": "File Under",
                       "obscenity": "FCC Rating",
                       "radio_edit": "Radio Edit",
                       "category": "Category"}

    # Default display column widths used by tb_release() / tb_track().
    default_release_width = 15
    default_track_width = 25

    # Tag-format adapters; __init__ selects one per file based on mime type.
    vorbis = Vorbis.Format
    id3 = ID3.Format
    aac = AAC.Format

    def __init__(self, file_name, release_tags=None, track_tags=None,
                 tb_release_tags=None, tb_track_tags=None):
        """Open file_name with mutagen and extract all known tags.

        :param file_name: path of the audio file to open
        :param release_tags: names of custom release-level tags to read
        :param track_tags: names of custom track-level tags to read
        :param tb_release_tags: custom release tags to show in the tag browser
        :param tb_track_tags: custom track tags to show in the tag browser
        :raises Exceptions.UnsupportedFiletypeError: unrecognized audio type
        :raises IOError: if the file cannot be opened
        """
        self.format = None
        self.file_name = file_name
        try:
            self.audio = mutagen.File(file_name)
            if not self.audio:
                raise Exceptions.UnsupportedFiletypeError(file_name)
        except IOError as e:
            # if there's an error opening the file (probably not an audio file)
            # propagate the resulting exception on up
            raise e
        for mime_type in self.audio.mime:
            # get the appropriate tag Format for this file type
            if mime_type in Tag.Formats.mime_map:
                t = Tag.Formats.mime_map[mime_type]
                if t.casefold() == "aac":
                    self.format = self.aac
                elif t.casefold() == "id3":
                    self.format = self.id3
                elif t.casefold() == "vorbis":
                    self.format = self.vorbis
                break
        if not self.format:
            # Can't process this type of audio file; raise UnsupportedFileType error
            raise Exceptions.UnsupportedFiletypeError(file_name)
        # get tags
        #######################
        # release-level tags
        #######################
        self.release_tags = collections.OrderedDict()
        self.tb_release_tags = []
        self.mbid = self.format.mbid(self.audio)
        self.album = self.format.album(self.audio)
        self.album_artist = self.format.album_artist(self.audio)
        self.release_date = self.format.release_date(self.audio)
        self.label = self.format.label(self.audio)
        self.country = self.format.country(self.audio)
        self.release_type = self.format.release_type(self.audio)
        self.media_format = self.format.media_format(self.audio)
        self.item_code = self.format.custom_tag(BaseAudioFile.renameable_tags["item_code"], self.audio)
        self.barcode = self.format.custom_tag(BaseAudioFile.renameable_tags['barcode'], self.audio)
        self.catalog_num = self.format.custom_tag(BaseAudioFile.renameable_tags['catalog_num'], self.audio)
        # a basic filesystem arrangement - should be easily toggled on/off
        self.file_under = self.format.custom_tag(BaseAudioFile.renameable_tags['file_under'], self.audio)
        if not self.file_under.value:
            # Derive a two-letter filing key from the album artist, skipping
            # a leading English article ("a", "an", "the").
            artist_value = str(self.album_artist)
            if self.album_artist.value is not None:
                starticle = artist_value.split()[0].casefold()
                if starticle in {"a", "an", "the"} and len(artist_value.split()) > 1:
                    # NOTE(review): slices by the article's character count —
                    # works for ASCII since casefold() preserves length there.
                    artist_value = artist_value[len(starticle):].strip()
                self.file_under.value = artist_value[0:2].upper()
                self.file_under.save()
        # get custom release tag values
        if release_tags:
            self.release_tags = {r_tag: self.format.custom_tag(r_tag, self.audio) for r_tag in release_tags}
        if tb_release_tags:
            for t in tb_release_tags:
                if t not in self.release_tags:
                    self.release_tags[t] = self.format.custom_tag(t, self.audio)
                self.tb_release_tags.append(t)
        #######################
        # track-level tags
        #######################
        self.track_tags = collections.OrderedDict()
        self.tb_track_tags = []
        self.title = self.format.title(self.audio)
        self.artist = self.format.artist(self.audio)
        self.disc_num = self.format.disc_num(self.audio)
        self.track_num = self.format.track_num(self.audio)
        self.length = self.format.length(self.audio)
        self.acoustid = self.format.acoustid(self.audio)
        self.recording_mbid = self.format.recording_mbid(self.audio)
        self.track_mbid = self.format.track_mbid(self.audio)
        if track_tags:
            self.track_tags = {t_tag: self.format.custom_tag(t_tag, self.audio) for t_tag in track_tags}
        if tb_track_tags:
            for t in tb_track_tags:
                self.tb_track_tags.append(t)
                if t not in self.track_tags:
                    self.track_tags[t] = self.format.custom_tag(t, self.audio)
        # Marker tag recording whether metadata has been stuffed already.
        self.meta_stuffed = self.format.custom_tag("meta_stuffed", self.audio)
        self.custom_tags = [self.meta_stuffed]

    def save(self):
        """Push every tracked tag into the mutagen object and save to disk."""
        for item in self:
            item.set()
        self.audio.save()

    def __iter__(self):
        """Yield all release tags, then all track tags, then custom tags."""
        release = self.release()
        for item in release:
            yield item
        track = self.track()
        for item in track:
            yield item
        for item in self.custom_tags:
            yield item

    def track(self):
        """Return the list of track-level tag objects (built-in + custom)."""
        tracks = [self.track_num, self.title, self.artist, self.length, self.item_code, self.track_mbid, self.recording_mbid]
        tracks += [v for v in self.track_tags.values()]
        return tracks

    def tb_release(self):
        """Return (width, row, tag) tuples for the tag-browser release view."""
        TBTag = collections.namedtuple('TBTag', ['width', 'row', 'tag'])
        release_tags = [TBTag(25, 0, self.album_artist), TBTag(30, 0, self.album),
                        TBTag(20, 0, self.label), TBTag(10, 0, self.disc_num),
                        TBTag(self.default_release_width, 0, self.release_date),
                        TBTag(30, 0, self.mbid), TBTag(self.default_release_width, 0, self.country),
                        TBTag(self.default_release_width, 1, self.release_type),
                        TBTag(self.default_release_width, 1, self.media_format),
                        TBTag(self.default_release_width, 1, self.barcode),
                        TBTag(self.default_release_width, 1, self.catalog_num)]
        for tag in self.tb_release_tags:
            release_tags.append(TBTag(self.default_release_width, 1, self.release_tags[tag]))
        return release_tags

    def tb_track(self):
        """Return (width, tag) tuples for the tag-browser track view."""
        TBTag = collections.namedtuple('TBTag', ['width', 'tag'])
        track_tags = [TBTag(5, self.track_num), TBTag(30, self.title), TBTag(25, self.artist),
                      TBTag(10, self.length)]
        for tag in self.tb_track_tags:
            track_tags.append(TBTag(self.default_track_width, self.track_tags[tag]))
        return track_tags

    def release(self):
        """Return the list of release-level tag objects (built-in + custom)."""
        release = [self.album_artist, self.album, self.label, self.disc_num, self.release_date, self.mbid,
                   self.country, self.release_type, self.media_format, self.barcode, self.catalog_num]
        release += [v for v in self.release_tags.values()]
        return release

    def has_minimum_metadata(self):
        """
        Checks if this audiofile has the minimum 'required' metadata -
        album name, album artist name, track name, and track artist name
        :return: True if minimum metadata is there, False otherwise
        """
        min_meta = bool(self.album.value and Utilities.know_artist_name(self.album_artist.value) and self.title.value
                        and Utilities.know_artist_name(self.artist.value))
        print("Has minimum metadata: " + str(min_meta))
        return min_meta

    def should_stuff_metadata(self):
        """
        Checks if this audiofile has already been stuffed with metadata.
        :return: True if audiofile has had metadata stuffed, false otherwise
        """
        return self.meta_stuffed.value

    def has_mbid(self):
        """
        Check whether the track has an MBID value, and that value looks like
        an MBID (i.e. is a UUID)
        :return: True if valid MBID value, False otherwise
        """
        if self.mbid.value:
            try:
                # Parsing succeeds only for a well-formed UUID string;
                # the parsed value itself is not used.
                id = uuid.UUID(self.mbid.value)
                return True
            except ValueError as e:
                pass
        return False
class AudioFileFactory:
    """Memoising factory: caches one BaseAudioFile per file name."""
    audiofiles = dict()
    setup = False

    @classmethod
    def get(cls, file_name):
        """Return the cached BaseAudioFile for file_name, creating it on first use."""
        try:
            return cls.audiofiles[file_name]
        except KeyError:
            audiofile = BaseAudioFile(file_name, Constants.custom_release_tags,
                                      Constants.custom_track_tags,
                                      Constants.tb_release_tags, Constants.tb_track_tags)
            cls.audiofiles[file_name] = audiofile
            return audiofile
| |
""" OpenstackDriver for Network
based on BaseDriver
"""
from keystoneauth1.identity import v3
from keystoneauth1 import session
from neutronclient.v2_0 import client
from calplus.v1.network.drivers.base import BaseDriver, BaseQuota
PROVIDER = "OPENSTACK"
class OpenstackDriver(BaseDriver):
    """OpenStack (Neutron) implementation of the network BaseDriver.

    Wraps a neutronclient Client authenticated with Keystone v3 password
    credentials; quota information is exposed through OpenstackQuota.
    """

    def __init__(self, cloud_config):
        super(OpenstackDriver, self).__init__()
        self.auth_url = cloud_config['os_auth_url']
        self.project_name = cloud_config['os_project_name']
        self.username = cloud_config['os_username']
        self.password = cloud_config['os_password']
        # BUGFIX: the two domain lookups were swapped — user_domain_name read
        # 'os_project_domain_name' and project_domain_name read
        # 'os_user_domain_name'.
        self.user_domain_name = \
            cloud_config.get('os_user_domain_name', 'default')
        self.project_domain_name = \
            cloud_config.get('os_project_domain_name', 'default')
        self.driver_name = \
            cloud_config.get('driver_name', 'default')
        self.tenant_id = cloud_config.get('tenant_id', None)
        self.limit = cloud_config.get('limit', None)
        self._setup()

    def _setup(self):
        """Build the authenticated neutron client and the quota helper."""
        auth = v3.Password(auth_url=self.auth_url,
                           user_domain_name=self.user_domain_name,
                           username=self.username,
                           password=self.password,
                           project_domain_name=self.project_domain_name,
                           project_name=self.project_name)
        sess = session.Session(auth=auth)
        self.client = client.Client(session=sess)
        self.network_quota = OpenstackQuota(
            self.client, self.tenant_id, self.limit)

    def _check_external_network(self):
        """Return the id of the first external (provider) network, or None."""
        networks = self.client.list_networks().get('networks')
        for network in networks:
            if network.get('provider:physical_network') is not None:
                return network.get('id')
        return None

    def _check_router_external_gateway(self):
        """Return the id of the first router with an external gateway, or None."""
        routers = self.client.list_routers().get('routers')
        for router in routers:
            if router.get('external_gateway_info') is not None:
                return router.get('id')
        return None

    @staticmethod
    def _format_subnet(subnet):
        """Map a neutron subnet dict onto the normalized CAL format."""
        return {'name': subnet['name'],
                'description': None,
                'id': subnet['id'],
                'cidr': subnet['cidr'],
                'cloud': PROVIDER,
                'gateway': subnet['gateway_ip'],
                'security_group': None,
                'allocation_pools': subnet['allocation_pools'],
                'dns_nameservers': subnet['dns_nameservers']
                }

    def create(self, name, cidr, **kargs):
        """Create a network plus one subnet; return the subnet description.

        :param name: subnet name (the network itself is created unnamed)
        :param cidr: subnet CIDR, e.g. '10.0.0.0/24'
        :keyword admin_state_up: network admin state (default True)
        :keyword ip_version: 4 or 6 (default 4)
        """
        admin_state_up = kargs.pop('admin_state_up', True)
        ip_version = kargs.pop('ip_version', 4)
        # step 1: create network with empty name and admin_state_up
        network = {'name': '',
                   'admin_state_up': admin_state_up}
        net = self.client.create_network({'network': network}).get('network')
        network_id = net['id']
        # step 2: create subnet
        sub = {"network_id": network_id,
               "ip_version": ip_version,
               "cidr": cidr,
               "name": name}
        subnet = self.client.create_subnet({'subnet': sub}).get('subnet')
        # NOTE(review): this result uses the key 'gateway_ip' while show()
        # and list() use 'gateway' — kept as-is for backward compatibility;
        # confirm which key callers expect before unifying.
        result = {'name': subnet['name'],
                  'description': None,
                  'id': subnet['id'],
                  'cidr': subnet['cidr'],
                  'cloud': PROVIDER,
                  'gateway_ip': subnet['gateway_ip'],
                  'security_group': None,
                  'allocation_pools': subnet['allocation_pools'],
                  'dns_nameservers': subnet['dns_nameservers']
                  }
        return result

    def show(self, subnet_id):
        """Return the normalized description of one subnet."""
        subnet = self.client.show_subnet(subnet_id).get('subnet')
        return self._format_subnet(subnet)

    def list(self, **search_opts):
        """Return normalized descriptions of all subnets matching search_opts."""
        subnets = self.client.list_subnets(**search_opts).get('subnets')
        return [self._format_subnet(subnet) for subnet in subnets]

    def update(self, network_id, network):
        # Now we can't update network, I'm trying again
        return None

    def delete(self, network_id):
        """Delete a network by id."""
        return self.client.delete_network(network_id)

    def connect_external_net(self, subnet_id):
        """Attach subnet_id to a router that has an external gateway.

        Creates such a router (on the first external network found) when
        none exists yet.

        :raises Exception: when no external network is available.
        """
        router_id = self._check_router_external_gateway()
        if router_id is None:
            network_id = self._check_external_network()
            if network_id is None:
                raise Exception("No external network available")
            router = {
                "name": "default",
                "external_gateway_info": {
                    "network_id": "{}".format(network_id)
                }
            }
            # BUGFIX: this previously called `self.create_router` (an
            # undefined attribute) and never captured the new router's id,
            # so add_interface_router was called with router_id=None.
            created = self.client.create_router({'router': router})
            router_id = created['router']['id']
        body = {
            "subnet_id": "{}".format(subnet_id)
        }
        return self.client.add_interface_router(router_id, body)

    def disconnect_external_net(self, network_id):
        # just detach all connect to router have external_gateway
        pass

    def allocate_public_ip(self):
        """Allocate a floating IP on the external network; True on success."""
        external_net = self._check_external_network()
        if external_net:
            create_dict = {'floating_network_id': external_net,
                           'tenant_id': self.network_quota.tenant_id}
            self.client.create_floatingip({'floatingip': create_dict})
        else:
            return False
        return True

    def list_public_ip(self, **search_opts):
        """Return [{'public_ip': ..., 'id': ...}] for all floating IPs."""
        result = self.client.list_floatingips(**search_opts)
        ips = result.get('floatingips')
        return [{'public_ip': ip.get('floating_ip_address'),
                 'id': ip.get('id')} for ip in ips]

    def release_public_ip(self, public_ip_id):
        """Delete a floating IP by id; always returns True."""
        self.client.delete_floatingip(public_ip_id)
        return True
class OpenstackQuota(BaseQuota):
    """Quota/usage reporting on top of an authenticated neutron client."""

    def __init__(self, client, tenant_id=None, limit=None):
        """
        :param client: an authenticated neutronclient Client
        :param tenant_id: tenant to report on (auto-discovered when None)
        :param limit: quota dict (fetched from neutron when None)
        """
        super(OpenstackQuota, self).__init__()
        self.client = client
        self.tenant_id = tenant_id
        self.limit = limit
        self._setup()

    def _setup(self):
        """Fill in tenant_id and limit when they were not supplied."""
        if self.tenant_id is None:
            self.tenant_id = \
                self.client.get_quotas_tenant().get('tenant')['tenant_id']
        if self.limit is None:
            self.limit = self.client.show_quota(self.tenant_id).get('quota')

    def get_networks(self):
        """Network quota usage plus the CIDR of every subnet."""
        subnets = self.client.list_subnets().get('subnets')
        list_cidrs = [{
            "net_id": subnet['id'],
            "cidr": "{}".format(subnet['cidr']),
            "allocation_pools": subnet['allocation_pools']
        } for subnet in subnets]
        return {
            "max": self.limit['network'],
            "used": len(list_cidrs),
            "list_cidrs": list_cidrs,
            "VPCs": None
        }

    def get_security_groups(self):
        """Security-group quota usage, including per-group rule usage."""
        list_security_groups = self.client.list_security_groups(
            tenant_id=self.tenant_id).get('security_groups')
        list_scgs = [{
            "security_group_id": scg['id'],
            "rules_max": self.limit['security_group_rule'],
            "rules_used": len(scg['security_group_rules']),
            "list_rules": scg['security_group_rules']
        } for scg in list_security_groups]
        return {
            "max": self.limit['security_group'],
            "used": len(list_security_groups),
            "list_security_groups": list_scgs
        }

    def get_floating_ips(self):
        """Floating-IP quota usage."""
        ips = self.client.list_floatingips().get('floatingips')
        list_ips = [ip['floating_ip_address'] for ip in ips]
        return {
            # BUGFIX: this reported self.limit['security_group'] — the
            # security-group quota — as the floating-IP maximum.
            "max": self.limit['floatingip'],
            "used": len(list_ips),
            "list_floating_ips": list_ips
        }

    def get_routers(self):
        """Router quota usage."""
        rts = self.client.list_routers().get('routers')
        list_routers = [{
            "router_id": router['id'],
            # BUGFIX: is_gateway was hard-coded True for every router;
            # only routers with an external gateway actually act as one.
            "is_gateway": router.get('external_gateway_info') is not None
        } for router in rts]
        return {
            "max": self.limit['router'],
            "used": len(list_routers),
            "list_routers": list_routers
        }

    def get_internet_gateways(self):
        """Routers that have an external gateway configured."""
        routers = self.client.list_routers().get('routers')
        return [{'internet_gateway_id': router['id']}
                for router in routers
                if router.get('external_gateway_info', None) is not None]
| |
#!/usr/bin/env python
#
# Use the raw transactions API to spend icoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a icoind or iCoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
    """Raise RuntimeError if the json library loses precision on large ICN values."""
    value = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(value)))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the default location of the icoin data directory."""
    system = platform.system()
    if system == "Darwin":
        return os.path.expanduser("~/Library/Application Support/iCoin/")
    if system == "Windows":
        return os.path.join(os.environ['APPDATA'], "iCoin")
    return os.path.expanduser("~/.icoin")
def read_icoin_config(dbdir):
    """Read the icoin.conf file from dbdir; return its settings as a dict."""
    from ConfigParser import SafeConfigParser

    class FakeSecHead(object):
        """Wrap a file object, prepending an [all] section header so that
        ConfigParser can parse the section-less icoin.conf format."""
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'
        def readline(self):
            if self.sechead:
                header, self.sechead = self.sechead, None
                return header
            line = self.fp.readline()
            # Strip trailing '#' comments before handing the line on.
            if line.find('#') != -1:
                line = line[0:line.find('#')].strip() + "\n"
            return line

    parser = SafeConfigParser()
    parser.readfp(FakeSecHead(open(os.path.join(dbdir, "icoin.conf"))))
    return dict(parser.items("all"))
def connect_JSON(config):
    """Connect to a icoin JSON-RPC server and sanity-check the connection.

    Exits the process when the connection fails or when the server's
    testnet setting does not match the config.
    """
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0)  # 0/1 in config file, convert to True/False
    if not 'rpcport' in config:
        config['rpcport'] = 19888 if testnet else 9888
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
        # but also make sure the icoind we're talking to is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    # BUGFIX: a bare `except:` also caught the SystemExit raised by the
    # testnet-mismatch sys.exit(1) above, masking it with a misleading
    # "Error connecting" message.
    except Exception:
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
def unlock_wallet(icoind):
    """Ensure the wallet is unlocked, prompting for the passphrase if needed.

    :param icoind: JSON-RPC proxy to the running icoind.
    :return: True when the wallet is unencrypted or unlocked, False otherwise.
    """
    info = icoind.getinfo()
    if 'unlocked_until' not in info:
        return True # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            icoind.walletpassphrase(passphrase, 5)
        # BUGFIX: this was a bare `except:`, which also swallowed
        # KeyboardInterrupt (Ctrl-C at the prompt) and SystemExit.
        except Exception:
            sys.stderr.write("Wrong passphrase\n")
        info = icoind.getinfo()
    return int(info['unlocked_until']) > time.time()
def list_available(icoind):
    """Summarize spendable funds per address.

    :return: {address: {"total": amount, "outputs": [unspent...],
              "account": name}} for every standard unspent output.
    """
    address_to_account = dict()
    for info in icoind.listreceivedbyaddress(0):
        address_to_account[info["address"]] = info["account"]

    address_summary = dict()
    for output in icoind.listunspent(0):
        # listunspent doesn't give addresses, so look them up in the raw tx:
        rawtx = icoind.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]
        # This code only deals with ordinary pay-to-icoin-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
        if pk["type"] not in ("pubkeyhash", "scripthash"):
            continue
        address = pk["addresses"][0]
        summary = address_summary.get(address)
        if summary is None:
            address_summary[address] = {
                "total": vout["value"],
                "outputs": [output],
                "account": address_to_account.get(address, "")
            }
        else:
            summary["total"] += vout["value"]
            summary["outputs"].append(output)
    return address_summary
def select_coins(needed, inputs):
    """Greedily pick inputs until `needed` is covered.

    :return: (outputs, change) where outputs is a list of
             {"txid", "vout"} dicts and change is gathered - needed.
    """
    outputs = []
    gathered = Decimal("0.0")
    for txin in inputs:
        if gathered >= needed:
            break
        outputs.append({"txid": txin["txid"], "vout": txin["vout"]})
        gathered += txin["amount"]
    return (outputs, gathered - needed)
def create_tx(icoind, fromaddresses, toaddress, amount, fee):
    """Build and sign a raw transaction spending amount+fee from
    fromaddresses to toaddress.

    Change (when larger than BASE_FEE) is sent back to the last of the
    from-addresses.  Returns the signed transaction as a hex string;
    exits the process when funds are insufficient or signing fails.
    """
    all_coins = list_available(icoind)
    total_available = Decimal("0.0")
    needed = amount+fee
    potential_inputs = []
    # Collect every unspent output belonging to the requested addresses.
    for addr in fromaddresses:
        if addr not in all_coins:
            continue
        potential_inputs.extend(all_coins[addr]["outputs"])
        total_available += all_coins[addr]["total"]
    if total_available < needed:
        sys.stderr.write("Error, only %f ICN available, need %f\n"%(total_available, needed));
        sys.exit(1)
    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, I'm casting amounts to float before sending them to icoind.
    #
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE: # don't bother with zero or tiny change
        # Change goes back to the last from-address (which may also be
        # the destination, hence the += branch).
        change_address = fromaddresses[-1]
        if change_address in outputs:
            outputs[change_address] += float(change_amount)
        else:
            outputs[change_address] = float(change_amount)
    rawtx = icoind.createrawtransaction(inputs, outputs)
    signed_rawtx = icoind.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
    txdata = signed_rawtx["hex"]
    return txdata
def compute_amount_in(icoind, txinfo):
    """Sum the values of the previous outputs spent by txinfo's inputs."""
    total = Decimal("0.0")
    for vin in txinfo['vin']:
        prev_tx = icoind.getrawtransaction(vin['txid'], 1)
        total += prev_tx['vout'][vin['vout']]['value']
    return total
def compute_amount_out(txinfo):
    """Sum of the values of all outputs in the decoded transaction txinfo."""
    return sum((vout['value'] for vout in txinfo['vout']), Decimal("0.0"))
def sanity_test_fee(icoind, txdata_hex, max_fee):
    """Decode txdata_hex and exit when its implied miner fee looks wrong.

    Rejects a fee above max_fee, and a sub-BASE_FEE fee on either a large
    (>1000 byte) or a tiny-amount transaction.
    """
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = icoind.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(icoind, txinfo)
        total_out = compute_amount_out(txinfo)
        # BUGFIX: `fee` was never defined, so the two BASE_FEE checks below
        # raised NameError instead of doing their job.
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down (Python 2)
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction
    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    """Command-line entry point.

    With no --amount: lists available funds per address.
    With --from/--to/--amount: builds, signs and (unless --dry_run)
    broadcasts a transaction.
    """
    import optparse
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get icoins from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send icoins to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of icoin.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")
    (options, args) = parser.parse_args()
    check_json_precision()
    config = read_icoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    icoind = connect_JSON(config)
    if options.amount is None:
        address_summary = list_available(icoind)
        # NOTE(review): iteritems() is Python 2 only — consistent with the
        # ConfigParser import elsewhere; this script targets Python 2.
        for address,info in address_summary.iteritems():
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(icoind) == False:
            pass # Keep asking for passphrase until they get it right
        txdata = create_tx(icoind, options.fromaddresses.split(","), options.to, amount, fee)
        # Refuse fees above 1% of the amount being sent.
        sanity_test_fee(icoind, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = icoind.sendrawtransaction(txdata)
            print(txid)
| |
# ./smdx_gen.py
# -*- coding: utf-8 -*-
# PyXB bindings for NM:e92452c8d3e28a9e27abfc9994d2007779e7f4c9
# Generated 2013-10-27 00:48:37.033045 by PyXB version 1.2.3
# Namespace AbsentNamespace0
import pyxb
import pyxb.binding
import pyxb.binding.saxer
import io
import pyxb.utils.utility
import pyxb.utils.domutils
import sys
# Unique identifier for bindings created at the same time
_GenerationUID = pyxb.utils.utility.UniqueIdentifier('urn:uuid:2ff6e4c2-3edc-11e3-b9a4-c82a1455dc46')
# Version of PyXB used to generate the bindings
_PyXBVersion = '1.2.3'
# Generated bindings are not compatible across PyXB versions
if pyxb.__version__ != _PyXBVersion:
raise pyxb.PyXBVersionError(_PyXBVersion)
# Import bindings for namespaces imported into schema
import pyxb.binding.datatypes
# NOTE: All namespace declarations are reserved within the binding
Namespace = pyxb.namespace.CreateAbsentNamespace()
Namespace.configureCategories(['typeBinding', 'elementBinding'])
def CreateFromDocument (xml_text, default_namespace=None, location_base=None):
    """Parse the given XML and use the document element to create a
    Python instance.
    @param xml_text An XML document.  This should be data (Python 2
    str or Python 3 bytes), or a text (Python 2 unicode or Python 3
    str) in the L{pyxb._InputEncoding} encoding.
    @keyword default_namespace The L{pyxb.Namespace} instance to use as the
    default namespace where there is no default namespace in scope.
    If unspecified or C{None}, the namespace of the module containing
    this function will be used.
    @keyword location_base: An object to be recorded as the base of all
    L{pyxb.utils.utility.Location} instances associated with events and
    objects handled by the parser.  You might pass the URI from which
    the document was obtained.
    """
    # Runtime not configured for SAX parsing: fall back to the DOM path.
    if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
        dom = pyxb.utils.domutils.StringToDOM(xml_text)
        return CreateFromDOM(dom.documentElement)
    if default_namespace is None:
        default_namespace = Namespace.fallbackNamespace()
    saxer = pyxb.binding.saxer.make_parser(fallback_namespace=default_namespace, location_base=location_base)
    handler = saxer.getContentHandler()
    xmld = xml_text
    # NOTE(review): 'unicode' exists only on Python 2; on Python 3 this
    # isinstance raises NameError — these generated bindings evidently
    # target Python 2.  Regenerate with a newer PyXB for Python 3.
    if isinstance(xmld, unicode):
        xmld = xmld.encode(pyxb._InputEncoding)
    saxer.parse(io.BytesIO(xmld))
    instance = handler.rootObject()
    return instance
def CreateFromDOM (node, default_namespace=None):
    """Create a Python instance from the given DOM node.
    The node tag must correspond to an element declaration in this module.
    @deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}."""
    if default_namespace is None:
        default_namespace = Namespace.fallbackNamespace()
    return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)
# Atomic simple type: PointTypeDefinition
# NOTE(review): PyXB-generated binding code — edit smdx.xsd and regenerate
# rather than modifying this block by hand.
class PointTypeDefinition (pyxb.binding.datatypes.string, pyxb.binding.basis.enumeration_mixin):

    """Enumerated string type for SunSpec point data types (int16 ...
    ipv6addr, scale factors, strings, padding and address types)."""

    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'PointTypeDefinition')
    _XSDLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 106, 2)
    _Documentation = None
# Register the permitted enumeration values on the type.
PointTypeDefinition._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=PointTypeDefinition, enum_prefix=None)
PointTypeDefinition.int16 = PointTypeDefinition._CF_enumeration.addEnumeration(unicode_value=u'int16', tag=u'int16')
PointTypeDefinition.uint16 = PointTypeDefinition._CF_enumeration.addEnumeration(unicode_value=u'uint16', tag=u'uint16')
PointTypeDefinition.acc16 = PointTypeDefinition._CF_enumeration.addEnumeration(unicode_value=u'acc16', tag=u'acc16')
PointTypeDefinition.int32 = PointTypeDefinition._CF_enumeration.addEnumeration(unicode_value=u'int32', tag=u'int32')
PointTypeDefinition.uint32 = PointTypeDefinition._CF_enumeration.addEnumeration(unicode_value=u'uint32', tag=u'uint32')
PointTypeDefinition.float32 = PointTypeDefinition._CF_enumeration.addEnumeration(unicode_value=u'float32', tag=u'float32')
PointTypeDefinition.acc32 = PointTypeDefinition._CF_enumeration.addEnumeration(unicode_value=u'acc32', tag=u'acc32')
PointTypeDefinition.int64 = PointTypeDefinition._CF_enumeration.addEnumeration(unicode_value=u'int64', tag=u'int64')
PointTypeDefinition.uint64 = PointTypeDefinition._CF_enumeration.addEnumeration(unicode_value=u'uint64', tag=u'uint64')
PointTypeDefinition.float64 = PointTypeDefinition._CF_enumeration.addEnumeration(unicode_value=u'float64', tag=u'float64')
PointTypeDefinition.acc64 = PointTypeDefinition._CF_enumeration.addEnumeration(unicode_value=u'acc64', tag=u'acc64')
PointTypeDefinition.enum16 = PointTypeDefinition._CF_enumeration.addEnumeration(unicode_value=u'enum16', tag=u'enum16')
PointTypeDefinition.enum32 = PointTypeDefinition._CF_enumeration.addEnumeration(unicode_value=u'enum32', tag=u'enum32')
PointTypeDefinition.bitfield16 = PointTypeDefinition._CF_enumeration.addEnumeration(unicode_value=u'bitfield16', tag=u'bitfield16')
PointTypeDefinition.bitfield32 = PointTypeDefinition._CF_enumeration.addEnumeration(unicode_value=u'bitfield32', tag=u'bitfield32')
PointTypeDefinition.sunssf = PointTypeDefinition._CF_enumeration.addEnumeration(unicode_value=u'sunssf', tag=u'sunssf')
PointTypeDefinition.string = PointTypeDefinition._CF_enumeration.addEnumeration(unicode_value=u'string', tag=u'string')
PointTypeDefinition.pad = PointTypeDefinition._CF_enumeration.addEnumeration(unicode_value=u'pad', tag=u'pad')
PointTypeDefinition.ipaddr = PointTypeDefinition._CF_enumeration.addEnumeration(unicode_value=u'ipaddr', tag=u'ipaddr')
PointTypeDefinition.ipv6addr = PointTypeDefinition._CF_enumeration.addEnumeration(unicode_value=u'ipv6addr', tag=u'ipv6addr')
PointTypeDefinition._InitializeFacetMap(PointTypeDefinition._CF_enumeration)
Namespace.addCategoryObject('typeBinding', u'PointTypeDefinition', PointTypeDefinition)
# Atomic simple type: PointAccessDefinition
# NOTE(review): PyXB-generated binding code — edit smdx.xsd and regenerate
# rather than modifying this block by hand.
class PointAccessDefinition (pyxb.binding.datatypes.string, pyxb.binding.basis.enumeration_mixin):

    """Enumerated string type for a point's access mode: r, rw or w."""

    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'PointAccessDefinition')
    _XSDLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 131, 2)
    _Documentation = None
# Register the permitted enumeration values on the type.
PointAccessDefinition._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=PointAccessDefinition, enum_prefix=None)
PointAccessDefinition.r = PointAccessDefinition._CF_enumeration.addEnumeration(unicode_value=u'r', tag=u'r')
PointAccessDefinition.rw = PointAccessDefinition._CF_enumeration.addEnumeration(unicode_value=u'rw', tag=u'rw')
PointAccessDefinition.w = PointAccessDefinition._CF_enumeration.addEnumeration(unicode_value=u'w', tag=u'w')
PointAccessDefinition._InitializeFacetMap(PointAccessDefinition._CF_enumeration)
Namespace.addCategoryObject('typeBinding', u'PointAccessDefinition', PointAccessDefinition)
# Complex type [anonymous] with content type ELEMENT_ONLY
# NOTE(review): PyXB-generated binding code — edit smdx.xsd and regenerate
# rather than modifying this block by hand.
class CTD_ANON (pyxb.binding.basis.complexTypeDefinition):
    """Anonymous document-root complex type: carries 'model' and 'strings'
    child elements plus a version attribute 'v' (default '1')."""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 17, 4)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element model uses Python identifier model
    __model = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'model'), 'model', '__AbsentNamespace0_CTD_ANON_model', True, pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 27, 6), )
    model = property(__model.value, __model.set, None, None)
    # Element strings uses Python identifier strings
    __strings = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'strings'), 'strings', '__AbsentNamespace0_CTD_ANON_strings', True, pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 28, 6), )
    strings = property(__strings.value, __strings.set, None, None)
    # Attribute v uses Python identifier v
    __v = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'v'), 'v', '__AbsentNamespace0_CTD_ANON_v', pyxb.binding.datatypes.string, unicode_default=u'1')
    __v._DeclarationLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 21, 6)
    __v._UseLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 21, 6)
    v = property(__v.value, __v.set, None, None)
    _ElementMap.update({
        __model.name() : __model,
        __strings.name() : __strings
    })
    _AttributeMap.update({
        __v.name() : __v
    })
# Complex type ModelDefinition with content type ELEMENT_ONLY
# NOTE(review): PyXB-generated binding code — edit smdx.xsd and regenerate
# rather than modifying this block by hand.
class ModelDefinition (pyxb.binding.basis.complexTypeDefinition):
    """A SunSpec model: repeated 'block' children plus integer 'id' and
    'len' attributes."""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'ModelDefinition')
    _XSDLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 32, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element block uses Python identifier block
    __block = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'block'), 'block', '__AbsentNamespace0_ModelDefinition_block', True, pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 34, 6), )
    block = property(__block.value, __block.set, None, None)
    # Attribute id uses Python identifier id
    __id = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'id'), 'id', '__AbsentNamespace0_ModelDefinition_id', pyxb.binding.datatypes.integer)
    __id._DeclarationLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 36, 4)
    __id._UseLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 36, 4)
    id = property(__id.value, __id.set, None, None)
    # Attribute len uses Python identifier len
    __len = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'len'), 'len', '__AbsentNamespace0_ModelDefinition_len', pyxb.binding.datatypes.integer)
    __len._DeclarationLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 37, 4)
    __len._UseLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 37, 4)
    len = property(__len.value, __len.set, None, None)
    _ElementMap.update({
        __block.name() : __block
    })
    _AttributeMap.update({
        __id.name() : __id,
        __len.name() : __len
    })
Namespace.addCategoryObject('typeBinding', u'ModelDefinition', ModelDefinition)
# Complex type BlockDefinition with content type ELEMENT_ONLY
# NOTE(review): PyXB-generated binding code — edit smdx.xsd and regenerate
# rather than modifying this block by hand.
class BlockDefinition (pyxb.binding.basis.complexTypeDefinition):
    """A block within a model: repeated 'point' children plus integer
    'len' and string 'type' attributes."""
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'BlockDefinition')
    _XSDLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 40, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element point uses Python identifier point
    __point = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'point'), 'point', '__AbsentNamespace0_BlockDefinition_point', True, pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 42, 6), )
    point = property(__point.value, __point.set, None, None)
    # Attribute len uses Python identifier len
    __len = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'len'), 'len', '__AbsentNamespace0_BlockDefinition_len', pyxb.binding.datatypes.integer)
    __len._DeclarationLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 44, 4)
    __len._UseLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 44, 4)
    len = property(__len.value, __len.set, None, None)
    # Attribute type uses Python identifier type
    __type = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'type'), 'type', '__AbsentNamespace0_BlockDefinition_type', pyxb.binding.datatypes.string)
    __type._DeclarationLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 45, 4)
    __type._UseLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 45, 4)
    type = property(__type.value, __type.set, None, None)
    _ElementMap.update({
        __point.name() : __point
    })
    _AttributeMap.update({
        __len.name() : __len,
        __type.name() : __type
    })
Namespace.addCategoryObject('typeBinding', u'BlockDefinition', BlockDefinition)
# Complex type SymbolDefinition with content type SIMPLE
# NOTE(review): PyXB-generated binding code — edit smdx.xsd and regenerate
# rather than modifying this block by hand.
class SymbolDefinition (pyxb.binding.basis.complexTypeDefinition):
    """A symbol: string content with a required string 'id' attribute."""
    _TypeDefinition = pyxb.binding.datatypes.string
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_SIMPLE
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'SymbolDefinition')
    _XSDLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 62, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.string
    # Attribute id uses Python identifier id
    __id = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'id'), 'id', '__AbsentNamespace0_SymbolDefinition_id', pyxb.binding.datatypes.string, required=True)
    __id._DeclarationLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 65, 8)
    __id._UseLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 65, 8)
    id = property(__id.value, __id.set, None, None)
    _ElementMap.update({
    })
    _AttributeMap.update({
        __id.name() : __id
    })
Namespace.addCategoryObject('typeBinding', u'SymbolDefinition', SymbolDefinition)
# Complex type StringsDefinition with content type ELEMENT_ONLY
class StringsDefinition (pyxb.binding.basis.complexTypeDefinition):
    """Complex type StringsDefinition with content type ELEMENT_ONLY.

    PyXB-generated binding for a ``strings`` block: localized text for one
    model. Children are an optional ``model`` element and repeating
    ``point`` elements; attributes are a required integer ``id`` (the model
    id the strings apply to) and an optional ``locale``.

    NOTE(review): auto-generated by PyXB from ``smdx.xsd``; regenerate
    instead of hand-editing.
    """
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'StringsDefinition')
    _XSDLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 70, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element model uses Python identifier model (single occurrence)
    __model = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'model'), 'model', '__AbsentNamespace0_StringsDefinition_model', False, pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 72, 6), )
    model = property(__model.value, __model.set, None, None)
    # Element point uses Python identifier point (plural: third positional
    # argument True marks it as a repeating element)
    __point = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'point'), 'point', '__AbsentNamespace0_StringsDefinition_point', True, pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 73, 6), )
    point = property(__point.value, __point.set, None, None)
    # Attribute id uses Python identifier id (required integer)
    __id = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'id'), 'id', '__AbsentNamespace0_StringsDefinition_id', pyxb.binding.datatypes.integer, required=True)
    __id._DeclarationLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 75, 4)
    __id._UseLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 75, 4)
    id = property(__id.value, __id.set, None, None)
    # Attribute locale uses Python identifier locale (optional string)
    __locale = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'locale'), 'locale', '__AbsentNamespace0_StringsDefinition_locale', pyxb.binding.datatypes.string)
    __locale._DeclarationLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 76, 4)
    __locale._UseLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 76, 4)
    locale = property(__locale.value, __locale.set, None, None)
    _ElementMap.update({
        __model.name() : __model,
        __point.name() : __point
    })
    _AttributeMap.update({
        __id.name() : __id,
        __locale.name() : __locale
    })
# Register the binding so the namespace can resolve the type by name.
Namespace.addCategoryObject('typeBinding', u'StringsDefinition', StringsDefinition)
# Complex type StringsModelDefinition with content type ELEMENT_ONLY
class StringsModelDefinition (pyxb.binding.basis.complexTypeDefinition):
    """Complex type StringsModelDefinition with content type ELEMENT_ONLY.

    PyXB-generated binding for the model-level localized text: optional
    ``label``, ``description`` and ``notes`` child elements, no attributes.

    NOTE(review): auto-generated by PyXB from ``smdx.xsd``; regenerate
    instead of hand-editing.
    """
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'StringsModelDefinition')
    _XSDLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 79, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element label uses Python identifier label
    __label = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'label'), 'label', '__AbsentNamespace0_StringsModelDefinition_label', False, pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 81, 6), )
    label = property(__label.value, __label.set, None, None)
    # Element description uses Python identifier description
    __description = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'description'), 'description', '__AbsentNamespace0_StringsModelDefinition_description', False, pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 82, 6), )
    description = property(__description.value, __description.set, None, None)
    # Element notes uses Python identifier notes
    __notes = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'notes'), 'notes', '__AbsentNamespace0_StringsModelDefinition_notes', False, pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 83, 6), )
    notes = property(__notes.value, __notes.set, None, None)
    _ElementMap.update({
        __label.name() : __label,
        __description.name() : __description,
        __notes.name() : __notes
    })
    # No attributes declared for this type.
    _AttributeMap.update({
    })
# Register the binding so the namespace can resolve the type by name.
Namespace.addCategoryObject('typeBinding', u'StringsModelDefinition', StringsModelDefinition)
# Complex type StringsPointDefinition with content type ELEMENT_ONLY
class StringsPointDefinition (pyxb.binding.basis.complexTypeDefinition):
    """Complex type StringsPointDefinition with content type ELEMENT_ONLY.

    PyXB-generated binding for per-point localized text: optional
    ``label``, ``description``, ``notes`` elements plus repeating
    ``symbol`` children, keyed by a required string ``id`` attribute
    (the point id the strings apply to).

    NOTE(review): auto-generated by PyXB from ``smdx.xsd``; regenerate
    instead of hand-editing.
    """
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'StringsPointDefinition')
    _XSDLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 87, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element label uses Python identifier label
    __label = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'label'), 'label', '__AbsentNamespace0_StringsPointDefinition_label', False, pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 89, 6), )
    label = property(__label.value, __label.set, None, None)
    # Element description uses Python identifier description
    __description = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'description'), 'description', '__AbsentNamespace0_StringsPointDefinition_description', False, pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 90, 6), )
    description = property(__description.value, __description.set, None, None)
    # Element notes uses Python identifier notes
    __notes = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'notes'), 'notes', '__AbsentNamespace0_StringsPointDefinition_notes', False, pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 91, 6), )
    notes = property(__notes.value, __notes.set, None, None)
    # Element symbol uses Python identifier symbol (plural: third positional
    # argument True marks it as a repeating element)
    __symbol = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'symbol'), 'symbol', '__AbsentNamespace0_StringsPointDefinition_symbol', True, pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 92, 6), )
    symbol = property(__symbol.value, __symbol.set, None, None)
    # Attribute id uses Python identifier id (required string)
    __id = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'id'), 'id', '__AbsentNamespace0_StringsPointDefinition_id', pyxb.binding.datatypes.string, required=True)
    __id._DeclarationLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 94, 4)
    __id._UseLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 94, 4)
    id = property(__id.value, __id.set, None, None)
    _ElementMap.update({
        __label.name() : __label,
        __description.name() : __description,
        __notes.name() : __notes,
        __symbol.name() : __symbol
    })
    _AttributeMap.update({
        __id.name() : __id
    })
# Register the binding so the namespace can resolve the type by name.
Namespace.addCategoryObject('typeBinding', u'StringsPointDefinition', StringsPointDefinition)
# Complex type StringsSymbolDefinition with content type ELEMENT_ONLY
class StringsSymbolDefinition (pyxb.binding.basis.complexTypeDefinition):
    """Complex type StringsSymbolDefinition with content type ELEMENT_ONLY.

    PyXB-generated binding for per-symbol localized text: optional
    ``label``, ``description`` and ``notes`` elements, keyed by a required
    string ``id`` attribute (the symbol id the strings apply to).

    NOTE(review): auto-generated by PyXB from ``smdx.xsd``; regenerate
    instead of hand-editing.
    """
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'StringsSymbolDefinition')
    _XSDLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 97, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element label uses Python identifier label
    __label = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'label'), 'label', '__AbsentNamespace0_StringsSymbolDefinition_label', False, pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 99, 6), )
    label = property(__label.value, __label.set, None, None)
    # Element description uses Python identifier description
    __description = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'description'), 'description', '__AbsentNamespace0_StringsSymbolDefinition_description', False, pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 100, 6), )
    description = property(__description.value, __description.set, None, None)
    # Element notes uses Python identifier notes
    __notes = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'notes'), 'notes', '__AbsentNamespace0_StringsSymbolDefinition_notes', False, pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 101, 6), )
    notes = property(__notes.value, __notes.set, None, None)
    # Attribute id uses Python identifier id (required string)
    __id = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'id'), 'id', '__AbsentNamespace0_StringsSymbolDefinition_id', pyxb.binding.datatypes.string, required=True)
    __id._DeclarationLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 103, 4)
    __id._UseLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 103, 4)
    id = property(__id.value, __id.set, None, None)
    _ElementMap.update({
        __label.name() : __label,
        __description.name() : __description,
        __notes.name() : __notes
    })
    _AttributeMap.update({
        __id.name() : __id
    })
# Register the binding so the namespace can resolve the type by name.
Namespace.addCategoryObject('typeBinding', u'StringsSymbolDefinition', StringsSymbolDefinition)
# Complex type PointDefinition with content type ELEMENT_ONLY
class PointDefinition (pyxb.binding.basis.complexTypeDefinition):
    """Complex type PointDefinition with content type ELEMENT_ONLY.

    PyXB-generated binding for a data-point definition inside a block:
    repeating ``symbol`` children plus the point's register metadata
    carried as attributes (``id`` required; ``len``, ``offset``, ``type``,
    ``sf``, ``units``, ``access``, ``mandatory`` optional). ``type`` and
    ``access`` use the schema-defined enumerations ``PointTypeDefinition``
    and ``PointAccessDefinition`` bound elsewhere in this module.

    NOTE(review): auto-generated by PyXB from ``smdx.xsd``; regenerate
    instead of hand-editing.
    """
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'PointDefinition')
    _XSDLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 48, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element symbol uses Python identifier symbol (plural: third positional
    # argument True marks it as a repeating element)
    __symbol = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'symbol'), 'symbol', '__AbsentNamespace0_PointDefinition_symbol', True, pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 50, 6), )
    symbol = property(__symbol.value, __symbol.set, None, None)
    # Attribute id uses Python identifier id (required string)
    __id = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'id'), 'id', '__AbsentNamespace0_PointDefinition_id', pyxb.binding.datatypes.string, required=True)
    __id._DeclarationLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 52, 4)
    __id._UseLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 52, 4)
    id = property(__id.value, __id.set, None, None)
    # Attribute len uses Python identifier len (integer; shadows the builtin
    # only within instances of this binding, as PyXB mirrors the schema name)
    __len = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'len'), 'len', '__AbsentNamespace0_PointDefinition_len', pyxb.binding.datatypes.integer)
    __len._DeclarationLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 53, 4)
    __len._UseLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 53, 4)
    len = property(__len.value, __len.set, None, None)
    # Attribute offset uses Python identifier offset (integer)
    __offset = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'offset'), 'offset', '__AbsentNamespace0_PointDefinition_offset', pyxb.binding.datatypes.integer)
    __offset._DeclarationLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 54, 4)
    __offset._UseLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 54, 4)
    offset = property(__offset.value, __offset.set, None, None)
    # Attribute type uses Python identifier type (schema enumeration)
    __type = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'type'), 'type', '__AbsentNamespace0_PointDefinition_type', PointTypeDefinition)
    __type._DeclarationLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 55, 4)
    __type._UseLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 55, 4)
    type = property(__type.value, __type.set, None, None)
    # Attribute sf uses Python identifier sf (scale-factor reference, string)
    __sf = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'sf'), 'sf', '__AbsentNamespace0_PointDefinition_sf', pyxb.binding.datatypes.string)
    __sf._DeclarationLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 56, 4)
    __sf._UseLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 56, 4)
    sf = property(__sf.value, __sf.set, None, None)
    # Attribute units uses Python identifier units (string)
    __units = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'units'), 'units', '__AbsentNamespace0_PointDefinition_units', pyxb.binding.datatypes.string)
    __units._DeclarationLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 57, 4)
    __units._UseLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 57, 4)
    units = property(__units.value, __units.set, None, None)
    # Attribute access uses Python identifier access (schema enumeration)
    __access = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'access'), 'access', '__AbsentNamespace0_PointDefinition_access', PointAccessDefinition)
    __access._DeclarationLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 58, 4)
    __access._UseLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 58, 4)
    access = property(__access.value, __access.set, None, None)
    # Attribute mandatory uses Python identifier mandatory (boolean)
    __mandatory = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'mandatory'), 'mandatory', '__AbsentNamespace0_PointDefinition_mandatory', pyxb.binding.datatypes.boolean)
    __mandatory._DeclarationLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 59, 4)
    __mandatory._UseLocation = pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 59, 4)
    mandatory = property(__mandatory.value, __mandatory.set, None, None)
    _ElementMap.update({
        __symbol.name() : __symbol
    })
    _AttributeMap.update({
        __id.name() : __id,
        __len.name() : __len,
        __offset.name() : __offset,
        __type.name() : __type,
        __sf.name() : __sf,
        __units.name() : __units,
        __access.name() : __access,
        __mandatory.name() : __mandatory
    })
# Register the binding so the namespace can resolve the type by name.
Namespace.addCategoryObject('typeBinding', u'PointDefinition', PointDefinition)
# Root element binding: <sunSpecModels> with the anonymous complex type CTD_ANON.
sunSpecModels = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, u'sunSpecModels'), CTD_ANON, location=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 16, 2))
Namespace.addCategoryObject('elementBinding', sunSpecModels.name().localName(), sunSpecModels)

# Attach the <model> and <strings> child element bindings to the root's
# anonymous type (deferred until after the referenced types exist).
CTD_ANON._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'model'), ModelDefinition, scope=CTD_ANON, location=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 27, 6)))
CTD_ANON._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'strings'), StringsDefinition, scope=CTD_ANON, location=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 28, 6)))
def _BuildAutomaton ():
    """Build the content-model automaton for CTD_ANON (the <sunSpecModels> body).

    PyXB-generated finite-automaton-with-counters (FAC) construction:
    counters track the occurrence bounds of the outer repeating sequence
    (cc_0, unbounded), the optional <model> element (cc_1, 0..1) and the
    repeating <strings> element (cc_2, unbounded). The transition lists
    are order-sensitive generator output — do not reorder by hand.
    """
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton
    del _BuildAutomaton
    import pyxb.utils.fac as fac
    counters = set()
    # cc_0: outer sequence, minOccurs=0 / maxOccurs=unbounded
    cc_0 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 19, 8))
    counters.add(cc_0)
    # cc_1: <model>, 0..1
    cc_1 = fac.CounterCondition(min=0L, max=1L, metadata=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 27, 6))
    counters.add(cc_1)
    # cc_2: <strings>, 0..unbounded
    cc_2 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 28, 6))
    counters.add(cc_2)
    states = []
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    final_update.add(fac.UpdateInstruction(cc_1, False))
    # st_0 accepts <model>
    symbol = pyxb.binding.content.ElementUse(CTD_ANON._UseForTag(pyxb.namespace.ExpandedName(None, u'model')), pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 27, 6))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    final_update.add(fac.UpdateInstruction(cc_2, False))
    # st_1 accepts <strings>
    symbol = pyxb.binding.content.ElementUse(CTD_ANON._UseForTag(pyxb.namespace.ExpandedName(None, u'strings')), pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 28, 6))
    st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True),
        fac.UpdateInstruction(cc_1, False) ]))
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_0, True),
        fac.UpdateInstruction(cc_1, False) ]))
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_1, False) ]))
    st_0._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True),
        fac.UpdateInstruction(cc_2, False) ]))
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_0, True),
        fac.UpdateInstruction(cc_2, False) ]))
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_2, True) ]))
    st_1._set_transitionSet(transitions)
    # nullable=True: an empty <sunSpecModels> body is schema-valid.
    return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON._Automaton = _BuildAutomaton()
# Attach the <block> child element binding to ModelDefinition.
ModelDefinition._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'block'), BlockDefinition, scope=ModelDefinition, location=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 34, 6)))
def _BuildAutomaton_ ():
    """Build the content-model automaton for ModelDefinition.

    PyXB-generated FAC construction: a single state accepting <block>
    with occurrence bounds 1..2 tracked by cc_0.
    """
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_
    del _BuildAutomaton_
    import pyxb.utils.fac as fac
    counters = set()
    # cc_0: <block>, minOccurs=1 / maxOccurs=2
    cc_0 = fac.CounterCondition(min=1, max=2L, metadata=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 34, 6))
    counters.add(cc_0)
    states = []
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.ElementUse(ModelDefinition._UseForTag(pyxb.namespace.ExpandedName(None, u'block')), pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 34, 6))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    st_0._set_transitionSet(transitions)
    # nullable=False: at least one <block> is required.
    return fac.Automaton(states, counters, False, containing_state=None)
ModelDefinition._Automaton = _BuildAutomaton_()
# Attach the <point> child element binding to BlockDefinition.
BlockDefinition._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'point'), PointDefinition, scope=BlockDefinition, location=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 42, 6)))
def _BuildAutomaton_2 ():
    """Build the content-model automaton for BlockDefinition.

    PyXB-generated FAC construction: a single state accepting <point>
    with occurrence bounds 0..unbounded tracked by cc_0.
    """
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_2
    del _BuildAutomaton_2
    import pyxb.utils.fac as fac
    counters = set()
    # cc_0: <point>, minOccurs=0 / maxOccurs=unbounded
    cc_0 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 42, 6))
    counters.add(cc_0)
    states = []
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.ElementUse(BlockDefinition._UseForTag(pyxb.namespace.ExpandedName(None, u'point')), pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 42, 6))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    st_0._set_transitionSet(transitions)
    # nullable=True: an empty block is schema-valid.
    return fac.Automaton(states, counters, True, containing_state=None)
BlockDefinition._Automaton = _BuildAutomaton_2()
# Attach the <model> and <point> child element bindings to StringsDefinition.
StringsDefinition._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'model'), StringsModelDefinition, scope=StringsDefinition, location=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 72, 6)))
StringsDefinition._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'point'), StringsPointDefinition, scope=StringsDefinition, location=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 73, 6)))
def _BuildAutomaton_3 ():
    """Build the content-model automaton for StringsDefinition.

    PyXB-generated FAC construction: an optional <model> (cc_0, 0..1)
    followed by repeating <point> elements (cc_1, 0..unbounded). The
    transition lists are order-sensitive generator output.
    """
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_3
    del _BuildAutomaton_3
    import pyxb.utils.fac as fac
    counters = set()
    # cc_0: <model>, 0..1
    cc_0 = fac.CounterCondition(min=0L, max=1L, metadata=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 72, 6))
    counters.add(cc_0)
    # cc_1: <point>, 0..unbounded
    cc_1 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 73, 6))
    counters.add(cc_1)
    states = []
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    # st_0 accepts <model>
    symbol = pyxb.binding.content.ElementUse(StringsDefinition._UseForTag(pyxb.namespace.ExpandedName(None, u'model')), pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 72, 6))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    # st_1 accepts <point>
    symbol = pyxb.binding.content.ElementUse(StringsDefinition._UseForTag(pyxb.namespace.ExpandedName(None, u'point')), pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 73, 6))
    st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_0, False) ]))
    st_0._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_1, True) ]))
    st_1._set_transitionSet(transitions)
    # nullable=True: an empty <strings> body is schema-valid.
    return fac.Automaton(states, counters, True, containing_state=None)
StringsDefinition._Automaton = _BuildAutomaton_3()
# Attach the <label>, <description> and <notes> child element bindings
# (all plain xs:string) to StringsModelDefinition.
StringsModelDefinition._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'label'), pyxb.binding.datatypes.string, scope=StringsModelDefinition, location=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 81, 6)))
StringsModelDefinition._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'description'), pyxb.binding.datatypes.string, scope=StringsModelDefinition, location=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 82, 6)))
StringsModelDefinition._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'notes'), pyxb.binding.datatypes.string, scope=StringsModelDefinition, location=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 83, 6)))
def _BuildAutomaton_4 ():
    """Build the content-model automaton for StringsModelDefinition.

    PyXB-generated FAC construction: three optional elements in sequence —
    <label> (cc_0), <description> (cc_1), <notes> (cc_2), each 0..1. The
    transition lists are order-sensitive generator output.
    """
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_4
    del _BuildAutomaton_4
    import pyxb.utils.fac as fac
    counters = set()
    cc_0 = fac.CounterCondition(min=0L, max=1L, metadata=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 81, 6))
    counters.add(cc_0)
    cc_1 = fac.CounterCondition(min=0L, max=1L, metadata=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 82, 6))
    counters.add(cc_1)
    cc_2 = fac.CounterCondition(min=0L, max=1L, metadata=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 83, 6))
    counters.add(cc_2)
    states = []
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    # st_0 accepts <label>
    symbol = pyxb.binding.content.ElementUse(StringsModelDefinition._UseForTag(pyxb.namespace.ExpandedName(None, u'label')), pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 81, 6))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    # st_1 accepts <description>
    symbol = pyxb.binding.content.ElementUse(StringsModelDefinition._UseForTag(pyxb.namespace.ExpandedName(None, u'description')), pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 82, 6))
    st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_2, False))
    # st_2 accepts <notes>
    symbol = pyxb.binding.content.ElementUse(StringsModelDefinition._UseForTag(pyxb.namespace.ExpandedName(None, u'notes')), pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 83, 6))
    st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_2)
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_0, False) ]))
    st_0._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_1, False) ]))
    st_1._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_2, True) ]))
    st_2._set_transitionSet(transitions)
    # nullable=True: all three elements are optional.
    return fac.Automaton(states, counters, True, containing_state=None)
StringsModelDefinition._Automaton = _BuildAutomaton_4()
# Attach the <label>, <description>, <notes> (xs:string) and <symbol>
# (StringsSymbolDefinition) child element bindings to StringsPointDefinition.
StringsPointDefinition._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'label'), pyxb.binding.datatypes.string, scope=StringsPointDefinition, location=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 89, 6)))
StringsPointDefinition._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'description'), pyxb.binding.datatypes.string, scope=StringsPointDefinition, location=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 90, 6)))
StringsPointDefinition._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'notes'), pyxb.binding.datatypes.string, scope=StringsPointDefinition, location=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 91, 6)))
StringsPointDefinition._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'symbol'), StringsSymbolDefinition, scope=StringsPointDefinition, location=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 92, 6)))
def _BuildAutomaton_5 ():
    """Build the content-model automaton for StringsPointDefinition.

    PyXB-generated FAC construction: optional <label> (cc_0), <description>
    (cc_1) and <notes> (cc_2), each 0..1, followed by repeating <symbol>
    elements (cc_3, 0..unbounded). The transition lists are order-sensitive
    generator output.
    """
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_5
    del _BuildAutomaton_5
    import pyxb.utils.fac as fac
    counters = set()
    cc_0 = fac.CounterCondition(min=0L, max=1L, metadata=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 89, 6))
    counters.add(cc_0)
    cc_1 = fac.CounterCondition(min=0L, max=1L, metadata=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 90, 6))
    counters.add(cc_1)
    cc_2 = fac.CounterCondition(min=0L, max=1L, metadata=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 91, 6))
    counters.add(cc_2)
    cc_3 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 92, 6))
    counters.add(cc_3)
    states = []
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    # st_0 accepts <label>
    symbol = pyxb.binding.content.ElementUse(StringsPointDefinition._UseForTag(pyxb.namespace.ExpandedName(None, u'label')), pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 89, 6))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    # st_1 accepts <description>
    symbol = pyxb.binding.content.ElementUse(StringsPointDefinition._UseForTag(pyxb.namespace.ExpandedName(None, u'description')), pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 90, 6))
    st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_2, False))
    # st_2 accepts <notes>
    symbol = pyxb.binding.content.ElementUse(StringsPointDefinition._UseForTag(pyxb.namespace.ExpandedName(None, u'notes')), pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 91, 6))
    st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_2)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_3, False))
    # st_3 accepts <symbol>
    symbol = pyxb.binding.content.ElementUse(StringsPointDefinition._UseForTag(pyxb.namespace.ExpandedName(None, u'symbol')), pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 92, 6))
    st_3 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_3)
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_0, False) ]))
    st_0._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_1, False) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_1, False) ]))
    st_1._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_2, True) ]))
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_2, False) ]))
    st_2._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_3, [
        fac.UpdateInstruction(cc_3, True) ]))
    st_3._set_transitionSet(transitions)
    # nullable=True: all children are optional.
    return fac.Automaton(states, counters, True, containing_state=None)
StringsPointDefinition._Automaton = _BuildAutomaton_5()
# Attach the <label>, <description> and <notes> child element bindings
# (all plain xs:string) to StringsSymbolDefinition.
StringsSymbolDefinition._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'label'), pyxb.binding.datatypes.string, scope=StringsSymbolDefinition, location=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 99, 6)))
StringsSymbolDefinition._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'description'), pyxb.binding.datatypes.string, scope=StringsSymbolDefinition, location=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 100, 6)))
StringsSymbolDefinition._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'notes'), pyxb.binding.datatypes.string, scope=StringsSymbolDefinition, location=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 101, 6)))
def _BuildAutomaton_6 ():
    """Build the FAC content-model automaton for StringsSymbolDefinition.

    Auto-generated PyXB binding code (Python 2): each child element
    (label, description, notes) carries a min=0/max=1 counter condition,
    and the transition sets enforce the declared element order.
    """
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_6
    del _BuildAutomaton_6
    import pyxb.utils.fac as fac

    # One occurrence counter (0..1) per optional child element.
    counters = set()
    cc_0 = fac.CounterCondition(min=0L, max=1L, metadata=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 99, 6))
    counters.add(cc_0)
    cc_1 = fac.CounterCondition(min=0L, max=1L, metadata=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 100, 6))
    counters.add(cc_1)
    cc_2 = fac.CounterCondition(min=0L, max=1L, metadata=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 101, 6))
    counters.add(cc_2)

    # One state per element use; every state is both a legal initial and a
    # legal final state because all three elements are optional.
    states = []
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.ElementUse(StringsSymbolDefinition._UseForTag(pyxb.namespace.ExpandedName(None, u'label')), pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 99, 6))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_1, False))
    symbol = pyxb.binding.content.ElementUse(StringsSymbolDefinition._UseForTag(pyxb.namespace.ExpandedName(None, u'description')), pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 100, 6))
    st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_1)
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_2, False))
    symbol = pyxb.binding.content.ElementUse(StringsSymbolDefinition._UseForTag(pyxb.namespace.ExpandedName(None, u'notes')), pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 101, 6))
    st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_2)

    # Transitions: a True update lets a state re-enter under its own counter;
    # a False update closes the counter and advances to a later element.
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_0, False) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_0, False) ]))
    st_0._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_1, [
        fac.UpdateInstruction(cc_1, True) ]))
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_1, False) ]))
    st_1._set_transitionSet(transitions)
    transitions = []
    transitions.append(fac.Transition(st_2, [
        fac.UpdateInstruction(cc_2, True) ]))
    st_2._set_transitionSet(transitions)
    return fac.Automaton(states, counters, True, containing_state=None)
StringsSymbolDefinition._Automaton = _BuildAutomaton_6()
# Declare the 'symbol' child element of PointDefinition.
# Auto-generated PyXB binding code — do not edit by hand.
PointDefinition._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, u'symbol'), SymbolDefinition, scope=PointDefinition, location=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 50, 6)))
def _BuildAutomaton_7 ():
    """Build the FAC content-model automaton for PointDefinition.

    Auto-generated PyXB binding code (Python 2): a single 'symbol' element
    with min=0 and unbounded max, modeled as one self-looping state.
    """
    # Remove this helper function from the namespace after it is invoked
    global _BuildAutomaton_7
    del _BuildAutomaton_7
    import pyxb.utils.fac as fac
    counters = set()
    # Occurrence counter for 'symbol': 0..unbounded (max=None).
    cc_0 = fac.CounterCondition(min=0L, max=None, metadata=pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 50, 6))
    counters.add(cc_0)
    states = []
    final_update = set()
    final_update.add(fac.UpdateInstruction(cc_0, False))
    symbol = pyxb.binding.content.ElementUse(PointDefinition._UseForTag(pyxb.namespace.ExpandedName(None, u'symbol')), pyxb.utils.utility.Location('/Users/brett/dev/gdwd/ped/plantextract/smdx.xsd', 50, 6))
    st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
    states.append(st_0)
    # The lone state loops on itself to accept repeated 'symbol' elements.
    transitions = []
    transitions.append(fac.Transition(st_0, [
        fac.UpdateInstruction(cc_0, True) ]))
    st_0._set_transitionSet(transitions)
    return fac.Automaton(states, counters, True, containing_state=None)
PointDefinition._Automaton = _BuildAutomaton_7()
| |
#: -*- encoding: utf-8 -*-
from kivy.lang import Builder
from kivy.metrics import sp
from kivy.uix.widget import Widget
from kivy.uix.boxlayout import BoxLayout
from kivy.properties import (NumericProperty, AliasProperty, OptionProperty,
ReferenceListProperty, BoundedNumericProperty, ListProperty)
Builder.load_string('''
<RangeSlider>:
canvas:
Color:
rgb: 1, 1, 1
BorderImage:
border: (0, 18, 0, 18) if self.orientation == 'horizontal' else (18, 0, 18, 0)
pos: (self.x + self.padding, self.center_y - sp(18)) if self.orientation == 'horizontal' else (self.center_x - sp(18), self.y + self.padding)
size: (self.width - self.padding * 2, sp(36)) if self.orientation == 'horizontal' else (sp(36), self.height - self.padding * 2)
source: 'atlas://data/images/defaulttheme/slider{}_background{}'.format(self.orientation[0], '_disabled' if self.disabled else '')
Color:
rgba: self.connector_color
BorderImage:
pos: (self.value1_pos[0], self.center_y - sp(18)) if self.orientation == 'horizontal' else (self.center_x - sp(18), self.value1_pos[1])
size: (self.value2_pos[0] - self.value1_pos[0], sp(36)) if self.orientation == 'horizontal' else (sp(36), self.value2_pos[1] - self.value1_pos[1])
source: 'atlas://data/images/defaulttheme/slider{}_background{}'.format(self.orientation[0], '_disabled' if self.disabled else '')
Color:
rgb: 1, 1, 1
Rectangle:
pos: (self.value1_pos[0] - sp(16), self.center_y - sp(17)) if self.orientation == 'horizontal' else (self.center_x - sp(16), self.value1_pos[1] - sp(16))
size: (sp(32), sp(32))
source: 'atlas://data/images/defaulttheme/slider_cursor{}'.format('_disabled' if self.disabled else '')
Rectangle:
pos: (self.value2_pos[0] - sp(16), self.center_y - sp(17)) if self.orientation == 'horizontal' else (self.center_x - sp(16), self.value2_pos[1] - sp(16))
size: (sp(32), sp(32))
source: 'atlas://data/images/defaulttheme/slider_cursor{}'.format('_disabled' if self.disabled else '')
''')
class RangeSlider(Widget):
    """Class for creating a RangeSlider widget.

    A slider with two cursors (:attr:`value1` and :attr:`value2`) and a
    connector bar drawn between them. Check module documentation for more
    details.
    """

    connector_color = ListProperty([.2, .7, 0.9, 1])
    '''Connector bar color, in the format (r, g, b, a).
    For disabling this bar use a = .0.'''

    def _get_value(self):
        # Expose both cursor values as a single two-element list.
        return [self.value1, self.value2]

    def _set_value(self, value):
        self.value1, self.value2 = value

    value = AliasProperty(_get_value, _set_value, bind=('value1', 'value2'))
    '''Current values of both sliders, as [value1, value2].

    :attr:`value` is an :class:`~kivy.properties.AliasProperty` and defaults
    to [0, 100].'''

    value1 = NumericProperty(0.)
    '''Current value used for the first slider.

    :attr:`value1` is a :class:`~kivy.properties.NumericProperty` and defaults
    to 0.'''

    value2 = NumericProperty(100.)
    '''Current value used for the second slider.

    :attr:`value2` is a :class:`~kivy.properties.NumericProperty` and defaults
    to 100.'''

    min = NumericProperty(0.)
    '''Minimum value allowed for :attr:`value`.

    :attr:`min` is a :class:`~kivy.properties.NumericProperty` and defaults to
    0.'''

    max = NumericProperty(100.)
    '''Maximum value allowed for :attr:`value`.

    :attr:`max` is a :class:`~kivy.properties.NumericProperty` and defaults to
    100.'''

    padding = NumericProperty(sp(16))
    '''Padding of the slider. The padding is used for graphical representation
    and interaction. It prevents the cursor from going out of the bounds of the
    slider bounding box.

    By default, padding is sp(16). The range of the slider is reduced from
    padding \*2 on the screen. It allows drawing the default cursor of sp(32)
    width without having the cursor go out of the widget.

    :attr:`padding` is a :class:`~kivy.properties.NumericProperty` and defaults
    to sp(16).'''

    orientation = OptionProperty('horizontal', options=(
        'vertical', 'horizontal'))
    '''Orientation of the slider.

    :attr:`orientation` is an :class:`~kivy.properties.OptionProperty` and
    defaults to 'horizontal'. Can take a value of 'vertical' or 'horizontal'.
    '''

    range = ReferenceListProperty(min, max)
    '''Range of the slider in the format (minimum value, maximum value)::

        >>> slider = Slider(min=10, max=80)
        >>> slider.range
        [10, 80]
        >>> slider.range = (20, 100)
        >>> slider.min
        20
        >>> slider.max
        100

    :attr:`range` is a :class:`~kivy.properties.ReferenceListProperty` of
    (:attr:`min`, :attr:`max`) properties.
    '''

    step = BoundedNumericProperty(0, min=0)
    '''Step size of the slider.

    .. versionadded:: 1.4.0

    Determines the size of each interval or step the slider takes between
    min and max. If the value range can't be evenly divisible by step the
    last step will be capped by slider.max. A step of 0 gives continuous
    (unstepped) movement.

    :attr:`step` is a :class:`~kivy.properties.BoundedNumericProperty` and
    defaults to 0.'''

    # The following two methods constrain the slider's value
    # to range(min,max). Otherwise it may happen that self.value < self.min
    # at init.
    def on_min(self, *largs):
        self.value1 = min(self.max, max(self.min, self.value1))
        self.value2 = min(self.max, max(self.min, self.value2))

    def on_max(self, *largs):
        self.value1 = min(self.max, max(self.min, self.value1))
        self.value2 = min(self.max, max(self.min, self.value2))

    def get_norm_value1(self):
        """Return value1 mapped to the 0-1 range between min and max."""
        vmin = self.min
        d = self.max - vmin
        if d == 0:
            # Degenerate range (min == max): avoid division by zero.
            return 0
        return (self.value1 - vmin) / float(d)

    def get_norm_value2(self):
        """Return value2 mapped to the 0-1 range between min and max."""
        vmin = self.min
        d = self.max - vmin
        if d == 0:
            # Degenerate range (min == max): avoid division by zero.
            return 0
        return (self.value2 - vmin) / float(d)

    def set_norm_value1(self, value):
        """Set value1 from a normalized (0-1) value, snapping to step."""
        vmin = self.min
        step = self.step
        val = value * (self.max - vmin) + vmin
        if step == 0:
            self.value1 = val
        else:
            # Round to the nearest step multiple, never exceeding max.
            self.value1 = min(round((val - vmin) / step) * step + vmin,
                              self.max)

    def set_norm_value2(self, value):
        """Set value2 from a normalized (0-1) value, snapping to step."""
        vmin = self.min
        step = self.step
        val = value * (self.max - vmin) + vmin
        if step == 0:
            self.value2 = val
        else:
            # Round to the nearest step multiple, never exceeding max.
            self.value2 = min(round((val - vmin) / step) * step + vmin,
                              self.max)

    value1_normalized = AliasProperty(get_norm_value1, set_norm_value1,
                                      bind=('value1', 'min', 'max', 'step'))
    value2_normalized = AliasProperty(get_norm_value2, set_norm_value2,
                                      bind=('value2', 'min', 'max', 'step'))
    '''Normalized value inside the :attr:`range` (min/max) to 0-1 range::

        >>> slider = Slider(value=50, min=0, max=100)
        >>> slider.value
        50
        >>> slider.value_normalized
        0.5
        >>> slider.value = 0
        >>> slider.value_normalized
        0
        >>> slider.value = 100
        >>> slider.value_normalized
        1

    You can also use it for setting the real value without knowing the minimum
    and maximum::

        >>> slider = Slider(min=0, max=200)
        >>> slider.value_normalized = .5
        >>> slider.value
        100
        >>> slider.value_normalized = 1.
        >>> slider.value
        200

    :attr:`value_normalized` is an :class:`~kivy.properties.AliasProperty`.
    '''

    def get_value1_pos(self):
        """Return the window position of the first cursor on the track."""
        padding = self.padding
        x = self.x
        y = self.y
        nval = self.value1_normalized
        if self.orientation == 'horizontal':
            return (x + padding + nval * (self.width - 2 * padding), y)
        else:
            return (x, y + padding + nval * (self.height - 2 * padding))

    def get_value2_pos(self):
        """Return the window position of the second cursor on the track."""
        padding = self.padding
        x = self.x
        y = self.y
        nval = self.value2_normalized
        if self.orientation == 'horizontal':
            return (x + padding + nval * (self.width - 2 * padding), y)
        else:
            return (x, y + padding + nval * (self.height - 2 * padding))

    def set_value1_pos(self, pos):
        """Set value1 from a window position, clamped to the padded track."""
        padding = self.padding
        x = min(self.right - padding, max(pos[0], self.x + padding))
        y = min(self.top - padding, max(pos[1], self.y + padding))
        if self.orientation == 'horizontal':
            if self.width == 0:
                # Widget not laid out yet; avoid division by zero.
                self.value1_normalized = 0
            else:
                self.value1_normalized = (x - self.x - padding
                                          ) / float(self.width - 2 * padding)
        else:
            if self.height == 0:
                self.value1_normalized = 0
            else:
                self.value1_normalized = (y - self.y - padding
                                          ) / float(self.height - 2 * padding)

    def set_value2_pos(self, pos):
        """Set value2 from a window position, clamped to the padded track."""
        padding = self.padding
        x = min(self.right - padding, max(pos[0], self.x + padding))
        y = min(self.top - padding, max(pos[1], self.y + padding))
        if self.orientation == 'horizontal':
            if self.width == 0:
                # Widget not laid out yet; avoid division by zero.
                self.value2_normalized = 0
            else:
                self.value2_normalized = (x - self.x - padding
                                          ) / float(self.width - 2 * padding)
        else:
            if self.height == 0:
                self.value2_normalized = 0
            else:
                self.value2_normalized = (y - self.y - padding
                                          ) / float(self.height - 2 * padding)

    value1_pos = AliasProperty(get_value1_pos, set_value1_pos,
                               bind=('x', 'y', 'width', 'height', 'min',
                                     'max', 'value1_normalized', 'orientation'))
    value2_pos = AliasProperty(get_value2_pos, set_value2_pos,
                               bind=('x', 'y', 'width', 'height', 'min',
                                     'max', 'value2_normalized', 'orientation'))
    '''Position of the internal cursor, based on the normalized value.

    :attr:`value_pos` is an :class:`~kivy.properties.AliasProperty`.
    '''

    def _touch_normalized_value(self, touch):
        """Return the normalized (0-1) track value under *touch*."""
        pos = touch.pos
        padding = self.padding
        # Clamp the touch to the padded track area.
        x = min(self.right - padding, max(pos[0], self.x + padding))
        y = min(self.top - padding, max(pos[1], self.y + padding))
        if self.orientation == 'horizontal':
            value = (x - self.x - padding
                     ) / float(self.width - 2 * padding)
        else:
            value = (y - self.y - padding
                     ) / float(self.height - 2 * padding)
        return value

    def on_touch_down(self, touch):
        if self.disabled or not self.collide_point(*touch.pos):
            return
        touch.grab(self)
        # Move whichever cursor is closer to the touch, and remember it for
        # the rest of this gesture via touch.ud.
        t_value = self._touch_normalized_value(touch)
        if abs(self.value1_normalized - t_value) < abs(self.value2_normalized - t_value):
            self.value1_pos = touch.pos
            touch.ud['cursorid'] = 1
        else:
            self.value2_pos = touch.pos
            touch.ud['cursorid'] = 2
        return True

    def on_touch_move(self, touch):
        if touch.grab_current == self:
            if 'cursorid' in touch.ud:
                if touch.ud['cursorid'] == 1:
                    self.value1_pos = touch.pos
                    # Keep the invariant value1 <= value2: pin the dragged
                    # cursor to the other one instead of crossing it.
                    if self.value1 > self.value2:
                        self.value1_pos = self.value2_pos
                elif touch.ud['cursorid'] == 2:
                    self.value2_pos = touch.pos
                    if self.value2 < self.value1:
                        self.value2_pos = self.value1_pos
            return True

    def on_touch_up(self, touch):
        if touch.grab_current == self:
            touch.ungrab(self)
            return True
# Manual demo: two horizontal and two vertical RangeSliders, with and
# without stepping, plus labels bound to the current values.
if __name__ == '__main__':
    from kivy.app import App

    Builder.load_string('''
<RangeSliderApp>:
    orientation: 'vertical'
    BoxLayout:
        size_hint_y: .3
        height: '48dp'
        Label:
            text: 'Default'
        Label:
            text: '{}'.format(s1.value[0])
        RangeSlider:
            id: s1
            value: 40, 80
        Label:
            text: '{}'.format(s1.value[1])
    BoxLayout:
        size_hint_y: .3
        height: '48dp'
        Label:
            text: 'Stepped'
        Label:
            text: '{}'.format(s2.value[0])
        RangeSlider:
            id: s2
            step: 20
            value: 20, 60
            connector_color: (0, 0, 0, 0)
        Label:
            text: '{}'.format(s2.value[1])
    BoxLayout:
        padding: 10
        Label:
            text: 'Default'
        RangeSlider:
            id: s3
            size_hint_x: None
            width: '48dp'
            orientation: 'vertical'
            value1: 50
            connector_color: (0, 1, 0, 1)
        BoxLayout:
            orientation: 'vertical'
            Label:
                text: '{}'.format(s3.value[1])
            Label:
                text: '{}'.format(s3.value[0])
        Label:
            text: 'Stepped'
        RangeSlider:
            id: s4
            size_hint_x: None
            width: '48dp'
            orientation: 'vertical'
            step: 20
            value2: 60
            connector_color: (1, 0, 0, 1)
        BoxLayout:
            orientation: 'vertical'
            Label:
                text: '{}'.format(s4.value[1])
            Label:
                text: '{}'.format(s4.value[0])
''')

    # Root layout for the demo; all structure comes from the kv rules above.
    class RangeSliderApp(BoxLayout):
        pass

    class SliderApp(App):
        def build(self):
            return RangeSliderApp()

    SliderApp().run()
| |
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import mock
import pandas
import pytest
from google.api_core import exceptions
from google.auth.credentials import AnonymousCredentials
from google.cloud import automl_v1beta1
from google.cloud.automl_v1beta1.proto import data_types_pb2
# Fixture constants shared by all tests below.
PROJECT = "project"
REGION = "region"
LOCATION_PATH = "projects/{}/locations/{}".format(PROJECT, REGION)
class TestTablesClient(object):
def tables_client(
self, client_attrs={}, prediction_client_attrs={}, gcs_client_attrs={}
):
client_mock = mock.Mock(**client_attrs)
prediction_client_mock = mock.Mock(**prediction_client_attrs)
gcs_client_mock = mock.Mock(**gcs_client_attrs)
return automl_v1beta1.TablesClient(
client=client_mock,
prediction_client=prediction_client_mock,
gcs_client=gcs_client_mock,
project=PROJECT,
region=REGION,
)
def test_list_datasets_empty(self):
client = self.tables_client(
{
"list_datasets.return_value": [],
"location_path.return_value": LOCATION_PATH,
},
{},
)
ds = client.list_datasets()
client.auto_ml_client.location_path.assert_called_with(PROJECT, REGION)
client.auto_ml_client.list_datasets.assert_called_with(LOCATION_PATH)
assert ds == []
def test_list_datasets_not_empty(self):
datasets = ["some_dataset"]
client = self.tables_client(
{
"list_datasets.return_value": datasets,
"location_path.return_value": LOCATION_PATH,
},
{},
)
ds = client.list_datasets()
client.auto_ml_client.location_path.assert_called_with(PROJECT, REGION)
client.auto_ml_client.list_datasets.assert_called_with(LOCATION_PATH)
assert len(ds) == 1
assert ds[0] == "some_dataset"
def test_get_dataset_no_value(self):
dataset_actual = "dataset"
client = self.tables_client({}, {})
with pytest.raises(ValueError):
dataset = client.get_dataset()
client.auto_ml_client.get_dataset.assert_not_called()
def test_get_dataset_name(self):
dataset_actual = "dataset"
client = self.tables_client({"get_dataset.return_value": dataset_actual}, {})
dataset = client.get_dataset(dataset_name="my_dataset")
client.auto_ml_client.get_dataset.assert_called_with("my_dataset")
assert dataset == dataset_actual
def test_get_no_dataset(self):
client = self.tables_client(
{"get_dataset.side_effect": exceptions.NotFound("err")}, {}
)
with pytest.raises(exceptions.NotFound):
client.get_dataset(dataset_name="my_dataset")
client.auto_ml_client.get_dataset.assert_called_with("my_dataset")
def test_get_dataset_from_empty_list(self):
client = self.tables_client({"list_datasets.return_value": []}, {})
with pytest.raises(exceptions.NotFound):
client.get_dataset(dataset_display_name="my_dataset")
def test_get_dataset_from_list_not_found(self):
client = self.tables_client(
{"list_datasets.return_value": [mock.Mock(display_name="not_it")]}, {}
)
with pytest.raises(exceptions.NotFound):
client.get_dataset(dataset_display_name="my_dataset")
def test_get_dataset_from_list(self):
client = self.tables_client(
{
"list_datasets.return_value": [
mock.Mock(display_name="not_it"),
mock.Mock(display_name="my_dataset"),
]
},
{},
)
dataset = client.get_dataset(dataset_display_name="my_dataset")
assert dataset.display_name == "my_dataset"
def test_get_dataset_from_list_ambiguous(self):
client = self.tables_client(
{
"list_datasets.return_value": [
mock.Mock(display_name="my_dataset"),
mock.Mock(display_name="not_my_dataset"),
mock.Mock(display_name="my_dataset"),
]
},
{},
)
with pytest.raises(ValueError):
client.get_dataset(dataset_display_name="my_dataset")
def test_create_dataset(self):
client = self.tables_client(
{
"location_path.return_value": LOCATION_PATH,
"create_dataset.return_value": mock.Mock(display_name="name"),
},
{},
)
metadata = {"metadata": "values"}
dataset = client.create_dataset("name", metadata=metadata)
client.auto_ml_client.location_path.assert_called_with(PROJECT, REGION)
client.auto_ml_client.create_dataset.assert_called_with(
LOCATION_PATH, {"display_name": "name", "tables_dataset_metadata": metadata}
)
assert dataset.display_name == "name"
def test_delete_dataset(self):
dataset = mock.Mock()
dataset.configure_mock(name="name")
client = self.tables_client({"delete_dataset.return_value": None}, {})
client.delete_dataset(dataset=dataset)
client.auto_ml_client.delete_dataset.assert_called_with("name")
def test_delete_dataset_not_found(self):
client = self.tables_client({"list_datasets.return_value": []}, {})
client.delete_dataset(dataset_display_name="not_found")
client.auto_ml_client.delete_dataset.assert_not_called()
def test_delete_dataset_name(self):
client = self.tables_client({"delete_dataset.return_value": None}, {})
client.delete_dataset(dataset_name="name")
client.auto_ml_client.delete_dataset.assert_called_with("name")
def test_export_not_found(self):
client = self.tables_client({"list_datasets.return_value": []}, {})
with pytest.raises(exceptions.NotFound):
client.export_data(dataset_display_name="name", gcs_input_uris="uri")
client.auto_ml_client.export_data.assert_not_called()
def test_export_gcs_uri(self):
client = self.tables_client({"export_data.return_value": None}, {})
client.export_data(dataset_name="name", gcs_output_uri_prefix="uri")
client.auto_ml_client.export_data.assert_called_with(
"name", {"gcs_destination": {"output_uri_prefix": "uri"}}
)
def test_export_bq_uri(self):
client = self.tables_client({"export_data.return_value": None}, {})
client.export_data(dataset_name="name", bigquery_output_uri="uri")
client.auto_ml_client.export_data.assert_called_with(
"name", {"bigquery_destination": {"output_uri": "uri"}}
)
def test_import_not_found(self):
client = self.tables_client({"list_datasets.return_value": []}, {})
with pytest.raises(exceptions.NotFound):
client.import_data(dataset_display_name="name", gcs_input_uris="uri")
client.auto_ml_client.import_data.assert_not_called()
    def test_import_pandas_dataframe(self):
        """A pandas DataFrame is staged to GCS, then imported from the staged URI."""
        client = self.tables_client(
            gcs_client_attrs={
                "bucket_name": "my_bucket",
                "upload_pandas_dataframe.return_value": "uri",
            }
        )
        dataframe = pandas.DataFrame({})
        client.import_data(
            project=PROJECT,
            region=REGION,
            dataset_name="name",
            pandas_dataframe=dataframe,
        )
        # The dataframe goes through the GCS client first, and the resulting
        # URI is what reaches the AutoML import call.
        client.gcs_client.ensure_bucket_exists.assert_called_with(PROJECT, REGION)
        client.gcs_client.upload_pandas_dataframe.assert_called_with(dataframe)
        client.auto_ml_client.import_data.assert_called_with(
            "name", {"gcs_source": {"input_uris": ["uri"]}}
        )
    def test_import_pandas_dataframe_init_gcs(self):
        """A GCS client is lazily constructed when none was injected."""
        client = automl_v1beta1.TablesClient(
            client=mock.Mock(),
            prediction_client=mock.Mock(),
            project=PROJECT,
            region=REGION,
            credentials=AnonymousCredentials(),
        )
        dataframe = pandas.DataFrame({})
        # Patch the GcsClient constructor used inside tables_client so the
        # lazily created instance is a mock we can inspect.
        patch = mock.patch(
            "google.cloud.automl_v1beta1.tables.tables_client.gcs_client.GcsClient",
            bucket_name="my_bucket",
        )
        with patch as MockGcsClient:
            mockInstance = MockGcsClient.return_value
            mockInstance.upload_pandas_dataframe.return_value = "uri"
            client.import_data(dataset_name="name", pandas_dataframe=dataframe)
            # The lazily created GCS client is retained on the TablesClient
            # and used for staging the dataframe.
            assert client.gcs_client is mockInstance
            client.gcs_client.ensure_bucket_exists.assert_called_with(PROJECT, REGION)
            client.gcs_client.upload_pandas_dataframe.assert_called_with(dataframe)
            client.auto_ml_client.import_data.assert_called_with(
                "name", {"gcs_source": {"input_uris": ["uri"]}}
            )
def test_import_gcs_uri(self):
client = self.tables_client({"import_data.return_value": None}, {})
client.import_data(dataset_name="name", gcs_input_uris="uri")
client.auto_ml_client.import_data.assert_called_with(
"name", {"gcs_source": {"input_uris": ["uri"]}}
)
def test_import_gcs_uris(self):
client = self.tables_client({"import_data.return_value": None}, {})
client.import_data(dataset_name="name", gcs_input_uris=["uri", "uri"])
client.auto_ml_client.import_data.assert_called_with(
"name", {"gcs_source": {"input_uris": ["uri", "uri"]}}
)
def test_import_bq_uri(self):
client = self.tables_client({"import_data.return_value": None}, {})
client.import_data(dataset_name="name", bigquery_input_uri="uri")
client.auto_ml_client.import_data.assert_called_with(
"name", {"bigquery_source": {"input_uri": "uri"}}
)
def test_list_table_specs(self):
client = self.tables_client({"list_table_specs.return_value": None}, {})
client.list_table_specs(dataset_name="name")
client.auto_ml_client.list_table_specs.assert_called_with("name")
def test_list_table_specs_not_found(self):
client = self.tables_client(
{"list_table_specs.side_effect": exceptions.NotFound("not found")}, {}
)
with pytest.raises(exceptions.NotFound):
client.list_table_specs(dataset_name="name")
client.auto_ml_client.list_table_specs.assert_called_with("name")
def test_get_table_spec(self):
client = self.tables_client({}, {})
client.get_table_spec("name")
client.auto_ml_client.get_table_spec.assert_called_with("name")
def test_get_column_spec(self):
client = self.tables_client({}, {})
client.get_column_spec("name")
client.auto_ml_client.get_column_spec.assert_called_with("name")
    def test_list_column_specs(self):
        """Column specs are listed for the dataset's (single) table spec."""
        table_spec_mock = mock.Mock()
        # name is reserved in use of __init__, needs to be passed here
        table_spec_mock.configure_mock(name="table")
        client = self.tables_client(
            {
                "list_table_specs.return_value": [table_spec_mock],
                "list_column_specs.return_value": [],
            },
            {},
        )
        client.list_column_specs(dataset_name="name")
        client.auto_ml_client.list_table_specs.assert_called_with("name")
        client.auto_ml_client.list_column_specs.assert_called_with("table")
    def test_update_column_spec_not_found(self):
        """An unknown column spec resource name aborts the update with NotFound."""
        table_spec_mock = mock.Mock()
        # name is reserved in use of __init__, needs to be passed here
        table_spec_mock.configure_mock(name="table")
        column_spec_mock = mock.Mock()
        data_type_mock = mock.Mock(type_code="type_code")
        column_spec_mock.configure_mock(
            name="column", display_name="column", data_type=data_type_mock
        )
        client = self.tables_client(
            {
                "list_table_specs.return_value": [table_spec_mock],
                "list_column_specs.return_value": [column_spec_mock],
            },
            {},
        )
        # "column2" matches neither the name nor display_name of the only spec.
        with pytest.raises(exceptions.NotFound):
            client.update_column_spec(dataset_name="name", column_spec_name="column2")
        client.auto_ml_client.list_table_specs.assert_called_with("name")
        client.auto_ml_client.list_column_specs.assert_called_with("table")
        client.auto_ml_client.update_column_spec.assert_not_called()
    def test_update_column_spec_display_name_not_found(self):
        """An unknown column display name aborts the update with NotFound."""
        table_spec_mock = mock.Mock()
        # name is reserved in use of __init__, needs to be passed here
        table_spec_mock.configure_mock(name="table")
        column_spec_mock = mock.Mock()
        data_type_mock = mock.Mock(type_code="type_code")
        column_spec_mock.configure_mock(
            name="column", display_name="column", data_type=data_type_mock
        )
        client = self.tables_client(
            {
                "list_table_specs.return_value": [table_spec_mock],
                "list_column_specs.return_value": [column_spec_mock],
            },
            {},
        )
        with pytest.raises(exceptions.NotFound):
            client.update_column_spec(
                dataset_name="name", column_spec_display_name="column2"
            )
        client.auto_ml_client.list_table_specs.assert_called_with("name")
        client.auto_ml_client.list_column_specs.assert_called_with("table")
        client.auto_ml_client.update_column_spec.assert_not_called()
    def test_update_column_spec_name_no_args(self):
        """With no type/nullable args, the existing data type is resent as-is."""
        table_spec_mock = mock.Mock()
        # name is reserved in use of __init__, needs to be passed here
        table_spec_mock.configure_mock(name="table")
        column_spec_mock = mock.Mock()
        data_type_mock = mock.Mock(type_code="type_code")
        column_spec_mock.configure_mock(
            name="column/2", display_name="column", data_type=data_type_mock
        )
        client = self.tables_client(
            {
                "list_table_specs.return_value": [table_spec_mock],
                "list_column_specs.return_value": [column_spec_mock],
            },
            {},
        )
        client.update_column_spec(dataset_name="name", column_spec_name="column/2")
        client.auto_ml_client.list_table_specs.assert_called_with("name")
        client.auto_ml_client.list_column_specs.assert_called_with("table")
        client.auto_ml_client.update_column_spec.assert_called_with(
            {"name": "column/2", "data_type": {"type_code": "type_code"}}
        )
    def test_update_column_spec_no_args(self):
        """Lookup by display name with no args resends the existing data type."""
        table_spec_mock = mock.Mock()
        # name is reserved in use of __init__, needs to be passed here
        table_spec_mock.configure_mock(name="table")
        column_spec_mock = mock.Mock()
        data_type_mock = mock.Mock(type_code="type_code")
        column_spec_mock.configure_mock(
            name="column", display_name="column", data_type=data_type_mock
        )
        client = self.tables_client(
            {
                "list_table_specs.return_value": [table_spec_mock],
                "list_column_specs.return_value": [column_spec_mock],
            },
            {},
        )
        client.update_column_spec(
            dataset_name="name", column_spec_display_name="column"
        )
        client.auto_ml_client.list_table_specs.assert_called_with("name")
        client.auto_ml_client.list_column_specs.assert_called_with("table")
        client.auto_ml_client.update_column_spec.assert_called_with(
            {"name": "column", "data_type": {"type_code": "type_code"}}
        )
    def test_update_column_spec_nullable(self):
        """nullable=True is added to the data type while type_code is kept."""
        table_spec_mock = mock.Mock()
        # name is reserved in use of __init__, needs to be passed here
        table_spec_mock.configure_mock(name="table")
        column_spec_mock = mock.Mock()
        data_type_mock = mock.Mock(type_code="type_code")
        column_spec_mock.configure_mock(
            name="column", display_name="column", data_type=data_type_mock
        )
        client = self.tables_client(
            {
                "list_table_specs.return_value": [table_spec_mock],
                "list_column_specs.return_value": [column_spec_mock],
            },
            {},
        )
        client.update_column_spec(
            dataset_name="name", column_spec_display_name="column", nullable=True
        )
        client.auto_ml_client.list_table_specs.assert_called_with("name")
        client.auto_ml_client.list_column_specs.assert_called_with("table")
        client.auto_ml_client.update_column_spec.assert_called_with(
            {
                "name": "column",
                "data_type": {"type_code": "type_code", "nullable": True},
            }
        )
    def test_update_column_spec_type_code(self):
        """An explicit type_code replaces the column's current type."""
        table_spec_mock = mock.Mock()
        # name is reserved in use of __init__, needs to be passed here
        table_spec_mock.configure_mock(name="table")
        column_spec_mock = mock.Mock()
        data_type_mock = mock.Mock(type_code="type_code")
        column_spec_mock.configure_mock(
            name="column", display_name="column", data_type=data_type_mock
        )
        client = self.tables_client(
            {
                "list_table_specs.return_value": [table_spec_mock],
                "list_column_specs.return_value": [column_spec_mock],
            },
            {},
        )
        client.update_column_spec(
            dataset_name="name",
            column_spec_display_name="column",
            type_code="type_code2",
        )
        client.auto_ml_client.list_table_specs.assert_called_with("name")
        client.auto_ml_client.list_column_specs.assert_called_with("table")
        client.auto_ml_client.update_column_spec.assert_called_with(
            {"name": "column", "data_type": {"type_code": "type_code2"}}
        )
    def test_update_column_spec_type_code_nullable(self):
        """type_code and nullable=True are combined in the request data type."""
        table_spec_mock = mock.Mock()
        # name is reserved in use of __init__, needs to be passed here
        table_spec_mock.configure_mock(name="table")
        column_spec_mock = mock.Mock()
        data_type_mock = mock.Mock(type_code="type_code")
        column_spec_mock.configure_mock(
            name="column", display_name="column", data_type=data_type_mock
        )
        client = self.tables_client(
            {
                "list_table_specs.return_value": [table_spec_mock],
                "list_column_specs.return_value": [column_spec_mock],
            },
            {},
        )
        client.update_column_spec(
            dataset_name="name",
            nullable=True,
            column_spec_display_name="column",
            type_code="type_code2",
        )
        client.auto_ml_client.list_table_specs.assert_called_with("name")
        client.auto_ml_client.list_column_specs.assert_called_with("table")
        client.auto_ml_client.update_column_spec.assert_called_with(
            {
                "name": "column",
                "data_type": {"type_code": "type_code2", "nullable": True},
            }
        )
    def test_update_column_spec_type_code_nullable_false(self):
        """nullable=False is sent explicitly, not dropped from the request."""
        table_spec_mock = mock.Mock()
        # name is reserved in use of __init__, needs to be passed here
        table_spec_mock.configure_mock(name="table")
        column_spec_mock = mock.Mock()
        data_type_mock = mock.Mock(type_code="type_code")
        column_spec_mock.configure_mock(
            name="column", display_name="column", data_type=data_type_mock
        )
        client = self.tables_client(
            {
                "list_table_specs.return_value": [table_spec_mock],
                "list_column_specs.return_value": [column_spec_mock],
            },
            {},
        )
        client.update_column_spec(
            dataset_name="name",
            nullable=False,
            column_spec_display_name="column",
            type_code="type_code2",
        )
        client.auto_ml_client.list_table_specs.assert_called_with("name")
        client.auto_ml_client.list_column_specs.assert_called_with("table")
        client.auto_ml_client.update_column_spec.assert_called_with(
            {
                "name": "column",
                "data_type": {"type_code": "type_code2", "nullable": False},
            }
        )
    def test_set_target_column_table_not_found(self):
        """NotFound from the table lookup aborts set_target_column early."""
        client = self.tables_client(
            {"list_table_specs.side_effect": exceptions.NotFound("err")}, {}
        )
        with pytest.raises(exceptions.NotFound):
            client.set_target_column(
                dataset_name="name", column_spec_display_name="column2"
            )
        client.auto_ml_client.list_table_specs.assert_called_with("name")
        client.auto_ml_client.list_column_specs.assert_not_called()
        client.auto_ml_client.update_dataset.assert_not_called()
    def test_set_target_column_not_found(self):
        """An unknown column display name raises NotFound and skips the update."""
        table_spec_mock = mock.Mock()
        # name is reserved in use of __init__, needs to be passed here
        table_spec_mock.configure_mock(name="table")
        column_spec_mock = mock.Mock()
        column_spec_mock.configure_mock(name="column/1", display_name="column")
        client = self.tables_client(
            {
                "list_table_specs.return_value": [table_spec_mock],
                "list_column_specs.return_value": [column_spec_mock],
            },
            {},
        )
        with pytest.raises(exceptions.NotFound):
            client.set_target_column(
                dataset_name="name", column_spec_display_name="column2"
            )
        client.auto_ml_client.list_table_specs.assert_called_with("name")
        client.auto_ml_client.list_column_specs.assert_called_with("table")
        client.auto_ml_client.update_dataset.assert_not_called()
    def test_set_target_column(self):
        """set_target_column rewrites target_column_spec_id, keeping other ids."""
        table_spec_mock = mock.Mock()
        # name is reserved in use of __init__, needs to be passed here
        table_spec_mock.configure_mock(name="table")
        column_spec_mock = mock.Mock()
        # column id "1" is the suffix of name "column/1"
        column_spec_mock.configure_mock(name="column/1", display_name="column")
        dataset_mock = mock.Mock()
        tables_dataset_metadata_mock = mock.Mock()
        tables_dataset_metadata_mock.configure_mock(
            target_column_spec_id="2",
            weight_column_spec_id="2",
            ml_use_column_spec_id="3",
        )
        dataset_mock.configure_mock(
            name="dataset", tables_dataset_metadata=tables_dataset_metadata_mock
        )
        client = self.tables_client(
            {
                "get_dataset.return_value": dataset_mock,
                "list_table_specs.return_value": [table_spec_mock],
                "list_column_specs.return_value": [column_spec_mock],
            },
            {},
        )
        client.set_target_column(dataset_name="name", column_spec_display_name="column")
        client.auto_ml_client.list_table_specs.assert_called_with("name")
        client.auto_ml_client.list_column_specs.assert_called_with("table")
        # only the target id changes; weight and ml_use ids are passed through
        client.auto_ml_client.update_dataset.assert_called_with(
            {
                "name": "dataset",
                "tables_dataset_metadata": {
                    "target_column_spec_id": "1",
                    "weight_column_spec_id": "2",
                    "ml_use_column_spec_id": "3",
                },
            }
        )
def test_set_weight_column_table_not_found(self):
client = self.tables_client(
{"list_table_specs.side_effect": exceptions.NotFound("err")}, {}
)
try:
client.set_weight_column(
dataset_name="name", column_spec_display_name="column2"
)
except exceptions.NotFound:
pass
client.auto_ml_client.list_table_specs.assert_called_with("name")
client.auto_ml_client.list_column_specs.assert_not_called()
client.auto_ml_client.update_dataset.assert_not_called()
    def test_set_weight_column_not_found(self):
        """An unknown weight column raises NotFound and skips the update."""
        table_spec_mock = mock.Mock()
        # name is reserved in use of __init__, needs to be passed here
        table_spec_mock.configure_mock(name="table")
        column_spec_mock = mock.Mock()
        column_spec_mock.configure_mock(name="column/1", display_name="column")
        client = self.tables_client(
            {
                "list_table_specs.return_value": [table_spec_mock],
                "list_column_specs.return_value": [column_spec_mock],
            },
            {},
        )
        with pytest.raises(exceptions.NotFound):
            client.set_weight_column(
                dataset_name="name", column_spec_display_name="column2"
            )
        client.auto_ml_client.list_table_specs.assert_called_with("name")
        client.auto_ml_client.list_column_specs.assert_called_with("table")
        client.auto_ml_client.update_dataset.assert_not_called()
    def test_set_weight_column(self):
        """set_weight_column rewrites weight_column_spec_id, keeping other ids."""
        table_spec_mock = mock.Mock()
        # name is reserved in use of __init__, needs to be passed here
        table_spec_mock.configure_mock(name="table")
        column_spec_mock = mock.Mock()
        # column id "2" is the suffix of name "column/2"
        column_spec_mock.configure_mock(name="column/2", display_name="column")
        dataset_mock = mock.Mock()
        tables_dataset_metadata_mock = mock.Mock()
        tables_dataset_metadata_mock.configure_mock(
            target_column_spec_id="1",
            weight_column_spec_id="1",
            ml_use_column_spec_id="3",
        )
        dataset_mock.configure_mock(
            name="dataset", tables_dataset_metadata=tables_dataset_metadata_mock
        )
        client = self.tables_client(
            {
                "get_dataset.return_value": dataset_mock,
                "list_table_specs.return_value": [table_spec_mock],
                "list_column_specs.return_value": [column_spec_mock],
            },
            {},
        )
        client.set_weight_column(dataset_name="name", column_spec_display_name="column")
        client.auto_ml_client.list_table_specs.assert_called_with("name")
        client.auto_ml_client.list_column_specs.assert_called_with("table")
        client.auto_ml_client.update_dataset.assert_called_with(
            {
                "name": "dataset",
                "tables_dataset_metadata": {
                    "target_column_spec_id": "1",
                    "weight_column_spec_id": "2",
                    "ml_use_column_spec_id": "3",
                },
            }
        )
    def test_clear_weight_column(self):
        """clear_weight_column nulls only the weight column spec id."""
        dataset_mock = mock.Mock()
        tables_dataset_metadata_mock = mock.Mock()
        tables_dataset_metadata_mock.configure_mock(
            target_column_spec_id="1",
            weight_column_spec_id="2",
            ml_use_column_spec_id="3",
        )
        dataset_mock.configure_mock(
            name="dataset", tables_dataset_metadata=tables_dataset_metadata_mock
        )
        client = self.tables_client({"get_dataset.return_value": dataset_mock}, {})
        client.clear_weight_column(dataset_name="name")
        client.auto_ml_client.update_dataset.assert_called_with(
            {
                "name": "dataset",
                "tables_dataset_metadata": {
                    "target_column_spec_id": "1",
                    "weight_column_spec_id": None,
                    "ml_use_column_spec_id": "3",
                },
            }
        )
    def test_set_test_train_column_table_not_found(self):
        """NotFound from the table lookup aborts set_test_train_column early."""
        client = self.tables_client(
            {"list_table_specs.side_effect": exceptions.NotFound("err")}, {}
        )
        with pytest.raises(exceptions.NotFound):
            client.set_test_train_column(
                dataset_name="name", column_spec_display_name="column2"
            )
        client.auto_ml_client.list_table_specs.assert_called_with("name")
        client.auto_ml_client.list_column_specs.assert_not_called()
        client.auto_ml_client.update_dataset.assert_not_called()
    def test_set_test_train_column_not_found(self):
        """An unknown column display name raises NotFound and skips the update."""
        table_spec_mock = mock.Mock()
        # name is reserved in use of __init__, needs to be passed here
        table_spec_mock.configure_mock(name="table")
        column_spec_mock = mock.Mock()
        column_spec_mock.configure_mock(name="column/1", display_name="column")
        client = self.tables_client(
            {
                "list_table_specs.return_value": [table_spec_mock],
                "list_column_specs.return_value": [column_spec_mock],
            },
            {},
        )
        with pytest.raises(exceptions.NotFound):
            client.set_test_train_column(
                dataset_name="name", column_spec_display_name="column2"
            )
        client.auto_ml_client.list_table_specs.assert_called_with("name")
        client.auto_ml_client.list_column_specs.assert_called_with("table")
        client.auto_ml_client.update_dataset.assert_not_called()
    def test_set_test_train_column(self):
        """set_test_train_column rewrites ml_use_column_spec_id only."""
        table_spec_mock = mock.Mock()
        # name is reserved in use of __init__, needs to be passed here
        table_spec_mock.configure_mock(name="table")
        column_spec_mock = mock.Mock()
        # column id "3" is the suffix of name "column/3"
        column_spec_mock.configure_mock(name="column/3", display_name="column")
        dataset_mock = mock.Mock()
        tables_dataset_metadata_mock = mock.Mock()
        tables_dataset_metadata_mock.configure_mock(
            target_column_spec_id="1",
            weight_column_spec_id="2",
            ml_use_column_spec_id="2",
        )
        dataset_mock.configure_mock(
            name="dataset", tables_dataset_metadata=tables_dataset_metadata_mock
        )
        client = self.tables_client(
            {
                "get_dataset.return_value": dataset_mock,
                "list_table_specs.return_value": [table_spec_mock],
                "list_column_specs.return_value": [column_spec_mock],
            },
            {},
        )
        client.set_test_train_column(
            dataset_name="name", column_spec_display_name="column"
        )
        client.auto_ml_client.list_table_specs.assert_called_with("name")
        client.auto_ml_client.list_column_specs.assert_called_with("table")
        client.auto_ml_client.update_dataset.assert_called_with(
            {
                "name": "dataset",
                "tables_dataset_metadata": {
                    "target_column_spec_id": "1",
                    "weight_column_spec_id": "2",
                    "ml_use_column_spec_id": "3",
                },
            }
        )
    def test_clear_test_train_column(self):
        """clear_test_train_column nulls only the ml_use column spec id."""
        dataset_mock = mock.Mock()
        tables_dataset_metadata_mock = mock.Mock()
        tables_dataset_metadata_mock.configure_mock(
            target_column_spec_id="1",
            weight_column_spec_id="2",
            ml_use_column_spec_id="2",
        )
        dataset_mock.configure_mock(
            name="dataset", tables_dataset_metadata=tables_dataset_metadata_mock
        )
        client = self.tables_client({"get_dataset.return_value": dataset_mock}, {})
        client.clear_test_train_column(dataset_name="name")
        client.auto_ml_client.update_dataset.assert_called_with(
            {
                "name": "dataset",
                "tables_dataset_metadata": {
                    "target_column_spec_id": "1",
                    "weight_column_spec_id": "2",
                    "ml_use_column_spec_id": None,
                },
            }
        )
    def test_set_time_column(self):
        """set_time_column updates the table spec (not the dataset) with the id."""
        table_spec_mock = mock.Mock()
        # name is reserved in use of __init__, needs to be passed here
        table_spec_mock.configure_mock(name="table")
        column_spec_mock = mock.Mock()
        column_spec_mock.configure_mock(name="column/3", display_name="column")
        dataset_mock = mock.Mock()
        dataset_mock.configure_mock(name="dataset")
        client = self.tables_client(
            {
                "get_dataset.return_value": dataset_mock,
                "list_table_specs.return_value": [table_spec_mock],
                "list_column_specs.return_value": [column_spec_mock],
            },
            {},
        )
        client.set_time_column(dataset_name="name", column_spec_display_name="column")
        client.auto_ml_client.list_table_specs.assert_called_with("name")
        client.auto_ml_client.list_column_specs.assert_called_with("table")
        client.auto_ml_client.update_table_spec.assert_called_with(
            {"name": "table", "time_column_spec_id": "3"}
        )
    def test_clear_time_column(self):
        """clear_time_column nulls the table spec's time_column_spec_id."""
        table_spec_mock = mock.Mock()
        # name is reserved in use of __init__, needs to be passed here
        table_spec_mock.configure_mock(name="table")
        dataset_mock = mock.Mock()
        dataset_mock.configure_mock(name="dataset")
        client = self.tables_client(
            {
                "get_dataset.return_value": dataset_mock,
                "list_table_specs.return_value": [table_spec_mock],
            },
            {},
        )
        client.clear_time_column(dataset_name="name")
        client.auto_ml_client.update_table_spec.assert_called_with(
            {"name": "table", "time_column_spec_id": None}
        )
def test_get_model_evaluation(self):
client = self.tables_client({}, {})
ds = client.get_model_evaluation(model_evaluation_name="x")
client.auto_ml_client.get_model_evaluation.assert_called_with("x")
    def test_list_model_evaluations_empty(self):
        """list_model_evaluations returns [] when the API yields nothing."""
        client = self.tables_client({"list_model_evaluations.return_value": []}, {})
        ds = client.list_model_evaluations(model_name="model")
        client.auto_ml_client.list_model_evaluations.assert_called_with("model")
        assert ds == []
    def test_list_model_evaluations_not_empty(self):
        """list_model_evaluations passes API results through unchanged."""
        evaluations = ["eval"]
        client = self.tables_client(
            {
                "list_model_evaluations.return_value": evaluations,
                "location_path.return_value": LOCATION_PATH,
            },
            {},
        )
        ds = client.list_model_evaluations(model_name="model")
        client.auto_ml_client.list_model_evaluations.assert_called_with("model")
        assert len(ds) == 1
        assert ds[0] == "eval"
    def test_list_models_empty(self):
        """list_models resolves the location path and returns []."""
        client = self.tables_client(
            {
                "list_models.return_value": [],
                "location_path.return_value": LOCATION_PATH,
            },
            {},
        )
        ds = client.list_models()
        client.auto_ml_client.location_path.assert_called_with(PROJECT, REGION)
        client.auto_ml_client.list_models.assert_called_with(LOCATION_PATH)
        assert ds == []
    def test_list_models_not_empty(self):
        """list_models passes API results through unchanged."""
        models = ["some_model"]
        client = self.tables_client(
            {
                "list_models.return_value": models,
                "location_path.return_value": LOCATION_PATH,
            },
            {},
        )
        ds = client.list_models()
        client.auto_ml_client.location_path.assert_called_with(PROJECT, REGION)
        client.auto_ml_client.list_models.assert_called_with(LOCATION_PATH)
        assert len(ds) == 1
        assert ds[0] == "some_model"
    def test_get_model_name(self):
        """get_model with model_name fetches directly by name."""
        model_actual = "model"
        client = self.tables_client({"get_model.return_value": model_actual}, {})
        model = client.get_model(model_name="my_model")
        client.auto_ml_client.get_model.assert_called_with("my_model")
        assert model == model_actual
    def test_get_no_model(self):
        """get_model propagates NotFound from the API."""
        client = self.tables_client(
            {"get_model.side_effect": exceptions.NotFound("err")}, {}
        )
        with pytest.raises(exceptions.NotFound):
            client.get_model(model_name="my_model")
        client.auto_ml_client.get_model.assert_called_with("my_model")
    def test_get_model_from_empty_list(self):
        """Display-name lookup raises NotFound when no models exist."""
        client = self.tables_client({"list_models.return_value": []}, {})
        with pytest.raises(exceptions.NotFound):
            client.get_model(model_display_name="my_model")
    def test_get_model_from_list_not_found(self):
        """Display-name lookup raises NotFound when nothing matches."""
        client = self.tables_client(
            {"list_models.return_value": [mock.Mock(display_name="not_it")]}, {}
        )
        with pytest.raises(exceptions.NotFound):
            client.get_model(model_display_name="my_model")
    def test_get_model_from_list(self):
        """Display-name lookup returns the single matching model."""
        client = self.tables_client(
            {
                "list_models.return_value": [
                    mock.Mock(display_name="not_it"),
                    mock.Mock(display_name="my_model"),
                ]
            },
            {},
        )
        model = client.get_model(model_display_name="my_model")
        assert model.display_name == "my_model"
    def test_get_model_from_list_ambiguous(self):
        """Display-name lookup raises ValueError on duplicate display names."""
        client = self.tables_client(
            {
                "list_models.return_value": [
                    mock.Mock(display_name="my_model"),
                    mock.Mock(display_name="not_my_model"),
                    mock.Mock(display_name="my_model"),
                ]
            },
            {},
        )
        with pytest.raises(ValueError):
            client.get_model(model_display_name="my_model")
    def test_delete_model(self):
        """delete_model accepts a model object and deletes by its name."""
        model = mock.Mock()
        model.configure_mock(name="name")
        client = self.tables_client({"delete_model.return_value": None}, {})
        client.delete_model(model=model)
        client.auto_ml_client.delete_model.assert_called_with("name")
    def test_delete_model_not_found(self):
        """Deleting a missing display name is a silent no-op (no API call)."""
        client = self.tables_client({"list_models.return_value": []}, {})
        client.delete_model(model_display_name="not_found")
        client.auto_ml_client.delete_model.assert_not_called()
    def test_delete_model_name(self):
        """delete_model accepts a raw model name."""
        client = self.tables_client({"delete_model.return_value": None}, {})
        client.delete_model(model_name="name")
        client.auto_ml_client.delete_model.assert_called_with("name")
    def test_deploy_model_no_args(self):
        """deploy_model without any identifier raises ValueError."""
        client = self.tables_client({}, {})
        with pytest.raises(ValueError):
            client.deploy_model()
        client.auto_ml_client.deploy_model.assert_not_called()
    def test_deploy_model(self):
        """deploy_model forwards the model name to the API."""
        client = self.tables_client({}, {})
        client.deploy_model(model_name="name")
        client.auto_ml_client.deploy_model.assert_called_with("name")
    def test_deploy_model_not_found(self):
        """Deploying an unknown display name raises NotFound, no API call."""
        client = self.tables_client({"list_models.return_value": []}, {})
        with pytest.raises(exceptions.NotFound):
            client.deploy_model(model_display_name="name")
        client.auto_ml_client.deploy_model.assert_not_called()
    def test_undeploy_model(self):
        """undeploy_model forwards the model name to the API."""
        client = self.tables_client({}, {})
        client.undeploy_model(model_name="name")
        client.auto_ml_client.undeploy_model.assert_called_with("name")
    def test_undeploy_model_not_found(self):
        """Undeploying an unknown display name raises NotFound, no API call."""
        client = self.tables_client({"list_models.return_value": []}, {})
        with pytest.raises(exceptions.NotFound):
            client.undeploy_model(model_display_name="name")
        client.auto_ml_client.undeploy_model.assert_not_called()
    def test_create_model(self):
        """create_model builds the minimal request with the training budget."""
        table_spec_mock = mock.Mock()
        # name is reserved in use of __init__, needs to be passed here
        table_spec_mock.configure_mock(name="table")
        column_spec_mock = mock.Mock()
        column_spec_mock.configure_mock(name="column/2", display_name="column")
        client = self.tables_client(
            {
                "list_table_specs.return_value": [table_spec_mock],
                "list_column_specs.return_value": [column_spec_mock],
                "location_path.return_value": LOCATION_PATH,
            },
            {},
        )
        client.create_model(
            "my_model", dataset_name="my_dataset", train_budget_milli_node_hours=1000
        )
        client.auto_ml_client.create_model.assert_called_with(
            LOCATION_PATH,
            {
                "display_name": "my_model",
                "dataset_id": "my_dataset",
                "tables_model_metadata": {"train_budget_milli_node_hours": 1000},
            },
        )
    def test_create_model_include_columns(self):
        """include_column_spec_names selects only the named feature columns."""
        table_spec_mock = mock.Mock()
        # name is reserved in use of __init__, needs to be passed here
        table_spec_mock.configure_mock(name="table")
        column_spec_mock1 = mock.Mock()
        column_spec_mock1.configure_mock(name="column/1", display_name="column1")
        column_spec_mock2 = mock.Mock()
        column_spec_mock2.configure_mock(name="column/2", display_name="column2")
        client = self.tables_client(
            {
                "list_table_specs.return_value": [table_spec_mock],
                "list_column_specs.return_value": [
                    column_spec_mock1,
                    column_spec_mock2,
                ],
                "location_path.return_value": LOCATION_PATH,
            },
            {},
        )
        client.create_model(
            "my_model",
            dataset_name="my_dataset",
            include_column_spec_names=["column1"],
            train_budget_milli_node_hours=1000,
        )
        # only column1 survives the include filter
        client.auto_ml_client.create_model.assert_called_with(
            LOCATION_PATH,
            {
                "display_name": "my_model",
                "dataset_id": "my_dataset",
                "tables_model_metadata": {
                    "train_budget_milli_node_hours": 1000,
                    "input_feature_column_specs": [column_spec_mock1],
                },
            },
        )
    def test_create_model_exclude_columns(self):
        """exclude_column_spec_names drops the named feature columns."""
        table_spec_mock = mock.Mock()
        # name is reserved in use of __init__, needs to be passed here
        table_spec_mock.configure_mock(name="table")
        column_spec_mock1 = mock.Mock()
        column_spec_mock1.configure_mock(name="column/1", display_name="column1")
        column_spec_mock2 = mock.Mock()
        column_spec_mock2.configure_mock(name="column/2", display_name="column2")
        client = self.tables_client(
            {
                "list_table_specs.return_value": [table_spec_mock],
                "list_column_specs.return_value": [
                    column_spec_mock1,
                    column_spec_mock2,
                ],
                "location_path.return_value": LOCATION_PATH,
            },
            {},
        )
        client.create_model(
            "my_model",
            dataset_name="my_dataset",
            exclude_column_spec_names=["column1"],
            train_budget_milli_node_hours=1000,
        )
        # column1 is excluded, leaving only column2
        client.auto_ml_client.create_model.assert_called_with(
            LOCATION_PATH,
            {
                "display_name": "my_model",
                "dataset_id": "my_dataset",
                "tables_model_metadata": {
                    "train_budget_milli_node_hours": 1000,
                    "input_feature_column_specs": [column_spec_mock2],
                },
            },
        )
    def test_create_model_invalid_hours_small(self):
        """A budget below the allowed minimum raises ValueError."""
        client = self.tables_client({}, {})
        with pytest.raises(ValueError):
            client.create_model(
                "my_model", dataset_name="my_dataset", train_budget_milli_node_hours=1
            )
        client.auto_ml_client.create_model.assert_not_called()
    def test_create_model_invalid_hours_large(self):
        """A budget above the allowed maximum raises ValueError."""
        client = self.tables_client({}, {})
        with pytest.raises(ValueError):
            client.create_model(
                "my_model",
                dataset_name="my_dataset",
                train_budget_milli_node_hours=1000000,
            )
        client.auto_ml_client.create_model.assert_not_called()
    def test_create_model_invalid_no_dataset(self):
        """Omitting the dataset raises ValueError before any API call."""
        client = self.tables_client({}, {})
        with pytest.raises(ValueError):
            client.create_model("my_model", train_budget_milli_node_hours=1000)
        client.auto_ml_client.get_dataset.assert_not_called()
        client.auto_ml_client.create_model.assert_not_called()
    def test_create_model_invalid_include_exclude(self):
        """Supplying both include and exclude column lists raises ValueError."""
        client = self.tables_client({}, {})
        with pytest.raises(ValueError):
            client.create_model(
                "my_model",
                dataset_name="my_dataset",
                include_column_spec_names=["a"],
                exclude_column_spec_names=["b"],
                train_budget_milli_node_hours=1000,
            )
        client.auto_ml_client.get_dataset.assert_not_called()
        client.auto_ml_client.create_model.assert_not_called()
    def test_predict_from_array(self):
        """predict accepts a positional list and encodes values by column type."""
        data_type = mock.Mock(type_code=data_types_pb2.CATEGORY)
        column_spec = mock.Mock(display_name="a", data_type=data_type)
        model_metadata = mock.Mock(input_feature_column_specs=[column_spec])
        model = mock.Mock()
        model.configure_mock(tables_model_metadata=model_metadata, name="my_model")
        client = self.tables_client({"get_model.return_value": model}, {})
        client.predict(["1"], model_name="my_model")
        client.prediction_client.predict.assert_called_with(
            "my_model", {"row": {"values": [{"string_value": "1"}]}}, None
        )
    def test_predict_from_dict(self):
        """predict accepts a dict keyed by column display name."""
        data_type = mock.Mock(type_code=data_types_pb2.CATEGORY)
        column_spec_a = mock.Mock(display_name="a", data_type=data_type)
        column_spec_b = mock.Mock(display_name="b", data_type=data_type)
        model_metadata = mock.Mock(
            input_feature_column_specs=[column_spec_a, column_spec_b]
        )
        model = mock.Mock()
        model.configure_mock(tables_model_metadata=model_metadata, name="my_model")
        client = self.tables_client({"get_model.return_value": model}, {})
        client.predict({"a": "1", "b": "2"}, model_name="my_model")
        client.prediction_client.predict.assert_called_with(
            "my_model",
            {"row": {"values": [{"string_value": "1"}, {"string_value": "2"}]}},
            None,
        )
    def test_predict_from_dict_with_feature_importance(self):
        """feature_importance=True adds the request parameter."""
        data_type = mock.Mock(type_code=data_types_pb2.CATEGORY)
        column_spec_a = mock.Mock(display_name="a", data_type=data_type)
        column_spec_b = mock.Mock(display_name="b", data_type=data_type)
        model_metadata = mock.Mock(
            input_feature_column_specs=[column_spec_a, column_spec_b]
        )
        model = mock.Mock()
        model.configure_mock(tables_model_metadata=model_metadata, name="my_model")
        client = self.tables_client({"get_model.return_value": model}, {})
        client.predict(
            {"a": "1", "b": "2"}, model_name="my_model", feature_importance=True
        )
        client.prediction_client.predict.assert_called_with(
            "my_model",
            {"row": {"values": [{"string_value": "1"}, {"string_value": "2"}]}},
            {"feature_importance": "true"},
        )
    def test_predict_from_dict_missing(self):
        """Columns absent from the dict are encoded as null_value."""
        data_type = mock.Mock(type_code=data_types_pb2.CATEGORY)
        column_spec_a = mock.Mock(display_name="a", data_type=data_type)
        column_spec_b = mock.Mock(display_name="b", data_type=data_type)
        model_metadata = mock.Mock(
            input_feature_column_specs=[column_spec_a, column_spec_b]
        )
        model = mock.Mock()
        model.configure_mock(tables_model_metadata=model_metadata, name="my_model")
        client = self.tables_client({"get_model.return_value": model}, {})
        client.predict({"a": "1"}, model_name="my_model")
        client.prediction_client.predict.assert_called_with(
            "my_model",
            {"row": {"values": [{"string_value": "1"}, {"null_value": 0}]}},
            None,
        )
    def test_predict_all_types(self):
        """Every supported type code maps to the expected protobuf Value field."""
        float_type = mock.Mock(type_code=data_types_pb2.FLOAT64)
        timestamp_type = mock.Mock(type_code=data_types_pb2.TIMESTAMP)
        string_type = mock.Mock(type_code=data_types_pb2.STRING)
        array_type = mock.Mock(type_code=data_types_pb2.ARRAY)
        struct_type = mock.Mock(type_code=data_types_pb2.STRUCT)
        category_type = mock.Mock(type_code=data_types_pb2.CATEGORY)
        column_spec_float = mock.Mock(display_name="float", data_type=float_type)
        column_spec_timestamp = mock.Mock(
            display_name="timestamp", data_type=timestamp_type
        )
        column_spec_string = mock.Mock(display_name="string", data_type=string_type)
        column_spec_array = mock.Mock(display_name="array", data_type=array_type)
        column_spec_struct = mock.Mock(display_name="struct", data_type=struct_type)
        column_spec_category = mock.Mock(
            display_name="category", data_type=category_type
        )
        column_spec_null = mock.Mock(display_name="null", data_type=category_type)
        model_metadata = mock.Mock(
            input_feature_column_specs=[
                column_spec_float,
                column_spec_timestamp,
                column_spec_string,
                column_spec_array,
                column_spec_struct,
                column_spec_category,
                column_spec_null,
            ]
        )
        model = mock.Mock()
        model.configure_mock(tables_model_metadata=model_metadata, name="my_model")
        client = self.tables_client({"get_model.return_value": model}, {})
        client.predict(
            {
                "float": 1.0,
                "timestamp": "EST",
                "string": "text",
                "array": [1],
                "struct": {"a": "b"},
                "category": "a",
                "null": None,
            },
            model_name="my_model",
        )
        client.prediction_client.predict.assert_called_with(
            "my_model",
            {
                "row": {
                    "values": [
                        {"number_value": 1.0},
                        {"string_value": "EST"},
                        {"string_value": "text"},
                        {"list_value": [1]},
                        {"struct_value": {"a": "b"}},
                        {"string_value": "a"},
                        {"null_value": 0},
                    ]
                }
            },
            None,
        )
    def test_predict_from_array_missing(self):
        """A positional list with the wrong arity raises ValueError, no call."""
        data_type = mock.Mock(type_code=data_types_pb2.CATEGORY)
        column_spec = mock.Mock(display_name="a", data_type=data_type)
        model_metadata = mock.Mock(input_feature_column_specs=[column_spec])
        model = mock.Mock()
        model.configure_mock(tables_model_metadata=model_metadata, name="my_model")
        client = self.tables_client({"get_model.return_value": model}, {})
        with pytest.raises(ValueError):
            client.predict([], model_name="my_model")
        client.prediction_client.predict.assert_not_called()
    def test_batch_predict_pandas_dataframe(self):
        """A pandas DataFrame input is uploaded to GCS before batch predict."""
        client = self.tables_client(
            gcs_client_attrs={
                "bucket_name": "my_bucket",
                "upload_pandas_dataframe.return_value": "gs://input",
            }
        )
        dataframe = pandas.DataFrame({})
        client.batch_predict(
            project=PROJECT,
            region=REGION,
            model_name="my_model",
            pandas_dataframe=dataframe,
            gcs_output_uri_prefix="gs://output",
        )
        client.gcs_client.ensure_bucket_exists.assert_called_with(PROJECT, REGION)
        client.gcs_client.upload_pandas_dataframe.assert_called_with(dataframe)
        # the uploaded GCS uri becomes the batch-predict input source
        client.prediction_client.batch_predict.assert_called_with(
            "my_model",
            {"gcs_source": {"input_uris": ["gs://input"]}},
            {"gcs_destination": {"output_uri_prefix": "gs://output"}},
        )
def test_batch_predict_pandas_dataframe_init_gcs(self):
client = automl_v1beta1.TablesClient(
client=mock.Mock(),
prediction_client=mock.Mock(),
project=PROJECT,
region=REGION,
credentials=AnonymousCredentials(),
)
dataframe = pandas.DataFrame({})
patch = mock.patch(
"google.cloud.automl_v1beta1.tables.tables_client.gcs_client.GcsClient",
bucket_name="my_bucket",
)
with patch as MockGcsClient:
mockInstance = MockGcsClient.return_value
mockInstance.upload_pandas_dataframe.return_value = "gs://input"
dataframe = pandas.DataFrame({})
client.batch_predict(
model_name="my_model",
pandas_dataframe=dataframe,
gcs_output_uri_prefix="gs://output",
)
client.gcs_client.ensure_bucket_exists.assert_called_with(PROJECT, REGION)
client.gcs_client.upload_pandas_dataframe.assert_called_with(dataframe)
client.prediction_client.batch_predict.assert_called_with(
"my_model",
{"gcs_source": {"input_uris": ["gs://input"]}},
{"gcs_destination": {"output_uri_prefix": "gs://output"}},
)
    def test_batch_predict_gcs(self):
        """GCS input uri and GCS output prefix build a gcs-to-gcs request."""
        client = self.tables_client({}, {})
        client.batch_predict(
            model_name="my_model",
            gcs_input_uris="gs://input",
            gcs_output_uri_prefix="gs://output",
        )
        client.prediction_client.batch_predict.assert_called_with(
            "my_model",
            {"gcs_source": {"input_uris": ["gs://input"]}},
            {"gcs_destination": {"output_uri_prefix": "gs://output"}},
        )
    def test_batch_predict_bigquery(self):
        """BigQuery input and output uris build a bq-to-bq request."""
        client = self.tables_client({}, {})
        client.batch_predict(
            model_name="my_model",
            bigquery_input_uri="bq://input",
            bigquery_output_uri="bq://output",
        )
        client.prediction_client.batch_predict.assert_called_with(
            "my_model",
            {"bigquery_source": {"input_uri": "bq://input"}},
            {"bigquery_destination": {"output_uri": "bq://output"}},
        )
    def test_batch_predict_mixed(self):
        """GCS input with BigQuery output is a valid combination."""
        client = self.tables_client({}, {})
        client.batch_predict(
            model_name="my_model",
            gcs_input_uris="gs://input",
            bigquery_output_uri="bq://output",
        )
        client.prediction_client.batch_predict.assert_called_with(
            "my_model",
            {"gcs_source": {"input_uris": ["gs://input"]}},
            {"bigquery_destination": {"output_uri": "bq://output"}},
        )
    def test_batch_predict_missing_input_gcs_uri(self):
        """A None GCS input uri raises ValueError before any API call."""
        client = self.tables_client({}, {})
        with pytest.raises(ValueError):
            client.batch_predict(
                model_name="my_model",
                gcs_input_uris=None,
                gcs_output_uri_prefix="gs://output",
            )
        client.prediction_client.batch_predict.assert_not_called()
    def test_batch_predict_missing_input_bigquery_uri(self):
        """A None BigQuery input uri raises ValueError before any API call."""
        client = self.tables_client({}, {})
        with pytest.raises(ValueError):
            client.batch_predict(
                model_name="my_model",
                bigquery_input_uri=None,
                gcs_output_uri_prefix="gs://output",
            )
        client.prediction_client.batch_predict.assert_not_called()
    def test_batch_predict_missing_output_gcs_uri(self):
        """A None GCS output prefix raises ValueError before any API call."""
        client = self.tables_client({}, {})
        with pytest.raises(ValueError):
            client.batch_predict(
                model_name="my_model",
                gcs_input_uris="gs://input",
                gcs_output_uri_prefix=None,
            )
        client.prediction_client.batch_predict.assert_not_called()
    def test_batch_predict_missing_output_bigquery_uri(self):
        """A None BigQuery output uri raises ValueError before any API call."""
        client = self.tables_client({}, {})
        with pytest.raises(ValueError):
            client.batch_predict(
                model_name="my_model",
                gcs_input_uris="gs://input",
                bigquery_output_uri=None,
            )
        client.prediction_client.batch_predict.assert_not_called()
    def test_batch_predict_missing_model(self):
        """An unknown model display name raises NotFound, no API call."""
        client = self.tables_client({"list_models.return_value": []}, {})
        with pytest.raises(exceptions.NotFound):
            client.batch_predict(
                model_display_name="my_model",
                gcs_input_uris="gs://input",
                gcs_output_uri_prefix="gs://output",
            )
        client.prediction_client.batch_predict.assert_not_called()
    def test_batch_predict_no_model(self):
        """Omitting any model identifier raises ValueError immediately."""
        client = self.tables_client({}, {})
        with pytest.raises(ValueError):
            client.batch_predict(
                gcs_input_uris="gs://input", gcs_output_uri_prefix="gs://output"
            )
        client.auto_ml_client.list_models.assert_not_called()
        client.prediction_client.batch_predict.assert_not_called()
    def test_auto_ml_client_credentials(self):
        """TablesClient forwards credentials to the underlying AutoMlClient."""
        credentials_mock = mock.Mock()
        patch_auto_ml_client = mock.patch(
            "google.cloud.automl_v1beta1.gapic.auto_ml_client.AutoMlClient"
        )
        with patch_auto_ml_client as MockAutoMlClient:
            # constructed only for its side effect on the patched class
            client = automl_v1beta1.TablesClient(credentials=credentials_mock)
        _, auto_ml_client_kwargs = MockAutoMlClient.call_args
        assert "credentials" in auto_ml_client_kwargs
        assert auto_ml_client_kwargs["credentials"] == credentials_mock
    def test_prediction_client_credentials(self):
        """TablesClient forwards credentials to the PredictionServiceClient."""
        credentials_mock = mock.Mock()
        patch_prediction_client = mock.patch(
            "google.cloud.automl_v1beta1.gapic.prediction_service_client.PredictionServiceClient"
        )
        with patch_prediction_client as MockPredictionClient:
            # constructed only for its side effect on the patched class
            client = automl_v1beta1.TablesClient(credentials=credentials_mock)
        _, prediction_client_kwargs = MockPredictionClient.call_args
        assert "credentials" in prediction_client_kwargs
        assert prediction_client_kwargs["credentials"] == credentials_mock
    def test_prediction_client_client_info(self):
        """TablesClient forwards client_info to the PredictionServiceClient."""
        client_info_mock = mock.Mock()
        patch_prediction_client = mock.patch(
            "google.cloud.automl_v1beta1.gapic.prediction_service_client.PredictionServiceClient"
        )
        with patch_prediction_client as MockPredictionClient:
            # constructed only for its side effect on the patched class
            client = automl_v1beta1.TablesClient(client_info=client_info_mock)
        _, prediction_client_kwargs = MockPredictionClient.call_args
        assert "client_info" in prediction_client_kwargs
        assert prediction_client_kwargs["client_info"] == client_info_mock
| |
"""Copyright 2009 Chris Davis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import traceback
from django.http import HttpResponse, HttpResponseBadRequest
from django.conf import settings
from graphite.account.models import Profile
from graphite.util import getProfile, getProfileByUsername, defaultUser, json
from graphite.logger import log
from graphite.storage import STORE
from graphite.metrics.search import searcher
from graphite.carbonlink import CarbonLink
import fnmatch, os
try:
import cPickle as pickle
except ImportError:
import pickle
def index_json(request):
  """Return every known metric path as a JSON list (optionally JSONP-wrapped).

  Walks both the whisper and ceres storage trees and converts filesystem
  paths into dotted metric names.
  """
  # NOTE(review): the jsonp callback name is reflected unvalidated into the
  # response body — confirm callers sanitize it (potential XSS vector).
  jsonp = request.REQUEST.get('jsonp', False)
  matches = []
  for root, dirs, files in os.walk(settings.WHISPER_DIR):
    root = root.replace(settings.WHISPER_DIR, '')
    for basename in files:
      if fnmatch.fnmatch(basename, '*.wsp'):
        matches.append(os.path.join(root, basename))
  for root, dirs, files in os.walk(settings.CERES_DIR):
    root = root.replace(settings.CERES_DIR, '')
    for filename in files:
      # a ceres node is marked by this sentinel file; the directory is the metric
      if filename == '.ceres-node':
        matches.append(root)
  # strip the whisper extension and turn path separators into metric dots
  matches = [ m.replace('.wsp','').replace('/', '.') for m in sorted(matches) ]
  if jsonp:
    return HttpResponse("%s(%s)" % (jsonp, json.dumps(matches)), mimetype='text/javascript')
  else:
    return HttpResponse(json.dumps(matches), mimetype='application/json')
def search_view(request):
  """Full-text metric search endpoint.

  Requires a 'query' request parameter; optional 'max_results' (default 25)
  and 'keep_query_pattern' flags are forwarded to the searcher. Responds
  with a JSON object of the form {"metrics": [...]}.
  """
  try:
    query = str( request.REQUEST['query'] )
  # Narrowed from a bare except: a missing parameter (KeyError) or a
  # non-ascii query under Python 2's str() (UnicodeEncodeError) are client
  # errors; anything else should surface as a real failure.
  except (KeyError, UnicodeEncodeError):
    return HttpResponseBadRequest(content="Missing required parameter 'query'", mimetype="text/plain")
  search_request = {
    'query' : query,
    'max_results' : int( request.REQUEST.get('max_results', 25) ),
    'keep_query_pattern' : int(request.REQUEST.get('keep_query_pattern', 0)),
  }
  #if not search_request['query'].endswith('*'):
  #  search_request['query'] += '*'
  results = sorted(searcher.search(**search_request))
  result_data = json.dumps( dict(metrics=results) )
  return HttpResponse(result_data, mimetype='application/json')
def find_view(request):
  """View for finding metrics matching a given pattern. Used by
  e.g. the browser's tree-style navigation
  """
  profile = getProfile(request)
  format = request.REQUEST.get('format', 'treejson')
  local_only = int( request.REQUEST.get('local', 0) )
  wildcards = int( request.REQUEST.get('wildcards', 0) )
  fromTime = int( request.REQUEST.get('from', -1) )
  untilTime = int( request.REQUEST.get('until', -1) )
  # -1 encodes "no bound" for the optional time window.
  if fromTime == -1:
    fromTime = None
  if untilTime == -1:
    untilTime = None
  automatic_variants = int( request.REQUEST.get('automatic_variants', 0) )
  try:
    query = str( request.REQUEST['query'] )
  except:
    return HttpResponseBadRequest(content="Missing required parameter 'query'", mimetype="text/plain")
  # Parent path of the last query segment; prefixed onto node ids below.
  if '.' in query:
    base_path = query.rsplit('.', 1)[0] + '.'
  else:
    base_path = ''
  if format == 'completer':
    # Autocomplete mode: ".." descends one level, and the final segment is
    # treated as a prefix match.
    query = query.replace('..', '*.')
    if not query.endswith('*'):
      query += '*'
    if automatic_variants:
      # Turn "a,b" into the brace variant "{a,b}" unless already braced.
      query_parts = query.split('.')
      for i,part in enumerate(query_parts):
        if ',' in part and '{' not in part:
          query_parts[i] = '{%s}' % part
      query = '.'.join(query_parts)
  try:
    matches = list( STORE.find(query, fromTime, untilTime, local=local_only) )
  except:
    log.exception()
    raise
  log.info('find_view query=%s local_only=%s matches=%d' % (query, local_only, len(matches)))
  matches.sort(key=lambda node: node.name)
  log.info("received remote find request: pattern=%s from=%s until=%s local_only=%s format=%s matches=%d" % (query, fromTime, untilTime, local_only, format, len(matches)))
  # Render the matches in the requested wire format.
  if format == 'treejson':
    content = tree_json(matches, base_path, wildcards=profile.advancedUI or wildcards)
    response = HttpResponse(content, mimetype='application/json')
  elif format == 'pickle':
    content = pickle_nodes(matches)
    response = HttpResponse(content, mimetype='application/pickle')
  elif format == 'completer':
    results = []
    for node in matches:
      node_info = dict(path=node.path, name=node.name, is_leaf=str(int(node.is_leaf)))
      if not node.is_leaf:
        # Trailing dot tells the completer there is more below this node.
        node_info['path'] += '.'
      results.append(node_info)
    if len(results) > 1 and wildcards:
      wildcardNode = {'name' : '*'}
      results.append(wildcardNode)
    content = json.dumps({ 'metrics' : results })
    response = HttpResponse(content, mimetype='application/json')
  else:
    return HttpResponseBadRequest(content="Invalid value for 'format' parameter", mimetype="text/plain")
  response['Pragma'] = 'no-cache'
  response['Cache-Control'] = 'no-cache'
  return response
def expand_view(request):
  """View for expanding one or more patterns into matching metric paths.

  Returns {"results": ...}: a dict of sorted lists keyed by query when
  groupByExpr is set, otherwise one flat sorted union of all matches.
  """
  local_only = int( request.REQUEST.get('local', 0) )
  group_by_expr = int( request.REQUEST.get('groupByExpr', 0) )
  leaves_only = int( request.REQUEST.get('leavesOnly', 0) )
  results = {}
  for query in request.REQUEST.getlist('query'):
    results[query] = set()
    for node in STORE.find(query, local=local_only):
      if node.is_leaf or not leaves_only:
        results[query].add( node.path )
  # Convert our results to sorted lists because sets aren't json-friendly
  if group_by_expr:
    for query, matches in results.items():
      results[query] = sorted(matches)
  else:
    # set().union(*sets) replaces reduce(set.union, sets, set()): same
    # result, but without the Python 2-only reduce() builtin.
    results = sorted( set().union(*results.values()) )
  result = {
    'results' : results
  }
  response = HttpResponse(json.dumps(result), mimetype='application/json')
  response['Pragma'] = 'no-cache'
  response['Cache-Control'] = 'no-cache'
  return response
def get_metadata_view(request):
  """Fetch one metadata key for each requested metric via CarbonLink.

  Failures are reported per-metric in the JSON result rather than failing
  the whole request.
  """
  key = request.REQUEST['key']
  metrics = request.REQUEST.getlist('metric')
  results = {}
  for metric in metrics:
    try:
      results[metric] = CarbonLink.get_metadata(metric, key)
    except:
      # Best-effort: log and report the error for this metric only.
      log.exception()
      results[metric] = dict(error="Unexpected error occurred in CarbonLink.get_metadata(%s, %s)" % (metric, key))
  return HttpResponse(json.dumps(results), mimetype='application/json')
def set_metadata_view(request):
  """Write metric metadata via CarbonLink.

  GET sets a single metric/key/value triple; POST applies a bulk list of
  {metric, key, value} operations (either a JSON request body or an
  "operations" form field). Errors are reported per-metric.
  """
  results = {}
  if request.method == 'GET':
    metric = request.GET['metric']
    key = request.GET['key']
    value = request.GET['value']
    try:
      results[metric] = CarbonLink.set_metadata(metric, key, value)
    except:
      log.exception()
      results[metric] = dict(error="Unexpected error occurred in CarbonLink.set_metadata(%s, %s)" % (metric, key))
  elif request.method == 'POST':
    if request.META.get('CONTENT_TYPE') == 'application/json':
      operations = json.loads( request.raw_post_data )
    else:
      operations = json.loads( request.POST['operations'] )
    for op in operations:
      metric = None
      try:
        metric, key, value = op['metric'], op['key'], op['value']
        results[metric] = CarbonLink.set_metadata(metric, key, value)
      except:
        log.exception()
        # metric stays None if the op dict itself was malformed.
        if metric:
          results[metric] = dict(error="Unexpected error occurred in bulk CarbonLink.set_metadata(%s)" % metric)
  else:
    results = dict(error="Invalid request method")
  return HttpResponse(json.dumps(results), mimetype='application/json')
def tree_json(nodes, base_path, wildcards=False):
  """Serialize find() results into the JSON tree format consumed by the
  browser's tree navigation: branches first, then leaves, each entry
  carrying allowChildren/expandable/leaf flags."""
  branch_attrs = {
    'allowChildren': 1,
    'expandable': 1,
    'leaf': 0,
  }
  leaf_attrs = {
    'allowChildren': 0,
    'expandable': 0,
    'leaf': 1,
  }
  results = []
  # Offer a '*' selector when several siblings matched.
  if wildcards and len(nodes) > 1:
    wildcard = {'text' : '*', 'id' : base_path + '*'}
    has_branch = any(not n.is_leaf for n in nodes)
    wildcard.update(branch_attrs if has_branch else leaf_attrs)
    results.append(wildcard)
  seen = set()
  branches = []
  leaves = []
  for node in nodes:
    # Collapse duplicate names reported by multiple stores.
    if node.name in seen:
      continue
    seen.add(node.name)
    entry = {
      'text' : str(node.name),
      'id' : base_path + str(node.name),
    }
    if node.is_leaf:
      entry.update(leaf_attrs)
      leaves.append(entry)
    else:
      entry.update(branch_attrs)
      branches.append(entry)
  results.extend(branches)
  results.extend(leaves)
  return json.dumps(results)
def pickle_nodes(nodes):
  """Pickle find() results (path, is_leaf, and leaf intervals) for
  transmission to a remote requester."""
  payload = []
  for node in nodes:
    record = dict(path=node.path, is_leaf=node.is_leaf)
    if node.is_leaf:
      # Only leaves carry data; ship their available time ranges too.
      record['intervals'] = node.intervals
    payload.append(record)
  return pickle.dumps(payload, protocol=-1)
def any(iterable): #python2.4 compatibility
  """Fallback for the builtin any() on pre-2.5 interpreters: return True
  as soon as one truthy element is seen, else False."""
  for element in iterable:
    if element:
      return True
  return False
| |
import sublime
import sublime_plugin
from .edit import Edit
from .view import ViewMeta
from .vim import Vim, VISUAL_MODES
class ActualVim(ViewMeta):
    """Bridges one Sublime view to an embedded Vim instance.

    Mirrors the view's buffer into vim, relays vim state (mode, command
    line, visual selection) back into the view, and can mirror vim's raw
    terminal into a separate "(tty)" monitor view.
    """

    def __init__(self, view):
        super().__init__(view)
        # Proxy views (tty monitor, input panel) never get their own vim.
        if view.settings().get('actual_proxy'):
            return
        view.settings().set('actual_intercept', True)
        view.settings().set('actual_mode', True)
        self.vim = vim = Vim(view, update=self.update, modify=self.modify)
        vim.set_path(view.file_name())
        # Seed vim with the view's current buffer contents.
        vim.insert(0, view.substr(sublime.Region(0, view.size())))
        vim.init_done()
        # view.set_read_only(False)
        self.output = None

    @property
    def actual(self):
        # True when this view is live and managed by ActualVim.
        return self.view and self.view.settings().get('actual_mode')

    def monitor(self):
        """Open a scratch "(tty)" view mirroring vim's raw terminal."""
        if self.output:
            return
        window = sublime.active_window()
        self.output = output = window.new_file()
        # Register the monitor as a proxy so its keypresses route back here.
        ActualVim.views[output.id()] = self
        output.settings().set('actual_proxy', True)
        output.set_read_only(True)
        output.set_scratch(True)
        output.set_name('(tty)')
        output.settings().set('actual_intercept', True)
        output.settings().set('actual_mode', True)
        with Edit(output) as edit:
            edit.insert(0, self.vim.tty.dump())
        self.vim.monitor = output
        # move the monitor view to a different group
        if window.num_groups() > 1:
            target = int(not window.active_group())
            window.set_view_index(output, target, 0)

    def update(self, vim, dirty, moved):
        """Vim-side callback: refresh status bar, prompt panel, selection."""
        mode = vim.mode
        view = vim.view
        tty = vim.tty
        # Mirror vim's command line into the Sublime status bar.
        if vim.cmdline:
            view.set_status('actual', vim.cmdline)
        else:
            view.erase_status('actual')
        # Cursor sitting on the last tty row suggests a ':' or '/' prompt.
        if tty.row == tty.rows and tty.col > 0:
            char = tty.buf[tty.row - 1][0]
            if char in ':/':
                if vim.panel:
                    # we already have a panel
                    panel = vim.panel.panel
                    with Edit(panel) as edit:
                        edit.replace(sublime.Region(0, panel.size()), vim.cmdline)
                else:
                    # vim is prompting for input
                    row, col = (tty.row - 1, tty.col - 1)
                    vim.panel = ActualPanel(self)
                    vim.panel.show(char)
                return
        elif vim.panel:
            # Prompt is gone; tear the panel down.
            vim.panel.close()
            vim.panel = None
        if mode in VISUAL_MODES:
            # Recreate vim's visual selection as Sublime regions.
            def select():
                v = ActualVim.get(view)
                start = vim.visual
                end = (vim.row, vim.col)
                regions = v.visual(vim.mode, start, end)
                view.sel().clear()
                for r in regions:
                    view.sel().add(sublime.Region(*r))
            Edit.defer(view, select)
            return
        else:
            vim.update_cursor()

    def modify(self, vim):
        # Buffer-modification callback; intentionally a no-op for now.
        pass

    def close(self, view):
        """Close the monitor view and shut vim down when the view closes."""
        if self.output:
            self.output.close()
            self.output = None
        if view == self.view:
            self.view.close()
            self.vim.close()

    def set_path(self, path):
        # Keep vim's file name in sync with the view's path.
        self.vim.set_path(path)
class ActualKeypress(sublime_plugin.TextCommand):
    """Forward a single keypress from Sublime to the embedded vim."""

    def run(self, edit, key):
        v = ActualVim.get(self.view, exact=False)
        # Bug fix: the original read v.vim.mode before checking v, so a
        # view without an ActualVim instance raised AttributeError when
        # escape/ctrl+C/ctrl+[ was pressed.
        if not (v and v.actual):
            return
        if key in ("escape", "ctrl+C", "ctrl+["):
            settings = sublime.load_settings("actual.sublime-settings")
            # With insert_lock enabled, swallow keys that would leave
            # insert mode.
            if v.vim.mode == "i" and settings.get("insert_lock", False):
                return
        v.vim.press(key)
class ToggleInsertLock(sublime_plugin.ApplicationCommand):
    """Menu command that flips a boolean flag in actual.sublime-settings."""

    settings = sublime.load_settings("actual.sublime-settings")

    def run(self, setting):
        current = self.settings.get(setting)
        self.settings.set(setting, not current)
        sublime.save_settings("actual.sublime-settings")

    def is_checked(self, setting):
        # The menu checkbox simply mirrors the stored flag.
        return bool(self.settings.get(setting))
class ActualListener(sublime_plugin.EventListener):
    """Sublime event hooks keeping ActualVim instances in sync with views."""

    def on_new_async(self, view):
        # Attach (or create) an ActualVim instance for the new view.
        ActualVim.get(view)

    def on_load(self, view):
        ActualVim.get(view)

    def on_selection_modified_async(self, view):
        """Propagate a Sublime-side selection change into vim."""
        v = ActualVim.get(view, create=False)
        if v and v.actual:
            if not v.sel_changed():
                return
            sel = view.sel()
            if not sel:
                return
            vim = v.vim
            sel = sel[0]
            def cursor(args):
                # Reply from vim; args looks like "<buf> <lnum> <col> <offset>".
                buf, lnum, col, off = [int(a) for a in args.split(' ')]
                # see if we changed selection on Sublime's side
                if vim.mode in VISUAL_MODES:
                    start = vim.visual
                    end = lnum, col + 1
                    region = v.visual(vim.mode, start, end)[0]
                    if (sel.a, sel.b) == region:
                        return
                if off == sel.b or off > view.size():
                    return
                # selection didn't match Vim's, so let's change Vim's.
                if sel.b == sel.a:
                    # Empty selection: just move vim's cursor.
                    if vim.mode in VISUAL_MODES:
                        # vim.type('{}go'.format(sel.b))
                        vim.press('escape')
                    vim.set_cursor(sel.b, callback=vim.update_cursor)
                else:
                    # this is currently broken
                    return
                    # NOTE(review): everything below is intentionally
                    # unreachable until visual-from-Sublime is fixed.
                    if vim.mode != 'n':
                        vim.press('escape')
                    a, b = sel.a, sel.b
                    if b > a:
                        a += 1
                    else:
                        b += 1
                    vim.type('{}gov{}go'.format(a, b))
            vim.get_cursor(cursor)

    def on_modified(self, view):
        v = ActualVim.get(view, create=False)
        if v:
            # Record the new selection baseline for this edit.
            v.sel_changed()

    def on_close(self, view):
        v = ActualVim.get(view, create=False)
        if v:
            v.close(view)

    def on_post_save_async(self, view):
        v = ActualVim.get(view, create=False)
        if v:
            # Keep vim pointed at the (possibly renamed) file path.
            v.set_path(view.file_name())
class ActualPanel:
    """Sublime input panel shown when vim prompts for ':' or '/' input."""

    def __init__(self, actual):
        self.actual = actual
        self.vim = actual.vim
        self.view = actual.view
        self.panel = None

    def close(self):
        if self.panel:
            self.panel.close()

    def show(self, char):
        win = self.view.window()
        self.panel = win.show_input_panel('Vim', char, self.on_done, None, self.on_cancel)
        panel_settings = self.panel.settings()
        panel_settings.set('actual_intercept', True)
        panel_settings.set('actual_proxy', self.view.id())
        # Route the panel's keypresses back to the owning ActualVim.
        ActualVim.views[self.panel.id()] = self.actual

    def on_done(self, text):
        self.vim.press('enter')
        self.vim.panel = None

    def on_cancel(self):
        self.vim.press('escape')
        self.vim.panel = None
| |
#!/usr/bin/env python
# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import datetime
import httplib
import imp
import json
import os
import re
import subprocess
import sys
import textwrap
import time
import urllib
import urllib2
from git_recipes import GitRecipesMixin
from git_recipes import GitFailedException
# Keys into the per-script configuration dictionary handed to every Step;
# the values name the files and branches the release scripts work with.
PERSISTFILE_BASENAME = "PERSISTFILE_BASENAME"
BRANCHNAME = "BRANCHNAME"
DOT_GIT_LOCATION = "DOT_GIT_LOCATION"
VERSION_FILE = "VERSION_FILE"
CHANGELOG_FILE = "CHANGELOG_FILE"
CHANGELOG_ENTRY_FILE = "CHANGELOG_ENTRY_FILE"
COMMITMSG_FILE = "COMMITMSG_FILE"
PATCH_FILE = "PATCH_FILE"
def TextToFile(text, file_name):
  """Overwrite file_name with text."""
  with open(file_name, "w") as handle:
    handle.write(text)
def AppendToFile(text, file_name):
  """Append text to file_name, creating it if necessary."""
  with open(file_name, "a") as handle:
    handle.write(text)
def LinesInFile(file_name):
  """Lazily yield the lines of file_name, newlines preserved."""
  with open(file_name) as handle:
    for line in handle:
      yield line
def FileToText(file_name):
  """Return the whole contents of file_name as one string."""
  with open(file_name) as handle:
    return handle.read()
def MSub(rexp, replacement, text):
  """re.sub with MULTILINE enabled, so ^ and $ anchor at every line."""
  pattern = re.compile(rexp, re.MULTILINE)
  return pattern.sub(replacement, text)
def Fill80(line):
  """Strip *line*, turn tabs into single spaces, and wrap it to 80 columns
  with an 8-space hanging indent."""
  cleaned = line.strip().replace("\t", " ")
  return textwrap.fill(cleaned, width=80, initial_indent=" " * 8,
                       subsequent_indent=" " * 8)
def MakeComment(text):
  """Prefix every line of *text* with '#', consuming one leading space
  per line when present."""
  return re.sub(r"^( ?)", "#", text, flags=re.MULTILINE)
def StripComments(text):
  """Drop every line of *text* that starts with '#'.

  Splits on '\\n' rather than splitlines() so a trailing newline survives
  the round trip.
  """
  kept = [line for line in text.split("\n") if not line.startswith("#")]
  return "\n".join(kept)
def MakeChangeLogBody(commit_messages, auto_format=False):
  """Turn (title, body, author) commit tuples into changelog text.

  In auto_format mode only commits whose body sets LOG=Y/YES/TRUE are
  included, reverts and duplicate titles are skipped, and authors are
  omitted; otherwise every commit is listed with its author.
  """
  result = ""
  added_titles = set()
  for (title, body, author) in commit_messages:
    # TODO(machenbach): Better check for reverts. A revert should remove the
    # original CL from the actual log entry.
    title = title.strip()
    if auto_format:
      # Only add commits that set the LOG flag correctly.
      log_exp = r"^[ \t]*LOG[ \t]*=[ \t]*(?:(?:Y(?:ES)?)|TRUE)"
      if not re.search(log_exp, body, flags=re.I | re.M):
        continue
      # Never include reverts.
      if title.startswith("Revert "):
        continue
      # Don't include duplicates.
      if title in added_titles:
        continue
    # Add and format the commit's title and bug reference. Move dot to the end.
    added_titles.add(title)
    raw_title = re.sub(r"(\.|\?|!)$", "", title)
    bug_reference = MakeChangeLogBugReference(body)
    space = " " if bug_reference else ""
    result += "%s\n" % Fill80("%s%s%s." % (raw_title, space, bug_reference))
    # Append the commit's author for reference if not in auto-format mode.
    if not auto_format:
      result += "%s\n" % Fill80("(%s)" % author.strip())
    result += "\n"
  return result
def MakeChangeLogBugReference(body):
  """Grep for "BUG=xxxx" lines in the commit message and convert them to
  "(issue xxxx)".

  Recognizes "v8:<n>", "chromium:<n>" and bare "<n>" (treated as a
  Chromium bug); returns "" when no bug lines are found.
  """
  crbugs = []
  v8bugs = []
  def AddIssues(text):
    # Parse one line; accepts e.g. "BUG=v8:123, chromium:456, 789".
    ref = re.match(r"^BUG[ \t]*=[ \t]*(.+)$", text.strip())
    if not ref:
      return
    for bug in ref.group(1).split(","):
      bug = bug.strip()
      match = re.match(r"^v8:(\d+)$", bug)
      if match:
        v8bugs.append(int(match.group(1)))
      else:
        match = re.match(r"^(?:chromium:)?(\d+)$", bug)
        if match:
          crbugs.append(int(match.group(1)))
  # Add issues to crbugs and v8bugs. Bug fix: this used
  # map(AddIssues, ...) purely for its side effect, which silently does
  # nothing on Python 3 where map() is lazy.
  for line in body.splitlines():
    AddIssues(line)
  # Filter duplicates, sort, stringify. (List comprehensions instead of
  # map(str, ...) so the result is a real list on Python 3 as well.)
  crbugs = [str(b) for b in sorted(set(crbugs))]
  v8bugs = [str(b) for b in sorted(set(v8bugs))]
  bug_groups = []
  def FormatIssues(prefix, bugs):
    # Emits "issue 1" vs "issues 1, 2".
    if len(bugs) > 0:
      plural = "s" if len(bugs) > 1 else ""
      bug_groups.append("%sissue%s %s" % (prefix, plural, ", ".join(bugs)))
  FormatIssues("", v8bugs)
  FormatIssues("Chromium ", crbugs)
  if len(bug_groups) > 0:
    return "(%s)" % ", ".join(bug_groups)
  else:
    return ""
# Some commands don't like the pipe, e.g. calling vi from within the script or
# from subscripts like git cl upload.
def Command(cmd, args="", prefix="", pipe=True):
  """Run a shell command.

  Returns the captured stdout when pipe=True, the exit status when
  pipe=False, and None when the command exits non-zero.
  """
  # TODO(machenbach): Use timeout.
  cmd_line = "%s %s %s" % (prefix, cmd, args)
  print "Command: %s" % cmd_line
  sys.stdout.flush()
  try:
    if pipe:
      return subprocess.check_output(cmd_line, shell=True)
    else:
      return subprocess.check_call(cmd_line, shell=True)
  except subprocess.CalledProcessError:
    return None
  finally:
    # Keep output ordered when subprocesses write to the same terminal.
    sys.stdout.flush()
    sys.stderr.flush()
# Wrapper for side effects.
class SideEffectHandler(object):  # pragma: no cover
  """Funnels all side effects (subprocesses, stdin, network, clock) through
  one object so tests can substitute a mock implementation."""

  def Call(self, fun, *args, **kwargs):
    return fun(*args, **kwargs)

  def Command(self, cmd, args="", prefix="", pipe=True):
    return Command(cmd, args, prefix, pipe)

  def ReadLine(self):
    return sys.stdin.readline().strip()

  def ReadURL(self, url, params=None):
    # pylint: disable=E1121
    # The third positional argument is the timeout in seconds.
    url_fh = urllib2.urlopen(url, params, 60)
    try:
      return url_fh.read()
    finally:
      url_fh.close()

  def ReadClusterFuzzAPI(self, api_key, **params):
    """POST a query to the ClusterFuzz backend and return parsed JSON."""
    params["api_key"] = api_key.strip()
    params = urllib.urlencode(params)
    headers = {"Content-type": "application/x-www-form-urlencoded"}
    conn = httplib.HTTPSConnection("backend-dot-cluster-fuzz.appspot.com")
    conn.request("POST", "/_api/", params, headers)
    response = conn.getresponse()
    data = response.read()
    try:
      return json.loads(data)
    except:
      # Dump the raw payload to help diagnose auth/format problems.
      print data
      print "ERROR: Could not read response. Is your key valid?"
      raise

  def Sleep(self, seconds):
    time.sleep(seconds)

  def GetDate(self):
    return datetime.date.today().strftime("%Y-%m-%d")
# Shared real-side-effect instance; tests inject their own handler instead.
DEFAULT_SIDE_EFFECT_HANDLER = SideEffectHandler()
class NoRetryException(Exception):
  """Raised from a retried callback to make Step.Retry abort immediately."""
  pass
class Step(GitRecipesMixin):
  """A single resumable unit of work in a release script.

  All steps of a script share one state dict which is persisted to a JSON
  file after every step, so an interrupted run can be resumed later.
  """

  def __init__(self, text, requires, number, config, state, options, handler):
    self._text = text
    self._requires = requires
    self._number = number
    self._config = config
    self._state = state
    self._options = options
    self._side_effect_handler = handler
    assert self._number >= 0
    assert self._config is not None
    assert self._state is not None
    assert self._side_effect_handler is not None

  def __getitem__(self, key):
    # Convenience method to allow direct [] access on step classes for
    # manipulating the backed state dict.
    return self._state[key]

  def __setitem__(self, key, value):
    # Convenience method to allow direct [] access on step classes for
    # manipulating the backed state dict.
    self._state[key] = value

  def Config(self, key):
    # Look up a value in the static script configuration.
    return self._config[key]

  def Run(self):
    """Restore persisted state, run this step, then persist state again."""
    # Restore state.
    state_file = "%s-state.json" % self._config[PERSISTFILE_BASENAME]
    if not self._state and os.path.exists(state_file):
      self._state.update(json.loads(FileToText(state_file)))

    # Skip step if requirement is not met.
    if self._requires and not self._state.get(self._requires):
      return

    print ">>> Step %d: %s" % (self._number, self._text)
    try:
      return self.RunStep()
    finally:
      # Persist state.
      TextToFile(json.dumps(self._state), state_file)

  def RunStep(self):  # pragma: no cover
    # Subclasses implement the actual work here.
    raise NotImplementedError

  def Retry(self, cb, retry_on=None, wait_plan=None):
    """ Retry a function.
    Params:
      cb: The function to retry.
      retry_on: A callback that takes the result of the function and returns
                True if the function should be retried. A function throwing
                an exception is always retried.
      wait_plan: A list of waiting delays between retries in seconds. The
                 maximum number of retries is len(wait_plan).
    """
    retry_on = retry_on or (lambda x: False)
    wait_plan = list(wait_plan or [])
    # Reversed so pop() yields the delays in their given order.
    wait_plan.reverse()
    while True:
      got_exception = False
      try:
        result = cb()
      except NoRetryException, e:
        # Explicit abort requested by the callback: never retry.
        raise e
      except Exception:
        got_exception = True
      if got_exception or retry_on(result):
        if not wait_plan:  # pragma: no cover
          raise Exception("Retried too often. Giving up.")
        wait_time = wait_plan.pop()
        print "Waiting for %f seconds." % wait_time
        self._side_effect_handler.Sleep(wait_time)
        print "Retrying..."
      else:
        return result

  def ReadLine(self, default=None):
    # Don't prompt in forced mode.
    if self._options.force_readline_defaults and default is not None:
      print "%s (forced)" % default
      return default
    else:
      return self._side_effect_handler.ReadLine()

  def Git(self, args="", prefix="", pipe=True, retry_on=None):
    """Run a git command, retrying up to twice; raise if it still fails."""
    cmd = lambda: self._side_effect_handler.Command("git", args, prefix, pipe)
    result = self.Retry(cmd, retry_on, [5, 30])
    if result is None:
      raise GitFailedException("'git %s' failed." % args)
    return result

  def SVN(self, args="", prefix="", pipe=True, retry_on=None):
    """Run an svn command, retrying up to twice."""
    cmd = lambda: self._side_effect_handler.Command("svn", args, prefix, pipe)
    return self.Retry(cmd, retry_on, [5, 30])

  def Editor(self, args):
    # Only open an editor when the script requires one (manual mode).
    if self._options.requires_editor:
      return self._side_effect_handler.Command(os.environ["EDITOR"], args,
                                               pipe=False)

  def ReadURL(self, url, params=None, retry_on=None, wait_plan=None):
    """Fetch a URL with retries (default waits: 3s, 60s, 600s)."""
    wait_plan = wait_plan or [3, 60, 600]
    cmd = lambda: self._side_effect_handler.ReadURL(url, params)
    return self.Retry(cmd, retry_on, wait_plan)

  def GetDate(self):
    return self._side_effect_handler.GetDate()

  def Die(self, msg=""):
    """Abort the script with an exception, optionally printing a reason."""
    if msg != "":
      print "Error: %s" % msg
    print "Exiting"
    raise Exception(msg)

  def DieNoManualMode(self, msg=""):
    # Guard for code paths that only make sense interactively.
    if not self._options.manual:  # pragma: no cover
      msg = msg or "Only available in manual mode."
      self.Die(msg)

  def Confirm(self, msg):
    """Ask a yes/no question; an empty answer counts as yes."""
    print "%s [Y/n] " % msg,
    answer = self.ReadLine(default="Y")
    return answer == "" or answer == "Y" or answer == "y"

  def DeleteBranch(self, name):
    """Delete a local git branch, asking the user for permission first."""
    for line in self.GitBranch().splitlines():
      if re.match(r".*\s+%s$" % name, line):
        msg = "Branch %s exists, do you want to delete it?" % name
        if self.Confirm(msg):
          self.GitDeleteBranch(name)
          print "Branch %s deleted." % name
        else:
          msg = "Can't continue. Please delete branch %s and try again." % name
          self.Die(msg)

  def InitialEnvironmentChecks(self):
    """Sanity checks before doing any work: git checkout and EDITOR."""
    # Cancel if this is not a git checkout.
    if not os.path.exists(self._config[DOT_GIT_LOCATION]):  # pragma: no cover
      self.Die("This is not a git checkout, this script won't work for you.")

    # Cancel if EDITOR is unset or not executable.
    if (self._options.requires_editor and (not os.environ.get("EDITOR") or
        Command("which", os.environ["EDITOR"]) is None)):  # pragma: no cover
      self.Die("Please set your EDITOR environment variable, you'll need it.")

  def CommonPrepare(self):
    # Check for a clean workdir.
    if not self.GitIsWorkdirClean():  # pragma: no cover
      self.Die("Workspace is not clean. Please commit or undo your changes.")

    # Persist current branch.
    self["current_branch"] = self.GitCurrentBranch()

    # Fetch unfetched revisions.
    self.GitSVNFetch()

  def PrepareBranch(self):
    # Delete the branch that will be created later if it exists already.
    self.DeleteBranch(self._config[BRANCHNAME])

  def CommonCleanup(self):
    """Return to the original branch and remove temporary artifacts."""
    self.GitCheckout(self["current_branch"])
    if self._config[BRANCHNAME] != self["current_branch"]:
      self.GitDeleteBranch(self._config[BRANCHNAME])

    # Clean up all temporary files.
    Command("rm", "-f %s*" % self._config[PERSISTFILE_BASENAME])

  def ReadAndPersistVersion(self, prefix=""):
    """Parse the #define'd version numbers from VERSION_FILE into state,
    stored under <prefix>major/minor/build/patch."""
    def ReadAndPersist(var_name, def_name):
      # Reads `line` from the enclosing loop below.
      match = re.match(r"^#define %s\s+(\d*)" % def_name, line)
      if match:
        value = match.group(1)
        self["%s%s" % (prefix, var_name)] = value
    for line in LinesInFile(self._config[VERSION_FILE]):
      for (var_name, def_name) in [("major", "MAJOR_VERSION"),
                                   ("minor", "MINOR_VERSION"),
                                   ("build", "BUILD_NUMBER"),
                                   ("patch", "PATCH_LEVEL")]:
        ReadAndPersist(var_name, def_name)

  def WaitForLGTM(self):
    """Block until the user types LGTM (auto-answered in forced mode)."""
    print ("Please wait for an LGTM, then type \"LGTM<Return>\" to commit "
           "your change. (If you need to iterate on the patch or double check "
           "that it's sane, do so in another shell, but remember to not "
           "change the headline of the uploaded CL.")
    answer = ""
    while answer != "LGTM":
      print "> ",
      answer = self.ReadLine(None if self._options.wait_for_lgtm else "LGTM")
      if answer != "LGTM":
        print "That was not 'LGTM'."

  def WaitForResolvingConflicts(self, patch_file):
    """Block until the user resolves patch conflicts (manual mode only)."""
    # NOTE(review): the "%s" placeholder below is never substituted with
    # patch_file — the message prints it literally; a '% patch_file' seems
    # to be missing.
    print("Applying the patch \"%s\" failed. Either type \"ABORT<Return>\", "
          "or resolve the conflicts, stage *all* touched files with "
          "'git add', and type \"RESOLVED<Return>\"")
    self.DieNoManualMode()
    answer = ""
    while answer != "RESOLVED":
      if answer == "ABORT":
        self.Die("Applying the patch failed.")
      if answer != "":
        print "That was not 'RESOLVED' or 'ABORT'."
      print "> ",
      answer = self.ReadLine()

  # Takes a file containing the patch to apply as first argument.
  def ApplyPatch(self, patch_file, revert=False):
    try:
      self.GitApplyPatch(patch_file, revert)
    except GitFailedException:
      # Fall back to manual conflict resolution.
      self.WaitForResolvingConflicts(patch_file)

  def FindLastTrunkPush(self, parent_hash="", include_patches=False):
    """Return the hash of the most recent "Version X.Y.Z" push commit."""
    push_pattern = "^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*"
    if not include_patches:
      # Non-patched versions only have three numbers followed by the "(based
      # on...) comment."
      push_pattern += " (based"
    branch = "" if parent_hash else "svn/trunk"
    return self.GitLog(n=1, format="%H", grep=push_pattern,
                       parent_hash=parent_hash, branch=branch)
class UploadStep(Step):
  """Upload the current branch to code review."""
  MESSAGE = "Upload for code review."

  def RunStep(self):
    if self._options.reviewer:
      print "Using account %s for review." % self._options.reviewer
      reviewer = self._options.reviewer
    else:
      # Without --reviewer only manual mode may prompt for one.
      print "Please enter the email address of a V8 reviewer for your patch: ",
      self.DieNoManualMode("A reviewer must be specified in forced mode.")
      reviewer = self.ReadLine()
    self.GitUpload(reviewer, self._options.author, self._options.force_upload)
class DetermineV8Sheriff(Step):
  """Look up the current V8 sheriff and make them the default reviewer."""
  MESSAGE = "Determine the V8 sheriff for code review."

  def RunStep(self):
    self["sheriff"] = None
    if not self._options.sheriff:  # pragma: no cover
      return

    try:
      # The googlers mapping maps @google.com accounts to @chromium.org
      # accounts.
      googlers = imp.load_source('googlers_mapping',
                                 self._options.googlers_mapping)
      googlers = googlers.list_to_dict(googlers.get_list())
    except:  # pragma: no cover
      # Best-effort: without the mapping we simply skip this step.
      print "Skip determining sheriff without googler mapping."
      return

    # The sheriff determined by the rotation on the waterfall has a
    # @google.com account.
    url = "https://chromium-build.appspot.com/p/chromium/sheriff_v8.js"
    match = re.match(r"document\.write\('(\w+)'\)", self.ReadURL(url))

    # If "channel is sheriff", we can't match an account.
    if match:
      g_name = match.group(1)
      # Prefer the mapped account; fall back to <name>@chromium.org.
      self["sheriff"] = googlers.get(g_name + "@google.com",
                                     g_name + "@chromium.org")
      self._options.reviewer = self["sheriff"]
      print "Found active sheriff: %s" % self["sheriff"]
    else:
      print "No active sheriff found."
def MakeStep(step_class=Step, number=0, state=None, config=None,
             options=None, side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER):
  """Instantiate step_class, resolving its optional MESSAGE/REQUIRES class
  attributes and defaulting state/config to fresh dictionaries."""
  # Allow to pass in empty dictionaries.
  if state is None:
    state = {}
  if config is None:
    config = {}
  # getattr with a default is equivalent to try/except AttributeError.
  message = getattr(step_class, "MESSAGE", step_class.__name__)
  requires = getattr(step_class, "REQUIRES", None)
  return step_class(message, requires, number=number, config=config,
                    state=state, options=options,
                    handler=side_effect_handler)
class ScriptsBase(object):
  """Base class for release scripts.

  Subclasses supply the step list and script-specific options; this class
  handles argument parsing, state setup, and running the steps in order.
  """
  # TODO(machenbach): Move static config here.
  def __init__(self, config, side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER,
               state=None):
    self._config = config
    self._side_effect_handler = side_effect_handler
    self._state = state if state is not None else {}

  def _Description(self):
    # Optional argparse description; subclasses may override.
    return None

  def _PrepareOptions(self, parser):
    # Hook for subclasses to add script-specific arguments.
    pass

  def _ProcessOptions(self, options):
    # Hook for subclasses to validate options; return False to abort.
    return True

  def _Steps(self):  # pragma: no cover
    # Subclasses must return the ordered list of Step classes to run.
    raise Exception("Not implemented.")

  def MakeOptions(self, args=None):
    """Parse and validate command line options; return None on bad input."""
    parser = argparse.ArgumentParser(description=self._Description())
    parser.add_argument("-a", "--author", default="",
                        help="The author email used for rietveld.")
    parser.add_argument("-g", "--googlers-mapping",
                        help="Path to the script mapping google accounts.")
    parser.add_argument("-r", "--reviewer", default="",
                        help="The account name to be used for reviews.")
    parser.add_argument("--sheriff", default=False, action="store_true",
                        help=("Determine current sheriff to review CLs. On "
                              "success, this will overwrite the reviewer "
                              "option."))
    parser.add_argument("-s", "--step",
                        help="Specify the step where to start work. Default: 0.",
                        default=0, type=int)
    self._PrepareOptions(parser)

    if args is None:  # pragma: no cover
      options = parser.parse_args()
    else:
      options = parser.parse_args(args)

    # Process common options.
    if options.step < 0:  # pragma: no cover
      print "Bad step number %d" % options.step
      parser.print_help()
      return None
    if options.sheriff and not options.googlers_mapping:  # pragma: no cover
      print "To determine the current sheriff, requires the googler mapping"
      parser.print_help()
      return None

    # Defaults for options, common to all scripts.
    options.manual = getattr(options, "manual", True)
    options.force = getattr(options, "force", False)

    # Derived options.
    options.requires_editor = not options.force
    options.wait_for_lgtm = not options.force
    options.force_readline_defaults = not options.manual
    options.force_upload = not options.manual

    # Process script specific options.
    if not self._ProcessOptions(options):
      parser.print_help()
      return None
    return options

  def RunSteps(self, step_classes, args=None):
    """Run the given steps in order; return 0 on success, 1 on failure."""
    options = self.MakeOptions(args)
    if not options:
      return 1

    # A fresh run (step 0) discards any previously persisted state.
    state_file = "%s-state.json" % self._config[PERSISTFILE_BASENAME]
    if options.step == 0 and os.path.exists(state_file):
      os.remove(state_file)

    steps = []
    for (number, step_class) in enumerate(step_classes):
      steps.append(MakeStep(step_class, number, self._state, self._config,
                            options, self._side_effect_handler))
    for step in steps[options.step:]:
      if step.Run():
        return 1
    return 0

  def Run(self, args=None):
    return self.RunSteps(self._Steps(), args)
| |
"""Tests for the Config Entry Flow helper."""
import pytest
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.config import async_process_ha_core_config
from homeassistant.helpers import config_entry_flow
from tests.async_mock import Mock, patch
from tests.common import (
MockConfigEntry,
MockModule,
mock_entity_platform,
mock_integration,
)
@pytest.fixture
def discovery_flow_conf(hass):
    """Register a handler."""
    # Shared mutable flag: tests flip "discovered" to control what the
    # mocked discovery function reports.
    handler_conf = {"discovered": False}

    async def has_discovered_devices(hass):
        """Mock if we have discovered devices."""
        return handler_conf["discovered"]

    # patch.dict keeps the handler registration from leaking into other tests.
    with patch.dict(config_entries.HANDLERS):
        config_entry_flow.register_discovery_flow(
            "test", "Test", has_discovered_devices, config_entries.CONN_CLASS_LOCAL_POLL
        )
        yield handler_conf
@pytest.fixture
def webhook_flow_conf(hass):
    """Register a handler."""
    with patch.dict(config_entries.HANDLERS):
        # The final argument is allow_multiple: one single-entry handler
        # and one that permits multiple entries.
        config_entry_flow.register_webhook_flow("test_single", "Test Single", {}, False)
        config_entry_flow.register_webhook_flow(
            "test_multiple", "Test Multiple", {}, True
        )
        yield {}
async def test_single_entry_allowed(hass, discovery_flow_conf):
    """Test only a single entry is allowed."""
    flow = config_entries.HANDLERS["test"]()
    flow.hass = hass
    flow.context = {}

    # An existing entry for the domain must abort a new user flow.
    MockConfigEntry(domain="test").add_to_hass(hass)
    result = await flow.async_step_user()

    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "single_instance_allowed"
async def test_user_no_devices_found(hass, discovery_flow_conf):
    """Test if no devices found."""
    flow = config_entries.HANDLERS["test"]()
    flow.hass = hass
    flow.context = {"source": config_entries.SOURCE_USER}
    # The fixture leaves "discovered" False, so confirming must abort.
    result = await flow.async_step_confirm(user_input={})

    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "no_devices_found"
async def test_user_has_confirmation(hass, discovery_flow_conf):
    """Test user requires confirmation to setup."""
    discovery_flow_conf["discovered"] = True
    mock_entity_platform(hass, "config_flow.test", None)

    result = await hass.config_entries.flow.async_init(
        "test", context={"source": config_entries.SOURCE_USER}, data={}
    )

    # First a confirm form is shown, then submitting it creates the entry.
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "confirm"

    result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
@pytest.mark.parametrize("source", ["discovery", "ssdp", "zeroconf"])
async def test_discovery_single_instance(hass, discovery_flow_conf, source):
    """Test we not allow duplicates."""
    flow = config_entries.HANDLERS["test"]()
    flow.hass = hass
    flow.context = {}

    MockConfigEntry(domain="test").add_to_hass(hass)
    # Dispatch to async_step_discovery/ssdp/zeroconf via the parameter.
    result = await getattr(flow, f"async_step_{source}")({})

    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "single_instance_allowed"
@pytest.mark.parametrize("source", ["discovery", "ssdp", "zeroconf"])
async def test_discovery_confirmation(hass, discovery_flow_conf, source):
    """Test we ask for confirmation via discovery."""
    handler = config_entries.HANDLERS["test"]()
    handler.hass = hass
    handler.context = {"source": source}

    step = getattr(handler, f"async_step_{source}")
    res = await step({})
    assert res["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert res["step_id"] == "confirm"

    res = await handler.async_step_confirm({})
    assert res["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
async def test_multiple_discoveries(hass, discovery_flow_conf):
    """Test we only create one instance for multiple discoveries."""
    mock_entity_platform(hass, "config_flow.test", None)

    first = await hass.config_entries.flow.async_init(
        "test", context={"source": config_entries.SOURCE_DISCOVERY}, data={}
    )
    assert first["type"] == data_entry_flow.RESULT_TYPE_FORM

    # A second discovery for the same handler must be rejected.
    second = await hass.config_entries.flow.async_init(
        "test", context={"source": config_entries.SOURCE_DISCOVERY}, data={}
    )
    assert second["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_only_one_in_progress(hass, discovery_flow_conf):
    """Test a user initialized one will finish and cancel discovered one."""
    mock_entity_platform(hass, "config_flow.test", None)

    # A discovery starts the first flow.
    res = await hass.config_entries.flow.async_init(
        "test", context={"source": config_entries.SOURCE_DISCOVERY}, data={}
    )
    assert res["type"] == data_entry_flow.RESULT_TYPE_FORM

    # The user then starts a second flow.
    res = await hass.config_entries.flow.async_init(
        "test", context={"source": config_entries.SOURCE_USER}, data={}
    )
    assert res["type"] == data_entry_flow.RESULT_TYPE_FORM

    # Both flows are still pending at this point.
    assert len(hass.config_entries.flow.async_progress()) == 2

    # Confirming the user flow finishes it and aborts the discovery flow.
    res = await hass.config_entries.flow.async_configure(res["flow_id"], {})
    assert res["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert len(hass.config_entries.flow.async_progress()) == 0
async def test_import_abort_discovery(hass, discovery_flow_conf):
    """Test import will finish and cancel discovered one."""
    mock_entity_platform(hass, "config_flow.test", None)

    # Discovery starts a flow first.
    res = await hass.config_entries.flow.async_init(
        "test", context={"source": config_entries.SOURCE_DISCOVERY}, data={}
    )
    assert res["type"] == data_entry_flow.RESULT_TYPE_FORM

    # An import flow completes immediately...
    res = await hass.config_entries.flow.async_init(
        "test", context={"source": config_entries.SOURCE_IMPORT}, data={}
    )
    assert res["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY

    # ...and the pending discovery flow is aborted.
    assert len(hass.config_entries.flow.async_progress()) == 0
async def test_import_no_confirmation(hass, discovery_flow_conf):
    """Test import requires no confirmation to set up."""
    discovery_flow_conf["discovered"] = True

    handler = config_entries.HANDLERS["test"]()
    handler.hass = hass
    handler.context = {}

    res = await handler.async_step_import(None)
    assert res["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
async def test_import_single_instance(hass, discovery_flow_conf):
    """Test import doesn't create second instance."""
    discovery_flow_conf["discovered"] = True
    MockConfigEntry(domain="test").add_to_hass(hass)

    handler = config_entries.HANDLERS["test"]()
    handler.hass = hass
    handler.context = {}

    res = await handler.async_step_import(None)
    assert res["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_ignored_discoveries(hass, discovery_flow_conf):
    """Test we can ignore discovered entries."""
    mock_entity_platform(hass, "config_flow.test", None)

    res = await hass.config_entries.flow.async_init(
        "test", context={"source": config_entries.SOURCE_DISCOVERY}, data={}
    )
    assert res["type"] == data_entry_flow.RESULT_TYPE_FORM

    # Locate the in-progress flow that was just created.
    pending = None
    for prog in hass.config_entries.flow.async_progress():
        if prog["flow_id"] == res["flow_id"]:
            pending = prog
            break

    # Ignore it.
    await hass.config_entries.flow.async_init(
        pending["handler"],
        context={"source": config_entries.SOURCE_IGNORE},
        data={"unique_id": pending["context"]["unique_id"]},
    )

    # A second discovery for the ignored entry should abort.
    res = await hass.config_entries.flow.async_init(
        "test", context={"source": config_entries.SOURCE_DISCOVERY}, data={}
    )
    assert res["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_webhook_single_entry_allowed(hass, webhook_flow_conf):
    """Test only a single entry is allowed."""
    MockConfigEntry(domain="test_single").add_to_hass(hass)

    handler = config_entries.HANDLERS["test_single"]()
    handler.hass = hass

    res = await handler.async_step_user()
    assert res["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert res["reason"] == "one_instance_allowed"
async def test_webhook_multiple_entries_allowed(hass, webhook_flow_conf):
    """Test multiple entries are allowed when specified."""
    MockConfigEntry(domain="test_multiple").add_to_hass(hass)
    hass.config.api = Mock(base_url="http://example.com")

    handler = config_entries.HANDLERS["test_multiple"]()
    handler.hass = hass

    res = await handler.async_step_user()
    assert res["type"] == data_entry_flow.RESULT_TYPE_FORM
async def test_webhook_config_flow_registers_webhook(hass, webhook_flow_conf):
    """Test setting up an entry creates a webhook."""
    await async_process_ha_core_config(
        hass, {"external_url": "https://example.com"},
    )

    handler = config_entries.HANDLERS["test_single"]()
    handler.hass = hass

    res = await handler.async_step_user(user_input={})
    assert res["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert res["data"]["webhook_id"] is not None
async def test_webhook_create_cloudhook(hass, webhook_flow_conf):
    """Test a cloudhook is created when logged in to cloud, and removed with the entry."""
    assert await setup.async_setup_component(hass, "cloud", {})
    async_setup_entry = Mock(return_value=True)
    async_unload_entry = Mock(return_value=True)

    # Integration whose entry removal goes through webhook_async_remove_entry,
    # which is what should delete the cloudhook again.
    mock_integration(
        hass,
        MockModule(
            "test_single",
            async_setup_entry=async_setup_entry,
            async_unload_entry=async_unload_entry,
            async_remove_entry=config_entry_flow.webhook_async_remove_entry,
        ),
    )
    mock_entity_platform(hass, "config_flow.test_single", None)

    result = await hass.config_entries.flow.async_init(
        "test_single", context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM

    # With an active, logged-in cloud subscription, confirming the flow
    # creates a cloudhook and surfaces its URL in the placeholders.
    with patch(
        "hass_nabucasa.cloudhooks.Cloudhooks.async_create",
        return_value={"cloudhook_url": "https://example.com"},
    ) as mock_create, patch(
        "homeassistant.components.cloud.async_active_subscription", return_value=True
    ), patch(
        "homeassistant.components.cloud.async_is_logged_in", return_value=True
    ):
        result = await hass.config_entries.flow.async_configure(result["flow_id"], {})

    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["description_placeholders"]["webhook_url"] == "https://example.com"
    assert len(mock_create.mock_calls) == 1
    assert len(async_setup_entry.mock_calls) == 1

    # Removing the entry deletes the cloudhook.
    with patch(
        "hass_nabucasa.cloudhooks.Cloudhooks.async_delete",
        return_value={"cloudhook_url": "https://example.com"},
    ) as mock_delete:
        result = await hass.config_entries.async_remove(result["result"].entry_id)

    assert len(mock_delete.mock_calls) == 1
    assert result["require_restart"] is False
| |
##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test the new API for making and checking interface declarations
"""
import doctest
import unittest
from zope.interface import Interface, implements
from zope.interface import directlyProvides, providedBy
from zope.interface import classImplements, implementedBy, implementsOnly
# Interfaces and classes used as fixtures by the tests and doctests below.
class I1(Interface): pass
class I2(Interface): pass
class I3(Interface): pass
class I4(Interface): pass
class I5(Interface): pass

class A(object):
    implements(I1)

class B(object):
    implements(I2)

class C(A, B):
    implements(I3)

# COnly declares I3 *only*, discarding the interfaces inherited from A and B.
class COnly(A, B):
    implementsOnly(I3)

# Old-style spelling of the same declaration: assign __implemented__ directly.
class COnly_old(A, B):
    __implemented__ = I3

class D(COnly):
    implements(I5)
def test_ObjectSpecification_Simple():
    # Doctest: interfaces declared directly on the instance come before
    # those implemented by the class and its bases.
    """
    >>> c = C()
    >>> directlyProvides(c, I4)
    >>> [i.__name__ for i in providedBy(c)]
    ['I4', 'I3', 'I1', 'I2']
    """
def test_ObjectSpecification_Simple_w_only():
    # Doctest: implementsOnly(I3) discarded the interfaces of A and B,
    # so only the direct I4 and the class's I3 remain.
    """
    >>> c = COnly()
    >>> directlyProvides(c, I4)
    >>> [i.__name__ for i in providedBy(c)]
    ['I4', 'I3']
    """
def test_ObjectSpecification_Simple_old_style():
    # Doctest: the old-style direct __implemented__ assignment behaves
    # like implementsOnly().
    """
    >>> c = COnly_old()
    >>> directlyProvides(c, I4)
    >>> [i.__name__ for i in providedBy(c)]
    ['I4', 'I3']
    """
class Test(unittest.TestCase):
    # Note that most of the tests are in the doc strings of the
    # declarations module.

    def test_backward_compat(self):
        # Classes may assign __implemented__ directly (the old API) and mix
        # that with the implements() declaration in subclasses.
        class C1(object): __implemented__ = I1
        class C2(C1): __implemented__ = I2, I5
        class C3(C2): __implemented__ = I3, C2.__implemented__

        # Direct assignment leaves __implemented__ as a plain tuple.
        # (assertTrue replaces the deprecated assert_ alias.)
        self.assertTrue(C3.__implemented__.__class__ is tuple)

        self.assertEqual(
            [i.getName() for i in providedBy(C3())],
            ['I3', 'I2', 'I5'],
        )

        class C4(C3):
            implements(I4)

        self.assertEqual(
            [i.getName() for i in providedBy(C4())],
            ['I4', 'I3', 'I2', 'I5'],
        )
        self.assertEqual(
            [i.getName() for i in C4.__implemented__],
            ['I4', 'I3', 'I2', 'I5'],
        )

        # Note that C3.__implemented__ should now be a sequence of interfaces
        self.assertEqual(
            [i.getName() for i in C3.__implemented__],
            ['I3', 'I2', 'I5'],
        )
        # assertFalse replaces the deprecated failIf alias.
        self.assertFalse(C3.__implemented__.__class__ is tuple)

    def test_module(self):
        # Declarations also work on modules via directlyProvides().
        from zope.interface.tests import m1, m2

        directlyProvides(m2,
                         m1.I1,
                         m1.I2,
                         )
        self.assertEqual(list(providedBy(m1)),
                         list(providedBy(m2)),
                         )

    def test_builtins(self):
        # Built-in types can be given declarations too; the original state
        # is restored afterwards so other tests are unaffected.
        # Setup
        intspec = implementedBy(int)
        olddeclared = intspec.declared

        classImplements(int, I1)
        class myint(int):
            implements(I2)

        x = 42
        self.assertEqual([i.getName() for i in providedBy(x)],
                         ['I1'])

        x = myint(42)
        directlyProvides(x, I3)
        self.assertEqual([i.getName() for i in providedBy(x)],
                         ['I3', 'I2', 'I1'])

        # cleanup
        intspec.declared = olddeclared
        classImplements(int)

        x = 42
        self.assertEqual([i.getName() for i in providedBy(x)],
                         [])
def test_signature_w_no_class_interfaces():
    # Doctest: an undeclared class provides nothing; after directlyProvides
    # the provided interfaces equal the directly-provided ones.
    """
    >>> from zope.interface import *
    >>> class C(object):
    ...     pass
    >>> c = C()
    >>> list(providedBy(c))
    []
    >>> class I(Interface):
    ...     pass
    >>> directlyProvides(c, I)
    >>> list(providedBy(c)) == list(directlyProvidedBy(c))
    1
    """
def test_classImplement_on_deeply_nested_classes():
    # Regression doctest: empty implements() on a deep hierarchy must not fail.
    """This test is in response to a bug found, which is why it's a bit
    contrived

    >>> from zope.interface import *
    >>> class B1(object):
    ...     pass
    >>> class B2(B1):
    ...     pass
    >>> class B3(B2):
    ...     pass
    >>> class D(object):
    ...     implements()
    >>> class S(B3, D):
    ...     implements()

    This failed due to a bug in the code for finding __providedBy__
    descriptors for old-style classes.
    """
def test_pickle_provides_specs():
    # Doctest: instance-level declarations survive a pickle round trip.
    # (Python 2 style output: providedBy checks print 0/1.)
    """
    >>> from pickle import dumps, loads
    >>> a = A()
    >>> I2.providedBy(a)
    0
    >>> directlyProvides(a, I2)
    >>> I2.providedBy(a)
    1
    >>> a2 = loads(dumps(a))
    >>> I2.providedBy(a2)
    1
    """
def test_that_we_dont_inherit_class_provides():
    # Doctest: classProvides() applies only to the class it appears in;
    # subclasses must not inherit the __provides__ descriptor.
    """
    >>> from zope.interface import classProvides
    >>> class X(object):
    ...     classProvides(I1)
    >>> class Y(X):
    ...     pass
    >>> [i.__name__ for i in X.__provides__]
    ['I1']
    >>> Y.__provides__
    Traceback (most recent call last):
    ...
    AttributeError: __provides__
    """
def test_that_we_dont_inherit_provides_optimizations():
    # Doctest: the cached __provides__ descriptor must not leak to
    # undeclared subclasses, which would yield wrong results.
    """
    When we make a declaration for a class, we install a __provides__
    descriptors that provides a default for instances that don't have
    instance-specific declarations:

    >>> class A(object):
    ...     implements(I1)
    >>> class B(object):
    ...     implements(I2)
    >>> [i.__name__ for i in A().__provides__]
    ['I1']
    >>> [i.__name__ for i in B().__provides__]
    ['I2']

    But it's important that we don't use this for subclasses without
    declarations.  This would cause incorrect results:

    >>> class X(A, B):
    ...     pass
    >>> X().__provides__
    Traceback (most recent call last):
    ...
    AttributeError: __provides__

    However, if we "induce" a declaration, by calling implementedBy
    (even indirectly through providedBy):

    >>> [i.__name__ for i in providedBy(X())]
    ['I1', 'I2']

    then the optimization will work:

    >>> [i.__name__ for i in X().__provides__]
    ['I1', 'I2']
    """
def test_classProvides_before_implements():
    # Doctest: classProvides and implements coexist; the class object and
    # its instances each report their own declarations.
    """Special descriptor for class __provides__

    The descriptor caches the implementedBy info, so that
    we can get declarations for objects without instance-specific
    interfaces a bit quicker.

    For example::

    >>> from zope.interface import Interface, classProvides
    >>> class IFooFactory(Interface):
    ...     pass
    >>> class IFoo(Interface):
    ...     pass
    >>> class C(object):
    ...     classProvides(IFooFactory)
    ...     implements(IFoo)
    >>> [i.getName() for i in C.__provides__]
    ['IFooFactory']
    >>> [i.getName() for i in C().__provides__]
    ['IFoo']
    """
def test_getting_spec_for_proxied_builtin_class():
    # Doctest: the BuiltinImplementationSpecifications registry lets
    # implementedBy() work for objects whose __dict__ is inaccessible.
    """
    In general, we should be able to get a spec
    for a proxied class if someone has declared or
    asked for a spec before.

    We don't want to depend on proxies in this (zope.interface)
    package, but we do want to work with proxies.  Proxies have the
    effect that a class's __dict__ cannot be gotten. Further, for
    built-in classes, we can't save, and thus, cannot get, any class
    attributes.  We'll emulate this by treating a plain object as a class:

    >>> cls = object()

    We'll create an implements specification:

    >>> import zope.interface.declarations
    >>> impl = zope.interface.declarations.Implements(I1, I2)

    Now, we'll emulate a declaration for a built-in type by putting
    it in BuiltinImplementationSpecifications:

    >>> zope.interface.declarations.BuiltinImplementationSpecifications[
    ...     cls] = impl

    Now, we should be able to get it back:

    >>> implementedBy(cls) is impl
    True

    Of course, we don't want to leave it there. :)

    >>> del zope.interface.declarations.BuiltinImplementationSpecifications[
    ...     cls]
    """
def test_declaration_get():
    # Doctest: Declaration.get() searches the interfaces in order and
    # honors changes to __bases__.
    """
    We can get definitions from a declaration:

    >>> import zope.interface
    >>> class I1(zope.interface.Interface):
    ...     a11 = zope.interface.Attribute('a11')
    ...     a12 = zope.interface.Attribute('a12')
    >>> class I2(zope.interface.Interface):
    ...     a21 = zope.interface.Attribute('a21')
    ...     a22 = zope.interface.Attribute('a22')
    ...     a12 = zope.interface.Attribute('a212')
    >>> class I11(I1):
    ...     a11 = zope.interface.Attribute('a111')

    >>> decl = zope.interface.Declaration(I11, I2)
    >>> decl.get('a11') is I11.get('a11')
    True
    >>> decl.get('a12') is I1.get('a12')
    True
    >>> decl.get('a21') is I2.get('a21')
    True
    >>> decl.get('a22') is I2.get('a22')
    True
    >>> decl.get('a')
    >>> decl.get('a', 42)
    42

    We get None even with no interfaces:

    >>> decl = zope.interface.Declaration()
    >>> decl.get('a11')
    >>> decl.get('a11', 42)
    42

    We get new data if we change interface bases:

    >>> decl.__bases__ = I11, I2
    >>> decl.get('a11') is I11.get('a11')
    True
    """
def test_classImplements_after_classImplementsOnly_issue_402():
    # Regression doctest: classImplements() must keep extending the
    # interface resolution order after an implementsOnly() declaration.
    """http://www.zope.org/Collectors/Zope3-dev/402

    >>> from zope.interface import *
    >>> class I1(Interface):
    ...     pass
    >>> class I2(Interface):
    ...     pass
    >>> class C:
    ...     implements(I1)
    >>> class C2:
    ...     implementsOnly(I2)
    >>> class I3(Interface):
    ...     pass

    >>> [i.__name__ for i in providedBy(C2()).__iro__]
    ['I2', 'Interface']

    >>> classImplements(C2, I3)
    >>> [i.__name__ for i in providedBy(C2()).__iro__]
    ['I2', 'I3', 'Interface']

    >>> class I4(Interface):
    ...     pass
    >>> classImplements(C2, I4)
    >>> [i.__name__ for i in providedBy(C2()).__iro__]
    ['I2', 'I3', 'I4', 'Interface']
    """
def test_picklability_of_implements_specifications():
    # Doctest: pickling an Implements spec round-trips to the same object.
    """
    Sometimes, we need to pickle implements specs.  We should be able
    to do so as long as the class is picklable.

    >>> import pickle
    >>> pickle.loads(pickle.dumps(implementedBy(C))) is implementedBy(C)
    True
    """
def test_provided_by_with_slots():
    # Edge-case doctest: '__provides__' listed in __slots__ must not
    # break providedBy() on the class object.
    """
    This is an edge case: if the __slots__ of a class contain '__provides__',
    using providedBy() on that class should still work (this occurs, for
    example, when providing an adapter for a concrete class.)

    >>> import zope.interface
    >>> class Slotted(object):
    ...     __slots__ = ('__provides__')
    >>> class IFoo(zope.interface.Interface):
    ...     pass
    >>> IFoo.providedBy(Slotted)
    False
    """
def test_suite():
    """Assemble the unittest suite plus both doctest collections."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(Test))
    suite.addTest(doctest.DocTestSuite("zope.interface.declarations"))
    suite.addTest(doctest.DocTestSuite())
    return suite
| |
import numpy as np
import cv2
from autumn_utils.image_util import *
def read_transformation_matrices(file_dir):
    """Parse a transformation file into per-frame matrices and texture names.

    The file format is line oriented:
      * lines starting with '#' name the texture image for the matrices
        that follow (the '#' and the trailing newline are stripped);
      * other non-blank lines hold one whitespace-separated matrix row;
      * every 4 accumulated rows close one 4x4 matrix.

    Returns (matrix_sequences, texture_sequences): a list of 4x4 numpy
    arrays and, parallel to it, the texture file name for each matrix
    (None if the file named no texture before that matrix).
    """
    matrix_sequences = []
    texture_sequences = []
    matrix = []
    # Fix: was unbound until the first '#' line, raising NameError for
    # files that start with matrix rows.
    texture_file = None
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(file_dir, 'r') as in_file:
        for line in in_file:
            # Skip blank (or too-short) lines
            if len(line) < 4:
                continue
            # '#name' lines set the texture for the matrices that follow.
            # NOTE: [1:-1] assumes a trailing newline is present.
            if line.startswith('#'):
                texture_file = line[1:-1]
                continue
            # Otherwise the line is one row of the current matrix
            row = [float(tok) for tok in line.split(' ')]
            matrix.append(row)
            if len(matrix) >= 4:
                matrix_sequences.append(np.array(matrix))
                texture_sequences.append(texture_file)
                matrix = []
    return matrix_sequences, texture_sequences
class Mesh(object):
    """A triangle mesh loaded from a Wavefront .obj file.

    Attributes (class-level defaults kept for backward compatibility):
      vertices -- (n, 3) float array of positions
      textures -- (n, 2) float array of UV coordinates
      normals  -- (n, 3) float array of normals
      faces    -- (n, 9) array of 1-based v/vt/vn indices, three index
                  triples per face corner (-1 where a component was omitted)
      frontFacingScores -- per-face dot product of the averaged corner
                  normal with the +Z axis (set by CalculateFrontFacingScore)
    """
    vertices = None
    textures = None
    normals = None
    faces = None
    frontFacingScores = None

    def __init__(self):
        # Bug fix: this method was named "__init" and therefore never ran
        # as the constructor, leaving new instances with the class-level
        # None attributes.
        self.vertices = np.empty((0, 3))
        self.textures = np.empty((0, 2))
        self.normals = np.empty((0, 3))
        self.faces = np.empty((0, 9))

    def LoadModel(self, model_dir):
        """Parse the .obj file at model_dir into this mesh's arrays."""
        vertex_count = 0
        vertices = np.empty((100, 3))
        texture_count = 0
        textures = np.empty((100, 2))
        normal_count = 0
        normals = np.empty((100, 3))
        face_count = 0
        faces = np.empty((100, 9))
        # 'with' guarantees the file is closed even on a parse error.
        with open(model_dir, 'r') as in_file:
            for line in in_file:
                line = line.split(' ')
                if line[0] == 'v':
                    for j in range(3):
                        vertices.itemset((vertex_count, j), float(line[j + 1]))
                    vertex_count += 1
                    # Grow the buffer geometrically when full.
                    if vertex_count >= vertices.shape[0]:
                        vertices = np.resize(vertices, (vertices.shape[0] * 2, vertices.shape[1]))
                elif line[0] == 'vt':
                    for j in range(2):
                        textures.itemset((texture_count, j), float(line[j + 1]))
                    texture_count += 1
                    if texture_count >= textures.shape[0]:
                        textures = np.resize(textures, (textures.shape[0] * 2, textures.shape[1]))
                elif line[0] == 'vn':
                    for j in range(3):
                        normals.itemset((normal_count, j), float(line[j + 1]))
                    normal_count += 1
                    if normal_count >= normals.shape[0]:
                        normals = np.resize(normals, (normals.shape[0] * 2, normals.shape[1]))
                elif line[0] == 'f':
                    # Each corner is "v/vt/vn"; missing components become -1.
                    for j in range(3):
                        section = line[j + 1].split('/')
                        for i in range(3):
                            if len(section[i]) == 0:
                                faces.itemset((face_count, j * 3 + i), -1)
                            else:
                                faces.itemset((face_count, j * 3 + i), int(section[i]))
                    face_count += 1
                    if face_count >= faces.shape[0]:
                        faces = np.resize(faces, (faces.shape[0] * 2, faces.shape[1]))
        # Trim the growth buffers down to the actual element counts.
        self.vertices = vertices[:vertex_count]
        self.textures = textures[:texture_count]
        self.normals = normals[:normal_count]
        self.faces = faces[:face_count]

    def CalculateFrontFacingScore(self):
        """Score each face by how much its averaged normal points along +Z."""
        self.frontFacingScores = np.zeros(len(self.faces))
        for i in range(len(self.faces)):
            face = self.faces[i, :]
            # Normal indices are stored 1-based at offsets 2, 5, 8.
            # Bug fix: faces is a float array, so cast to int -- modern
            # numpy rejects float indices.
            x = int(face.item(2)) - 1
            y = int(face.item(5)) - 1
            z = int(face.item(8)) - 1
            n1, n2, n3 = self.normals[x], self.normals[y], self.normals[z]
            fn = (n1 + n2 + n3) / 3
            self.frontFacingScores.itemset(i, np.dot(fn, np.array([0, 0, 1])))
        return self.frontFacingScores
class ViewProjection(object):
    """Projects a Mesh into a different (Kinect color camera) view."""

    # Intrinsic constant of Kinect, obtained via calibration
    __focal_length = 1059
    # Offset from the depth sensor to the color sensor, in meters.
    __depth_to_color = np.array([51.9097, - 0.5288, - 0.5756]) * 0.001

    rotation = None
    translation = None

    def __init__(self, matrix):
        # Decompose the rotation and translation matrix.
        # NOTE(review): the translation is read from the 4th *row*, i.e. the
        # matrix is assumed to use the row-vector (transposed) convention --
        # confirm against the producer of these matrices.
        self.rotation = matrix[:3, :3]
        self.translation = matrix[3, :3]

    def ProjectMeshToDifferentView(self, mesh):
        """Transform `mesh` into this view.

        Returns (img, new_mesh): a 1920x1080 rasterized coverage mask and
        the transformed mesh with recomputed texture coordinates.
        """
        new_mesh = Mesh()
        new_mesh.vertices = self.__rigidTransform(mesh.vertices)
        new_mesh.normals = self.__rigidTransform(mesh.normals)
        new_mesh.textures = self.__projectMeshToPlane(new_mesh.vertices, (1920, 1080))
        new_mesh.faces = mesh.faces
        img = self.__drawProjectedToPlane(new_mesh.textures, (1920, 1080))
        return img, new_mesh

    def __rigidTransform(self, mesh):
        """Apply axis flips, rotation, translation and the depth->color offset."""
        y_flip = np.diagflat([1, -1, 1])
        z_flip = np.diagflat([1, 1, -1])
        # (Removed a dead np.empty_like allocation that was overwritten.)
        res = ((np.matrix(mesh) * y_flip * z_flip) * self.rotation
               + self.translation - self.__depth_to_color)
        return res

    def __projectMeshToPlane(self, vertices, img_shape):
        """Pinhole-project 3D points to 2D pixels, origin at image center."""
        textures = np.empty((vertices.shape[0], 2))
        textures[:, 0] = (vertices[:, 0] * self.__focal_length / vertices[:, 2] + img_shape[0] / 2).squeeze()
        textures[:, 1] = (vertices[:, 1] * self.__focal_length / vertices[:, 2] + img_shape[1] / 2).squeeze()
        return textures

    def __drawProjectedToPlane(self, textures, img_shape):
        """Rasterize consecutive point triples as filled white triangles."""
        img = np.zeros((img_shape[1], img_shape[0], 3))
        # Bug fix: floor division -- "len(textures) / 3" is a float under
        # Python 3 and range() would raise TypeError.
        for i in range(len(textures) // 3):
            p1, p2, p3 = textures[i * 3], textures[i * 3 + 1], textures[i * 3 + 2]
            tris = np.array([(p1[0], p1[1]), (p2[0], p2[1]), (p3[0], p3[1])], dtype='int32')
            cv2.fillConvexPoly(img, tris, (255, 255, 255))
        return img
class ArcSegmentation(object):
    """Splits a polyline ("arc") into segments of equal arc length."""

    # Class-level defaults kept for backward compatibility.
    arc = None
    totalLength = 0
    lengths = np.array([0])

    def __init__(self, arc):
        """Precompute per-edge lengths and the total length of `arc`.

        arc -- (n, d) array of polyline points, n >= 2.
        """
        self.arc = arc
        self.totalLength = 0
        self.lengths = np.zeros([len(self.arc) - 1])
        for i in range(len(self.arc) - 1):
            u = self.arc[i]
            v = self.arc[i + 1]
            dst = np.linalg.norm(u - v)
            self.totalLength += dst
            self.lengths.itemset(i, dst)

    def divide(self, division):
        """Return (division + 1) points splitting the arc into equal parts.

        The first and last returned points are the arc's own endpoints.
        """
        segmentLength = self.totalLength / division
        # Bug fix: this referenced the bare name "arc" (NameError at
        # runtime); the polyline is stored on the instance.
        segments = np.zeros((division + 1, self.arc.shape[1]))
        currentSegmentLength = 0
        currentSegmentId = 1
        for i in range(len(self.lengths)):
            u = self.arc[i]
            v = self.arc[i + 1]
            dst = self.lengths.item(i)
            # Walk along this edge, emitting a point each time the running
            # length reaches a multiple of segmentLength.
            while dst > 0:
                accepted = min(segmentLength - currentSegmentLength, dst)
                dst -= accepted
                currentSegmentLength += accepted
                # A tolerance of 1% of the total length absorbs float drift.
                if abs(currentSegmentLength - segmentLength) < self.totalLength / 100:
                    t = dst / self.lengths.item(i)
                    w = self.interpolate(v, u, t)
                    segments[currentSegmentId] = w
                    currentSegmentLength = 0
                    currentSegmentId += 1
        segments[0] = self.arc[0]
        segments[-1] = self.arc[-1]
        return segments

    def interpolate(self, u, v, t):
        """Linear interpolation from u toward v by fraction t."""
        return u + (v - u) * t
| |
from datetime import datetime
import json
import os
import time
import endpoints
from protorpc import messages, message_types, remote
from google.appengine.api import urlfetch, memcache, taskqueue
from google.appengine.ext import ndb
from models import *
from settings import WEB_CLIENT_ID
from utils import getUserId
# OAuth scope and the API-Explorer client ID accepted by this API.
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID

# GET-style request carrying the target conference key in the URL path.
CONF_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeConferenceKey = messages.StringField(1),
)

# Session payload plus the parent conference key from the URL path.
SESH_POST_REQUEST = endpoints.ResourceContainer(
    SessionForm,
    websafeConferenceKey = messages.StringField(1),
)

MEMCACHE_ANNOUNCEMENTS_KEY = "Memcache Announcements"

# Values used for conference fields left empty at creation time.
DEFAULTS = {
    "city" : "Default City",
    "maxAttendees" : 0,
    "seatsAvailable" : 0,
    "topics" : ["Default", "Topic"],
}

# Maps filter-form operator names to Datastore query operators.
OPERATORS = {
    'EQ': '=',
    'GT': '>',
    'GTEQ': '>=',
    'LT': '<',
    'LTEQ': '<=',
    'NE': '!='
}

# Maps filter-form field names to Conference model property names.
FIELDS = {
    'CITY': 'city',
    'TOPIC': 'topics',
    'MONTH': 'month',
    'MAX_ATTENDEES': 'maxAttendees',
}
################################################################
@endpoints.api( name='conference',
version='v1',
allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# - - - Sessions - - - - - - - - - - - - - - - - - - - - - -
# def _copySessionToForm(self, sesh):
# @endpoints.method(CONF_GET_REQUEST, SessionForms,
# path='sessions/{websafeConferenceKey}'
# http_method='GET', name='getConferenceSessions')
# def getConferenceSessions(self, request):
# """Given a conference, return all sessions"""
# pass
# @endpoints.method()
# def getConferenceSessionsByType(self, websafeConferenceKey, typeOfSession):
# """Given a conference, return all sessions of a specified type"""
# pass
# @endpoints.method()
# def getSessionsBySpeaker(self, speaker):
# """Given a speaker, return all sessions given by the speaker, across all conferences"""
# pass
def _copySessionToForm(self, sesh):
"""Copy all relevant fields from Session to SessionForm."""
sf = SessionForm()
for field in sf.all_fields():
if hasattr(sesh, field.name):
if field.name == 'date':
setattr(sf, field.name, str(getattr(sesh, field.name)))
else:
setattr(sf, field.name, getattr(sesh, field.name))
elif field.name == "websafeKey":
setattr(sf, field.name, sesh.key.urlsafe())
sf.check_initialized()
return sf
def _createSession(self, request):
c_key = ndb.Key(urlsafe=request.websafeConferenceKey)
conf = c_key.get()
if conf.organizerUserId != getUserId(endpoints.get_current_user()):
raise endpoints.ForbiddenException(
'You must be the organizer to create a session.')
s_id = Session.allocate_ids(size=1, parent=c_key)[0]
s_key = ndb.Key(Session, s_id, parent=c_key)
session = Session()
session.key = s_key
for field in request.all_fields():
data = getattr(request, field.name)
# only copy fields where we get data
if data not in (None, []):
# special handling for dates (convert string to Date)
if field.name == 'date':
data = datetime.strptime(data, "%Y-%m-%d").date()
# write to Conference object
setattr(session, field.name, data)
session.put()
return self._copySessionToForm(session)
    @endpoints.method(SESH_POST_REQUEST, SessionForm,
        path='session/add/{websafeConferenceKey}',
        http_method='POST', name='createSession')
    def createSession(self, request):
        """Create a session if user is organizer of the conference"""
        # Thin endpoint wrapper; the work happens in _createSession.
        return self._createSession(request)
# - - - Registration - - - - - - - - - - - - - - - - - - - -
    @ndb.transactional(xg=True)
    def _conferenceRegistration(self, request, reg=True):
        """Register or unregister user for selected conference."""
        # Cross-group transaction: the Profile and the Conference live in
        # different entity groups but must be updated atomically.
        retval = None
        prof = self._getProfileFromUser()

        # check if conf exists given websafeConfKey
        # get conference, check that it exists
        wsck = request.websafeConferenceKey
        conf = ndb.Key(urlsafe=wsck).get()
        if not conf:
            raise endpoints.NotFoundException(
                'No conference found with key: %s' %wsck)

        # register
        if reg:
            # check if user already registered otherwise add
            if wsck in prof.conferenceKeysToAttend:
                raise ConflictException(
                    "You already registered for this conference.")

            if conf.seatsAvailable <= 0:
                raise ConflictException(
                    "There are no seats available.")

            # register user, take away one seat
            prof.conferenceKeysToAttend.append(wsck)
            conf.seatsAvailable -= 1
            retval = True
        else:
            # check if user already registered
            if wsck in prof.conferenceKeysToAttend:
                #unregister user, add back one seat
                prof.conferenceKeysToAttend.remove(wsck)
                conf.seatsAvailable += 1
                retval = True
            else:
                retval = False

        # Persist both sides of the registration before the commit.
        prof.put()
        conf.put()
        return BooleanMessage(data=retval)
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
path='conference/{websafeConferenceKey}',
http_method='post', name='registerForConference')
def registerForConference(self, request):
"""Register user for selected confereence."""
return self._conferenceRegistration(request)
    @endpoints.method(CONF_GET_REQUEST, BooleanMessage,
        path='conference/{websafeConferenceKey}',
        http_method='DELETE', name='unregisterFromConference')
    def unregisterFromConference(self, request):
        """Unregister user from selected conference."""
        # reg=False switches _conferenceRegistration into unregister mode.
        return self._conferenceRegistration(request, reg=False)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='conferences/attending',
http_method='GET', name='getConferencesToAttend')
def getConferencesToAttend(self, request):
"""Get list of conferences that user has registered for."""
profile = self._getProfileFromUser()
userConfKeys = profile.conferenceKeysToAttend
keys = [ndb.Key(urlsafe=key) for key in userConfKeys]
conferences = ndb.get_multi(keys)
return ConferenceForms(
items=[self._copyConferenceToForm(conf, "") for conf in conferences]
)
# - - - Conference Objects- - - - - - - - - - - - - - - - - -
def _copyConferenceToForm(self, conf, displayName):
"""Copy relevant fields from Conference to ConferenceForm"""
cf = ConferenceForm()
for field in cf.all_fields():
if hasattr(conf, field.name):
if field.name.endswith('Date'):
setattr(cf, field.name, str(getattr(conf, field.name)))
else:
setattr(cf, field.name, getattr(conf, field.name))
elif field.name == "websafeKey":
setattr(cf, field.name, conf.key.urlsafe())
if displayName:
setattr(cf, 'organizerDisplayName', displayName)
cf.check_initialized()
return cf
    def _createConferenceObject(self, request):
        """Create or update Conference object, returning ConferenceForm/request."""
        # Only signed-in users may create conferences.
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        user_id = getUserId(user)

        if not request.name:
            raise endpoints.BadRequestException("Conference 'name' field required")

        # Copy ConferenceForm/ProtoRPC Message into dict
        data = {field.name: getattr(request, field.name) for field in request.all_fields()}
        # These two are outbound-only and not Conference model properties.
        del data['websafeKey']
        del data['organizerDisplayName']

        # Add default values for those missing (both data model and outbound Message)
        for df in DEFAULTS:
            if data[df] in (None, []):
                data[df] = DEFAULTS[df]
                setattr(request, df, DEFAULTS[df])

        # Dates arrive as ISO-ish strings; keep only the date part.
        if data['startDate']:
            data['startDate'] = datetime.strptime(data['startDate'][:10], "%Y-%m-%d").date()
            data['month'] = data['startDate'].month
        else:
            data['month'] = 0
        if data['endDate']:
            data['endDate'] = datetime.strptime(data['endDate'][:10], "%Y-%m-%d").date()

        # set seatsAvailable to be same as maxAttendees on creation
        # both for data model & outbound Message
        if data["maxAttendees"] > 0:
            data["seatsAvailable"] = data["maxAttendees"]
            setattr(request, "seatsAvailable", data["maxAttendees"])

        # make Profile Key from user ID
        p_key = ndb.Key(Profile, user_id)
        # allocate new Conference ID with Profile key as parent
        c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
        # make Conference key from ID
        c_key = ndb.Key(Conference, c_id, parent=p_key)
        data['key'] = c_key
        data['organizerUserId'] = request.organizerUserId = user_id

        # create Conference & return (modified) ConferenceForm
        Conference(**data).put()
        # The confirmation email is sent asynchronously via the task queue.
        taskqueue.add(params={'email': user.email(),
            'conferenceInfo': repr(request)},
            url='/tasks/send_confirmation_email')
        return request
def _getQuery(self, request):
"""Return formatted query from the submitted filters."""
q = Conference.query()
inequality_filter, filters = self._formatFilters(request.filters)
# If exists, sort on inequality filter first
if not inequality_filter:
q = q.order(Conference.name)
else:
q = q.order(ndb.GenericProperty(inequality_filter))
q = q.order(Conference.name)
for filtr in filters:
if filtr["field"] in ["month", "maxAttendees"]:
filtr["value"] = int(filtr["value"])
formatted_query = ndb.query.FilterNode(filtr["field"], filtr["operator"], filtr["value"])
q = q.filter(formatted_query)
return q
def _formatFilters(self, filters):
""" Parse, check validity and format user supplied filters."""
formatted_filters = []
inequality_field = None
for f in filters:
filtr = {field.name: getattr(f, field.name) for field in f.all_fields()}
try:
filtr["field"] = FIELDS[filtr["field"]]
filtr["operator"] = OPERATORS[filtr["operator"]]
except KeyError:
raise endpoints.BadRequestException("Filter contains invalid filed or operator.")
if filtr["operator"] != "=":
# check if inequality operation has been used in previous filters
# disallow the filter if inequality was performed on a different field before
# track the field on which the inequality operation is performed
if inequality_field and inequality_field != filtr["field"]:
raise endpoints.BadRequestException("Inequality filter is allowed on only one field.")
else:
inequality_field = filtr["field"]
formatted_filters.append(filtr)
return (inequality_field, formatted_filters)
@endpoints.method(ConferenceQueryForms, ConferenceForms,
        path='queryConferences',
        http_method='POST',
        name='queryConferences')
def queryConferences(self, request):
    """Query for conferences."""
    conferences = self._getQuery(request)
    # organizer display name is not resolved here, hence the "" placeholder
    return ConferenceForms(
        items=[self._copyConferenceToForm(conf, "") for conf in conferences]
    )
@endpoints.method(CONF_GET_REQUEST, ConferenceForm,
        path='conference/{websafeConferenceKey}',
        http_method='GET', name='getConference')
def getConference(self, request):
    """Return requested conference (by websafeConferenceKey)."""
    # get Conference object from request; bail if not found
    conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
    if not conf:
        raise endpoints.NotFoundException(
            'No conference found with key: %s' % request.websafeConferenceKey)
    # the conference's parent entity is the organizer's Profile
    prof = conf.key.parent().get()
    # return ConferenceForm
    return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
@endpoints.method(message_types.VoidMessage, ConferenceForms,
        path='getConferencesCreated',
        http_method='POST',
        name='getConferencesCreated')
def getConferencesCreated(self, request):
    """Query for conferences created by the user."""
    user = endpoints.get_current_user()
    if not user:
        raise endpoints.UnauthorizedException('Authorization required')
    # ancestor query: conferences whose parent key is this user's Profile
    conferences = Conference.query(ancestor=ndb.Key(Profile, getUserId(user)))
    return ConferenceForms(
        items=[self._copyConferenceToForm(conf, "") for conf in conferences]
    )
# - - - Profile objects - - - - - - - - - - - - - - - - - - -
def _copyProfileToForm(self, prof):
    """Copy relevant fields from Profile to ProfileForm."""
    pf = ProfileForm()
    for field in pf.all_fields():
        if hasattr(prof, field.name):
            # convert t-shirt string to Enum; just copy others
            if field.name == 'teeShirtSize':
                setattr(pf, field.name, getattr(TeeShirtSize, getattr(prof, field.name)))
            else:
                setattr(pf, field.name, getattr(prof, field.name))
    # raises an error if a required form field was left unset
    pf.check_initialized()
    return pf
def _getProfileFromUser(self):
    """Return user Profile from datastore, creating new one if non-existent."""
    user = endpoints.get_current_user()
    if not user:
        raise endpoints.UnauthorizedException('Authorization required')
    user_id = getUserId(user)
    # Profile entities are keyed directly by the user id
    p_key = ndb.Key(Profile, user_id)
    profile = p_key.get()
    # first visit: create and persist a default profile
    if not profile:
        profile = Profile(
            key=p_key,
            displayName=user.nickname(),
            mainEmail=user.email(),
            teeShirtSize=str(TeeShirtSize.NOT_SPECIFIED),
        )
        profile.put()
    return profile  # return Profile
def _doProfile(self, save_request=None):
    """Get user Profile and return to user, possibly updating it first.

    Args:
        save_request: optional ProfileMiniForm; when given, its non-empty
            displayName/teeShirtSize values are written to the profile.
    """
    # get user Profile
    prof = self._getProfileFromUser()

    # if saveProfile(), process user-modifyable fields
    if save_request:
        for field in ('displayName', 'teeShirtSize'):
            if hasattr(save_request, field):
                val = getattr(save_request, field)
                # only overwrite with non-empty values
                if val:
                    setattr(prof, field, str(val))
        prof.put()

    # return ProfileForm
    return self._copyProfileToForm(prof)
@endpoints.method(ConferenceForm, ConferenceForm, path='conference',
        http_method='POST', name='createConference')
def createConference(self, request):
    """Create a new conference owned by the current user."""
    return self._createConferenceObject(request)

@endpoints.method(message_types.VoidMessage, ProfileForm, path='profile',
        http_method='GET', name='getProfile')
def getProfile(self, request):
    """Return user profile."""
    return self._doProfile()

@endpoints.method(ProfileMiniForm, ProfileForm, path='profile',
        http_method='POST', name='saveProfile')
def saveProfile(self, request):
    """Update & return user profile."""
    return self._doProfile(request)
# - - - Announcements - - - - - - - - - - - - - - - - - - -
@staticmethod
def _cacheAnnouncement():
    """Create announcement and assign to memcache -
    Used by memcache cron job and putAnnouncement()"""
    # conferences that are nearly full: between 1 and 5 seats left
    confs = Conference.query(ndb.AND(
        Conference.seatsAvailable <= 5,
        Conference.seatsAvailable > 0)
    ).fetch(projection=[Conference.name])

    if confs:
        # If there are almost sold out conferences,
        # format announcement and set it in memcache
        announcement = '%s %s' % (
            'Last chance to attend! The following conferences '
            'are nearly sold out:',
            ', '.join(conf.name for conf in confs))
        memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
    else:
        # If there are no sold out conferences,
        # delete the memcache announcements entry
        announcement = ""
        memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)
    return announcement
@endpoints.method(message_types.VoidMessage, StringMessage,
        path='conference/announcement/get',
        http_method='GET', name='getAnnouncement')
def getAnnouncement(self, request):
    """Return the cached announcement from memcache, or an empty string."""
    # BUGFIX: the empty-string fallback was previously applied to the cache
    # KEY -- memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY or "") -- so a cache
    # miss still returned None and StringMessage was built with data=None.
    # The "or ''" must apply to the retrieved VALUE instead.
    announcement = memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY) or ""
    return StringMessage(data=announcement)
# registers API: exposes every endpoints.method of ConferenceApi as one
# Cloud Endpoints application served by App Engine
api = endpoints.api_server([ConferenceApi])
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
def transformer(U, theta, out_size, name='SpatialTransformer', **kwargs):
    """Spatial Transformer Layer

    Implements a spatial transformer layer as described in [1]_.
    Based on [2]_ and edited by David Dao for Tensorflow.

    NOTE(review): uses legacy TensorFlow APIs (tf.pack, tf.batch_matmul,
    tf.concat(dim, values) argument order) -- confirm the target TF version
    is pre-1.0 before reusing.

    Parameters
    ----------
    U : float
        The output of a convolutional net should have the
        shape [num_batch, height, width, num_channels].
    theta: float
        The output of the
        localisation network should be [num_batch, 6].
    out_size: tuple of two ints
        The size of the output of the network (height, width)

    References
    ----------
    .. [1] Spatial Transformer Networks
           Max Jaderberg, Karen Simonyan, Andrew Zisserman, Koray Kavukcuoglu
           Submitted on 5 Jun 2015
    .. [2] https://github.com/skaae/transformer_network/blob/master/transformerlayer.py

    Notes
    -----
    To initialize the network to the identity transform init
    ``theta`` to :
        identity = np.array([[1., 0., 0.],
                             [0., 1., 0.]])
        identity = identity.flatten()
        theta = tf.Variable(initial_value=identity)
    """
    def _repeat(x, n_repeats):
        # Tile each element of x n_repeats times: [a, b] -> [a..a, b..b].
        with tf.variable_scope('_repeat'):
            rep = tf.transpose(
                tf.expand_dims(tf.ones(shape=tf.pack([n_repeats, ])), 1), [1, 0])
            rep = tf.cast(rep, 'int32')
            x = tf.matmul(tf.reshape(x, (-1, 1)), rep)
            return tf.reshape(x, [-1])

    def _interpolate(im, x, y, out_size):
        # Bilinearly sample im at fractional coordinates (x, y) given in
        # normalized [-1, 1] space; returns a flat [n, channels] tensor.
        with tf.variable_scope('_interpolate'):
            # constants
            num_batch = tf.shape(im)[0]
            height = tf.shape(im)[1]
            width = tf.shape(im)[2]
            channels = tf.shape(im)[3]

            x = tf.cast(x, 'float32')
            y = tf.cast(y, 'float32')
            height_f = tf.cast(height, 'float32')
            width_f = tf.cast(width, 'float32')
            out_height = out_size[0]
            out_width = out_size[1]
            zero = tf.zeros([], dtype='int32')
            max_y = tf.cast(tf.shape(im)[1] - 1, 'int32')
            max_x = tf.cast(tf.shape(im)[2] - 1, 'int32')

            # scale indices from [-1, 1] to [0, width/height]
            x = (x + 1.0)*(width_f) / 2.0
            y = (y + 1.0)*(height_f) / 2.0

            # do sampling: integer corners surrounding each sample point,
            # clamped to valid image bounds
            x0 = tf.cast(tf.floor(x), 'int32')
            x1 = x0 + 1
            y0 = tf.cast(tf.floor(y), 'int32')
            y1 = y0 + 1

            x0 = tf.clip_by_value(x0, zero, max_x)
            x1 = tf.clip_by_value(x1, zero, max_x)
            y0 = tf.clip_by_value(y0, zero, max_y)
            y1 = tf.clip_by_value(y1, zero, max_y)
            dim2 = width
            dim1 = width*height
            # flat offsets of each batch image's first pixel
            base = _repeat(tf.range(num_batch)*dim1, out_height*out_width)
            base_y0 = base + y0*dim2
            base_y1 = base + y1*dim2
            idx_a = base_y0 + x0
            idx_b = base_y1 + x0
            idx_c = base_y0 + x1
            idx_d = base_y1 + x1

            # use indices to lookup pixels in the flat image and restore
            # channels dim
            im_flat = tf.reshape(im, tf.pack([-1, channels]))
            im_flat = tf.cast(im_flat, 'float32')
            Ia = tf.gather(im_flat, idx_a)
            Ib = tf.gather(im_flat, idx_b)
            Ic = tf.gather(im_flat, idx_c)
            Id = tf.gather(im_flat, idx_d)

            # and finally calculate interpolated values (bilinear weights)
            x0_f = tf.cast(x0, 'float32')
            x1_f = tf.cast(x1, 'float32')
            y0_f = tf.cast(y0, 'float32')
            y1_f = tf.cast(y1, 'float32')
            wa = tf.expand_dims(((x1_f-x) * (y1_f-y)), 1)
            wb = tf.expand_dims(((x1_f-x) * (y-y0_f)), 1)
            wc = tf.expand_dims(((x-x0_f) * (y1_f-y)), 1)
            wd = tf.expand_dims(((x-x0_f) * (y-y0_f)), 1)
            output = tf.add_n([wa*Ia, wb*Ib, wc*Ic, wd*Id])
            return output

    def _meshgrid(height, width):
        # Build the (3, height*width) homogeneous sampling grid of
        # (x_t, y_t, 1) rows in normalized [-1, 1] coordinates.
        with tf.variable_scope('_meshgrid'):
            # This should be equivalent to:
            #  x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),
            #                         np.linspace(-1, 1, height))
            #  ones = np.ones(np.prod(x_t.shape))
            #  grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])
            x_t = tf.matmul(tf.ones(shape=tf.pack([height, 1])),
                            tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
            y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
                            tf.ones(shape=tf.pack([1, width])))

            x_t_flat = tf.reshape(x_t, (1, -1))
            y_t_flat = tf.reshape(y_t, (1, -1))

            ones = tf.ones_like(x_t_flat)
            grid = tf.concat(0, [x_t_flat, y_t_flat, ones])
            return grid

    def _transform(theta, input_dim, out_size):
        # Apply the 2x3 affine theta to the sampling grid, then sample
        # input_dim at the transformed coordinates.
        with tf.variable_scope('_transform'):
            num_batch = tf.shape(input_dim)[0]
            height = tf.shape(input_dim)[1]
            width = tf.shape(input_dim)[2]
            num_channels = tf.shape(input_dim)[3]
            theta = tf.reshape(theta, (-1, 2, 3))
            theta = tf.cast(theta, 'float32')

            # grid of (x_t, y_t, 1), eq (1) in ref [1]
            height_f = tf.cast(height, 'float32')
            width_f = tf.cast(width, 'float32')
            out_height = out_size[0]
            out_width = out_size[1]
            grid = _meshgrid(out_height, out_width)
            grid = tf.expand_dims(grid, 0)
            grid = tf.reshape(grid, [-1])
            # one copy of the grid per batch element
            grid = tf.tile(grid, tf.pack([num_batch]))
            grid = tf.reshape(grid, tf.pack([num_batch, 3, -1]))

            # Transform A x (x_t, y_t, 1)^T -> (x_s, y_s)
            T_g = tf.batch_matmul(theta, grid)
            x_s = tf.slice(T_g, [0, 0, 0], [-1, 1, -1])
            y_s = tf.slice(T_g, [0, 1, 0], [-1, 1, -1])
            x_s_flat = tf.reshape(x_s, [-1])
            y_s_flat = tf.reshape(y_s, [-1])

            input_transformed = _interpolate(
                input_dim, x_s_flat, y_s_flat,
                out_size)

            output = tf.reshape(
                input_transformed, tf.pack([num_batch, out_height, out_width, num_channels]))
            return output

    with tf.variable_scope(name):
        output = _transform(theta, U, out_size)
        return output
def batch_transformer(U, thetas, out_size, name='BatchSpatialTransformer'):
    """Batch Spatial Transformer Layer

    Parameters
    ----------
    U : float
        tensor of inputs [num_batch,height,width,num_channels]
    thetas : float
        a set of transformations for each input [num_batch,num_transforms,6]
    out_size : int
        the size of the output [out_height,out_width]

    Returns: float
        Tensor of size [num_batch*num_transforms,out_height,out_width,num_channels]
    """
    with tf.variable_scope(name):
        # both leading dims must be statically known to build the index list
        num_batch, num_transforms = map(int, thetas.get_shape().as_list()[:2])
        # repeat each input once per transform: [0,0,..,1,1,..,...]
        # NOTE(review): xrange is Python 2 only -- confirm interpreter version
        indices = [[i]*num_transforms for i in xrange(num_batch)]
        input_repeated = tf.gather(U, tf.reshape(indices, [-1]))
        return transformer(input_repeated, thetas, out_size)
| |
#!/usr/bin/python2.4
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""mypgrep [ --user USER ] [ --password PASSWORD ] \\
[ criteria ] host1 host2 host3 ...
A tool to view the sessions across multiple MySQL servers.
"""
__author__ = "chip@google.com (Chip Turner)"
import fcntl
import MySQLdb
import os
import Queue
import re
import signal
import socket
import struct
import sys
import termios
import threading
import time
from gmt import thread_pool
from gmt import config_helper
# Initialized here instead of __main__ because we need the config-file
# defaults available for the flag definitions below.
config_helper.Init()

# compatibility layer with a google module
from gmt import compat_flags as flags
FLAGS = flags.FLAGS

# Connection credentials (defaults come from the config file).
flags.DEFINE_string("user", config_helper.GetGlobal("user"),
                    "User to connect to the databases as",
                    short_name="u")
flags.DEFINE_string("password", config_helper.GetGlobal("password") or "",
                    "Password to use when connecting as user",
                    short_name="p")
# Output formatting options.
flags.DEFINE_boolean("show_full_query", 0,
                     "Show the full query when displaying sessions")
flags.DEFINE_boolean("show_summary", 0,
                     "Show a summarization of all matching jobs")
flags.DEFINE_boolean("dns_lookup", 0,
                     "Perform a DNS reverse lookup on IP addresses")
flags.DEFINE_boolean("hide_source", 0,
                     "Hide the database name in the list of results")
# Session-matching criteria (consumed by SessionFilter).
flags.DEFINE_boolean("idle", 0, "Show only idle connections")
flags.DEFINE_boolean("busy", 0, "Show only non-idle connections")
flags.DEFINE_boolean("connectionless", 0, "Show only connection-less sessions")
flags.DEFINE_integer("time", 0, "Show sessions whose current activity "
                     "is at least this old (in seconds)")
flags.DEFINE_string("source_user", "", "Show only sessions for this user")
flags.DEFINE_string("source_host", "", "Show only sessions from this host")
flags.DEFINE_string("query_contains", "",
                    "Show only sessions whose query contains this string")
flags.DEFINE_string("query_matches", "",
                    "Show only sessions whose query matches this regex")
flags.DEFINE_string("query_state", "",
                    "Show only sessions whose query state contains this "
                    "(such as 'Sending data' or 'Locked'")
flags.DEFINE_boolean("mine", 0,
                     "Show queries running from this host")
flags.DEFINE_boolean("include_replication", 0,
                     "Include replication threads when matching queries")
flags.DEFINE_boolean("show_state", 0,
                     "Prepend thread state to every query")
# Destructive options -- see main() for the confirmation prompt.
flags.DEFINE_boolean("kill", 0,
                     "Kill matching queries (BE CAREFUL!)")
flags.DEFINE_boolean("kill_without_confirm", False,
                     "Go ahead and kill without asking for confirmation")
flags.DEFINE_integer("num_worker_threads", 32,
                     "number of concurrent worker threads")
class SessionEntry:
  """A simple class to represent a result from MySQL's 'show full
  processlist'.

  Little more than a data structure that marginally cleans up data.
  """
  def __init__(self, dbhost, dbh, pid, user, host, db, command, time,
               state, info, raw_host):
    """Constructor.

    Args:
      dbhost: hostname of the database the session is running on
      dbh: database handle connected to this database
      pid: id of the running session
      user: user the session is running as
      host: host the session is running from
      db: database the session is attached to
      command: current MySQL command (aka state) such as Sleep, Query, etc
      time: time the session has been running the current command
      state: special message such as replication state
      info: depends on command; usually the running query
      raw_host: raw ip:port the session is running from

    Returns: n/a
    """
    # helper function to cleanup strings: collapse internal whitespace and
    # substitute a placeholder for NULL columns
    def cleanup_string(s):
      if s is None:
        return "(none)"
      else:
        return re.sub(r"\s+", " ", s.strip())

    # host is either host:port or None or empty string, both
    # signifying internal DB hosts
    if host:
      if host == "localhost":
        port = ""
      else:
        host, port = host.split(":")
    else:
      host = "(local)"
      port = ""

    self.dbhost = dbhost
    self.dbh = dbh
    self.pid = pid
    self.user = user
    self.db = db
    self.command = cleanup_string(command)
    self.host = host
    self.raw_host = raw_host
    self.port = port
    self.time = int(time or 0)
    self.state = cleanup_string(state)
    # fold the thread state into the displayed info column
    if state:
      if info:
        if FLAGS.show_state:
          info = "[%s] %s" % (state, info)
      else:
        info = "[%s]" % state
    self.info = cleanup_string(info)

  def kill(self):
    # Issue a KILL for this session's id on its own server.
    cur = self.dbh.cursor()
    # sometimes a connection is present when we get the list, but
    # disconnects before we can kill it.  we continue on, noting an
    # error, if we encounter such an instance.
    try:
      cur.execute("KILL /* mypgrep */ %s" % self.pid)
    except MySQLdb.OperationalError:
      print ("Unable to kill session %d on %s, continuing..." %
             (self.pid, self.dbhost))

  def display(self, host_width, line_width, hide_source, source_width):
    """A method to display a session in a pleasing, columnar way.

    Args:
      host_width: width to reserve for hostnames in display
      line_width: maximum length a line is allowed to be
      hide_source: whether to hide the database the session is on
      source_width: width to reserve for the source name

    Returns:
      string representing the session
    """
    if hide_source:
      buf = ""
    else:
      # negative width = left-justified field
      buf = "%*s" % (-source_width - 1, self.dbhost)
    buf += "%-9d %*s %-15s %-8d %-12s " % (self.pid, -host_width, self.host,
                                           self.user, self.time, self.command)
    # if given a width, truncate the info field (aka running query) to
    # fit, if possible
    if line_width > 0:
      substring_len = line_width - len(buf)
      if substring_len < 0:
        substring_len = 0
      return buf + self.info[:substring_len]
    else:
      return buf + self.info
class SessionFilter:
  """A class to represent a filter applied to MySQL sessions.

  Little more than a data structure and a function to compare a
  session to the filter and decide if it is filtered out or not.
  """
  def __init__(self, idle_only, busy_only, minimum_time,
               query_contains, query_matches, query_state,
               source_user, source_host,
               present_conns):
    """Constructor.

    Args:
      idle_only: only show idle sessions
      busy_only: only show non-idle sessions
      minimum_time: lower limit of a session's time
      query_contains: substring query contains
      query_matches: regex the query would match
      query_state: substring query state contains
      source_user: user running the query
      source_host: host query is coming from
      present_conns: dict of host->client of connections to db

    Returns: n/a
    """
    self.idle_only = idle_only
    self.busy_only = busy_only
    self.minimum_time = minimum_time
    self.query_contains = query_contains
    self.query_matches = query_matches
    self.query_state = query_state
    self.source_user = source_user
    self.source_host = source_host
    self.present_conns = present_conns

  def match(self, session):
    """Takes a session and checks it against the current filter.

    Args:
      session: SessionEntry object to examine

    Returns:
      whether the session is allowed by the filter or not (1/0)
    """
    # replication threads show up as "Connect"; include them only on request
    if FLAGS.include_replication:
      cmd_list = ["Query", "Connect"]
    else:
      cmd_list = ["Query"]
    if self.present_conns:
      # present_conns means, show only clients who are NOT in
      # present_conns.  however, if raw_host is None, then it is a
      # local thread (such as replication) which otherwise would seem
      # connectionless.  So it does not match this filter.
      if not session.raw_host:
        return 0
      # there is an inherent race condition between the time we do the
      # netstat and the time we do the "show processlist".  15 seconds
      # should be more than sufficient.
      if session.time < 15:
        return 0
      # do we have any host data for this dbhost?  if not, something
      # went wrong with the ssh and we should abort letting the user
      # know something is amiss.  prevents accidental kills.
      if (session.dbhost not in self.present_conns or
          not self.present_conns[session.dbhost]):
        print "No connection data for %s - please confirm 'ssh %s' works!" % \
              (session.dbhost, session.dbhost)
        sys.exit(1)
      if session.raw_host in self.present_conns[session.dbhost]:
        return 0
    if self.idle_only:
      if session.command != "Sleep":
        return 0
    if self.busy_only:
      if session.command not in ("Query", "Killed"):
        return 0
    if self.minimum_time:
      if session.time < self.minimum_time:
        return 0
    if self.query_contains:
      # substring match applies only to actively running commands
      if session.command not in cmd_list:
        return 0
      if session.info.lower().find(self.query_contains.lower()) == -1:
        return 0
    if self.query_matches:
      if session.command not in cmd_list:
        return 0
      if not re.search(self.query_matches, session.info):
        return 0
    if self.query_state:
      if session.state.lower().find(self.query_state.lower()) == -1:
        return 0
    if self.source_user:
      if self.source_user != session.user:
        return 0
    if self.source_host:
      if session.host != self.source_host:
        return 0
    return 1
def SessionListWorker(dbhost, host_results, host_results_lock):
  """Thread handler that fetches a session list and stores the results.

  Args:
    dbhost: host to query
    host_results: dict to store results in
    host_results_lock: lock for the host_results dict

  Returns:
    (nothing)
  """
  try:
    dbh = MySQLdb.connect(host=dbhost, user=FLAGS.user, passwd=FLAGS.password,
                          connect_timeout=30)
  except MySQLdb.OperationalError:
    # skip this unreachable host
    sys.stderr.write('Unable to connect to %s, skipping.\n' % dbhost)
    return

  cursor = dbh.cursor()
  cmd = "SHOW /* mygrep */ FULL PROCESSLIST"
  cursor.execute(cmd)

  sessions = []
  for row in cursor.fetchall():
    # skip our boring selves (row[7] is the Info/query column)
    if row[7] == cmd:
      continue
    # force it into a list so we can change row[2] aka the host
    row = list(row)
    # save a copy at the end of row in case we do dns resolution
    row.append(row[2])
    # do a little cleanup and beautification of the host
    if FLAGS.dns_lookup:
      if row[2] and row[2].find(":") != -1:
        host, port = row[2].split(":")
        host = socket.gethostbyaddr(host)[0].split(".")[0]
        row[2] = "%s:%s" % (host, port)
    sessions.append(SessionEntry(dbhost, dbh, *row))

  # update the shared dict under its lock
  try:
    host_results_lock.acquire()
    host_results[dbhost] = sessions
  finally:
    host_results_lock.release()
def ShowSummary(users, hosts, commands, busy_time):
  """Display summary statistics about a group of sessions.

  Args:
    users: dict of (username -> count)
    hosts: dict of (hostname -> count)
    commands: dict of (command -> count)
    busy_time: seconds of time amongst all active sessions

  Returns:
    (none)
  """
  # return the dict's keys sorted by descending count (Python 2 cmp-sort)
  def value_sort(d):
    k = d.keys()
    k.sort(lambda a, b: cmp(d[b], d[a]))
    return k

  sorted_users = value_sort(users)
  sorted_hosts = value_sort(hosts)
  sorted_commands = value_sort(commands)

  # print one "key: count" line per entry, keys left-aligned to a
  # common width
  def print_dict_summary(label, key_order, d):
    print "%s:" % label
    if key_order:
      max_width = max([ len(s) for s in key_order ])
      for key in key_order:
        print " %*s: %s" % (-max_width - 1, key, d[key])
    print

  print_dict_summary("Hosts", sorted_hosts, hosts)
  print_dict_summary("Users", sorted_users, users)
  print_dict_summary("Commands", sorted_commands, commands)
  print "Busy time: %d" % busy_time
# turn DEADBEEF:C0DE (ip:port in hex) into ip:port in traditional
# dotted, base 10 notation.
def _decode_raw_addr(raw_addr):
addr, port = raw_addr.split(":")
port = int(port, 16)
octets = []
for i in range(0, len(addr), 2):
octets.insert(0, str(int(addr[i:i+2], 16)))
return "%s:%s" % (".".join(octets), port)
# our main method
def main(argv):
  """Entry point: gather, filter, display and optionally kill sessions.

  argv[1:] is the list of database hosts/aliases; flags have already been
  parsed out by the __main__ block.  Returns a shell-style exit status.
  """
  if len(argv) == 1:
    flags.ShowUsage()
    return 1

  # parallelize to perform the sessionlists
  host_results = {}
  host_results_lock = threading.Lock()

  # expand db aliases to the entire set
  dbhosts = []
  for host in argv[1:]:
    aliases = config_helper.ExpandDatabaseAliases(host)
    dbhosts.extend(aliases)

  # check each address, resolving it to see if we are seeing any DNS
  # round robin involved.
  resolved_dbhosts = []
  for host in dbhosts:
    addresses = socket.getaddrinfo(host, None,
                                   socket.AF_INET, socket.SOCK_STREAM)
    if len(addresses) > 1:
      for address in addresses:
        resolved_dbhosts.append(address[4][0])
    else:
      resolved_dbhosts.append(host)
  dbhosts = resolved_dbhosts

  # host -> {remote ip:port -> 1} of ESTABLISHED client connections,
  # populated only in --connectionless mode
  db_connections = {}
  if FLAGS.connectionless:
    db_fhs = {}
    # we are going to do a poor man's version of parallelism here.
    # fire off all of the popens before trying to read from any of
    # them.  the result is ssh will block when it prints the first
    # line of output, which is fine; at that point we can read them
    # quickly.
    for host in dbhosts:
      db_connections[host] = {}
      # we can't use googlesh because sometimes lines from one ssh mix
      # with another.  we can't use netstat because, on occasion, it
      # truncates output with a warning about bogus output.  so, we
      # use plain ssh to get to the proc file netstat uses, then
      # decode the hex addresses and ports.
      fh = os.popen("nice ssh -o BatchMode=yes -o StrictHostKeyChecking=no "
                    "-x -n "
                    "%s cat /proc/net/tcp 2> /dev/null" % host)
      db_fhs[host] = fh

    for host, fh in db_fhs.items():
      for line in fh.readlines():
        line = line.strip()
        # skip the /proc/net/tcp header row
        if line.find("local_address") != -1:
          continue
        split_line = line.split()
        # raw_local_addr is unused but kept to document the column layout
        raw_local_addr, raw_remote_addr, raw_state = \
            split_line[1], split_line[2], split_line[3]
        remote_addr = _decode_raw_addr(raw_remote_addr)
        # two hex digits represent numerical values for ESTABLISHED, TIME_WAIT,
        # etc.  01 == ESTABLISHED.  all else is considered not to be a
        # valid connection.
        if raw_state == '01':
          db_connections[host][remote_addr] = 1

  # start a thread pool of at most num_worker_threads and fan out the
  # per-host processlist fetches
  tp = thread_pool.ThreadPool(min(FLAGS.num_worker_threads, len(dbhosts)))
  tp.Start()
  ops = []
  for host in dbhosts:
    ops.append(tp.Submit(SessionListWorker,
                         args=(host, host_results, host_results_lock)))
  for op in ops:
    op.Wait()

  source_host = FLAGS.source_host
  # If --mine was used, set the source host to the ip address
  # associated with what uname says is our hostname.
  if FLAGS.mine:
    if source_host:
      print "--mine and --source_host are mutually exclusive"
      return 1
    source_host = socket.gethostbyname(os.uname()[1])

  # unless requested not to, guess at the terminal width and use that
  # value for displaying each line
  if FLAGS.show_full_query:
    line_width = -1
  else:
    # try a couple of ugly hacks...
    h, w = struct.unpack("hh", fcntl.ioctl(0, termios.TIOCGWINSZ, "xxxx"))
    line_width = w or int(os.getenv("COLUMNS", "80"))

  session_filter = SessionFilter(FLAGS.idle, FLAGS.busy, FLAGS.time,
                                 FLAGS.query_contains, FLAGS.query_matches,
                                 FLAGS.query_state,
                                 FLAGS.source_user, source_host,
                                 db_connections)

  if not host_results:
    print "No results found; unable to connect?"
    sys.exit(1)

  # compute some widths for beautifying the display
  max_source_host_width = max([ len(s) for s in host_results ])
  max_host_width = 0
  for result in host_results.values():
    for session in result:
      max_host_width = max(max_host_width, len(session.host))

  # aggregate data used for summary display
  users = {}
  hosts = {}
  commands = {}
  busy_time = 0

  # process each host in a tolerable order
  host_order = host_results.keys()
  host_order.sort()
  matched_sessions = []
  for host in host_order:
    sessions = host_results[host]
    for session in sessions:
      if session_filter.match(session):
        users[session.user] = users.get(session.user, 0) + 1
        hosts[session.host] = hosts.get(session.host, 0) + 1
        commands[session.command] = commands.get(session.command, 0) + 1
        if session.command == "Query":
          busy_time += session.time
        if not FLAGS.show_summary:
          print session.display(max_host_width, line_width,
                                FLAGS.hide_source, max_source_host_width)
        # never kill MySQL's own 'system user' (replication) threads
        if FLAGS.kill and session.user != 'system user':
          matched_sessions.append(session)

  killed = 0
  if FLAGS.kill:
    if not FLAGS.kill_without_confirm and len(matched_sessions) > 0:
      print "Kill the above queries? (y/n)",
      answer = raw_input()
      if answer.upper() not in ('Y', 'YES'):
        print "Aborting!"
        sys.exit(1)
    for session in matched_sessions:
      killed += 1
      session.kill()

  # NOTE(review): indentation reconstructed -- the early return is placed
  # inside the summary branch so the kill report below stays reachable;
  # confirm against the original layout.
  if FLAGS.show_summary:
    ShowSummary(users, hosts, commands, busy_time)
    return 0

  if FLAGS.kill:
    if killed == 1:
      plural = ''
    else:
      plural = 's'
    print
    msg = ""
    if "system user" in users:
      msg = " ('system user' sessions not killed)"
    print "%d session%s killed%s" % (killed, plural, msg)
if __name__ == "__main__":
  # parse flags out of argv, then hand the remaining host args to main()
  new_argv = flags.ParseArgs(sys.argv[1:])
  main([sys.argv[0]] + new_argv)
| |
import unittest
import datetime
import decimal
import Spartacus.Database
class TestPostgreSQL96(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Open a shared connection and (re)create the fixture tables.

        Connects to the PostgreSQL 9.6 test instance on port 5496, drops any
        leftover tables, then seeds `departments` and creates an empty
        `employees` table.  The connection is closed again; individual tests
        reopen it as needed.
        """
        cls.v_database = Spartacus.Database.PostgreSQL(
            "127.0.0.1", 5496, "spartacus", "spartacus", "spartacus"
        )
        cls.v_database.Open()
        cls.v_database.Execute(
            """
            DROP TABLE IF EXISTS departments;
            DROP TABLE IF EXISTS employees;
            """
        )
        cls.v_database.Execute(
            """
            CREATE TABLE departments (
              dept_no char(4) not null,
              dept_name varchar(40) not null
            );
            """
        )
        cls.v_database.Execute(
            """
            INSERT INTO departments VALUES('d009','Customer Service');
            INSERT INTO departments VALUES('d005','Development');
            INSERT INTO departments VALUES('d002','Finance');
            INSERT INTO departments VALUES('d003','Human Resources');
            INSERT INTO departments VALUES('d001','Marketing');
            INSERT INTO departments VALUES('d004','Production');
            INSERT INTO departments VALUES('d006','Quality Management');
            INSERT INTO departments VALUES('d008','Research');
            INSERT INTO departments VALUES('d007','Sales');
            """
        )
        cls.v_database.Execute(
            """
            CREATE TABLE employees (
              emp_no integer not null,
              birth_date text not null,
              first_name varchar(14) not null,
              last_name varchar(16) not null,
              gender varchar(500) not null,
              hire_date text not null
            );
            """
        )
        cls.v_database.Close()
    @classmethod
    def tearDownClass(cls):
        """Drop the fixture tables created by setUpClass."""
        cls.v_database.Open()
        cls.v_database.Execute(
            """
            DROP TABLE IF EXISTS departments;
            DROP TABLE IF EXISTS employees;
            """
        )
        cls.v_database.Close()
    def test_open_close(self):
        """Open() sets v_con; Close() resets it to None."""
        self.assertIsInstance(self.v_database, Spartacus.Database.PostgreSQL)
        self.v_database.Open()
        self.assertIsNot(self.v_database.v_con, None)
        self.v_database.Close()
        self.assertIs(self.v_database.v_con, None)

    def test_getconstatus(self):
        """GetConStatus() reports 0 when closed and 1 when open."""
        self.assertEqual(self.v_database.GetConStatus(), 0)
        self.v_database.Open()
        self.assertEqual(self.v_database.GetConStatus(), 1)
        self.v_database.Close()
        self.assertEqual(self.v_database.GetConStatus(), 0)

    def test_open_autocommit_enabled(self):
        """Open(p_autocommit=True) enables autocommit on the connection."""
        self.v_database.Open(p_autocommit=True)
        self.assertIsNot(self.v_database.v_con, None)
        self.assertTrue(self.v_database.v_con.autocommit)
        self.v_database.Close()

    def test_open_autocommit_disabled(self):
        """Open(p_autocommit=False) disables autocommit on the connection."""
        self.v_database.Open(p_autocommit=False)
        self.assertIsNot(self.v_database.v_con, None)
        self.assertFalse(self.v_database.v_con.autocommit)
        self.v_database.Close()

    def test_executescalar(self):
        """ExecuteScalar() returns the single value of a one-row query.

        Called on a closed connection, so it also exercises the implicit
        open/close path.
        """
        v_result = self.v_database.ExecuteScalar(
            "select dept_name from departments where dept_no = 'd005'"
        )
        self.assertEqual(v_result, "Development")
    def test_execute(self):
        """Execute() on an open autocommitting connection persists the row."""
        self.v_database.Open()
        self.v_database.Execute(
            "insert into departments (dept_no, dept_name) values ('d000', 'Spartacus')"
        )
        v_result = self.v_database.ExecuteScalar(
            "select dept_name from departments where dept_no = 'd000'"
        )
        # clean up so other tests see the original nine departments
        self.v_database.Execute("delete from departments where dept_no = 'd000'")
        self.v_database.Close()
        self.assertEqual(v_result, "Spartacus")

    def test_commit(self):
        """Commit() makes the insert visible after the connection is reopened."""
        self.v_database.Open(p_autocommit=False)
        self.v_database.Execute(
            "insert into departments (dept_no, dept_name) values ('d000', 'Spartacus')"
        )
        self.v_database.Commit()
        self.v_database.Close()
        self.v_database.Open()
        v_result = self.v_database.ExecuteScalar(
            "select dept_name from departments where dept_no = 'd000'"
        )
        self.v_database.Execute("delete from departments where dept_no = 'd000'")
        self.v_database.Close()
        self.assertEqual(v_result, "Spartacus")

    def test_rollback(self):
        """Rollback() discards the uncommitted insert."""
        self.v_database.Open(p_autocommit=False)
        self.v_database.Execute(
            "insert into departments (dept_no, dept_name) values ('d000', 'Spartacus')"
        )
        self.v_database.Rollback()
        self.v_database.Close()
        v_result = self.v_database.ExecuteScalar(
            "select dept_name from departments where dept_no = 'd000'"
        )
        self.assertIs(v_result, None)

    def test_close_commit(self):
        """Close(p_commit=True) commits the pending transaction."""
        self.v_database.Open(p_autocommit=False)
        self.v_database.Execute(
            "insert into departments (dept_no, dept_name) values ('d000', 'Spartacus')"
        )
        self.v_database.Close(p_commit=True)
        self.v_database.Open()
        v_result = self.v_database.ExecuteScalar(
            "select dept_name from departments where dept_no = 'd000'"
        )
        self.v_database.Execute("delete from departments where dept_no = 'd000'")
        self.v_database.Close()
        self.assertEqual(v_result, "Spartacus")

    def test_close_rollback(self):
        """Close(p_commit=False) rolls the pending transaction back."""
        self.v_database.Open(p_autocommit=False)
        self.v_database.Execute(
            "insert into departments (dept_no, dept_name) values ('d000', 'Spartacus')"
        )
        self.v_database.Close(p_commit=False)
        v_result = self.v_database.ExecuteScalar(
            "select dept_name from departments where dept_no = 'd000'"
        )
        self.assertIs(v_result, None)
    def test_getfields(self):
        """GetFields() maps each result column to a DataField with the
        expected name, Python type and PostgreSQL type name."""
        v_result = self.v_database.GetFields(
            """
            SELECT 1 AS id,
                   'Spartacus'::text AS name,
                   '1988-05-08 17:00:00'::timestamp without time zone AS birth_date,
                   9.8 AS grade
            """
        )
        self.assertEqual(len(v_result), 4)
        for r in v_result:
            self.assertIsInstance(r, Spartacus.Database.DataField)
        self.assertEqual(v_result[0].v_name, "id")
        self.assertIs(v_result[0].v_type, int)
        self.assertEqual(v_result[0].v_dbtype, "int4")
        self.assertEqual(v_result[1].v_name, "name")
        self.assertIs(v_result[1].v_type, str)
        self.assertEqual(v_result[1].v_dbtype, "text")
        self.assertEqual(v_result[2].v_name, "birth_date")
        self.assertIs(v_result[2].v_type, datetime.datetime)
        self.assertEqual(v_result[2].v_dbtype, "timestamp")
        self.assertEqual(v_result[3].v_name, "grade")
        self.assertIs(v_result[3].v_type, decimal.Decimal)
        self.assertEqual(v_result[3].v_dbtype, "numeric")
def test_query(self):
v_result = self.v_database.Query("select * from departments order by dept_no")
self.assertIsInstance(v_result, Spartacus.Database.DataTable)
v_template = ["dept_no", "dept_name"]
self.assertListEqual(v_result.Columns, v_template)
self.assertEqual(len(v_result.Rows), 9)
self.assertEqual(v_result.Rows[0]["dept_no"], "d001")
self.assertEqual(v_result.Rows[0]["dept_name"], "Marketing")
self.assertEqual(v_result.Rows[1]["dept_no"], "d002")
self.assertEqual(v_result.Rows[1]["dept_name"], "Finance")
self.assertEqual(v_result.Rows[2]["dept_no"], "d003")
self.assertEqual(v_result.Rows[2]["dept_name"], "Human Resources")
self.assertEqual(v_result.Rows[3]["dept_no"], "d004")
self.assertEqual(v_result.Rows[3]["dept_name"], "Production")
self.assertEqual(v_result.Rows[4]["dept_no"], "d005")
self.assertEqual(v_result.Rows[4]["dept_name"], "Development")
self.assertEqual(v_result.Rows[5]["dept_no"], "d006")
self.assertEqual(v_result.Rows[5]["dept_name"], "Quality Management")
self.assertEqual(v_result.Rows[6]["dept_no"], "d007")
self.assertEqual(v_result.Rows[6]["dept_name"], "Sales")
self.assertEqual(v_result.Rows[7]["dept_no"], "d008")
self.assertEqual(v_result.Rows[7]["dept_name"], "Research")
self.assertEqual(v_result.Rows[8]["dept_no"], "d009")
self.assertEqual(v_result.Rows[8]["dept_name"], "Customer Service")
def test_query_simple(self):
v_result = self.v_database.Query(
"select * from departments order by dept_no", p_simple=True
)
self.assertIsInstance(v_result, Spartacus.Database.DataTable)
v_template = ["dept_no", "dept_name"]
self.assertListEqual(v_result.Columns, v_template)
self.assertEqual(len(v_result.Rows), 9)
self.assertEqual(v_result.Rows[0][0], "d001")
self.assertEqual(v_result.Rows[0][1], "Marketing")
self.assertEqual(v_result.Rows[1][0], "d002")
self.assertEqual(v_result.Rows[1][1], "Finance")
self.assertEqual(v_result.Rows[2][0], "d003")
self.assertEqual(v_result.Rows[2][1], "Human Resources")
self.assertEqual(v_result.Rows[3][0], "d004")
self.assertEqual(v_result.Rows[3][1], "Production")
self.assertEqual(v_result.Rows[4][0], "d005")
self.assertEqual(v_result.Rows[4][1], "Development")
self.assertEqual(v_result.Rows[5][0], "d006")
self.assertEqual(v_result.Rows[5][1], "Quality Management")
self.assertEqual(v_result.Rows[6][0], "d007")
self.assertEqual(v_result.Rows[6][1], "Sales")
self.assertEqual(v_result.Rows[7][0], "d008")
self.assertEqual(v_result.Rows[7][1], "Research")
self.assertEqual(v_result.Rows[8][0], "d009")
self.assertEqual(v_result.Rows[8][1], "Customer Service")
    def test_query_types(self):
        """Query() must map PostgreSQL column types to proper Python types."""
        v_result = self.v_database.Query(
            """
            SELECT 1 AS id,
                'Spartacus'::text AS name,
                '1988-05-08 17:00:00'::timestamp without time zone AS birth_date,
                9.8 AS grade
            """
        )
        self.assertIsInstance(v_result, Spartacus.Database.DataTable)
        v_template = ["id", "name", "birth_date", "grade"]
        self.assertListEqual(v_result.Columns, v_template)
        self.assertEqual(len(v_result.Rows), 1)
        # integer -> int
        self.assertEqual(v_result.Rows[0]["id"], 1)
        self.assertIsInstance(v_result.Rows[0]["id"], int)
        # text -> str
        self.assertEqual(v_result.Rows[0]["name"], "Spartacus")
        self.assertIsInstance(v_result.Rows[0]["name"], str)
        # timestamp without time zone -> naive datetime.datetime
        self.assertEqual(
            v_result.Rows[0]["birth_date"],
            datetime.datetime.strptime("1988-05-08 17:00:00", "%Y-%m-%d %H:%M:%S"),
        )
        self.assertIsInstance(v_result.Rows[0]["birth_date"], datetime.datetime)
        # numeric -> decimal.Decimal (compared via float to avoid Decimal repr)
        self.assertEqual(float(v_result.Rows[0]["grade"]), 9.8)
        self.assertIsInstance(v_result.Rows[0]["grade"], decimal.Decimal)
def test_query_alltypesstr(self):
v_result = self.v_database.Query(
"""
SELECT 1 AS id,
'Spartacus'::text AS name,
'1988-05-08 17:00:00'::timestamp without time zone AS birth_date,
9.8 AS grade
""",
p_alltypesstr=True,
)
self.assertIsInstance(v_result, Spartacus.Database.DataTable)
v_template = ["id", "name", "birth_date", "grade"]
self.assertListEqual(v_result.Columns, v_template)
self.assertEqual(len(v_result.Rows), 1)
self.assertEqual(v_result.Rows[0]["id"], "1")
self.assertIsInstance(v_result.Rows[0]["id"], str)
self.assertEqual(v_result.Rows[0]["name"], "Spartacus")
self.assertIsInstance(v_result.Rows[0]["name"], str)
self.assertEqual(v_result.Rows[0]["birth_date"], "1988-05-08 17:00:00")
self.assertIsInstance(v_result.Rows[0]["birth_date"], str)
self.assertEqual(v_result.Rows[0]["grade"], "9.8")
self.assertIsInstance(v_result.Rows[0]["grade"], str)
def test_queryblock_connection_not_open(self):
with self.assertRaises(Spartacus.Database.Exception):
v_result = self.v_database.QueryBlock(
"select * from departments order by dept_no", 4
)
    def test_queryblock(self):
        """QueryBlock() must page through results and reset v_start at the end."""
        self.v_database.Open()
        # v_start is True before the first block is fetched.
        self.assertTrue(self.v_database.v_start)
        v_result = self.v_database.QueryBlock(
            "select * from departments order by dept_no", 4
        )
        self.assertFalse(self.v_database.v_start)
        self.assertEqual(len(v_result.Rows), 4)
        v_result = self.v_database.QueryBlock(
            "select * from departments order by dept_no", 4
        )
        self.assertFalse(self.v_database.v_start)
        self.assertEqual(len(v_result.Rows), 4)
        # Third call drains the remaining single row (9 total) and flips
        # v_start back to True to signal the cursor is exhausted.
        v_result = self.v_database.QueryBlock(
            "select * from departments order by dept_no", 4
        )
        self.assertTrue(self.v_database.v_start)
        self.assertEqual(len(v_result.Rows), 1)
        self.v_database.Close()
        self.assertTrue(self.v_database.v_start)
def test_insertblock(self):
v_table = Spartacus.Database.DataTable()
v_table.AddColumn("dept_no")
v_table.AddColumn("dept_name")
v_table.AddRow(["d010", "Spartacus"])
v_table.AddRow(["d011", "Python"])
self.v_database.InsertBlock(v_table, "departments")
v_result = self.v_database.Query(
"select * from departments where dept_no in ('d010', 'd011')"
)
self.assertEqual(len(v_result.Rows), 2)
self.assertEqual(v_result.Rows[0]["dept_no"], "d010")
self.assertEqual(v_result.Rows[0]["dept_name"], "Spartacus")
self.assertEqual(v_result.Rows[1]["dept_no"], "d011")
self.assertEqual(v_result.Rows[1]["dept_name"], "Python")
self.v_database.Execute(
"delete from departments where dept_no in ('d010', 'd011')"
)
    def test_insertblock_fields(self):
        """InsertBlock() must honor an explicit list of DataField descriptors."""
        # Describe the target table by probing a single row.
        v_fields = self.v_database.GetFields("select * from employees limit 1")
        v_table = Spartacus.Database.DataTable()
        for f in v_fields:
            v_table.AddColumn(f.v_name)
        v_table.AddRow([500000, "1988-05-08", "Spartacus", "Python", "M", "2006-01-01"])
        v_table.AddRow([500001, "1988-05-08", "Spartacus", "Python", "M", "2006-01-01"])
        self.v_database.InsertBlock(v_table, "employees", v_fields)
        v_result = self.v_database.Query(
            "select * from employees where emp_no in (500000, 500001)"
        )
        self.assertEqual(len(v_result.Rows), 2)
        self.assertEqual(v_result.Rows[0]["emp_no"], 500000)
        self.assertEqual(v_result.Rows[0]["first_name"], "Spartacus")
        self.assertEqual(v_result.Rows[1]["emp_no"], 500001)
        self.assertEqual(v_result.Rows[1]["first_name"], "Spartacus")
        # Clean up the inserted rows.
        self.v_database.Execute(
            "delete from employees where emp_no in (500000, 500001)"
        )
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| |
#!/usr/bin/env python
# coding=utf-8
###############################################################################
import os
import sys
import inspect
import traceback
import optparse
import logging
import configobj
try:
# python 2.6
import unittest2 as unittest
except ImportError:
import unittest
try:
import cPickle as pickle
except ImportError:
import pickle as pickle
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
from setproctitle import setproctitle
except ImportError:
setproctitle = None
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__))))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
'src')))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
'src', 'collectors')))
def run_only(func, predicate):
    """Return *func* unchanged when *predicate*() is truthy.

    When the predicate is falsy (e.g. an optional dependency is missing),
    return a no-op stand-in so the wrapped test silently does nothing.
    """
    if predicate():
        return func
    else:
        # Accept any call signature: the original stub took exactly one
        # positional argument and broke for other signatures.
        def f(*args, **kwargs):
            pass
        return f
def get_collector_config(key, value):
    """Build a minimal collector ConfigObj with *value* stored under *key*."""
    config = configobj.ConfigObj()
    config['server'] = {'collectors_config_path': ''}
    config['collectors'] = {
        'default': {'hostname_method': "uname_short"},
        key: value,
    }
    return config
class CollectorTestCase(unittest.TestCase):
def setDocExample(self, collector, metrics, defaultpath=None):
if not len(metrics):
return False
filePath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'docs', 'collectors', collector + '.md')
if not os.path.exists(filePath):
return False
if not os.access(filePath, os.W_OK):
return False
if not os.access(filePath, os.R_OK):
return False
try:
with open(filePath, 'Ur') as fp:
content = fp.readlines()
with open(filePath, 'w') as fp:
for line in content:
if line.strip() == '__EXAMPLESHERE__':
for metric in sorted(metrics.iterkeys()):
metricPath = 'servers.hostname.'
if defaultpath:
metricPath += defaultpath + '.'
metricPath += metric
metricPath = metricPath.replace('..', '.')
fp.write('%s %s\n' % (metricPath, metrics[metric]))
else:
fp.write(line)
except IOError:
return False
return True
def getFixtureDirPath(self):
path = os.path.join(
os.path.dirname(inspect.getfile(self.__class__)),
'fixtures')
return path
def getFixturePath(self, fixture_name):
path = os.path.join(self.getFixtureDirPath(),
fixture_name)
if not os.access(path, os.R_OK):
print "Missing Fixture " + path
return path
def getFixture(self, fixture_name):
with open(self.getFixturePath(fixture_name), 'r') as f:
return StringIO(f.read())
def getFixtures(self):
fixtures = []
for root, dirnames, filenames in os.walk(self.getFixtureDirPath()):
fixtures.append(os.path.join(root, dirnames, filenames))
return fixtures
def getPickledResults(self, results_name):
with open(self.getFixturePath(results_name), 'r') as f:
return pickle.load(f)
def setPickledResults(self, results_name, data):
with open(self.getFixturePath(results_name), 'w+b') as f:
pickle.dump(data, f)
def assertUnpublished(self, mock, key, value, expected_value=0):
return self.assertPublished(mock, key, value, expected_value)
def assertPublished(self, mock, key, value, expected_value=1):
if type(mock) is list:
for m in mock:
calls = (filter(lambda x: x[0][0] == key, m.call_args_list))
if len(calls) > 0:
break
else:
calls = filter(lambda x: x[0][0] == key, mock.call_args_list)
actual_value = len(calls)
message = '%s: actual number of calls %d, expected %d' % (
key, actual_value, expected_value)
self.assertEqual(actual_value, expected_value, message)
if expected_value:
actual_value = calls[0][0][1]
expected_value = value
precision = 0
if isinstance(value, tuple):
expected_value, precision = expected_value
message = '%s: actual %r, expected %r' % (key,
actual_value,
expected_value)
if precision is not None:
self.assertAlmostEqual(float(actual_value),
float(expected_value),
places=precision,
msg=message)
else:
self.assertEqual(actual_value, expected_value, message)
def assertUnpublishedMany(self, mock, dict, expected_value=0):
return self.assertPublishedMany(mock, dict, expected_value)
def assertPublishedMany(self, mock, dict, expected_value=1):
for key, value in dict.iteritems():
self.assertPublished(mock, key, value, expected_value)
if type(mock) is list:
for m in mock:
m.reset_mock()
else:
mock.reset_mock()
def assertUnpublishedMetric(self, mock, key, value, expected_value=0):
return self.assertPublishedMetric(mock, key, value, expected_value)
def assertPublishedMetric(self, mock, key, value, expected_value=1):
calls = filter(lambda x: x[0][0].path.find(key) != -1,
mock.call_args_list)
actual_value = len(calls)
message = '%s: actual number of calls %d, expected %d' % (
key, actual_value, expected_value)
self.assertEqual(actual_value, expected_value, message)
if expected_value:
actual_value = calls[0][0][0].value
expected_value = value
precision = 0
if isinstance(value, tuple):
expected_value, precision = expected_value
message = '%s: actual %r, expected %r' % (key,
actual_value,
expected_value)
if precision is not None:
self.assertAlmostEqual(float(actual_value),
float(expected_value),
places=precision,
msg=message)
else:
self.assertEqual(actual_value, expected_value, message)
def assertUnpublishedMetricMany(self, mock, dict, expected_value=0):
return self.assertPublishedMetricMany(mock, dict, expected_value)
def assertPublishedMetricMany(self, mock, dict, expected_value=1):
for key, value in dict.iteritems():
self.assertPublishedMetric(mock, key, value, expected_value)
mock.reset_mock()
# Registry of successfully imported test modules, keyed by module name.
collectorTests = {}


def getCollectorTests(path):
    """Recursively import every test*.py module below *path* into collectorTests."""
    for f in os.listdir(path):
        cPath = os.path.abspath(os.path.join(path, f))

        # Only plain files named test*.py are candidate test modules.
        if ((os.path.isfile(cPath) and
            len(f) > 3 and
            f[-3:] == '.py' and
            f[0:4] == 'test')):
            # Make the module and its parent package importable by name.
            sys.path.append(os.path.dirname(cPath))
            sys.path.append(os.path.dirname(os.path.dirname(cPath)))
            modname = f[:-3]
            try:
                # Import the module
                collectorTests[modname] = __import__(modname,
                                                     globals(),
                                                     locals(),
                                                     ['*'])
            except Exception:
                # A broken test module must not abort discovery of the rest.
                print "Failed to import module: %s. %s" % (
                    modname, traceback.format_exc())
                continue

    # Second pass: recurse into subdirectories.
    for f in os.listdir(path):
        cPath = os.path.abspath(os.path.join(path, f))
        if os.path.isdir(cPath):
            getCollectorTests(cPath)
###############################################################################

# Script entry point: discover collector test modules, run them, and exit
# with 0 on success, 1 on assertion failures, 2 on errors.
if __name__ == "__main__":
    if setproctitle:
        setproctitle('test.py')

    # Disable log output for the unit tests
    log = logging.getLogger("diamond")
    log.addHandler(logging.StreamHandler(sys.stderr))
    log.disabled = True

    # Initialize Options
    parser = optparse.OptionParser()
    parser.add_option("-c",
                      "--collector",
                      dest="collector",
                      default="",
                      help="Run a single collector's unit tests")
    parser.add_option("-v",
                      "--verbose",
                      dest="verbose",
                      default=1,
                      action="count",
                      help="verbose")

    # Parse Command Line Args
    (options, args) = parser.parse_args()

    # Load test modules for the requested collector (all when empty string).
    cPath = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                         'src',
                                         'collectors',
                                         options.collector))
    dPath = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                         'src',
                                         'diamond'))
    getCollectorTests(cPath)

    if not options.collector:
        # Only pull in diamond tests when a specific collector
        # hasn't been specified
        getCollectorTests(dPath)

    # Collect every TestCase subclass from the imported modules.
    loader = unittest.TestLoader()
    tests = []
    for test in collectorTests:
        for name, c in inspect.getmembers(collectorTests[test],
                                          inspect.isclass):
            if not issubclass(c, unittest.TestCase):
                continue
            tests.append(loader.loadTestsFromTestCase(c))

    suite = unittest.TestSuite(tests)
    results = unittest.TextTestRunner(verbosity=options.verbose).run(suite)

    # Parse the textual TestResult repr (e.g. "run=3 errors=0 failures=0")
    # into a dict so the exit code can reflect the outcome.
    results = str(results)
    results = results.replace('>', '').split()[1:]
    resobj = {}

    for result in results:
        result = result.split('=')
        resobj[result[0]] = int(result[1])

    if resobj['failures'] > 0:
        sys.exit(1)
    if resobj['errors'] > 0:
        sys.exit(2)

    sys.exit(0)
| |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils used to manipulate tensor shapes."""
from six.moves import zip
import tensorflow.compat.v1 as tf
from tensorflow_examples.lite.model_maker.third_party.efficientdet.visualize import static_shape
def _is_tensor(t):
  """Returns a boolean indicating whether the input is a tensor.

  Args:
    t: the input to be tested.

  Returns:
    a boolean that indicates whether t is a tensor.
  """
  tensor_types = (tf.Tensor, tf.SparseTensor, tf.Variable)
  return isinstance(t, tensor_types)
def _set_dim_0(t, d0):
  """Sets the 0-th dimension of the input tensor.

  Args:
    t: the input tensor, assuming the rank is at least 1.
    d0: an integer indicating the 0-th dimension of the input tensor.

  Returns:
    the tensor t with the 0-th dimension set.
  """
  static_shape = t.get_shape().as_list()
  t.set_shape([d0] + static_shape[1:])
  return t
def pad_tensor(t, length):
  """Pads the input tensor with 0s along the first dimension up to the length.

  Args:
    t: the input tensor, assuming the rank is at least 1.
    length: a tensor of shape [1] or an integer, indicating the first dimension
      of the input tensor t after padding, assuming length <= t.shape[0].

  Returns:
    padded_t: the padded tensor, whose first dimension is length. If the length
      is an integer, the first dimension of padded_t is set to length
      statically.
  """
  t_rank = tf.rank(t)
  t_shape = tf.shape(t)
  t_d0 = t_shape[0]
  # Number of zero rows that must be appended along dimension 0.
  pad_d0 = tf.expand_dims(length - t_d0, 0)
  # Rank > 1: keep trailing dimensions of t; rank 1: the pad is 1-D.
  pad_shape = tf.cond(
      tf.greater(t_rank, 1), lambda: tf.concat([pad_d0, t_shape[1:]], 0),
      lambda: tf.expand_dims(length - t_d0, 0))
  padded_t = tf.concat([t, tf.zeros(pad_shape, dtype=t.dtype)], 0)
  if not _is_tensor(length):
    # Static length: record the now-known first dimension in the static shape.
    padded_t = _set_dim_0(padded_t, length)
  return padded_t
def clip_tensor(t, length):
  """Clips the input tensor along the first dimension up to the length.

  Args:
    t: the input tensor, assuming the rank is at least 1.
    length: a tensor of shape [1] or an integer, indicating the first dimension
      of the input tensor t after clipping, assuming length <= t.shape[0].

  Returns:
    clipped_t: the clipped tensor, whose first dimension is length. If the
      length is an integer, the first dimension of clipped_t is set to length
      statically.
  """
  # Keep only the first `length` entries along dimension 0.
  leading_indices = tf.range(length)
  clipped = tf.gather(t, leading_indices)
  if _is_tensor(length):
    return clipped
  # Static length: record the known first dimension in the static shape.
  return _set_dim_0(clipped, length)
def pad_or_clip_tensor(t, length):
  """Pad or clip the input tensor along the first dimension.

  Args:
    t: the input tensor, assuming the rank is at least 1.
    length: a tensor of shape [1] or an integer, indicating the first dimension
      of the input tensor t after processing.

  Returns:
    processed_t: the processed tensor, whose first dimension is length. If the
      length is an integer, the first dimension of the processed tensor is set
      to length statically.
  """
  # Delegate to the n-d version, forcing only the first dimension.
  target_shape = [length] + t.shape.as_list()[1:]
  return pad_or_clip_nd(t, target_shape)
def pad_or_clip_nd(tensor, output_shape):
  """Pad or Clip given tensor to the output shape.

  Args:
    tensor: Input tensor to pad or clip.
    output_shape: A list of integers / scalar tensors (or None for dynamic dim)
      representing the size to pad or clip each dimension of the input tensor.

  Returns:
    Input tensor padded and clipped to the output shape.
  """
  tensor_shape = tf.shape(tensor)
  # Per-dimension slice size: the target size where the tensor is too large,
  # -1 ("take everything") where it is already small enough or dynamic.
  clip_size = [
      tf.where(tensor_shape[i] - shape > 0, shape, -1)
      if shape is not None else -1 for i, shape in enumerate(output_shape)
  ]
  clipped_tensor = tf.slice(
      tensor,
      begin=tf.zeros(len(clip_size), dtype=tf.int32),
      size=clip_size)

  # Pad tensor if the shape of clipped tensor is smaller than the expected
  # shape.
  clipped_tensor_shape = tf.shape(clipped_tensor)
  trailing_paddings = [
      shape - clipped_tensor_shape[i] if shape is not None else 0
      for i, shape in enumerate(output_shape)
  ]
  # [num_dims, 2] paddings: zero in front, trailing_paddings behind.
  paddings = tf.stack(
      [
          tf.zeros(len(trailing_paddings), dtype=tf.int32),
          trailing_paddings
      ],
      axis=1)
  padded_tensor = tf.pad(clipped_tensor, paddings=paddings)
  # Dimensions given as tensors are dynamic, so their static shape is None.
  output_static_shape = [
      dim if not isinstance(dim, tf.Tensor) else None for dim in output_shape
  ]
  padded_tensor.set_shape(output_static_shape)
  return padded_tensor
def combined_static_and_dynamic_shape(tensor):
  """Returns a list containing static and dynamic values for the dimensions.

  Returns a list of static and dynamic values for shape dimensions. This is
  useful to preserve static shapes when available in reshape operation.

  Args:
    tensor: A tensor of any type.

  Returns:
    A list of size tensor.shape.ndims containing integers or a scalar tensor.
  """
  static_shape = tensor.shape.as_list()
  dynamic_shape = tf.shape(tensor)
  # Prefer the statically-known size; fall back to the runtime value.
  return [
      static_dim if static_dim is not None else dynamic_shape[index]
      for index, static_dim in enumerate(static_shape)
  ]
def static_or_dynamic_map_fn(fn, elems, dtype=None,
                             parallel_iterations=32, back_prop=True):
  """Runs map_fn as a (static) for loop when possible.

  This function rewrites the map_fn as an explicit unstack input -> for loop
  over function calls -> stack result combination. This allows our graphs to
  be acyclic when the batch size is static.
  For comparison, see https://www.tensorflow.org/api_docs/python/tf/map_fn.

  Note that `static_or_dynamic_map_fn` currently is not *fully* interchangeable
  with the default tf.map_fn function as it does not accept nested inputs (only
  Tensors or lists of Tensors).  Likewise, the output of `fn` can only be a
  Tensor or list of Tensors.

  TODO(jonathanhuang): make this function fully interchangeable with tf.map_fn.

  Args:
    fn: The callable to be performed. It accepts one argument, which will have
      the same structure as elems. Its output must have the
      same structure as elems.
    elems: A tensor or list of tensors, each of which will
      be unpacked along their first dimension. The sequence of the
      resulting slices will be applied to fn.
    dtype:  (optional) The output type(s) of fn. If fn returns a structure of
      Tensors differing from the structure of elems, then dtype is not optional
      and must have the same structure as the output of fn.
    parallel_iterations: (optional) number of batch items to process in
      parallel.  This flag is only used if the native tf.map_fn is used
      and defaults to 32 instead of 10 (unlike the standard tf.map_fn default).
    back_prop: (optional) True enables support for back propagation.
      This flag is only used if the native tf.map_fn is used.

  Returns:
    A tensor or sequence of tensors. Each tensor packs the
    results of applying fn to tensors unpacked from elems along the first
    dimension, from first to last.

  Raises:
    ValueError: if `elems` is not a Tensor or a list of Tensors.
    ValueError: if `fn` does not return a Tensor or list of Tensors
  """
  if isinstance(elems, list):
    for elem in elems:
      if not isinstance(elem, tf.Tensor):
        raise ValueError('`elems` must be a Tensor or list of Tensors.')

    elem_shapes = [elem.shape.as_list() for elem in elems]
    # Fall back on tf.map_fn if shapes of each entry of `elems` are None or fail
    # to all be the same size along the batch dimension.
    for elem_shape in elem_shapes:
      if (not elem_shape or not elem_shape[0]
          or elem_shape[0] != elem_shapes[0][0]):
        return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop)
    # Static batch: unroll into an explicit Python loop over unstacked slices.
    arg_tuples = zip(*[tf.unstack(elem) for elem in elems])
    outputs = [fn(arg_tuple) for arg_tuple in arg_tuples]
  else:
    if not isinstance(elems, tf.Tensor):
      raise ValueError('`elems` must be a Tensor or list of Tensors.')
    elems_shape = elems.shape.as_list()
    if not elems_shape or not elems_shape[0]:
      # Dynamic batch dimension: defer to the native tf.map_fn.
      return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop)
    outputs = [fn(arg) for arg in tf.unstack(elems)]
  # Stack `outputs`, which is a list of Tensors or list of lists of Tensors
  if all([isinstance(output, tf.Tensor) for output in outputs]):
    return tf.stack(outputs)
  else:
    if all([isinstance(output, list) for output in outputs]):
      if all([all(
          [isinstance(entry, tf.Tensor) for entry in output_list])
              for output_list in outputs]):
        # Transpose list-of-lists so each output position is stacked together.
        return [tf.stack(output_tuple) for output_tuple in zip(*outputs)]
  raise ValueError('`fn` should return a Tensor or a list of Tensors.')
def check_min_image_dim(min_dim, image_tensor):
  """Checks that the image width/height are greater than some number.

  This function is used to check that the width and height of an image are
  above a certain value. If the image shape is static, this function will
  perform the check at graph construction time. Otherwise, if the image shape
  varies, an Assertion control dependency will be added to the graph.

  Args:
    min_dim: The minimum number of pixels along the width and height of the
             image.
    image_tensor: The image tensor to check size for.

  Returns:
    If `image_tensor` has dynamic size, return `image_tensor` with a Assert
    control dependency. Otherwise returns image_tensor.

  Raises:
    ValueError: if `image_tensor`'s' width or height is smaller than `min_dim`.
  """
  image_shape = image_tensor.get_shape()
  image_height = static_shape.get_height(image_shape)
  image_width = static_shape.get_width(image_shape)
  if image_height is None or image_width is None:
    # Dynamic shape: defer the check to run time through an Assert dependency.
    shape_assert = tf.Assert(
        tf.logical_and(tf.greater_equal(tf.shape(image_tensor)[1], min_dim),
                       tf.greater_equal(tf.shape(image_tensor)[2], min_dim)),
        ['image size must be >= {} in both height and width.'.format(min_dim)])
    with tf.control_dependencies([shape_assert]):
      return tf.identity(image_tensor)

  # Static shape: fail immediately at graph construction time.
  if image_height < min_dim or image_width < min_dim:
    raise ValueError(
        'image size must be >= %d in both height and width; image dim = %d,%d' %
        (min_dim, image_height, image_width))

  return image_tensor
def assert_shape_equal(shape_a, shape_b):
  """Asserts that shape_a and shape_b are equal.

  If the shapes are static, raises a ValueError when the shapes
  mismatch.

  If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
  mismatch.

  Args:
    shape_a: a list containing shape of the first tensor.
    shape_b: a list containing shape of the second tensor.

  Returns:
    Either a tf.no_op() when shapes are all static and a tf.assert_equal() op
    when the shapes are dynamic.

  Raises:
    ValueError: When shapes are both static and unequal.
  """
  a_is_static = all(isinstance(dim, int) for dim in shape_a)
  b_is_static = all(isinstance(dim, int) for dim in shape_b)
  if not (a_is_static and b_is_static):
    # At least one dynamic dimension: compare at run time.
    return tf.assert_equal(shape_a, shape_b)
  if shape_a != shape_b:
    raise ValueError('Unequal shapes {}, {}'.format(shape_a, shape_b))
  return tf.no_op()
def assert_shape_equal_along_first_dimension(shape_a, shape_b):
  """Asserts that shape_a and shape_b are the same along the 0th-dimension.

  If the shapes are static, raises a ValueError when the shapes
  mismatch.

  If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
  mismatch.

  Args:
    shape_a: a list containing shape of the first tensor.
    shape_b: a list containing shape of the second tensor.

  Returns:
    Either a tf.no_op() when shapes are all static and a tf.assert_equal() op
    when the shapes are dynamic.

  Raises:
    ValueError: When shapes are both static and unequal.
  """
  dim_a = shape_a[0]
  dim_b = shape_b[0]
  if not (isinstance(dim_a, int) and isinstance(dim_b, int)):
    # At least one dynamic first dimension: compare at run time.
    return tf.assert_equal(dim_a, dim_b)
  if dim_a != dim_b:
    raise ValueError('Unequal first dimension {}, {}'.format(dim_a, dim_b))
  return tf.no_op()
def assert_box_normalized(boxes, maximum_normalized_coordinate=1.1):
  """Asserts the input box tensor is normalized.

  Args:
    boxes: a tensor of shape [N, 4] where N is the number of boxes.
    maximum_normalized_coordinate: Maximum coordinate value to be considered
      as normalized, default to 1.1.

  Returns:
    a tf.Assert op which fails when the input box tensor is not normalized.

  Raises:
    ValueError: When the input box tensor is not normalized.
  """
  lower_bound = tf.reduce_min(boxes)
  upper_bound = tf.reduce_max(boxes)
  # All coordinates must lie in [0, maximum_normalized_coordinate].
  within_range = tf.logical_and(
      tf.less_equal(upper_bound, maximum_normalized_coordinate),
      tf.greater_equal(lower_bound, 0))
  return tf.Assert(within_range, [boxes])
def flatten_dimensions(inputs, first, last):
  """Flattens `K-d` tensor along [first, last) dimensions.

  Converts `inputs` with shape [D0, D1, ..., D(K-1)] into a tensor of shape
  [D0, D1, ..., D(first) * D(first+1) * ... * D(last-1), D(last), ..., D(K-1)].

  Example:
  `inputs` is a tensor with initial shape [10, 5, 20, 20, 3].
  new_tensor = flatten_dimensions(inputs, first=1, last=3)
  new_tensor.shape -> [10, 100, 20, 3].

  Args:
    inputs: a tensor with shape [D0, D1, ..., D(K-1)].
    first: first value for the range of dimensions to flatten.
    last: last value for the range of dimensions to flatten. Note that the last
      dimension itself is excluded.

  Returns:
    a tensor with shape
    [D0, D1, ..., D(first) * D(first + 1) * ... * D(last - 1), D(last), ...,
     D(K-1)].

  Raises:
    ValueError: if first and last arguments are incorrect.
  """
  if first >= inputs.shape.ndims or last > inputs.shape.ndims:
    raise ValueError('`first` and `last` must be less than inputs.shape.ndims. '
                     'found {} and {} respectively while ndims is {}'.format(
                         first, last, inputs.shape.ndims))
  # Static dims where known, dynamic scalars otherwise.
  shape = combined_static_and_dynamic_shape(inputs)
  # keepdims=True keeps the product as a length-1 piece so it can be concat'd.
  flattened_dim_prod = tf.reduce_prod(shape[first:last],
                                      keepdims=True)
  new_shape = tf.concat([shape[:first], flattened_dim_prod,
                         shape[last:]], axis=0)
  return tf.reshape(inputs, new_shape)
def flatten_first_n_dimensions(inputs, n):
  """Flattens `K-d` tensor along first n dimension to be a `(K-n+1)-d` tensor.

  Converts `inputs` with shape [D0, D1, ..., D(K-1)] into a tensor of shape
  [D0 * D1 * ... * D(n-1), D(n), ... D(K-1)].

  Example:
  `inputs` is a tensor with initial shape [10, 5, 20, 20, 3].
  new_tensor = flatten_first_n_dimensions(inputs, 2)
  new_tensor.shape -> [50, 20, 20, 3].

  Args:
    inputs: a tensor with shape [D0, D1, ..., D(K-1)].
    n: The number of dimensions to flatten.

  Returns:
    a tensor with shape [D0 * D1 * ... * D(n-1), D(n), ... D(K-1)].
  """
  # Convenience wrapper: flatten the leading [0, n) dimensions.
  return flatten_dimensions(inputs, first=0, last=n)
def expand_first_dimension(inputs, dims):
  """Expands `K-d` tensor along first dimension to be a `(K+n-1)-d` tensor.

  Converts `inputs` with shape [D0, D1, ..., D(K-1)] into a tensor of shape
  [dims[0], dims[1], ..., dims[-1], D1, ..., D(k-1)].

  Example:
  `inputs` is a tensor with shape [50, 20, 20, 3].
  new_tensor = expand_first_dimension(inputs, [10, 5]).
  new_tensor.shape -> [10, 5, 20, 20, 3].

  Args:
    inputs: a tensor with shape [D0, D1, ..., D(K-1)].
    dims: List with new dimensions to expand first axis into. The length of
      `dims` is typically 2 or larger.

  Returns:
    a tensor with shape [dims[0], dims[1], ..., dims[-1], D1, ..., D(k-1)].
  """
  inputs_shape = combined_static_and_dynamic_shape(inputs)
  expanded_shape = tf.stack(dims + inputs_shape[1:])

  # Verify that it is possible to expand the first axis of inputs:
  # prod(dims) must equal the current first dimension.
  assert_op = tf.assert_equal(
      inputs_shape[0], tf.reduce_prod(tf.stack(dims)),
      message=('First dimension of `inputs` cannot be expanded into provided '
               '`dims`'))

  with tf.control_dependencies([assert_op]):
    inputs_reshaped = tf.reshape(inputs, expanded_shape)

  return inputs_reshaped
def resize_images_and_return_shapes(inputs, image_resizer_fn):
  """Resizes images using the given function and returns their true shapes.

  Args:
    inputs: a float32 Tensor representing a batch of inputs of shape
      [batch_size, height, width, channels].
    image_resizer_fn: a function which takes in a single image and outputs
      a resized image and its original shape.

  Returns:
    resized_inputs: The inputs resized according to image_resizer_fn.
    true_image_shapes: A integer tensor of shape [batch_size, 3]
      representing the height, width and number of channels in inputs.
  """
  # Reject non-float input up front; the resizer contract expects float32.
  if inputs.dtype is not tf.float32:
    raise ValueError('`resize_images_and_return_shapes` expects a'
                     ' tf.float32 tensor')

  # TODO(jonathanhuang): revisit whether to always use batch size as
  # the number of parallel iterations vs allow for dynamic batching.
  outputs = static_or_dynamic_map_fn(
      image_resizer_fn,
      elems=inputs,
      dtype=[tf.float32, tf.int32])
  resized_inputs = outputs[0]
  true_image_shapes = outputs[1]
  return resized_inputs, true_image_shapes
| |
import warnings
from string import lowercase, uppercase
import sympy
import numpy as np
from scipy.linalg import svdvals
from aliased import aliased_function, _add_aliases_to_namespace, vectorize
class Term(sympy.Symbol):
    """
    A Term is a sympy.Symbol that is
    meant to represent a term in a regression model.

    Terms can be added
    to other sympy expressions with the single convention that a
    term plus itself returns itself.

    It is meant to emulate something on the right hand side of
    a formula in R. In particular, its name can be the
    name of a field in a recarray used to create a design
    matrix.

    >>> t = Term('x')
    >>> xval = np.array([(3,),(4,),(5,)], np.dtype([('x', np.float)]))
    >>> f = t.formula
    >>> d = f.design(xval)
    >>> print d.dtype.descr
    [('x', '<f8')]
    >>> f.design(xval, return_float=True)
    array([ 3., 4., 5.])
    """
    # This flag is defined to avoid using isinstance in getterms
    # and getparams.
    _term_flag = True

    def _getformula(self):
        # Wrap this single term in a Formula (defined elsewhere in this file).
        return Formula([self])

    formula = property(_getformula, doc="Return a Formula with only terms=[self].")

    def __add__(self, other):
        # Convention: t + t == t, so duplicate terms collapse in a formula.
        if self == other:
            return self
        else:
            return sympy.Symbol.__add__(self, other)
class FactorTerm(Term):
    """A boolean (indicator) Term derived from one level of a Factor.

    Identical to Term except for multiplication: a FactorTerm times
    itself is itself (an indicator squared is the indicator).
    """
    # Marker attribute checked by is_factor_term() instead of isinstance.
    _factor_term_flag = True

    def __new__(cls, name, level):
        # The symbol is named "<factor>_<level>" so it can be matched to a
        # column of an expanded design matrix.
        term = Term.__new__(cls, "%s_%s" % (name, level))
        term.level = level
        term.factor_name = name
        return term

    def __mul__(self, other):
        # Indicator * itself == itself; otherwise defer to sympy.
        if self == other:
            return self
        return sympy.Symbol.__mul__(self, other)
class Beta(sympy.symbol.Dummy):
    # A regression coefficient bound to a specific Term.  Subclassing
    # sympy's Dummy keeps each coefficient a unique symbol even when two
    # coefficients happen to share the same printed name.
    def __new__(cls, name, term):
        """Create a coefficient symbol called `name` attached to `term`."""
        new = sympy.symbol.Dummy.__new__(cls, name)
        new._term = term  # the Term this coefficient multiplies
        return new
def getparams(expression):
    """
    Return the parameters of an expression that are not Term
    instances but are instances of sympy.Symbol.

    >>> x, y, z = [Term(l) for l in 'xyz']
    >>> f = Formula([x,y,z])
    >>> getparams(f)
    []
    >>> getparams(f.mean)
    [_b0, _b1, _b2]
    >>> th = sympy.Symbol('theta')
    >>> getparams(f.mean*sympy.exp(th))
    [theta, _b0, _b1, _b2]
    """
    atoms = set()
    # Normalize `expression` to a 1-d ndarray so scalars, sequences and
    # Formula objects (via __array__) are all handled the same way.
    expression = np.array(expression)
    if expression.shape == ():
        expression = expression.reshape((1,))
    if expression.ndim > 1:
        # np.prod replaces np.product, which was removed in NumPy 2.0.
        expression = expression.reshape((np.prod(expression.shape),))
    for term in expression:
        atoms = atoms.union(sympy.sympify(term).atoms())
    # Keep only free symbols that are not regression Terms.
    params = []
    for atom in atoms:
        if isinstance(atom, sympy.Symbol) and not is_term(atom):
            params.append(atom)
    params.sort()
    return params
def getterms(expression):
    """
    Return all instances of Term in an expression.

    >>> x, y, z = [Term(l) for l in 'xyz']
    >>> f = Formula([x,y,z])
    >>> getterms(f)
    [x, y, z]
    >>> getterms(f.mean)
    [x, y, z]
    """
    atoms = set()
    # Normalize to a 1-d ndarray (see getparams for the same idiom).
    expression = np.array(expression)
    if expression.shape == ():
        expression = expression.reshape((1,))
    if expression.ndim > 1:
        # np.prod replaces np.product, which was removed in NumPy 2.0.
        expression = expression.reshape((np.prod(expression.shape),))
    for e in expression:
        atoms = atoms.union(e.atoms())
    terms = []
    for atom in atoms:
        if is_term(atom):
            terms.append(atom)
    terms.sort()
    return terms
def make_recarray(rows, names, dtypes=None):
    """
    Create a recarray with named columns from a list of rows and names
    for the columns.

    If `rows` is an ndarray, `dtypes` must be None and the field dtypes
    come from `rows` itself.  Otherwise the data are cast as float when
    `dtypes` is None, or to the given `dtypes`.

    Parameters
    ----------
    rows: []
        Rows that will be turned into an array.
    names: [str]
        Names for the columns.
    dtypes: [str or np.dtype]
        Used to create a np.dtype, can be np.dtypes or string.

    Returns
    -------
    v : np.ndarray

    Examples
    --------
    The following tests depend on machine byte order to pass

    >>> arr = np.array([[3,4],[4,6],[6,8]])
    >>> make_recarray(arr, ['x','y'])
    array([[(3, 4)],
           [(4, 6)],
           [(6, 8)]],
          dtype=[('x', '<i8'), ('y', '<i8')])
    >>> make_recarray([[3,4],[4,6],[7,9]], 'wv', [float, int])
    array([(3.0, 4), (4.0, 6), (7.0, 9)],
          dtype=[('w', '<f8'), ('v', '<i8')])
    """
    # XXX This function is sort of one of convenience
    # Would be nice to use DataArray or something like that
    # to add axis names.
    if isinstance(rows, np.ndarray):
        if rows.dtype.isbuiltin:
            # Plain (unstructured) array: every named field reuses its dtype.
            dtype = np.dtype([(n, rows.dtype) for n in names])
        else:
            # Structured array: rename existing fields, keeping their dtypes.
            dtype = np.dtype([(n, d[1]) for n, d in zip(names, rows.dtype.descr)])
        if dtypes is not None:
            raise ValueError('dtypes not used if rows is an ndarray')
        return rows.view(dtype)
    # Builtin `float` replaces np.float, which was removed in NumPy 1.24.
    if dtypes is None:
        dtype = np.dtype([(n, float) for n in names])
    else:
        dtype = np.dtype([(n, d) for n, d in zip(names, dtypes)])
    nrows = []
    # `vector` is decided from the first row: scalar rows -> 1-field records.
    vector = -1
    for r in rows:
        if vector < 0:
            a = np.array(r)
            if a.shape == ():
                vector = True
            else:
                vector = False
        if not vector:
            nrows.append(tuple(r))
        else:
            nrows.append(r)
    if vector:
        if len(names) != 1:  # a 'row vector'
            nrows = tuple(nrows)
            return np.array(nrows, dtype)
        else:
            nrows = np.array([(r,) for r in nrows], dtype)
    return np.array(nrows, dtype)
class Formula(object):
    """
    A Formula is a model for a mean in a regression model.

    It is often given by a sequence of sympy expressions, with the mean
    model being the sum of each term multiplied by a linear regression
    coefficient.  The expressions may depend on additional Symbol
    instances, giving a non-linear regression model.
    """
    # This flag is defined to avoid using isinstance
    _formula_flag = True

    def __init__(self, seq, char = 'b'):
        """
        Inputs:
        -------
        seq : [``sympy.Basic``]
        char : character for regression coefficient
        """
        self._terms = np.asarray(seq)
        self._counter = 0
        self.char = char

    # Properties
    def _getcoefs(self):
        # Lazily build one Beta coefficient per term, named char0, char1, ...
        if not hasattr(self, '_coefs'):
            self._coefs = {}
            for term in self.terms:
                self._coefs.setdefault(term, Beta("%s%d" % (self.char, self._counter), term))
                self._counter += 1
        return self._coefs
    coefs = property(_getcoefs, doc='Coefficients in the linear regression formula.')

    def _getterms(self):
        t = self._terms
        # The Rmode flag is meant to emulate R's implicit addition of an
        # intercept to every formula. It currently cannot be changed.
        Rmode = False
        if Rmode:
            if sympy.Number(1) not in self._terms:
                t = np.array(list(t) + [sympy.Number(1)])
        return t
    terms = property(_getterms, doc='Terms in the linear regression formula.')

    def __repr__(self):
        # repr() replaces the Python-2-only backtick syntax.
        return "Formula(%s)" % repr(list(self.terms))

    def __getitem__(self, key):
        """
        Return the term such that str(term) == key.

        Parameters
        ----------
        key : str
            name of term to retrieve

        Returns
        -------
        term : sympy.Expression
        """
        names = [str(t) for t in self.terms]
        try:
            idx = names.index(key)
        except ValueError:
            raise ValueError('term %s not found' % key)
        return self.terms[idx]

    @staticmethod
    def fromrec(rec, keep=(), drop=()):
        """
        Construct a Formula from a recarray. For fields with a
        string-dtype, it is assumed that these are qualitative
        regressors, i.e. Factors.

        Parameters
        ----------
        rec: recarray
            Recarray whose field names will be used to create a formula.
        keep: []
            Field names to explicitly keep, dropping all others.
        drop: []
            Field names to drop.
        """
        # Immutable defaults replace the original mutable [] defaults.
        f = {}
        for n in rec.dtype.names:
            if rec[n].dtype.kind == 'S':
                f[n] = Factor.fromcol(rec[n], n)
            else:
                f[n] = Term(n).formula
        for d in drop:
            del(f[d])
        if keep:
            return np.sum([t for n, t in f.items() if n in keep])
        else:
            # list() so this also works with Python 3 dict views.
            return np.sum(list(f.values()))

    def subs(self, old, new):
        """ Perform a sympy substitution on all terms in the Formula,
        returning a new Formula.

        Parameters
        ----------
        old : sympy.Basic
            The expression to be changed
        new : sympy.Basic
            The value to change it to.

        Returns
        -------
        newf : Formula

        Examples
        --------
        >>> s, t = [Term(l) for l in 'st']
        >>> f, g = [sympy.Function(l) for l in 'fg']
        >>> form = Formula([f(t),g(s)])
        >>> newform = form.subs(g, sympy.Function('h'))
        >>> newform.terms
        array([f(t), h(s)], dtype=object)
        >>> form.terms
        array([f(t), g(s)], dtype=object)
        """
        return Formula([term.subs(old, new) for term in self.terms])

    def __add__(self, other):
        """
        Create a new Formula by combining terms
        of other with those of self.

        >>> x, y, z = [Term(l) for l in 'xyz']
        >>> f1 = Formula([x,y,z])
        >>> f2 = Formula([y])+I
        >>> f3=f1+f2
        >>> sorted(f3.terms)
        [1, x, y, y, z]
        """
        if not is_formula(other):
            raise ValueError('only Formula objects can be added to a Formula')
        f = Formula(np.hstack([self.terms, other.terms]))
        return f

    def __sub__(self, other):
        """
        Create a new Formula by deleting terms in other
        from self. No exceptions are raised for terms in other that do not appear in
        self.

        >>> x, y, z = [Term(l) for l in 'xyz']
        >>> f1 = Formula([x,y,z])
        >>> f2 = Formula([y])+I
        >>> f4=f1-f2
        >>> f4.mean
        _b0*x + _b1*z
        """
        if not is_formula(other):
            raise ValueError('only Formula objects can be subtracted from a Formula')
        d = list(set(self.terms).difference(other.terms))
        return Formula(d)

    def __array__(self):
        return self.terms

    def _getparams(self):
        return getparams(self.mean)
    params = property(_getparams, doc='The parameters in the Formula.')

    def _getmean(self):
        """
        Expression for the mean, expressed as a linear
        combination of terms, each with dummy variables in front.
        """
        b = [self.coefs[term] for term in self.terms]
        return np.sum(np.array(b)*self.terms)
    mean = property(_getmean, doc="Expression for the mean, expressed as a linear combination of terms, each with dummy variables in front.")

    def _getdiff(self):
        # Differentiating the mean w.r.t. each parameter yields the columns
        # of the design matrix for a (locally) linear model.
        p = list(set(getparams(self.mean)))
        p.sort()
        return [s.doit() for s in sympy.diff(self.mean, p)]
    design_expr = property(_getdiff)

    def _getdtype(self):
        # Builtin `float` replaces np.float (removed in NumPy 1.24).
        vnames = [str(s) for s in self.design_expr]
        return np.dtype([(n, float) for n in vnames])
    dtype = property(_getdtype, doc='The dtype of the design matrix of the Formula.')

    def __mul__(self, other):
        if not is_formula(other):
            raise ValueError('only two Formulas can be multiplied together')
        if is_factor(self):
            if self == other:
                return self
        v = []
        # Compute the pairwise product of each term
        # If either one is a Term, use Term's multiplication
        for sterm in self.terms:
            for oterm in other.terms:
                if is_term(sterm):
                    v.append(Term.__mul__(sterm, oterm))
                elif is_term(oterm):
                    v.append(Term.__mul__(oterm, sterm))
                else:
                    v.append(sterm*oterm)
        return Formula(tuple(np.unique(v)))

    def __eq__(self, other):
        s = np.array(self)
        o = np.array(other)
        if s.shape != o.shape:
            return False
        # np.all replaces np.alltrue (removed in NumPy 2.0).
        return bool(np.all(np.equal(s, o)))

    def _setup_design(self):
        """
        Create a callable object to evaluate the design matrix
        at a given set of parameter values to be specified by
        a recarray and observed Term values, also specified
        by a recarray.
        """
        d = self.design_expr
        # Before evaluating, we recreate the formula
        # with numbered terms, and numbered parameters.
        # This renaming has no impact on the
        # final design matrix as the
        # callable, self._f below, is a lambda
        # that does not care about the names of the terms.

        # First, find all terms in the mean expression,
        # and rename them in the form "__t%d__" with a
        # random offset.
        # This may cause a possible problem
        # when there are parameters named something like "__t%d__".
        # Using the random offset will minimize the possibility
        # of this happening.
        # This renaming is here principally because of the intercept.
        # randint replaces the deprecated np.random.random_integers;
        # high is exclusive, hence 2**30 + 1 for the same range.
        random_offset = np.random.randint(low=0, high=2**30 + 1)
        terms = getterms(self.mean)
        newterms = []
        for i, t in enumerate(terms):
            newt = sympy.DeferredVector("__t%d__" % (i + random_offset))
            for j, _ in enumerate(d):
                d[j] = d[j].subs(t, newt)
            newterms.append(newt)
        # Next, find all parameters that remain in the design expression.
        # In a standard regression model, there will be no parameters
        # because they will all be differentiated away in computing
        # self.design_expr. In nonlinear models, parameters will remain.
        params = getparams(self.design_expr)
        newparams = []
        for i, p in enumerate(params):
            # Dummy replaces the removed Symbol(..., dummy=True) kwarg.
            newp = sympy.symbol.Dummy("__p%d__" % (i + random_offset))
            for j, _ in enumerate(d):
                d[j] = d[j].subs(p, newp)
            newparams.append(newp)
        # If there are any aliased functions, these need to be added
        # to the name space before sympy lambdifies the expression
        # These "aliased" functions are used for things like
        # the natural splines, etc. You can represent natural splines
        # with sympy but the expression is pretty awful.
        _namespace = {}
        _add_aliases_to_namespace(_namespace, *d)
        self._f = sympy.lambdify(newparams + newterms, d, (_namespace, "numpy"))
        # The input to self.design will be a recarray of that must
        # have field names that the Formula will expect to see.
        # However, if any of self.terms are FactorTerms, then the field
        # in the recarray will not actually be in the Term.
        #
        # For example, if there is a Factor 'f' with levels ['a','b'],
        # there will be terms 'f_a' and 'f_b', though the input to
        # design will have a field named 'f'. In this sense,
        # the recarray used in the call to self.design
        # is not really made up of terms, but "preterms".
        preterm = []
        for t in terms:
            if not is_factor_term(t):
                preterm.append(str(t))
            else:
                preterm.append(t.factor_name)
        preterm = list(set(preterm))
        # There is also an argument for parameters that are not Terms.
        # Builtin `float` replaces np.float (removed in NumPy 1.24).
        self._dtypes = {'param':np.dtype([(str(p), float) for p in params]),
                        'term':np.dtype([(str(t), float) for t in terms]),
                        'preterm':np.dtype([(n, float) for n in preterm])}
        self.__terms = terms

    def design(self,
               input,
               param=None,
               return_float=False,
               contrasts=None):
        """ Construct the design matrix, and optional contrast matrices.

        Parameters
        ----------
        input : np.recarray
            Recarray including fields needed to compute the Terms in
            getparams(self.design_expr).
        param : None or np.recarray
            Recarray including fields that are not Terms in
            getparams(self.design_expr)
        return_float : bool, optional
            If True, return a float array rather than a np.recarray
        contrasts : None or dict, optional
            Contrasts. The items in this dictionary should be (str,
            Formula) pairs where a contrast matrix is constructed for
            each Formula by evaluating its design at the same parameters
            as self.design. If not None, then the return_float is set to True.
        """
        self._setup_design()
        preterm_recarray = input
        param_recarray = param
        # The input to design should have field names for all fields in self._dtypes['preterm']
        if not set(preterm_recarray.dtype.names).issuperset(self._dtypes['preterm'].names):
            raise ValueError("for term, expecting a recarray with dtype having the following names: %s" % repr(self._dtypes['preterm'].names))
        # The parameters should have field names for all fields in self._dtypes['param']
        if param_recarray is not None:
            if not set(param_recarray.dtype.names).issuperset(self._dtypes['param'].names):
                raise ValueError("for param, expecting a recarray with dtype having the following names: %s" % repr(self._dtypes['param'].names))
        # If the only term is an intercept,
        # the return value is a matrix of 1's.
        if list(self.terms) == [sympy.Number(1)]:
            a = np.ones(preterm_recarray.shape[0], float)
            if not return_float:
                a = a.view(np.dtype([('intercept', float)]))
            return a
        elif not self._dtypes['term']:
            raise ValueError("none of the expressions in self.terms are Term instances; shape of resulting undefined")
        # The term_recarray is essentially the same as preterm_recarray,
        # except that all factors in self are expanded
        # into their respective binary columns.
        term_recarray = np.zeros(preterm_recarray.shape[0],
                                 dtype=self._dtypes['term'])
        for t in self.__terms:
            if not is_factor_term(t):
                term_recarray[t.name] = preterm_recarray[t.name]
            else:
                # List comprehension: np.array(map(...)) is broken on
                # Python 3, where map returns a lazy iterator.
                term_recarray['%s_%s' % (t.factor_name, t.level)] = \
                    np.array([x == t.level for x in preterm_recarray[t.factor_name]])
        # The lambda created in self._setup_design needs to take a tuple of
        # columns as argument, not an ndarray, so each column
        # is extracted and put into float_tuple.
        float_array = term_recarray.view(float)
        float_array.shape = (term_recarray.shape[0], -1)
        float_array = float_array.T
        float_tuple = tuple(float_array)
        # If there are any parameters, they also must be extracted
        # and put into a tuple with the order specified
        # by self._dtypes['param']
        if param_recarray is not None:
            param = tuple(float(param_recarray[n]) for n in self._dtypes['param'].names)
        else:
            param = ()
        # Evaluate the design at the parameters and tuple of arrays
        D = self._f(*(param+float_tuple))
        # TODO: check if this next step is necessary
        # I think it is because the lambda evaluates sympy.Number(1) to 1
        # and not an array.
        D_tuple = [np.asarray(w) for w in D]
        need_to_modify_shape = []
        OK_row_shapes = []
        for i, row in enumerate(D_tuple):
            if row.shape in [(),(1,)]:
                need_to_modify_shape.append(i)
            else:
                OK_row_shapes.append(row.shape[0])
        # Make sure that each array has the correct shape.
        # The columns in need_to_modify should just be
        # the intercept column, which evaluates to have shape == ().
        # This makes sure that it has the correct number of rows.
        for i in need_to_modify_shape:
            D_tuple[i].shape = ()
            D_tuple[i] = np.multiply.outer(D_tuple[i], np.ones(preterm_recarray.shape[0]))
        # At this point, all the columns have the correct shape and the
        # design matrix is almost ready to output.
        D = np.array(D_tuple).T
        # If we will return a float matrix or any contrasts,
        # we may have some reshaping to do.
        if contrasts is None:
            contrasts = {}
        if return_float or contrasts:
            # If the design matrix is just a column of 1s
            # return a 1-dimensional array.
            D = np.squeeze(D.astype(float))
            # If there are contrasts, the pseudo-inverse of D
            # must be computed.
            if contrasts:
                if D.ndim == 1:
                    _D = D.reshape((D.shape[0], 1))
                else:
                    _D = D
                pinvD = np.linalg.pinv(_D)
        else:
            # Correct the dtype.
            # XXX There seems to be a lot of messing around with the dtype.
            # This would be a convenient place to just add
            # labels like a DataArray.
            D = np.array([tuple(r) for r in D], self.dtype)
        # Compute the contrast matrices, if any.
        if contrasts:
            cmatrices = {}
            for key, cf in contrasts.items():
                if not is_formula(cf):
                    cf = Formula([cf])
                L = cf.design(input, param=param_recarray,
                              return_float=True)
                cmatrices[key] = contrast_from_cols_or_rows(L, _D, pseudo=pinvD)
            return D, cmatrices
        else:
            return D
def natural_spline(t, knots=None, order=3, intercept=False):
    """ Return a Formula containing a natural spline
    for a Term with specified `knots` and `order`.

    Parameters
    ----------
    t : ``Term``
    knots : None or sequence, optional
        Sequence of float. Default None (same as empty list)
    order : int, optional
        Order of the spline. Defaults to a cubic (==3)
    intercept : bool, optional
        If True, include a constant function in the natural
        spline. Default is False

    Returns
    -------
    formula : Formula
        A Formula with (len(knots) + order) Terms
        (if intercept=False, otherwise includes one more Term),
        made up of the natural spline functions.

    Examples
    --------
    The following results depend on machine byte order

    >>> x = Term('x')
    >>> n = natural_spline(x, knots=[1,3,4], order=3)
    >>> xval = np.array([3,5,7.]).view(np.dtype([('x', float)]))
    >>> n.design(xval, return_float=True)
    array([[   3.,    9.,   27.,    8.,    0.,   -0.],
           [   5.,   25.,  125.,   64.,    8.,    1.],
           [   7.,   49.,  343.,  216.,   64.,   27.]])
    """
    if knots is None:
        # A list, not the original {}: an empty dict iterates the same,
        # but a list matches the documented "sequence" contract.
        knots = []
    fns = []
    # Polynomial part: t**0 ... t**order.
    for i in range(order+1):
        n = 'ns_%d' % i
        def f(x, i=i):  # bind i now to avoid the late-binding-closure pitfall
            return x**i
        s = aliased_function(n, f)
        fns.append(s(t))
    # Truncated-power part, one function per knot.  Note that `i`
    # deliberately carries over from the loop above to continue numbering.
    for j, k in enumerate(knots):
        n = 'ns_%d' % (j+i+1,)
        def f(x, k=k, order=order):
            return (x-k)**order * np.greater(x, k)
        s = aliased_function(n, f)
        fns.append(s(t))
    if not intercept:
        fns.pop(0)  # drop the constant t**0 term
    ff = Formula(fns)
    return ff
# The intercept formula: a Formula containing only the constant 1
# (named I by analogy with R's intercept notation).
I = Formula([sympy.Number(1)])
class Factor(Formula):
    """
    A Factor is a qualitative variable in a regression model,
    and is similar to R's factor. The levels
    of the Factor can be either strings or ints.
    """
    # This flag is defined to avoid using isinstance in getterms
    # and getparams.
    _factor_flag = True

    def __init__(self, name, levels, char='b'):
        """
        Parameters
        ----------
        name : str
        levels : [str or int]
            A sequence of strings or ints.
        char : str
        """
        # Check whether they can all be cast to strings or ints without
        # loss.
        levelsarr = np.asarray(levels)
        if levelsarr.ndim == 0 and levelsarr.dtype.kind == 'S':
            levelsarr = np.asarray(list(levels))
        if levelsarr.dtype.kind != 'S':  # the levels are not strings
            # np.all replaces np.alltrue (removed in NumPy 2.0).
            if not np.all(np.equal(levelsarr, np.round(levelsarr))):
                raise ValueError('levels must be strings or ints')
            # Builtin `int` replaces np.int (removed in NumPy 1.24).
            levelsarr = levelsarr.astype(int)
        Formula.__init__(self, [FactorTerm(name, l) for l in levelsarr],
                         char=char)
        self.levels = list(levelsarr)
        self.name = name

    # TODO: allow different specifications of the contrasts
    # here.... this is like R's contr.sum
    def get_term(self, level):
        """
        Retrieve a term of the Factor...
        """
        if level not in self.levels:
            raise ValueError('level not found')
        return self["%s_%s" % (self.name, str(level))]

    def _getmaineffect(self, ref=-1):
        # Main effect: all levels expressed as differences from a
        # reference level (default: the last one).
        v = list(self._terms.copy())
        ref_term = v[ref]
        v.pop(ref)
        return Formula([vv - ref_term for vv in v])
    main_effect = property(_getmaineffect)

    def stratify(self, variable):
        """
        Create a new variable, stratified by the levels of a Factor.

        Parameters
        ----------
        variable : str or a simple sympy expression whose string representation
            are all lower or upper case letters, i.e. it can be interpreted
            as a name

        Returns
        -------
        formula : Formula
            Formula whose mean has one parameter named variable%d, for each
            level in self.levels

        Examples
        --------
        >>> f = Factor('a', ['x','y'])
        >>> sf = f.stratify('theta')
        >>> sf.mean
        _theta0*a_x + _theta1*a_y
        """
        # string.ascii_letters replaces the Python-2-only
        # string.lowercase + string.uppercase.
        import string
        if not set(str(variable)).issubset(string.ascii_letters + string.digits):
            raise ValueError('variable should be interpretable as a name and not have anything but digits and numbers')
        variable = sympy.sympify(variable)
        f = Formula(self._terms, char=variable)
        f.name = self.name
        return f

    @staticmethod
    def fromcol(col, name):
        """
        Create a Factor from a column array.

        Parameters
        ----------
        col : ndarray
            an array with ndim==1
        name : str
            name of the Factor

        Returns
        -------
        factor : Factor

        Examples
        --------
        >>> data = np.array([(3,'a'),(4,'a'),(5,'b'),(3,'b')], np.dtype([('x', float), ('y', 'S1')]))
        >>> f1 = Factor.fromcol(data['y'], 'y')
        >>> f2 = Factor.fromcol(data['x'], 'x')
        """
        col = np.asarray(col)
        if col.ndim != 1 or (col.dtype.names and len(col.dtype.names) > 1):
            raise ValueError('expecting an array that can be thought of as a column or field of a recarray')
        levels = np.unique(col)
        if not col.dtype.names and not name:
            name = 'factor'
        elif col.dtype.names:
            name = col.dtype.names[0]
        return Factor(name, levels)
def contrast_from_cols_or_rows(L, D, pseudo=None):
    """ Construct a contrast matrix from a design matrix D
    (possibly with its pseudo inverse already computed)
    and a matrix L that either specifies something in
    the column space of D or the row space of D.

    Parameters
    ----------
    L : ndarray
        Matrix used to try and construct a contrast.
    D : ndarray
        Design matrix used to create the contrast.
    pseudo : None or ndarray, optional
        Pseudo-inverse of `D`; computed with np.linalg.pinv if None.

    Returns
    -------
    C : ndarray
        Matrix with C.shape[1] == D.shape[1] representing an estimable
        contrast.

    Notes
    -----
    From an n x p design matrix D and a matrix L, tries
    to determine a p x q contrast matrix C which
    determines a contrast of full rank, i.e. the
    n x q matrix

    dot(transpose(C), pinv(D))

    is full rank.

    L must satisfy either L.shape[0] == n or L.shape[1] == p.
    If L.shape[0] == n, then L is thought of as representing
    columns in the column space of D.
    If L.shape[1] == p, then L is thought of as what is known
    as a contrast matrix. In this case, this function returns an estimable
    contrast corresponding to the dot(D, L.T)

    This always produces a meaningful contrast, not always
    with the intended properties because q is always non-zero unless
    L is identically 0. That is, it produces a contrast that spans
    the column space of L (after projection onto the column space of D).
    """
    L = np.asarray(L)
    D = np.asarray(D)
    n, p = D.shape
    # Python-3-compatible raise replaces the old "raise E, msg" form.
    if L.shape[0] != n and L.shape[1] != p:
        raise ValueError('shape of L and D mismatched')
    if pseudo is None:
        # np.linalg.pinv: the original called a bare, undefined `pinv`,
        # which raised NameError whenever `pseudo` was not supplied.
        pseudo = np.linalg.pinv(D)
    if L.shape[0] == n:
        C = np.dot(pseudo, L).T
    else:
        C = np.dot(pseudo, np.dot(D, L.T)).T
    Lp = np.dot(D, C.T)
    if len(Lp.shape) == 1:
        Lp.shape = (n, 1)
    # If the implied columns are rank-deficient, project down to a
    # full-rank basis and rebuild the contrast.
    if rank(Lp) != Lp.shape[1]:
        Lp = fullrank(Lp)
        C = np.dot(pseudo, Lp).T
    return np.squeeze(C)
def rank(X, cond=1.0e-12):
    # XXX Is this in scipy somewhere?
    """ Return the rank of a matrix X

    Rank based on its generalized inverse, not the SVD.  Singular values
    smaller than `cond` (relative to the largest) count as zero.  For a
    non-2D input, the rank is 1 unless X is identically zero.
    """
    X = np.asarray(X)
    if len(X.shape) == 2:
        D = svdvals(X)
        # Count singular values above the relative threshold.
        return int(np.add.reduce(np.greater(D / D.max(), cond).astype(np.int32)))
    else:
        # np.all replaces np.alltrue (removed in NumPy 2.0).
        return int(not np.all(np.equal(X, 0.)))
def fullrank(X, r=None):
    """ Return a matrix whose column span is the same as X
    using an SVD decomposition.

    If the rank of X is known it can be specified by r-- no check is
    made to ensure that this really is the rank of X.
    """
    if r is None:
        r = rank(X)
    left, sing, _ = np.linalg.svd(X, full_matrices=0)
    # Indices of the r largest singular values, in decreasing order.
    keep = np.argsort(sing)[::-1][:r]
    cols = [left[:, idx] for idx in keep]
    return np.asarray(np.transpose(cols)).astype(np.float64)
class RandomEffects(Formula):
    """ Covariance matrices for common random effects analyses.

    Examples
    --------
    >>> subj = make_recarray([2,2,2,3,3], 's')
    >>> subj_factor = Factor('s', [2,3])
    >>> c = RandomEffects(subj_factor.terms, sigma=np.array([[4,1],[1,6]]))
    >>> c.cov(subj)
    array([[ 4.,  4.,  4.,  1.,  1.],
           [ 4.,  4.,  4.,  1.,  1.],
           [ 4.,  4.,  4.,  1.,  1.],
           [ 1.,  1.,  1.,  6.,  6.],
           [ 1.,  1.,  1.,  6.,  6.]])
    """
    def __init__(self, seq, sigma=None, char = 'e'):
        """
        Parameters
        ----------
        seq : [``sympy.Basic``]
        sigma : ndarray
            Covariance of the random effects. Defaults
            to a diagonal with entries for each random
            effect.
        char : character for regression coefficient
        """
        self._terms = np.asarray(seq)
        q = self._terms.shape[0]
        self._counter = 0
        if sigma is None:
            # One symbolic variance per random effect on the diagonal.
            # Dummy replaces the removed Symbol(..., dummy=True) kwarg.
            self.sigma = np.diag([sympy.symbol.Dummy('s2_%d' % i) for i in
                                  range(q)])
        else:
            self.sigma = sigma
        if self.sigma.shape != (q,q):
            # repr((q, q)): the original repr(q, q) raised TypeError
            # instead of producing the intended error message.
            raise ValueError('incorrect shape for covariance '
                             'of random effects, '
                             'should have shape %s' % repr((q, q)))
        self.char = char

    def cov(self, term, param=None):
        """
        Compute the covariance matrix for some given data.

        Parameters:
        -----------
        term : np.recarray
            Recarray including fields corresponding to the Terms in
            getparams(self.design_expr).
        param : np.recarray
            Recarray including fields that are not Terms in
            getparams(self.design_expr)

        Outputs:
        --------
        C : ndarray
            Covariance matrix implied by design and self.sigma.
        """
        D = self.design(term, param=param, return_float=True)
        return np.dot(D, np.dot(self.sigma, D.T))
def define(name, expr):
    """
    Take an expression of 't' (possibly complicated) and make it a
    '%s(t)' % name, such that when it evaluates it has the right values.

    Parameters
    ----------
    expr : sympy expression, with only 't' as a Symbol
    name : str

    Returns
    -------
    nexpr: sympy expression

    Examples
    --------
    >>> t = Term('t')
    >>> expr = t**2 + 3*t
    >>> newexpr = define('f', expr)
    >>> import aliased
    >>> f = aliased.lambdify(t, newexpr)
    >>> f(4)
    28
    """
    # Turn the expression into a vectorized callable, give it the
    # requested name, and re-apply it to the canonical Term 't'.
    vectorized = vectorize(expr)
    named_fn = aliased_function(name, vectorized)
    return named_fn(Term('t'))
def is_term(obj):
    """Return True if `obj` carries the Term marker attribute."""
    try:
        obj._term_flag
    except AttributeError:
        return False
    return True
def is_factor_term(obj):
    """Return True if `obj` carries the FactorTerm marker attribute."""
    try:
        obj._factor_term_flag
    except AttributeError:
        return False
    return True
def is_formula(obj):
    """Return True if `obj` carries the Formula marker attribute."""
    try:
        obj._formula_flag
    except AttributeError:
        return False
    return True
def is_factor(obj):
    """
    Is obj a Factor?
    """
    # Fixed docstring: the original said "Is obj a Formula?" — a
    # copy-paste slip; this predicate tests the Factor marker attribute.
    return hasattr(obj, "_factor_flag")
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import sqlalchemy as sa
from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy import inspect
from sqlalchemy.orm import backref, relationship
from a10_neutron_lbaas.db import model_base as models
LOG = logging.getLogger(__name__)
class A10ScalingGroup(models.A10Base):
    """A10 Scaling Group - container of switch and workers"""
    __tablename__ = u'a10_scaling_groups'
    # Primary key: UUID string generated by models._uuid_str.
    id = sa.Column(sa.String(36),
                   primary_key=True,
                   default=models._uuid_str,
                   nullable=False)
    tenant_id = sa.Column(sa.String(255), nullable=True)
    name = sa.Column(sa.String(255), nullable=True)
    description = sa.Column(sa.String(255), nullable=True)
    # Optional link to the policy governing this group's scaling behavior.
    scaling_policy_id = sa.Column(sa.String(36),
                                  sa.ForeignKey('a10_scaling_policies.id'),
                                  nullable=True)
    scaling_policy = relationship('A10ScalingPolicy', backref='scaling_groups')
    # Subsets of `members`, discriminated by the member polymorphic type.
    switches = relationship('A10ScalingGroupSwitch')
    workers = relationship('A10ScalingGroupWorker')
    members = relationship('A10ScalingGroupMember', backref='scaling_group')
    __mapper_args__ = {
        'polymorphic_identity': __tablename__
    }
class A10ScalingGroupBinding(models.A10Base):
    """Associates one LBaaS load balancer with a scaling group.

    `lbaas_loadbalancer_id` is unique, so each load balancer can be bound
    to at most one scaling group.
    """
    __tablename__ = u'a10_scaling_group_bindings'
    id = sa.Column(sa.String(36),
                   primary_key=True,
                   nullable=False,
                   default=models._uuid_str)
    scaling_group_id = sa.Column(sa.String(36),
                                 sa.ForeignKey('a10_scaling_groups.id'),
                                 nullable=False)
    scaling_group = relationship(A10ScalingGroup, backref='bindings')
    lbaas_loadbalancer_id = sa.Column(sa.String(36),
                                      unique=True,
                                      nullable=False)
class A10ScalingGroupMember(models.A10Base):
    """A10 Scaling Group Member - switch/worker depending on 'role'"""
    __tablename__ = "a10_scaling_group_members"
    id = sa.Column(sa.String(36),
                   primary_key=True,
                   default=models._uuid_str,
                   nullable=False)
    # Polymorphic discriminator: holds the subclass's __tablename__.
    type = sa.Column(sa.String(50), nullable=False)
    scaling_group_id = sa.Column(sa.String(36),
                                 sa.ForeignKey('a10_scaling_groups.id'),
                                 nullable=False)
    tenant_id = sa.Column(sa.String(255), nullable=True)
    name = sa.Column(sa.String(255), nullable=True)
    description = sa.Column(sa.String(255), nullable=True)
    # Connection details for the member device's management API.
    host = sa.Column(sa.String(255), nullable=False)
    api_version = sa.Column(sa.String(12), nullable=False)
    username = sa.Column(sa.String(255), nullable=False)
    password = sa.Column(sa.String(255), nullable=False)
    protocol = sa.Column(sa.String(255), nullable=False)
    port = sa.Column(sa.Integer, nullable=False)
    # Backing Nova instance for this member.
    nova_instance_id = sa.Column(sa.String(36), nullable=False)
    __mapper_args__ = {
        'polymorphic_identity': __tablename__,
        'polymorphic_on': type
    }

    def add_virtual_server(self, neutron_id, **kwargs):
        """Create a virtual-server row for `neutron_id` and attach it."""
        vs = A10ScalingGroupMemberVirtualServer.create(
            neutron_id=neutron_id,
            **kwargs)
        self.virtual_servers.append(vs)
        return vs

    def get_virtual_server(self, neutron_id):
        """Return this member's virtual server for `neutron_id`, or None."""
        return inspect(self).session.\
            query(A10ScalingGroupMemberVirtualServer).\
            filter_by(member_id=self.id, neutron_id=neutron_id).\
            one_or_none()

    def delete_virtual_server(self, neutron_id):
        """Delete the virtual server for `neutron_id`, if one exists."""
        vs = self.get_virtual_server(neutron_id)
        if vs:
            inspect(self).session.delete(vs)
class A10ScalingGroupWorker(A10ScalingGroupMember):
    """Worker-role member; joined-table subclass of A10ScalingGroupMember."""
    __tablename__ = "a10_scaling_group_workers"
    # Shares its primary key with the parent member row.
    id = sa.Column(sa.String(36),
                   sa.ForeignKey(u'a10_scaling_group_members.id'),
                   primary_key=True,
                   default=models._uuid_str,
                   nullable=False)
    __mapper_args__ = {
        'polymorphic_identity': __tablename__,
    }
class A10ScalingGroupSwitch(A10ScalingGroupMember):
    """Switch-role member; joined-table subclass of A10ScalingGroupMember."""
    __tablename__ = "a10_scaling_group_switches"
    # Shares its primary key with the parent member row.
    id = sa.Column(sa.String(36),
                   sa.ForeignKey(u'a10_scaling_group_members.id'),
                   primary_key=True,
                   default=models._uuid_str,
                   nullable=False)
    __mapper_args__ = {
        'polymorphic_identity': __tablename__,
    }
class A10ScalingGroupMemberVirtualServer(models.A10Base):
    """A virtual server hosted on one scaling-group member."""
    __tablename__ = "a10_scaling_group_member_virtual_servers"
    id = sa.Column(sa.String(36),
                   primary_key=True,
                   default=models._uuid_str,
                   nullable=False)
    member_id = sa.Column(sa.String(36),
                          sa.ForeignKey(u'a10_scaling_group_members.id'),
                          nullable=False)
    # Ports are owned by this row: delete-orphan removes them with it.
    member = relationship('A10ScalingGroupMember',
                          backref=backref('virtual_servers', cascade='all, delete-orphan'))
    neutron_id = sa.Column(sa.String(36),
                           nullable=False)
    ip_address = sa.Column(sa.String(50), nullable=False)
    interface_ip_address = sa.Column(sa.String(50), nullable=True)
    sflow_uuid = sa.Column(sa.String(36), nullable=False)

    def add_port(self, port, **kwargs):
        """Create a port row for `port` and attach it to this server."""
        vs = A10ScalingGroupMemberVirtualServerPort.create(
            port=port,
            **kwargs)
        self.ports.append(vs)
        return vs

    def get_port(self, port):
        """Return this server's row for `port`, or None."""
        return inspect(self).session.\
            query(A10ScalingGroupMemberVirtualServerPort).\
            filter_by(virtual_server_id=self.id, port=port).\
            one_or_none()

    def delete_port(self, port):
        """Delete the row for `port`, if one exists."""
        port = self.get_port(port)
        if port:
            inspect(self).session.delete(port)
class A10ScalingGroupMemberVirtualServerPort(models.A10Base):
    """A single listener port belonging to a member's virtual server."""
    __tablename__ = "a10_scaling_group_member_virtual_server_ports"

    id = sa.Column(sa.String(36),
                   primary_key=True,
                   default=models._uuid_str,
                   nullable=False)

    virtual_server_id = sa.Column(sa.String(36),
                                  sa.ForeignKey(u'a10_scaling_group_member_virtual_servers.id'),
                                  nullable=False)
    # delete-orphan: ports disappear together with their virtual server.
    virtual_server = relationship('A10ScalingGroupMemberVirtualServer',
                                  backref=backref('ports', cascade='all, delete-orphan'))

    port = sa.Column(sa.Integer,
                     nullable=False)
    protocol = sa.Column(sa.String(255), nullable=False)
    sflow_uuid = sa.Column(sa.String(36), nullable=False)
class A10ScalingPolicy(models.A10Base):
    """A scaling policy: instance bounds plus an ordered list of reactions."""
    __tablename__ = "a10_scaling_policies"

    id = sa.Column(sa.String(36),
                   primary_key=True,
                   default=models._uuid_str,
                   nullable=False)
    tenant_id = sa.Column(sa.String(255), nullable=True)
    name = sa.Column(sa.String(255), nullable=True)
    description = sa.Column(sa.String(255), nullable=True)

    # Seconds to wait between scaling actions.
    cooldown = sa.Column(sa.Integer, nullable=False)
    min_instances = sa.Column(sa.Integer, nullable=False)
    # NULL max_instances means "no upper bound".
    max_instances = sa.Column(sa.Integer, nullable=True)

    # Reactions are kept in evaluation order via their 'position' column.
    reactions = relationship('A10ScalingPolicyReaction',
                             order_by="A10ScalingPolicyReaction.position",
                             collection_class=ordering_list('position'),
                             backref='policy')

    def scaling_group_ids(self):
        """Return the ids of all scaling groups using this policy."""
        return [sg.id for sg in self.scaling_groups]
class A10ScalingPolicyReaction(models.A10Base):
    """One (alarm -> action) pair inside a policy's ordered reaction list."""
    __tablename__ = "a10_scaling_policy_reactions"

    # A surrogate key is required by ordering_list
    id = sa.Column(sa.String(36),
                   primary_key=True,
                   default=models._uuid_str,
                   nullable=False)

    scaling_policy_id = sa.Column(sa.String(36),
                                  sa.ForeignKey('a10_scaling_policies.id'),
                                  nullable=False)
    # Evaluation order within the owning policy (managed by ordering_list).
    position = sa.Column(sa.Integer,
                         nullable=False)

    alarm_id = sa.Column(sa.String(36),
                         sa.ForeignKey('a10_scaling_alarms.id'),
                         nullable=False)
    action_id = sa.Column(sa.String(36),
                          sa.ForeignKey('a10_scaling_actions.id'),
                          nullable=False)

    alarm = relationship('A10ScalingAlarm', backref='reactions')
    action = relationship('A10ScalingAction', backref='reactions')
class A10ScalingAlarm(models.A10Base):
    """Threshold alarm definition referenced by policy reactions."""
    __tablename__ = "a10_scaling_alarms"

    id = sa.Column(sa.String(36),
                   primary_key=True,
                   default=models._uuid_str,
                   nullable=False)
    tenant_id = sa.Column(sa.String(255), nullable=True)
    name = sa.Column(sa.String(255), nullable=True)
    description = sa.Column(sa.String(255), nullable=True)

    # How samples are combined (e.g. avg/min/max -- see API layer for values).
    aggregation = sa.Column(sa.String(50), nullable=False)
    measurement = sa.Column(sa.String(50), nullable=False)
    operator = sa.Column(sa.String(50), nullable=False)
    threshold = sa.Column(sa.Float(), nullable=False)
    unit = sa.Column(sa.String(50), nullable=False)
    period = sa.Column(sa.Integer, nullable=False)
    period_unit = sa.Column(sa.String(50), nullable=False)

    def scaling_group_ids(self):
        """Return ids of every scaling group whose policy references this alarm."""
        return set(x
                   for reaction in self.reactions
                   for x in reaction.policy.scaling_group_ids())
class A10ScalingAction(models.A10Base):
    """Scaling action definition (what to do when an alarm fires)."""
    __tablename__ = "a10_scaling_actions"

    id = sa.Column(sa.String(36),
                   primary_key=True,
                   default=models._uuid_str,
                   nullable=False)
    tenant_id = sa.Column(sa.String(255), nullable=True)
    name = sa.Column(sa.String(255), nullable=True)
    description = sa.Column(sa.String(255), nullable=True)

    action = sa.Column(sa.String(50), nullable=False)
    # Optional magnitude of the action (e.g. number of instances).
    amount = sa.Column(sa.Integer)

    def scaling_group_ids(self):
        """Return ids of every scaling group whose policy references this action."""
        return set(x
                   for reaction in self.reactions
                   for x in reaction.policy.scaling_group_ids())
| |
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.compute import route_pb2
from google3.cloud.graphite.mmv2.services.google.compute import route_pb2_grpc
from typing import List
class Route(object):
    """Client-side representation of a Compute (beta) Route resource.

    Wraps the apply/delete/list RPCs of the generated route service stub.
    Field values are plain Python primitives; conversion to and from the
    proto representation goes through the Primitive / RouteWarning helpers.
    """

    def __init__(
        self,
        id: int = None,
        name: str = None,
        description: str = None,
        network: str = None,
        tag: list = None,
        dest_range: str = None,
        priority: int = None,
        next_hop_instance: str = None,
        next_hop_ip: str = None,
        next_hop_network: str = None,
        next_hop_gateway: str = None,
        next_hop_peering: str = None,
        next_hop_ilb: str = None,
        warning: list = None,
        next_hop_vpn_tunnel: str = None,
        self_link: str = None,
        project: str = None,
        service_account_file: str = "",
    ):
        # NOTE(review): id, next_hop_network, next_hop_peering, warning and
        # self_link are accepted but never stored here -- presumably they are
        # output-only fields populated from the apply() response; confirm.
        channel.initialize()
        self.name = name
        self.description = description
        self.network = network
        self.tag = tag
        self.dest_range = dest_range
        self.priority = priority
        self.next_hop_instance = next_hop_instance
        self.next_hop_ip = next_hop_ip
        self.next_hop_gateway = next_hop_gateway
        self.next_hop_ilb = next_hop_ilb
        self.next_hop_vpn_tunnel = next_hop_vpn_tunnel
        self.project = project
        self.service_account_file = service_account_file

    def apply(self):
        """Apply the desired state and refresh every field from the response.

        Builds an Apply request from the currently-set (truthy) fields,
        sends it, then overwrites this object's attributes with the
        server-returned resource (including output-only fields).
        """
        stub = route_pb2_grpc.ComputeBetaRouteServiceStub(channel.Channel())
        request = route_pb2.ApplyComputeBetaRouteRequest()
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)

        if Primitive.to_proto(self.description):
            request.resource.description = Primitive.to_proto(self.description)

        if Primitive.to_proto(self.network):
            request.resource.network = Primitive.to_proto(self.network)

        if Primitive.to_proto(self.tag):
            request.resource.tag.extend(Primitive.to_proto(self.tag))

        if Primitive.to_proto(self.dest_range):
            request.resource.dest_range = Primitive.to_proto(self.dest_range)

        if Primitive.to_proto(self.priority):
            request.resource.priority = Primitive.to_proto(self.priority)

        if Primitive.to_proto(self.next_hop_instance):
            request.resource.next_hop_instance = Primitive.to_proto(
                self.next_hop_instance
            )

        if Primitive.to_proto(self.next_hop_ip):
            request.resource.next_hop_ip = Primitive.to_proto(self.next_hop_ip)

        if Primitive.to_proto(self.next_hop_gateway):
            request.resource.next_hop_gateway = Primitive.to_proto(
                self.next_hop_gateway
            )

        if Primitive.to_proto(self.next_hop_ilb):
            request.resource.next_hop_ilb = Primitive.to_proto(self.next_hop_ilb)

        if Primitive.to_proto(self.next_hop_vpn_tunnel):
            request.resource.next_hop_vpn_tunnel = Primitive.to_proto(
                self.next_hop_vpn_tunnel
            )

        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)

        request.service_account_file = self.service_account_file

        response = stub.ApplyComputeBetaRoute(request)
        self.id = Primitive.from_proto(response.id)
        self.name = Primitive.from_proto(response.name)
        self.description = Primitive.from_proto(response.description)
        self.network = Primitive.from_proto(response.network)
        self.tag = Primitive.from_proto(response.tag)
        self.dest_range = Primitive.from_proto(response.dest_range)
        self.priority = Primitive.from_proto(response.priority)
        self.next_hop_instance = Primitive.from_proto(response.next_hop_instance)
        self.next_hop_ip = Primitive.from_proto(response.next_hop_ip)
        self.next_hop_network = Primitive.from_proto(response.next_hop_network)
        self.next_hop_gateway = Primitive.from_proto(response.next_hop_gateway)
        self.next_hop_peering = Primitive.from_proto(response.next_hop_peering)
        self.next_hop_ilb = Primitive.from_proto(response.next_hop_ilb)
        self.warning = RouteWarningArray.from_proto(response.warning)
        self.next_hop_vpn_tunnel = Primitive.from_proto(response.next_hop_vpn_tunnel)
        self.self_link = Primitive.from_proto(response.self_link)
        self.project = Primitive.from_proto(response.project)

    def delete(self):
        """Delete the route identified by this object's current fields."""
        stub = route_pb2_grpc.ComputeBetaRouteServiceStub(channel.Channel())
        request = route_pb2.DeleteComputeBetaRouteRequest()
        request.service_account_file = self.service_account_file
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)

        if Primitive.to_proto(self.description):
            request.resource.description = Primitive.to_proto(self.description)

        if Primitive.to_proto(self.network):
            request.resource.network = Primitive.to_proto(self.network)

        if Primitive.to_proto(self.tag):
            request.resource.tag.extend(Primitive.to_proto(self.tag))

        if Primitive.to_proto(self.dest_range):
            request.resource.dest_range = Primitive.to_proto(self.dest_range)

        if Primitive.to_proto(self.priority):
            request.resource.priority = Primitive.to_proto(self.priority)

        if Primitive.to_proto(self.next_hop_instance):
            request.resource.next_hop_instance = Primitive.to_proto(
                self.next_hop_instance
            )

        if Primitive.to_proto(self.next_hop_ip):
            request.resource.next_hop_ip = Primitive.to_proto(self.next_hop_ip)

        if Primitive.to_proto(self.next_hop_gateway):
            request.resource.next_hop_gateway = Primitive.to_proto(
                self.next_hop_gateway
            )

        if Primitive.to_proto(self.next_hop_ilb):
            request.resource.next_hop_ilb = Primitive.to_proto(self.next_hop_ilb)

        if Primitive.to_proto(self.next_hop_vpn_tunnel):
            request.resource.next_hop_vpn_tunnel = Primitive.to_proto(
                self.next_hop_vpn_tunnel
            )

        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)

        response = stub.DeleteComputeBetaRoute(request)

    @classmethod
    def list(self, project, service_account_file=""):
        """List all routes in ``project``; returns the response's items."""
        # NOTE: generated code names the classmethod receiver 'self'.
        stub = route_pb2_grpc.ComputeBetaRouteServiceStub(channel.Channel())
        request = route_pb2.ListComputeBetaRouteRequest()
        request.service_account_file = service_account_file
        request.Project = project

        return stub.ListComputeBetaRoute(request).items

    def to_proto(self):
        """Serialize this object's truthy fields into a ComputeBetaRoute proto."""
        resource = route_pb2.ComputeBetaRoute()
        if Primitive.to_proto(self.name):
            resource.name = Primitive.to_proto(self.name)
        if Primitive.to_proto(self.description):
            resource.description = Primitive.to_proto(self.description)
        if Primitive.to_proto(self.network):
            resource.network = Primitive.to_proto(self.network)
        if Primitive.to_proto(self.tag):
            resource.tag.extend(Primitive.to_proto(self.tag))
        if Primitive.to_proto(self.dest_range):
            resource.dest_range = Primitive.to_proto(self.dest_range)
        if Primitive.to_proto(self.priority):
            resource.priority = Primitive.to_proto(self.priority)
        if Primitive.to_proto(self.next_hop_instance):
            resource.next_hop_instance = Primitive.to_proto(self.next_hop_instance)
        if Primitive.to_proto(self.next_hop_ip):
            resource.next_hop_ip = Primitive.to_proto(self.next_hop_ip)
        if Primitive.to_proto(self.next_hop_gateway):
            resource.next_hop_gateway = Primitive.to_proto(self.next_hop_gateway)
        if Primitive.to_proto(self.next_hop_ilb):
            resource.next_hop_ilb = Primitive.to_proto(self.next_hop_ilb)
        if Primitive.to_proto(self.next_hop_vpn_tunnel):
            resource.next_hop_vpn_tunnel = Primitive.to_proto(self.next_hop_vpn_tunnel)
        if Primitive.to_proto(self.project):
            resource.project = Primitive.to_proto(self.project)
        return resource
class RouteWarning(object):
    """Plain-Python mirror of the ComputeBetaRouteWarning proto message."""

    def __init__(self, code: str = None, message: str = None, data: dict = None):
        self.code = code
        self.message = message
        self.data = data

    @classmethod
    def to_proto(self, resource):
        """Convert a RouteWarning into its proto form; falsy input -> None."""
        if not resource:
            return None

        proto = route_pb2.ComputeBetaRouteWarning()
        code = RouteWarningCodeEnum.to_proto(resource.code)
        if code:
            proto.code = code
        message = Primitive.to_proto(resource.message)
        if message:
            proto.message = message
        data = Primitive.to_proto(resource.data)
        if data:
            proto.data = data
        return proto

    @classmethod
    def from_proto(self, resource):
        """Build a RouteWarning from a proto message; falsy input -> None."""
        if not resource:
            return None

        return RouteWarning(
            code=RouteWarningCodeEnum.from_proto(resource.code),
            message=Primitive.from_proto(resource.message),
            data=Primitive.from_proto(resource.data),
        )
class RouteWarningArray(object):
    """Converts lists of RouteWarning objects to/from their proto form."""

    @classmethod
    def to_proto(self, resources):
        """Convert a list of RouteWarning into protos; falsy input passes through."""
        if not resources:
            return resources
        return [RouteWarning.to_proto(i) for i in resources]

    @classmethod
    def from_proto(self, resources):
        """Convert a list of proto warnings into RouteWarning objects.

        Fix: mirror to_proto's guard so a falsy input (None, empty repeated
        field) is returned unchanged instead of raising TypeError when the
        comprehension tries to iterate it.
        """
        if not resources:
            return resources
        return [RouteWarning.from_proto(i) for i in resources]
class RouteWarningCodeEnum(object):
    """Maps the warning-code enum between short strings and proto values."""

    @classmethod
    def to_proto(self, resource):
        """Short code string -> proto enum value; falsy input passes through."""
        if not resource:
            return resource
        full_name = "ComputeBetaRouteWarningCodeEnum%s" % resource
        return route_pb2.ComputeBetaRouteWarningCodeEnum.Value(full_name)

    @classmethod
    def from_proto(self, resource):
        """Proto enum value -> short code string; falsy input passes through."""
        if not resource:
            return resource
        name = route_pb2.ComputeBetaRouteWarningCodeEnum.Name(resource)
        return name[len("ComputeBetaRouteWarningCodeEnum"):]
class Primitive(object):
    """Conversion helpers for primitive fields ("" stands in for unset)."""

    @classmethod
    def to_proto(self, s):
        """Return s, substituting the empty string for any falsy value."""
        return s if s else ""

    @classmethod
    def from_proto(self, s):
        """Primitives need no conversion off the wire; return s unchanged."""
        return s
| |
from __future__ import absolute_import, unicode_literals
from django.core.exceptions import ValidationError
from django.forms import Form
from django.forms.fields import IntegerField, BooleanField
from django.forms.util import ErrorList
from django.forms.widgets import Media, HiddenInput
from django.utils.encoding import python_2_unicode_compatible
from django.utils.safestring import mark_safe
from django.utils import six
from django.utils.six.moves import xrange
from django.utils.translation import ugettext as _
__all__ = ('BaseFormSet', 'all_valid')
# special field names
TOTAL_FORM_COUNT = 'TOTAL_FORMS'      # management field: number of forms rendered
INITIAL_FORM_COUNT = 'INITIAL_FORMS'  # management field: forms backed by initial data
MAX_NUM_FORM_COUNT = 'MAX_NUM_FORMS'  # management field: upper bound on form count
ORDERING_FIELD_NAME = 'ORDER'         # per-form field added when can_order=True
DELETION_FIELD_NAME = 'DELETE'        # per-form field added when can_delete=True
class ManagementForm(Form):
    """
    ``ManagementForm`` is used to keep track of how many form instances
    are displayed on the page. If adding new forms via javascript, you should
    increment the count field of this form as well.
    """
    def __init__(self, *args, **kwargs):
        # NOTE(review): ``self.base_fields`` resolves to the class attribute,
        # so these assignments mutate the dict shared by all instances --
        # harmless here because the same three fields are (re)set every time.
        self.base_fields[TOTAL_FORM_COUNT] = IntegerField(widget=HiddenInput)
        self.base_fields[INITIAL_FORM_COUNT] = IntegerField(widget=HiddenInput)
        # MAX_NUM_FORMS is informational only, hence not required.
        self.base_fields[MAX_NUM_FORM_COUNT] = IntegerField(required=False, widget=HiddenInput)
        super(ManagementForm, self).__init__(*args, **kwargs)
@python_2_unicode_compatible
class BaseFormSet(object):
    """
    A collection of instances of the same Form class.
    """
    # NOTE(review): concrete subclasses are expected to provide the class
    # attributes 'form', 'extra', 'can_order', 'can_delete' and 'max_num'
    # (normally supplied by formset_factory); confirm against callers.
    def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
                 initial=None, error_class=ErrorList):
        # Bound if any submitted data/files were supplied (even empty dicts).
        self.is_bound = data is not None or files is not None
        self.prefix = prefix or self.get_default_prefix()
        self.auto_id = auto_id
        self.data = data or {}
        self.files = files or {}
        self.initial = initial
        self.error_class = error_class
        self._errors = None
        self._non_form_errors = None

        # construct the forms in the formset
        self._construct_forms()

    def __str__(self):
        return self.as_table()

    def __iter__(self):
        """Yields the forms in the order they should be rendered"""
        return iter(self.forms)

    def __getitem__(self, index):
        """Returns the form at the given index, based on the rendering order"""
        return self.forms[index]

    def __len__(self):
        return len(self.forms)

    def __bool__(self):
        """All formsets have a management form which is not included in the length"""
        return True

    def __nonzero__(self):      # Python 2 compatibility
        return type(self).__bool__(self)

    @property
    def management_form(self):
        """Returns the ManagementForm instance for this FormSet."""
        if self.is_bound:
            form = ManagementForm(self.data, auto_id=self.auto_id, prefix=self.prefix)
            if not form.is_valid():
                raise ValidationError('ManagementForm data is missing or has been tampered with')
        else:
            form = ManagementForm(auto_id=self.auto_id, prefix=self.prefix, initial={
                TOTAL_FORM_COUNT: self.total_form_count(),
                INITIAL_FORM_COUNT: self.initial_form_count(),
                MAX_NUM_FORM_COUNT: self.max_num
            })
        return form

    def total_form_count(self):
        """Returns the total number of forms in this FormSet."""
        if self.is_bound:
            # The client tells us how many forms were rendered.
            return self.management_form.cleaned_data[TOTAL_FORM_COUNT]
        else:
            initial_forms = self.initial_form_count()
            total_forms = initial_forms + self.extra
            # Allow all existing related objects/inlines to be displayed,
            # but don't allow extra beyond max_num.
            if self.max_num is not None:
                if initial_forms > self.max_num >= 0:
                    total_forms = initial_forms
                elif total_forms > self.max_num >= 0:
                    total_forms = self.max_num
        return total_forms

    def initial_form_count(self):
        """Returns the number of forms that are required in this FormSet."""
        if self.is_bound:
            return self.management_form.cleaned_data[INITIAL_FORM_COUNT]
        else:
            # Use the length of the inital data if it's there, 0 otherwise.
            initial_forms = self.initial and len(self.initial) or 0
            if self.max_num is not None and (initial_forms > self.max_num >= 0):
                initial_forms = self.max_num
        return initial_forms

    def _construct_forms(self):
        # instantiate all the forms and put them in self.forms
        self.forms = []
        for i in xrange(self.total_form_count()):
            self.forms.append(self._construct_form(i))

    def _construct_form(self, i, **kwargs):
        """
        Instantiates and returns the i-th form instance in a formset.
        """
        defaults = {
            'auto_id': self.auto_id,
            'prefix': self.add_prefix(i),
            'error_class': self.error_class,
        }
        if self.is_bound:
            defaults['data'] = self.data
            defaults['files'] = self.files
        if self.initial and not 'initial' in kwargs:
            try:
                defaults['initial'] = self.initial[i]
            except IndexError:
                pass
        # Allow extra forms to be empty.
        if i >= self.initial_form_count():
            defaults['empty_permitted'] = True
        defaults.update(kwargs)
        form = self.form(**defaults)
        self.add_fields(form, i)
        return form

    @property
    def initial_forms(self):
        """Return a list of all the initial forms in this formset."""
        return self.forms[:self.initial_form_count()]

    @property
    def extra_forms(self):
        """Return a list of all the extra forms in this formset."""
        return self.forms[self.initial_form_count():]

    @property
    def empty_form(self):
        # Template form used by client-side code; '__prefix__' is replaced
        # with the real index when new forms are added via javascript.
        form = self.form(
            auto_id=self.auto_id,
            prefix=self.add_prefix('__prefix__'),
            empty_permitted=True,
        )
        self.add_fields(form, None)
        return form

    # Maybe this should just go away?
    @property
    def cleaned_data(self):
        """
        Returns a list of form.cleaned_data dicts for every form in self.forms.
        """
        if not self.is_valid():
            raise AttributeError("'%s' object has no attribute 'cleaned_data'" % self.__class__.__name__)
        return [form.cleaned_data for form in self.forms]

    @property
    def deleted_forms(self):
        """
        Returns a list of forms that have been marked for deletion.
        """
        if not self.is_valid() or not self.can_delete:
            return []
        # construct _deleted_form_indexes which is just a list of form indexes
        # that have had their deletion widget set to True
        if not hasattr(self, '_deleted_form_indexes'):
            self._deleted_form_indexes = []
            for i in range(0, self.total_form_count()):
                form = self.forms[i]
                # if this is an extra form and hasn't changed, don't consider it
                if i >= self.initial_form_count() and not form.has_changed():
                    continue
                if self._should_delete_form(form):
                    self._deleted_form_indexes.append(i)
        return [self.forms[i] for i in self._deleted_form_indexes]

    @property
    def ordered_forms(self):
        """
        Returns a list of form in the order specified by the incoming data.
        Raises an AttributeError if ordering is not allowed.
        """
        if not self.is_valid() or not self.can_order:
            raise AttributeError("'%s' object has no attribute 'ordered_forms'" % self.__class__.__name__)
        # Construct _ordering, which is a list of (form_index, order_field_value)
        # tuples. After constructing this list, we'll sort it by order_field_value
        # so we have a way to get to the form indexes in the order specified
        # by the form data.
        if not hasattr(self, '_ordering'):
            self._ordering = []
            for i in range(0, self.total_form_count()):
                form = self.forms[i]
                # if this is an extra form and hasn't changed, don't consider it
                if i >= self.initial_form_count() and not form.has_changed():
                    continue
                # don't add data marked for deletion to self.ordered_data
                if self.can_delete and self._should_delete_form(form):
                    continue
                self._ordering.append((i, form.cleaned_data[ORDERING_FIELD_NAME]))
            # After we're done populating self._ordering, sort it.
            # A sort function to order things numerically ascending, but
            # None should be sorted below anything else. Allowing None as
            # a comparison value makes it so we can leave ordering fields
            # blank.
            def compare_ordering_key(k):
                if k[1] is None:
                    return (1, 0)  # +infinity, larger than any number
                return (0, k[1])
            self._ordering.sort(key=compare_ordering_key)
        # Return a list of form.cleaned_data dicts in the order specified by
        # the form data.
        return [self.forms[i[0]] for i in self._ordering]

    @classmethod
    def get_default_prefix(cls):
        return 'form'

    def non_form_errors(self):
        """
        Returns an ErrorList of errors that aren't associated with a particular
        form -- i.e., from formset.clean(). Returns an empty ErrorList if there
        are none.
        """
        if self._non_form_errors is not None:
            return self._non_form_errors
        return self.error_class()

    @property
    def errors(self):
        """
        Returns a list of form.errors for every form in self.forms.
        """
        if self._errors is None:
            self.full_clean()
        return self._errors

    def _should_delete_form(self, form):
        """
        Returns whether or not the form was marked for deletion.
        """
        return form.cleaned_data.get(DELETION_FIELD_NAME, False)

    def is_valid(self):
        """
        Returns True if every form in self.forms is valid.
        """
        if not self.is_bound:
            return False
        # We loop over every form.errors here rather than short circuiting on the
        # first failure to make sure validation gets triggered for every form.
        forms_valid = True
        # Accessing errors triggers a full clean of the formset as a side effect.
        err = self.errors
        for i in range(0, self.total_form_count()):
            form = self.forms[i]
            if self.can_delete:
                if self._should_delete_form(form):
                    # This form is going to be deleted so any of its errors
                    # should not cause the entire formset to be invalid.
                    continue
            forms_valid &= form.is_valid()
        return forms_valid and not bool(self.non_form_errors())

    def full_clean(self):
        """
        Cleans all of self.data and populates self._errors.
        """
        self._errors = []
        if not self.is_bound:  # Stop further processing.
            return
        for i in range(0, self.total_form_count()):
            form = self.forms[i]
            self._errors.append(form.errors)

        # Give self.clean() a chance to do cross-form validation.
        try:
            self.clean()
        except ValidationError as e:
            self._non_form_errors = self.error_class(e.messages)

    def clean(self):
        """
        Hook for doing any extra formset-wide cleaning after Form.clean() has
        been called on every form. Any ValidationError raised by this method
        will not be associated with a particular form; it will be accesible
        via formset.non_form_errors()
        """
        pass

    def has_changed(self):
        """
        Returns true if data in any form differs from initial.
        """
        return any(form.has_changed() for form in self)

    def add_fields(self, form, index):
        """A hook for adding extra fields on to each form instance."""
        if self.can_order:
            # Only pre-fill the ordering field for initial forms.
            if index is not None and index < self.initial_form_count():
                form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_('Order'), initial=index+1, required=False)
            else:
                form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_('Order'), required=False)
        if self.can_delete:
            form.fields[DELETION_FIELD_NAME] = BooleanField(label=_('Delete'), required=False)

    def add_prefix(self, index):
        return '%s-%s' % (self.prefix, index)

    def is_multipart(self):
        """
        Returns True if the formset needs to be multipart, i.e. it
        has FileInput. Otherwise, False.
        """
        if self.forms:
            return self.forms[0].is_multipart()
        else:
            return self.empty_form.is_multipart()

    @property
    def media(self):
        # All the forms on a FormSet are the same, so you only need to
        # interrogate the first form for media.
        if self.forms:
            return self.forms[0].media
        else:
            return self.empty_form.media

    def as_table(self):
        "Returns this formset rendered as HTML <tr>s -- excluding the <table></table>."
        # XXX: there is no semantic division between forms here, there
        # probably should be. It might make sense to render each form as a
        # table row with each field as a td.
        forms = ' '.join([form.as_table() for form in self])
        return mark_safe('\n'.join([six.text_type(self.management_form), forms]))

    def as_p(self):
        "Returns this formset rendered as HTML <p>s."
        forms = ' '.join([form.as_p() for form in self])
        return mark_safe('\n'.join([six.text_type(self.management_form), forms]))

    def as_ul(self):
        "Returns this formset rendered as HTML <li>s."
        forms = ' '.join([form.as_ul() for form in self])
        return mark_safe('\n'.join([six.text_type(self.management_form), forms]))
def formset_factory(form, formset=BaseFormSet, extra=1, can_order=False,
                    can_delete=False, max_num=None):
    """Return a FormSet for the given form class."""
    attrs = {
        'form': form,
        'extra': extra,
        'can_order': can_order,
        'can_delete': can_delete,
        'max_num': max_num,
    }
    # str() keeps the generated class name a native string on Python 2.
    name = form.__name__ + str('FormSet')
    return type(name, (formset,), attrs)
def all_valid(formsets):
    """Returns true if every formset in formsets is valid."""
    # Validate every formset (no short-circuit) so each one collects errors.
    results = [formset.is_valid() for formset in formsets]
    return all(results)
| |
#!/usr/bin/env python
"""
base actions of character/mob
"""
from basinboa import status
from basinboa.message.broadcast import message_to_room, player_message_to_room
from basinboa.command.cmds.inspect_cmds import look
from basinboa.mobile.bag import Bag
from basinboa.mobile.equipment import Equipment
from basinboa.universe.direction import north_xy, south_xy, west_xy, east_xy, NORTH, SOUTH, EAST, WEST, UP, DOWN, NORTH_NAME, SOUTH_NAME, EAST_NAME, WEST_NAME, UP_NAME, DOWN_NAME
class Puppet(object):
"""docstring for Puppet"""
def __init__(self):
super(Puppet, self).__init__()
self.name = None
self.nickname = None
self.desc = None
self.skills = None
self.spells = None
self.race = None
self.job = None
self.bag = Bag()
self.equipment = Equipment()
#. geo
self.xy = None
self.prev_xy = None
self.map_name = None
self.prev_map_name = None
#. combat status
self.combat_targets = []
self.hp = (100, 100) #. current, max
self.mp = (100, 100) #. current, max
self.status = None
#. other status
self.follow_target = None
self.followers = []
def set_name(self, name):
"""docstring for set_name"""
self.name = name
def get_name(self):
"""docstring for get_name"""
return self.name
def get_attr(self, data, attr):
"""docstring for get_attr"""
if hasattr(self, attr):
data[attr] = getattr(self, attr)
return data
def set_attr(self, data, attr):
"""docstring for set_attr"""
if data.has_key(attr):
setattr(self, attr, data[attr])
def _dump(self):
"""docstring for dump"""
attrs = [
'name', 'nickname', 'desc', 'skills', 'spells', 'racd', 'job',
#. geo
'xy', 'map_name',
#. combat status
'hp', 'mp', 'status'
]
data = {}
for attr in attrs:
data = self.get_attr(data, attr)
return data
def _load(self, data):
"""docstring for load"""
attrs = [
'name', 'nickname', 'desc', 'skills', 'spells', 'racd', 'job',
#. geo
'xy', 'map_name',
#. combat status
'hp', 'mp', 'status'
]
for attr in attrs:
self.set_attr(data, attr)
def _move(self, symbol, func=None):
"""docstring for _move"""
mfunc = getattr(status.WORLD, 'move_mob') if self.is_mob \
else getattr(status.WORLD, 'move_character')
src_room, dst_room = mfunc(self, symbol, func)
return src_room, dst_room
def go_west(self):
"""docstring for west"""
src_room, dst_room = self._move(WEST, west_xy)
if src_room and dst_room:
self.notice_players(src_room, dst_room, WEST_NAME)
self.notice_follwers(WEST)
return True
return False
def go_east(self):
"""docstring for east"""
src_room, dst_room = self._move(EAST, east_xy)
if src_room and dst_room:
self.notice_players(src_room, dst_room, EAST_NAME)
self.notice_follwers(EAST)
return True
return False
def go_north(self):
"""docstring for north"""
src_room, dst_room = self._move(NORTH, north_xy)
if src_room and dst_room:
self.notice_players(src_room, dst_room, NORTH_NAME)
self.notice_follwers(NORTH)
return True
return False
def go_south(self):
"""docstring for south"""
src_room, dst_room = self._move(SOUTH, south_xy)
if src_room and dst_room:
self.notice_players(src_room, dst_room, SOUTH_NAME)
self.notice_follwers(SOUTH)
return True
return False
def go_up(self):
"""docstring for go_up"""
src_room, dst_room = self._move(UP)
if src_room and dst_room:
self.notice_players(src_room, dst_room, UP_NAME)
self.notice_follwers(UP)
return True
return False
def go_down(self):
"""docstring for go_down"""
src_room, dst_room = self._move(DOWN)
if src_room and dst_room:
self.notice_players(src_room, dst_room, DOWN_NAME)
self.notice_follwers(DOWN)
return True
return False
def notice_follwers(self, direction):
"""notice all followers to move in the same room"""
for follower in self.followers:
#. follower must be in the same room
if follower.xy == self.prev_xy and follower.map_name == self.prev_map_name:
if not follower.is_mob:
player = status.PLAYERS[follower.get_name()]
player.send("You follow the %s's steps!\n" % (self.get_name()))
look(player, None)
if direction == UP:
follower.go_up()
if direction == DOWN:
follower.go_down()
if direction == WEST:
follower.go_west()
if direction == EAST:
follower.go_east()
if direction == NORTH:
follower.go_north()
if direction == SOUTH:
follower.go_south()
def notice_players(self, src_room, dst_room, direction_name):
"""docstring for notice_players"""
msg_go = "%s go to %s.\n" % (self.name, direction_name)
msg_come = "%s come to here.\n" % (self.name)
if self.is_mob:
message_to_room(src_room, msg_go)
message_to_room(dst_room, msg_come)
else:
player = status.PLAYERS[self.name]
message_to_room(src_room, msg_go)
player_message_to_room(player, msg_come)
def add_follower(self, object_):
"""add mob/character object to followers list"""
self.followers.append(object_)
def remove_follower(self, object_):
"""remove mob/character object from followers list"""
self.followers.remove(object_) if object_ in self.followers else None
def get_followers(self):
"""docstring for get_followers"""
return self.followers
def has_follower(self, object_):
"""docstring for has_follower"""
return True if object_ in self.followers else False
def start_follow(self, object_):
"""follow mob/character"""
if object_ == self:
self.stop_follow()
else:
self.follow_target = object_
def stop_follow(self):
"""docstring for stop_follow"""
if self.follow_target:
self.follow_target.remove_follower(self)
self.follow_target = None
if self in self.followers:
self.followers.remove(self)
def get_desc(self):
"""docstring for get_desc"""
return self.desc
def add_combat_target(self, object_):
"""docstring for set_combat_target"""
self.combat_targets.append(object_)
def remove_combat_target(self, object_):
"""docstring for remove_combat_target"""
self.combat_targets.remove(object_)
def remove_all_combat_targets(self):
"""docstring for remove_all_combat_targets"""
self.combat_targets = []
def get_combat_targets(self):
"""docstring for get_combat_target"""
return self.combat_targets
def increase_point(self, attr, value, to_max):
"""docstring for increase_point"""
current, max_ = attr
if to_max:
attr = (max_, max_)
else:
current += value
attr = (current, max_)
return attr
def decrease_point(self, attr, value):
"""docstring for decrease_point"""
current, max_ = attr
return (current - value, max_)
def increase_hp(self, value=1, to_max=False):
"""docstring for increase_hp"""
self.hp = self.increase_point(self.hp, value, to_max)
def decrease_hp(self, value=1):
"""docstring for increase_hp"""
self.hp = self.decrease_point(self.hp, value)
current, max_ = self.hp
if current <= 0:
status.CHARACTERS[self].send_cc('^RYou Dead!^~\n')
self.hp = (1, max_)
for target in self.get_combat_targets():
target.remove_combat_target(self)
self.remove_all_combat_targets()
def increase_mp(self, value=1, to_max=False):
"""docstring for increase_mp"""
self.mp= self.increase_point(self.mp, value, to_max)
def decrease_mp(self, value=1):
"""docstring for decrease_mp"""
self.mp = self.decrease_point(self.mp, value)
def hurt(self, _object):
"""docstring for hurt"""
damage = 10
_object.decrease_hp(damage)
return damage
def hit(self, _object):
"""docstring for hit"""
damage = 10
_object.decrease_hp(damage)
return damage
def get_hp(self):
"""docstring for get_hp"""
return self.hp
def get_mp(self):
"""docstring for get_mp"""
return self.mp
def set_location(self, xy, map_name=None):
"""docstring for set_location"""
map_name = map_name if map_name else self.map_name
self.xy = xy
self.map_name = map_name
def set_prev_location(self, xy, map_name=None):
"""docstring for set_location"""
map_name = map_name if map_name else self.map_name
self.prev_xy = xy
self.prev_map_name = map_name
def init_prev_location(self):
    """Seed the previous-position fields from the current position."""
    self.prev_xy = self.xy
    self.prev_map_name = self.map_name
| |
# BSD 3-Clause License
#
# Copyright (c) 2016-21, University of Liverpool
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A module to produce a precision evaluation plot"""
from __future__ import division
__author__ = "Felix Simkovic"
__date__ = "07 Feb 2017"
__version__ = "0.13.1"
import matplotlib.pyplot as plt
import numpy as np
from conkit.misc import deprecate
from conkit.plot.figure import Figure
from conkit.plot.tools import ColorDefinitions, _isinstance
class PrecisionEvaluationFigure(Figure):
    """A Figure object specifically for a Precision evaluation.

    This figure will illustrate the precision scores of a contact
    map at different precision scores. These can be determined at
    various start and end points with different stepwise increases
    in between.

    Attributes
    ----------
    hierarchy : :obj:`~conkit.core.contactmap.ContactMap`
       The contact map hierarchy
    cutoff_step : float
       The cutoff step
    min_cutoff : float
       The minimum cutoff factor
    max_cutoff : float
       The maximum cutoff factor

    Examples
    --------
    >>> import conkit
    >>> cmap = conkit.io.read('toxd/toxd.mat', 'ccmpred').top_map
    >>> cmap.sequence = conkit.io.read('toxd/toxd.fasta', 'fasta').top_sequence
    >>> pdb = conkit.io.read('toxd/toxd.pdb', 'pdb').top_map
    >>> cmap.match(pdb, inplace=True)
    >>> conkit.plot.PrecisionEvaluationFigure(cmap)
    """

    def __init__(self, hierarchy, min_cutoff=0.0, max_cutoff=100.0, cutoff_step=0.2, **kwargs):
        """A precision evaluation figure

        Parameters
        ----------
        hierarchy : :obj:`~conkit.core.contactmap.ContactMap`
           The contact map hierarchy
        min_cutoff : float, optional
           The minimum factor
        max_cutoff : float, optional
           The maximum factor
        cutoff_step : float, optional
           The cutoff step
        **kwargs
           General :obj:`~conkit.plot.figure.Figure` keyword arguments
        """
        super(PrecisionEvaluationFigure, self).__init__(**kwargs)
        # Defaults first; the property setters below validate the input.
        self._hierarchy = None
        self._cutoff_boundaries = [0.0, 100.0]
        self._cutoff_step = 0.2
        self.hierarchy = hierarchy
        self.cutoff_step = cutoff_step
        self.min_cutoff = min_cutoff
        self.max_cutoff = max_cutoff
        # Render immediately on construction.
        self.draw()

    def __repr__(self):
        return self.__class__.__name__

    @property
    def cutoff_step(self):
        """The cutoff step"""
        return self._cutoff_step

    @cutoff_step.setter
    def cutoff_step(self, cutoff_step):
        """Define the cutoff step"""
        self._cutoff_step = cutoff_step

    @property
    def hierarchy(self):
        """A ConKit :obj:`~conkit.core.contactmap.ContactMap`"""
        return self._hierarchy

    @hierarchy.setter
    def hierarchy(self, hierarchy):
        """Define the ConKit :obj:`~conkit.core.contactmap.ContactMap`

        Raises
        ------
        :exc:`TypeError`
           The hierarchy is not a ContactMap
        """
        if hierarchy and _isinstance(hierarchy, "ContactMap"):
            self._hierarchy = hierarchy
        else:
            raise TypeError("Invalid hierarchy type: %s" % hierarchy.__class__.__name__)

    @property
    def min_cutoff(self):
        """The minimum cutoff factor

        Raises
        ------
        :obj:`ValueError`
           The minimum cutoff value is larger than or equal to the maximum
        """
        if self._cutoff_boundaries[0] >= self._cutoff_boundaries[1]:
            msg = "The minimum cutoff value is larger than or equal to the maximum"
            raise ValueError(msg)
        return self._cutoff_boundaries[0]

    @min_cutoff.setter
    def min_cutoff(self, min_cutoff):
        """Define the minimum cutoff factor"""
        if min_cutoff < 0.0:
            raise ValueError("Minimum factor cannot be negative")
        self._cutoff_boundaries[0] = min_cutoff

    @property
    def max_cutoff(self):
        """The maximum cutoff factor

        Raises
        ------
        :obj:`ValueError`
           The maximum cutoff value is smaller than the minimum
        """
        if self._cutoff_boundaries[1] < self._cutoff_boundaries[0]:
            msg = "The maximum cutoff value is smaller than the the minimum"
            raise ValueError(msg)
        return self._cutoff_boundaries[1]

    @max_cutoff.setter
    def max_cutoff(self, max_cutoff):
        """Define the maximum cutoff factor"""
        if max_cutoff > 100:
            raise ValueError("Maximum factor cannot be greater than 100")
        self._cutoff_boundaries[1] = max_cutoff

    @deprecate("0.11", msg="Use draw instead")
    def redraw(self):
        self.draw()

    def draw(self):
        # Factors are fractions of the sequence length L; the +0.1 makes
        # np.arange inclusive of max_cutoff for typical step sizes.
        factors = np.arange(self.min_cutoff, self.max_cutoff + 0.1, self.cutoff_step)
        precisions = np.zeros(factors.shape[0])
        for i, factor in enumerate(factors):
            # Precision of the top (L * factor) ranked contacts.
            ncontacts = int(self._hierarchy.sequence.seq_len * factor)
            m = self._hierarchy[:ncontacts]
            precisions[i] = m.precision
        self.ax.plot(
            factors,
            precisions,
            color=ColorDefinitions.GENERAL,
            marker=None,
            linestyle="-",
            label="Precision score",
            zorder=1,
        )
        # Reference lines: 50% precision plus the conventional L, L/2,
        # L/5 and L/10 contact-count factors (only when inside the range).
        self.ax.axhline(0.5, color=ColorDefinitions.PRECISION50, linestyle="-", label="50% Precision", zorder=0)
        if self.min_cutoff <= 1.0:
            self.ax.axvline(1.0, color=ColorDefinitions.FACTOR1, linestyle="-", label="Factor L", zorder=0)
        if self.min_cutoff <= 0.5:
            self.ax.axvline(0.5, color=ColorDefinitions.FACTOR1, linestyle="--", label="Factor L/2", zorder=0)
        if self.min_cutoff <= 0.2:
            self.ax.axvline(0.2, color=ColorDefinitions.FACTOR1, linestyle="-.", label="Factor L/5", zorder=0)
        if self.min_cutoff <= 0.1:
            self.ax.axvline(0.1, color=ColorDefinitions.FACTOR1, linestyle=":", label="Factor L/10", zorder=0)
        self.ax.set_xlim(self.min_cutoff, self.max_cutoff)
        # Relabel the x axis with absolute contact counts instead of factors.
        xticks = (self.ax.get_xticks() * self._hierarchy.sequence.seq_len).astype(np.int64)
        self.ax.set_xticklabels(xticks)
        self.ax.set_ylim(0.0, 1.0)
        self.ax.set_xlabel("Number of Contacts")
        self.ax.set_ylabel("Precision")
        if self.legend:
            self.ax.legend(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc=3, ncol=3, mode="expand", borderaxespad=0.0)
        # TODO: deprecate this in 0.10
        if self._file_name:
            self.savefig(self._file_name, dpi=self._dpi)
| |
# -*- coding: utf-8 -*-
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mox
from oslo.config import cfg
import routes
import webob
from sps.common import exception
from sps.common import rpc
from sps.common import wsgi
from sps.openstack.common import jsonutils
from sps.tests.unit import base
from sps.tests import utils as test_utils
CONF = cfg.CONF
class FakeResource(object):
    """Stand-in RPC resource.

    Its methods are registered with the RPC controller and invoked over
    the fake transport by the tests below.
    """

    def get_demos(self, context, keyword=None):
        # Echo whatever keyword the caller supplied.
        return keyword

    def count_demos(self, context, demos):
        # Returns a non-str/dict value for the client round-trip test.
        return len(demos)

    def get_all_demos(self, context):
        return False

    def raise_value_error(self, context):
        # Builtin exception: should reach the client as a ValueError.
        raise ValueError("Yep, Just like that!")

    def raise_weird_error(self, context):
        # Locally-defined exception class: not importable by the client,
        # so it surfaces as a generic RPC error.
        class WeirdError(Exception):
            pass
        raise WeirdError("Weirdness")
def create_api():
    """Build a fake WSGI stack exposing the RPC controller at POST /rpc.

    The controller is backed by a FakeResource, wrapped in the fake auth
    middleware with admin rights, mirroring the production wiring.
    """
    deserializer = rpc.RPCJSONDeserializer()
    serializer = rpc.RPCJSONSerializer()
    controller = rpc.Controller()
    controller.register(FakeResource())
    res = wsgi.Resource(controller, deserializer, serializer)
    mapper = routes.Mapper()
    # Only POST requests are routed to the RPC endpoint.
    mapper.connect("/rpc", controller=res,
                   conditions=dict(method=["POST"]),
                   action="__call__")
    return test_utils.FakeAuthMiddleware(wsgi.Router(mapper), is_admin=True)
class TestRPCController(base.IsolatedUnitTest):
    """Exercise rpc.Controller registration and the POST /rpc endpoint."""

    def setUp(self):
        super(TestRPCController, self).setUp()
        self.res = FakeResource()
        self.controller = rpc.Controller()
        self.controller.register(self.res)
        # Mock
        self.mocker = mox.Mox()

    def test_register(self):
        res = FakeResource()
        controller = rpc.Controller()
        controller.register(res)
        self.assertIn("get_demos", controller._registered)
        self.assertIn("get_all_demos", controller._registered)

    def test_reigster_filtered(self):
        # NOTE(review): "reigster" typos in these test names are kept
        # as-is; renaming tests would not change behaviour.
        res = FakeResource()
        controller = rpc.Controller()
        controller.register(res, filtered=["get_all_demos"])
        self.assertIn("get_all_demos", controller._registered)

    def test_reigster_excluded(self):
        res = FakeResource()
        controller = rpc.Controller()
        controller.register(res, excluded=["get_all_demos"])
        self.assertIn("get_demos", controller._registered)

    def test_reigster_refiner(self):
        res = FakeResource()
        controller = rpc.Controller()
        # Not callable
        self.assertRaises(AssertionError,
                          controller.register,
                          res, refiner="get_all_demos")
        # Filter returns False
        controller.register(res, refiner=lambda x: False)
        self.assertNotIn("get_demos", controller._registered)
        self.assertNotIn("get_demos", controller._registered)
        # Filter returns True
        controller.register(res, refiner=lambda x: True)
        self.assertIn("get_demos", controller._registered)
        self.assertIn("get_demos", controller._registered)

    def test_request(self):
        api = create_api()
        req = webob.Request.blank('/rpc')
        req.method = 'POST'
        req.body = jsonutils.dumps([
            {
                "command": "get_demos",
                "kwargs": {"keyword": 1}
            }
        ])
        res = req.get_response(api)
        returned = jsonutils.loads(res.body)
        self.assertIsInstance(returned, list)
        self.assertEqual(returned[0], 1)

    def test_request_exc(self):
        api = create_api()
        req = webob.Request.blank('/rpc')
        req.method = 'POST'
        req.body = jsonutils.dumps([
            {
                "command": "get_all_demos",
                "kwargs": {"keyword": 1}
            }
        ])
        # Sending non-accepted keyword
        # to get_all_demos method
        res = req.get_response(api)
        returned = jsonutils.loads(res.body)
        self.assertIn("_error", returned[0])

    def test_rpc_errors(self):
        api = create_api()
        req = webob.Request.blank('/rpc')
        req.method = 'POST'
        req.content_type = 'application/json'
        # Body is not a list, it should fail
        req.body = jsonutils.dumps({})
        res = req.get_response(api)
        self.assertEqual(res.status_int, 400)
        # cmd is not dict, it should fail.
        req.body = jsonutils.dumps([None])
        res = req.get_response(api)
        self.assertEqual(res.status_int, 400)
        # No command key, it should fail.
        req.body = jsonutils.dumps([{}])
        res = req.get_response(api)
        self.assertEqual(res.status_int, 400)
        # kwargs not dict, it should fail.
        req.body = jsonutils.dumps([{"command": "test", "kwargs": 200}])
        res = req.get_response(api)
        self.assertEqual(res.status_int, 400)
        # Command does not exist, it should fail.
        req.body = jsonutils.dumps([{"command": "test"}])
        res = req.get_response(api)
        self.assertEqual(res.status_int, 404)

    def test_rpc_exception_propagation(self):
        api = create_api()
        req = webob.Request.blank('/rpc')
        req.method = 'POST'
        req.content_type = 'application/json'
        req.body = jsonutils.dumps([{"command": "raise_value_error"}])
        res = req.get_response(api)
        self.assertEqual(res.status_int, 200)
        returned = jsonutils.loads(res.body)[0]
        # Builtin exceptions keep their identity across the RPC boundary
        # ('exceptions.' prefix is the Python 2 builtin module name).
        self.assertEqual(returned['_error']['cls'], 'exceptions.ValueError')
        req.body = jsonutils.dumps([{"command": "raise_weird_error"}])
        res = req.get_response(api)
        self.assertEqual(res.status_int, 200)
        returned = jsonutils.loads(res.body)[0]
        # Unknown exception classes are wrapped in a generic RPCError.
        self.assertEqual(returned['_error']['cls'],
                         'sps.common.exception.RPCError')
class TestRPCClient(base.IsolatedUnitTest):
    """Round-trip rpc.RPCClient calls through the in-process fake API."""

    def setUp(self):
        super(TestRPCClient, self).setUp()
        self.api = create_api()
        self.client = rpc.RPCClient(host="http://127.0.0.1:9191")
        # Short-circuit HTTP: route requests straight into the WSGI app.
        self.client._do_request = self.fake_request

    def fake_request(self, method, url, body, headers):
        """Replacement for the client's HTTP layer; returns a fake response."""
        req = webob.Request.blank(url.path)
        req.body = body
        req.method = method
        webob_res = req.get_response(self.api)
        return test_utils.FakeHTTPResponse(status=webob_res.status_int,
                                           headers=webob_res.headers,
                                           data=webob_res.body)

    def test_method_proxy(self):
        # Unknown attributes on the client become RPC method proxies.
        proxy = self.client.some_method
        self.assertIn("method_proxy", str(proxy))

    def test_bulk_request(self):
        commands = [{"command": "get_demos", 'kwargs': {'keyword': True}},
                    {"command": "get_all_demos"}]
        res = self.client.bulk_request(commands)
        self.assertEqual(len(res), 2)
        self.assertTrue(res[0])
        self.assertFalse(res[1])

    def test_exception_raise(self):
        # Builtin exceptions raised server-side are re-raised client-side.
        try:
            self.client.raise_value_error()
            self.fail("Exception not raised")
        except ValueError as exc:
            self.assertEqual(str(exc), "Yep, Just like that!")

    def test_rpc_exception(self):
        # Non-importable server exceptions surface as RPCError.
        try:
            self.client.raise_weird_error()
            self.fail("Exception not raised")
        except exception.RPCError:
            pass

    def test_non_str_or_dict_response(self):
        rst = self.client.count_demos(demos=[1, 2, 3, 4])
        self.assertEqual(rst, 4)
        self.assertIsInstance(rst, int)
class TestRPCJSONSerializer(test_utils.BaseTestCase):
    """Unit tests for rpc.RPCJSONSerializer JSON encoding."""

    def test_to_json(self):
        fixture = {"key": "value"}
        expected = '{"key": "value"}'
        actual = rpc.RPCJSONSerializer().to_json(fixture)
        self.assertEqual(actual, expected)

    def test_to_json_with_date_format_value(self):
        # datetimes are encoded as tagged {_value, _type} objects.
        fixture = {"date": datetime.datetime(1900, 3, 8, 2)}
        expected = ('{"date": {"_value": "1900-03-08T02:00:00.000000", '
                    '"_type": "datetime"}}')
        actual = rpc.RPCJSONSerializer().to_json(fixture)
        self.assertEqual(actual, expected)

    def test_to_json_with_more_deep_format(self):
        fixture = {"is_public": True, "name": [{"name1": "test"}]}
        expected = '{"is_public": true, "name": [{"name1": "test"}]}'
        actual = rpc.RPCJSONSerializer().to_json(fixture)
        self.assertEqual(actual, expected)

    def test_default(self):
        fixture = {"key": "value"}
        response = webob.Response()
        rpc.RPCJSONSerializer().default(response, fixture)
        self.assertEqual(response.status_int, 200)
        # NOTE(review): len() of a filter result only works on Python 2,
        # where filter() returns a list -- this module predates Python 3.
        content_types = filter(lambda h: h[0] == 'Content-Type',
                               response.headerlist)
        self.assertEqual(len(content_types), 1)
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.body, '{"key": "value"}')
class TestRPCJSONDeserializer(test_utils.BaseTestCase):
    """Unit tests for rpc.RPCJSONDeserializer body detection and decoding."""

    def test_has_body_no_content_length(self):
        request = wsgi.Request.blank('/')
        request.method = 'POST'
        request.body = 'asdf'
        request.headers.pop('Content-Length')
        self.assertFalse(rpc.RPCJSONDeserializer().has_body(request))

    def test_has_body_zero_content_length(self):
        request = wsgi.Request.blank('/')
        request.method = 'POST'
        request.body = 'asdf'
        request.headers['Content-Length'] = 0
        self.assertFalse(rpc.RPCJSONDeserializer().has_body(request))

    def test_has_body_has_content_length(self):
        request = wsgi.Request.blank('/')
        request.method = 'POST'
        request.body = 'asdf'
        # webob fills Content-Length in when a body is assigned.
        self.assertIn('Content-Length', request.headers)
        self.assertTrue(rpc.RPCJSONDeserializer().has_body(request))

    def test_no_body_no_content_length(self):
        request = wsgi.Request.blank('/')
        self.assertFalse(rpc.RPCJSONDeserializer().has_body(request))

    def test_from_json(self):
        fixture = '{"key": "value"}'
        expected = {"key": "value"}
        actual = rpc.RPCJSONDeserializer().from_json(fixture)
        self.assertEqual(actual, expected)

    def test_from_json_malformed(self):
        # Malformed JSON is reported to the caller as a 400.
        fixture = 'kjasdklfjsklajf'
        self.assertRaises(webob.exc.HTTPBadRequest,
                          rpc.RPCJSONDeserializer().from_json, fixture)

    def test_default_no_body(self):
        request = wsgi.Request.blank('/')
        actual = rpc.RPCJSONDeserializer().default(request)
        expected = {}
        self.assertEqual(actual, expected)

    def test_default_with_body(self):
        request = wsgi.Request.blank('/')
        request.method = 'POST'
        request.body = '{"key": "value"}'
        actual = rpc.RPCJSONDeserializer().default(request)
        expected = {"body": {"key": "value"}}
        self.assertEqual(actual, expected)

    def test_has_body_has_transfer_encoding(self):
        # A Transfer-Encoding header counts as "has a body" even without
        # a usable Content-Length.
        request = wsgi.Request.blank('/')
        request.method = 'POST'
        request.body = 'fake_body'
        request.headers['transfer-encoding'] = 0
        self.assertIn('transfer-encoding', request.headers)
        self.assertTrue(rpc.RPCJSONDeserializer().has_body(request))

    def test_to_json_with_date_format_value(self):
        # Tagged {_value, _type} objects decode back into datetimes.
        fixture = ('{"date": {"_value": "1900-03-08T02:00:00.000000",'
                   '"_type": "datetime"}}')
        expected = {"date": datetime.datetime(1900, 3, 8, 2)}
        actual = rpc.RPCJSONDeserializer().from_json(fixture)
        self.assertEqual(actual, expected)
| |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
from pymatgen import Molecule
from pymatgen.io.gaussian import GaussianInput, GaussianOutput
from pymatgen.electronic_structure.core import Spin
"""
Created on Apr 17, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Apr 17, 2012"
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files', "molecules")
class GaussianInputTest(unittest.TestCase):
    """Tests for reading and writing Gaussian input (.gjf/.inp) files."""

    def setUp(self):
        # Methane in Cartesian coordinates.
        coords = [[0.000000, 0.000000, 0.000000],
                  [0.000000, 0.000000, 1.089000],
                  [1.026719, 0.000000, -0.363000],
                  [-0.513360, -0.889165, -0.363000],
                  [-0.513360, 0.889165, -0.363000]]
        self.coords = coords
        mol = Molecule(["C", "H", "H", "H", "H"], coords)
        self.gau = GaussianInput(
            mol, route_parameters={'SP': "", "SCF": "Tight"},
            input_parameters={"EPS": 12})

    def test_init(self):
        mol = Molecule(["C", "H", "H", "H", "H"], self.coords)
        gau = GaussianInput(mol, charge=1, route_parameters={'SP': "",
                                                             "SCF": "Tight"})
        self.assertEqual(gau.spin_multiplicity, 2)
        mol = Molecule(["C", "H", "H", "H", "H"], self.coords, charge=-1)
        gau = GaussianInput(mol, route_parameters={'SP': "", "SCF": "Tight"})
        self.assertEqual(gau.spin_multiplicity, 2)
        # Odd electron count with singlet multiplicity is inconsistent.
        self.assertRaises(ValueError, GaussianInput, mol, spin_multiplicity=1)

    def test_str_and_from_string(self):
        ans = """#P HF/6-31G(d) SCF=Tight SP
H4 C1
0 1
C
H 1 B1
H 1 B2 2 A2
H 1 B3 2 A3 3 D3
H 1 B4 2 A4 4 D4
B1=1.089000
B2=1.089000
A2=109.471221
B3=1.089000
A3=109.471213
D3=120.000017
B4=1.089000
A4=109.471213
D4=119.999966
EPS=12
"""
        self.assertEqual(str(self.gau), ans)
        gau = GaussianInput.from_string(ans)
        self.assertEqual(gau.functional, 'HF')
        self.assertEqual(gau.input_parameters['EPS'], '12')

    def test_from_file(self):
        filepath = os.path.join(test_dir, 'MethylPyrrolidine_drawn.gjf')
        gau = GaussianInput.from_file(filepath)
        self.assertEqual(gau.molecule.composition.formula, "H11 C5 N1")
        self.assertIn("opt", gau.route_parameters)
        self.assertEqual(gau.route_parameters["geom"], "connectivity")
        self.assertEqual(gau.functional, "b3lyp")
        self.assertEqual(gau.basis_set, "6-311+g(d,p)")
        # Multi-job file: sections are separated by --link1-- markers.
        filepath = os.path.join(test_dir, "g305_hb.txt")
        with open(filepath) as f:
            txt = f.read()
        toks = txt.split("--link1--")
        for i, t in enumerate(toks):
            lines = t.strip().split("\n")
            lines = [l.strip() for l in lines]
            gau = GaussianInput.from_string("\n".join(lines))
            self.assertIsNotNone(gau.molecule)
            if i == 0:
                mol = gau.molecule
                ans = """Full Formula (H4 O2)
Reduced Formula: H2O
Charge = 0, Spin Mult = 1
Sites (6)
0 O     0.000000     0.000000     0.000000
1 O     0.000000     0.000000     2.912902
2 H     0.892596     0.000000    -0.373266
3 H     0.143970     0.000219     0.964351
4 H    -0.582554     0.765401     3.042783
5 H    -0.580711    -0.766761     3.043012"""
                self.assertEqual(str(mol), ans)

    def test_from_string(self):
        gau_str = """%mem=5000000
%chk=filename
# mp2/6-31g* scf=direct
SIH4+ H2---SIH2+ CS //MP2(full)/6-31G* MP2=-290.9225259
1,2
Si
X,1,1.
H,1,R1,2,HALF1
H,1,R1,2,HALF1,3,180.,0
X,1,1.,2,90.,3,90.,0
X,1,1.,5,THETA,2,180.,0
H,1,R3,6,HALF3,5,0.,0
H,1,R4,6,HALF3,7,180.,0
R1=1.47014
R3=1.890457
R4=1.83514
HALF1=60.633314
THETA=10.35464
HALF3=11.861807"""
        gau = GaussianInput.from_string(gau_str)
        # Dummy atoms (X) are retained in the parsed composition.
        self.assertEqual("X3SiH4", gau.molecule.composition.reduced_formula)

    def test_gen_basis(self):
        gau_str = """#N B3LYP/Gen Pseudo=Read
Test
0 1
C
H 1 B1
H 1 B2 2 A2
H 1 B3 2 A3 3 D3
H 1 B4 2 A4 4 D4
B1=1.089000
B2=1.089000
A2=109.471221
B3=1.089000
A3=109.471213
D3=120.000017
B4=1.089000
A4=109.471213
D4=119.999966
C 0
6-31G(d,p)
****
H 0
6-31G
****
"""
        mol = Molecule(["C", "H", "H", "H", "H"], self.coords)
        gen_basis = "C 0\n6-31G(d,p)\n****\nH 0\n6-31G\n****"
        gau = GaussianInput(mol, functional="B3LYP", gen_basis=gen_basis,
                            dieze_tag="#N", route_parameters={"Pseudo": "Read"},
                            title="Test")
        self.assertEqual(gau.to_string(cart_coords=False), gau_str)

    def test_multiple_paramaters(self):
        """
        This test makes sure that input files with multi-parameter keywords
        and route cards with multiple lines can be parsed accurately.
        """
        filepath = os.path.join(test_dir, "l-cysteine.inp")
        route = {"test": None, "integral": {"grid": "UltraFine"},
                 "opt": {"Z-Matrix": None, "maxcycles": "80", "tight": None}}
        gin = GaussianInput.from_file(filepath)
        self.assertEqual(gin.dieze_tag, "#n")
        self.assertEqual(gin.functional, "B3LYP")
        self.assertEqual(gin.basis_set, "6-31+G**")
        self.assertEqual(gin.route_parameters, route)
        self.assertEqual(gin.title, "L-cysteine neutral")
        self.assertEqual(gin.charge, 0)
        self.assertEqual(gin.spin_multiplicity, 1)
class GaussianOutputTest(unittest.TestCase):
    """Tests for parsing Gaussian .log/.out output files."""
    # todo: Add unittest for PCM type output.

    def setUp(self):
        self.gauout = GaussianOutput(os.path.join(test_dir, "methane.log"))

    def test_resume(self):
        resume = self.gauout.resumes[0]
        methane_resume = r"""1\1\GINC-SHYUE-LAPTOP\FOpt\RHF\3-21G\C1H4\SHYUE\27-Feb-2008\0\\#p hf/3
-21G opt\\Title Card Required\\0,1\C,0.,0.,0.\H,0.,0.,1.0829014152\H,1
.0209692454,0.,-0.3609671384\H,-0.5104846227,-0.884185303,-0.360967138
4\H,-0.5104846227,0.884185303,-0.3609671384\\Version=IA32L-G03RevD.01\
State=1-A1\HF=-39.9768776\RMSD=3.210e-09\RMSF=5.014e-08\Thermal=0.\Dip
ole=0.,0.,0.\PG=TD [O(C1),4C3(H1)]\\@"""
        # The archive block is whitespace-joined before comparison.
        methane_resume = "".join([r.strip() for r in methane_resume.split("\n")])
        self.assertEqual(resume, methane_resume)

    def test_props(self):
        gau = self.gauout
        self.assertEqual(len(gau.energies), 3)
        self.assertAlmostEqual(gau.energies[-1], -39.9768775602)
        self.assertEqual(len(gau.structures), 4)
        for mol in gau.structures:
            self.assertEqual(mol.formula, 'H4 C1')
        self.assertIn("opt", gau.route_parameters)
        self.assertEqual("Minimum", gau.stationary_type)
        self.assertEqual("hf", gau.functional)
        self.assertEqual("3-21G", gau.basis_set)
        self.assertEqual(17, gau.num_basis_func)
        d = gau.as_dict()
        self.assertEqual(d["input"]["functional"], "hf")
        self.assertAlmostEqual(d["output"]["final_energy"], -39.9768775602)
        self.assertEqual(len(gau.cart_forces), 3)
        self.assertEqual(gau.cart_forces[0][5], 0.009791094)
        self.assertEqual(gau.cart_forces[0][-1], -0.003263698)
        self.assertEqual(gau.cart_forces[2][-1], -0.000000032)
        self.assertEqual(gau.eigenvalues[Spin.up][-1], 1.95586)
        self.assertEqual(gau.num_basis_func, 17)
        self.assertEqual(gau.is_spin, False)
        # Multi-job log: two frequency blocks with different mode counts.
        ch2o_co2 = GaussianOutput(os.path.join(test_dir, "CH2O_CO2.log"))
        self.assertEqual(len(ch2o_co2.frequencies), 2)
        self.assertEqual(len(ch2o_co2.frequencies[0]), 6)
        self.assertEqual(len(ch2o_co2.frequencies[1]), 4)
        self.assertEqual(ch2o_co2.frequencies[0][0]["frequency"], 1203.1940)
        self.assertEqual(ch2o_co2.frequencies[0][0]["symmetry"], "A\"")
        self.assertEqual(ch2o_co2.frequencies[0][3]["IR_intensity"], 60.9575)
        self.assertEqual(ch2o_co2.frequencies[0][3]["r_mass"], 3.7543)
        self.assertEqual(ch2o_co2.frequencies[0][4]["f_constant"], 5.4175)
        self.assertListEqual(ch2o_co2.frequencies[0][1]["mode"], [0.15, 0.00, 0.00,
                                                                  -0.26, 0.65, 0.00,
                                                                  -0.26, -0.65, 0.00,
                                                                  -0.08, 0.00, 0.00])
        self.assertListEqual(ch2o_co2.frequencies[1][3]["mode"], [0.00, 0.00, 0.88,
                                                                  0.00, 0.00, -0.33,
                                                                  0.00, 0.00, -0.33])
        self.assertEqual(ch2o_co2.frequencies[1][3]["symmetry"], "SGU")
        self.assertEqual(ch2o_co2.eigenvalues[Spin.up][3], -1.18394)
        # Vibrational run: frequencies plus the full Hessian matrix.
        h2o = GaussianOutput(os.path.join(test_dir, "H2O_gau_vib.out"))
        self.assertEqual(len(h2o.frequencies[0]), 3)
        self.assertEqual(h2o.frequencies[0][0]["frequency"], 1662.8033)
        self.assertEqual(h2o.frequencies[0][1]["symmetry"], "A'")
        self.assertEqual(h2o.hessian[0, 0], 0.356872)
        self.assertEqual(h2o.hessian.shape, (9, 9))
        self.assertEqual(h2o.hessian[8, :].tolist(), [-0.143692e-01, 0.780136e-01,
                                                      -0.362637e-01, -0.176193e-01,
                                                      0.277304e-01, -0.583237e-02,
                                                      0.319885e-01, -0.105744e+00,
                                                      0.420960e-01])

    def test_pop(self):
        # Population analysis: orbital eigenvalues, MO coefficients and
        # basis-function labels for an open-shell H2O calculation.
        gau = GaussianOutput(os.path.join(test_dir, "H2O_gau.out"))
        self.assertEqual(gau.num_basis_func, 13)
        self.assertEqual(gau.electrons, (5, 5))
        self.assertEqual(gau.is_spin, True)
        self.assertListEqual(gau.eigenvalues[Spin.down], [-20.55343, -1.35264,
                                                          -0.72655, -0.54824,
                                                          -0.49831, 0.20705,
                                                          0.30297, 1.10569,
                                                          1.16144, 1.16717,
                                                          1.20460, 1.38903,
                                                          1.67608])
        mo = gau.molecular_orbital
        self.assertEqual(len(mo), 2)  # la 6
        self.assertEqual(len(mo[Spin.down]), 13)
        self.assertEqual(len(mo[Spin.down][0]), 3)
        self.assertEqual(mo[Spin.down][5][0]["1S"], -0.08771)
        self.assertEqual(mo[Spin.down][5][0]["2PZ"], -0.21625)
        self.assertListEqual(gau.eigenvectors[Spin.up][:, 5].tolist(), [-0.08771,
                                                                        0.10840,
                                                                        0.00000,
                                                                        0.00000,
                                                                        -0.21625,
                                                                        1.21165,
                                                                        0.00000,
                                                                        0.00000,
                                                                        -0.44481,
                                                                        -0.06348,
                                                                        -1.00532,
                                                                        -0.06348,
                                                                        -1.00532])
        self.assertListEqual(gau.atom_basis_labels[0], ["1S", "2S", "2PX", "2PY",
                                                        "2PZ", "3S", "3PX", "3PY",
                                                        "3PZ"])
        self.assertListEqual(gau.atom_basis_labels[2], ["1S", "2S"])
        gau = GaussianOutput(os.path.join(test_dir, "H2O_gau_vib.out"))
        self.assertEqual(gau.bond_orders[(0, 1)], 0.7582)
        self.assertEqual(gau.bond_orders[(1, 2)], 0.0002)

    def test_scan(self):
        gau = GaussianOutput(os.path.join(test_dir, "so2_scan.log"))
        d = gau.read_scan()
        self.assertAlmostEqual(-548.02102, d["energies"][-1])
        self.assertEqual(len(d["coords"]), 1)
        self.assertEqual(len(d["energies"]), len(gau.energies))
        self.assertEqual(len(d["energies"]), 21)

    def test_td(self):
        gau = GaussianOutput(os.path.join(test_dir, "so2_td.log"))
        transitions = gau.read_excitation_energies()
        self.assertEqual(len(transitions), 4)
        # (energy eV, wavelength nm, oscillator strength)
        self.assertAlmostEqual(transitions[0], (3.9281, 315.64, 0.0054))

    def test_multiple_paramaters(self):
        """
        This test makes sure that input files with multi-parameter keywords
        and route cards with multiple lines can be parsed accurately.
        """
        filepath = os.path.join(test_dir, "l-cysteine.out")
        route = {"test": None, "integral": {"grid": "UltraFine"},
                 "opt": {"Z-Matrix": None, "maxcycles": "80", "tight": None}}
        gout = GaussianOutput(filepath)
        self.assertEqual(gout.dieze_tag, "#n")
        self.assertEqual(gout.functional, "B3LYP")
        self.assertEqual(gout.basis_set, "6-31+G**")
        self.assertEqual(gout.route_parameters, route)
        self.assertEqual(gout.title, "L-cysteine neutral")
        self.assertEqual(gout.charge, 0)
        self.assertEqual(gout.spin_multiplicity, 1)
# Run the Gaussian IO test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| |
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a bitcoind-compatible node (Python 2)."""
    OBJID = 1

    def __init__(self, host, port, username, password):
        # HTTP basic auth header built from the RPC credentials.
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # Non-strict connection with a 30 second timeout.
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """Issue one JSON-RPC call; return the result, the error dict, or None.

        None is returned for transport/decode failures; an 'error' value
        from the server is returned as-is rather than raised.
        """
        # NOTE(review): 'self.OBJID += 1' reads the class attribute but
        # assigns a per-instance attribute that shadows it thereafter.
        self.OBJID += 1
        obj = { 'version' : '1.1',
                'method' : method,
                'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
                          { 'Authorization' : self.authhdr,
                            'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']

    def getblockcount(self):
        """Return the node's current block height."""
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        """Fetch new work (data=None) or submit a solved block header."""
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate *x* to an unsigned 32-bit value (Python 2 long literal)."""
    return x & 0xffffffffL
def bytereverse(x):
    """Return the 32-bit value *x* with its four bytes in reverse order."""
    swapped = (((x) << 24) | (((x) << 8) & 0x00ff0000) |
               (((x) >> 8) & 0x0000ff00) | ((x) >> 24))
    # Mask back down to 32 bits (inlined uint32 truncation).
    return swapped & 0xffffffff
def bufreverse(in_buf):
    """Byte-swap each 32-bit word of *in_buf* (a Python 2 byte string).

    Every 4-byte group is reinterpreted as a native-endian uint32 and
    re-packed with its bytes reversed; the order of the words themselves
    is preserved.  Assumes len(in_buf) is a multiple of 4.
    """
    out_words = []
    for i in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    """Return *in_buf* with the order of its 4-byte words reversed.

    Bytes within each word keep their order; only the words are
    reordered.  Assumes len(in_buf) is a multiple of 4.
    """
    words = [in_buf[i:i + 4] for i in range(0, len(in_buf), 4)]
    return ''.join(reversed(words))
class Miner:
    """Single-process getwork CPU miner (Python 2)."""

    def __init__(self, id):
        self.id = id
        # Upper bound on nonces tried per work unit; retuned in iterate().
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces over the given work; return (hashes_done, nonce_bin or None)."""
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)
        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]
        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)
        # pre-hash first 76b of block header (reused for every nonce)
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)
        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)
            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()
            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()
            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue
            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)
            # proof-of-work test: hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)
        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the original work and submit it."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # The nonce occupies hex characters 152-160 of the getwork data.
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """Fetch one work unit, scan it, and submit any solution found."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return
        time_start = time.time()
        (hashes_done, nonce_bin) = self.work(work['data'],
                                             work['target'])
        time_end = time.time()
        time_diff = time_end - time_start
        # Retune max_nonce so one scan lasts roughly 'scantime' seconds.
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL
        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                self.id, hashes_done,
                (hashes_done / 1000.0) / time_diff)
        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Mine forever against the RPC endpoint from global settings."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                         settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return
        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Worker-process entry point: build a Miner identified by *id* and run it forever."""
    Miner(id).loop()
if __name__ == '__main__':
    # Usage check: exactly one argument, the configuration file path.
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)
    # Parse simple key=value configuration lines; '#' starts a comment line.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()
    # Fill in defaults for optional settings.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 8776
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)
    # Config values arrive as strings; coerce the numeric ones.
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])
    # Launch one mining process per configured thread.
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)  # stagger threads
    print settings['threads'], "mining threads started"
    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    # Wait for all workers; Ctrl-C falls through to the final status line.
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| |
"""
Various transformations for flow cytometry data.
The forward transformations all refer to transforming the
raw data off the machine (i.e. a log transformation is the forward
and an exponential is its inverse).
References:
Bagwell. Cytometry Part A, 2005.
Parks, Roederer, and Moore. Cytometry Part A, 2006.
Trotter, Joseph. In Current Protocols in Cytometry. John Wiley & Sons, Inc., 2001.
TODO:
- Add scale parameters (r,d) to glog (if needed?)
- Implement logicle transformation.
- Add support for transforming a numpy array
"""
from __future__ import division
import warnings
from numpy import (log, log10, exp, where, sign, vectorize, min, max, linspace, logspace, r_, abs,
asarray)
from numpy.lib.shape_base import apply_along_axis
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.optimize import brentq
from FlowCytometryTools.core.utils import to_list, BaseObject
# Scale constants used as transform defaults.
_machine_max = 2 ** 18  # maximal raw machine value (presumably an 18-bit range — confirm)
_l_mmax = log10(_machine_max)  # log10 of the machine maximum
_display_max = 10 ** 4  # default maximal value after transformation
def linear(x, old_range, new_range):
    """
    Rescale data linearly into a new range:

        new = x / old_range * new_range

    Parameters
    ----------
    x : num | num iterable
        data to be rescaled
    old_range : float | array | Series
        Maximal data value before rescaling
    new_range : float | array | Series
        Maximal data value after rescaling

    Deprecated.
    """
    return x / old_range * new_range
# Backwards-compatible alias.
rescale = linear
def tlog(x, th=1, r=_display_max, d=_l_mmax):
    """
    Truncated log10 transform.

    Values at or below the threshold *th* are mapped to the transform of the
    threshold itself, so the output never falls below tlog(th).

    Parameters
    ----------
    x : num | num iterable
        values to be transformed.
    th : num
        values at or below th are transformed to tlog(th). Must be positive.
    r : num (default = 10**4)
        maximal transformed value.
    d : num (default = log10(2**18))
        log10 of maximal possible measured value; tlog(10**d) = r.

    Returns
    -------
    Array of transformed values.
    """
    if th <= 0:
        raise ValueError('Threshold value must be positive. %s given.' % th)
    # Value assigned to everything at or below the threshold.
    floor_value = log10(th) * 1. * r / d
    return where(x <= th, floor_value, log10(x) * 1. * r / d)
def tlog_inv(y, th=1, r=_display_max, d=_l_mmax):
    """
    Inverse truncated log10 transform.

    Inverse values that fall below *th* are clipped up to th.

    Parameters
    ----------
    y : num | num iterable
        values to be transformed.
    th : num
        lower clip value for the inverse. Must be positive.
    r : num (default = 10**4)
        maximal transformed value.
    d : num (default = log10(2**18))
        log10 of maximal possible measured value; tlog_inv(r) = 10**d.

    Returns
    -------
    Array of transformed values.
    """
    if th <= 0:
        raise ValueError('Threshold value must be positive. %s given.' % th)
    inverted = 10 ** (y * 1. * d / r)
    try:
        # Array input: clip in place.
        inverted[inverted < th] = th
    except TypeError:
        # Scalar input does not support item assignment.
        if inverted < th:
            inverted = th
    return inverted
def glog(x, l):
    """
    Natural base generalized-log transform:

        glog(x) = ln(x + sqrt(x**2 + l))
    """
    root = (x ** 2 + l) ** 0.5
    return log(x + root)
def glog_inv(y, l):
    """Inverse of the natural base generalized-log transform."""
    expy = exp(y)
    return (expy ** 2 - l) / (2 * expy)
def hlog_inv(y, b=500, r=_display_max, d=_l_mmax):
    """
    Inverse of base 10 hyperlog transform.

    Parameters
    ----------
    y : num | num iterable
        transformed values to map back to data space.
    b : num
        parameter controlling the location of the shift from linear to log.
    r : num (default = 10**4)
        maximal transformed value.
    d : num (default = log10(2**18))
        log10 of maximal possible measured value.
    """
    scaled = 1. * d / r * y
    sgn = sign(y)
    if sgn.shape:
        # Array input: zero entries take the positive branch.
        sgn[sgn == 0] = 1
    elif sgn == 0:
        # Scalar zero input.
        sgn = 1
    return sgn * 10 ** (sgn * scaled) + b * scaled - sgn
def _x_for_spln(x, nx, log_spacing):
"""
Create vector of values to be used in constructing a spline.
Parameters
----------
x : num | num iterable
Resulted values will span the range [min(x), max(x)]
nx : int
Length of returned vector.
log_spacing: bool
False - Create linearly spaced values.
True - Create logarithmically spaced values.
To extend to negative values, the spacing is done separately on the
negative and positive range, and these are later combined.
The number of points in the negative/positive range is proportional
to their relative range in log space. i.e., for data in the range
[-100, 1000] 2/5 of the resulting points will be in the negative range.
Returns
-------
x_spln : array
"""
x = asarray(x)
xmin = min(x)
xmax = max(x)
if xmin == xmax:
return asarray([xmin] * nx)
if xmax <= 0: # all values<=0
return -_x_for_spln(-x, nx, log_spacing)[::-1]
if not log_spacing:
return linspace(xmin, xmax, nx)
# All code below is to handle-log-spacing when x has potentially both negative
# and positive values.
if xmin > 0:
return logspace(log10(xmin), log10(xmax), nx)
else:
lxmax = max([log10(xmax), 0])
lxmin = max([log10(abs(xmin)), 0])
# All the code below is for log-spacing, when xmin < 0 and xmax > 0
if lxmax == 0 and lxmin == 0:
return linspace(xmin, xmax, nx) # Use linear spacing as fallback
if xmin > 0:
x_spln = logspace(lxmin, lxmax, nx)
elif xmin == 0:
x_spln = r_[0, logspace(-1, lxmax, nx - 1)]
else: # (xmin < 0)
f = lxmin / (lxmin + lxmax)
nx_neg = int(f * nx)
nx_pos = nx - nx_neg
if nx <= 1:
# If triggered fix edge case behavior
raise AssertionError(u'nx should never bebe 0 or 1')
# Work-around various edge cases
if nx_neg == 0:
nx_neg = 1
nx_pos = nx_pos - 1
if nx_pos == 0:
nx_pos = 1
nx_neg = nx_neg - 1
x_spln_pos = logspace(-1, lxmax, nx_pos)
x_spln_neg = -logspace(lxmin, -1, nx_neg)
x_spln = r_[x_spln_neg, x_spln_pos]
return x_spln
def _make_hlog_numeric(b, r, d):
"""
Return a function that numerically computes the hlog transformation for given parameter values.
"""
hlog_obj = lambda y, x, b, r, d: hlog_inv(y, b, r, d) - x
find_inv = vectorize(lambda x: brentq(hlog_obj, -2 * r, 2 * r,
args=(x, b, r, d)))
return find_inv
def hlog(x, b=500, r=_display_max, d=_l_mmax):
    """
    Base 10 hyperlog transform.

    Parameters
    ----------
    x : num | num iterable
        values to be transformed.
    b : num
        Parameter controlling the location of the shift
        from linear to log transformation.
    r : num (default = 10**4)
        maximal transformed value.
    d : num (default = log10(2**18))
        log10 of maximal possible measured value; hlog_inv(r) = 10**d.

    Returns
    -------
    Array of transformed values.
    """
    transform = _make_hlog_numeric(b, r, d)
    # An empty container has nothing to transform; return it unchanged.
    if hasattr(x, '__len__') and not len(x):
        return x
    return transform(x)
_canonical_names = {
'linear': 'linear',
'lin': 'linear',
'rescale': 'linear',
'hlog': 'hlog',
'hyperlog': 'hlog',
'glog': 'glog',
'tlog': 'tlog',
}
def _get_canonical_name(name):
try:
name = name.lower()
except AttributeError:
pass
return _canonical_names.get(name, None)
# Registry mapping each canonical transform name to its forward and
# inverse implementation.
name_transforms = {
    'linear': {'forward': linear, 'inverse': linear},
    'hlog': {'forward': hlog, 'inverse': hlog_inv},
    'glog': {'forward': glog, 'inverse': glog_inv},
    'tlog': {'forward': tlog, 'inverse': tlog_inv},
}
def parse_transform(transform, direction='forward'):
    """
    Resolve *transform* into a transform function.

    Parameters
    ----------
    transform : callable | str
        A callable is returned as-is; a string is looked up among the
        named transforms.
    direction : 'forward' | 'inverse'

    Returns
    -------
    (tfun, tname)
        The transform callable and its canonical name (None for a raw
        callable).

    Raises
    ------
    ValueError
        If the string is not a known transform name.
    TypeError
        If *transform* is neither callable nor string-like.
    """
    if callable(transform):
        return transform, None
    if hasattr(transform, 'lower'):
        tname = _get_canonical_name(transform)
        if tname is None:
            raise ValueError('Unknown transform: %s' % transform)
        return name_transforms[tname][direction], tname
    raise TypeError('Unsupported transform type: %s' % type(transform))
def transform_frame(frame, transform, columns=None, direction='forward',
                    return_all=True, args=(), **kwargs):
    """
    Apply transform to specified columns.

    direction: 'forward' | 'inverse'
    return_all: bool
        True - return all columns, with specified ones transformed.
        False - return only specified columns.

    .. warning:: deprecated
    """
    tfun, tname = parse_transform(transform, direction)
    columns = to_list(columns)
    if columns is None:
        columns = frame.columns
    if not return_all:
        # Keep only the requested columns and transform them.
        return frame.filter(columns).apply(tfun, *args, **kwargs)
    transformed = frame.copy()
    for col in columns:
        transformed[col] = tfun(frame[col], *args, **kwargs)
    return transformed
class Transformation(BaseObject):
    """
    A transformation for flow cytometry data.

    Wraps a transform function (named or user supplied) together with its
    arguments, and can optionally evaluate it through an interpolating
    spline.
    """
    def __init__(self, transform, direction='forward', name=None, spln=None, args=(), **kwargs):
        """
        Parameters
        ----------
        transform: callable | str
            Callable that does a transformation (should accept a number or array),
            or one of the supported named transformations.
            Supported transformation are: {}.
        direction: 'forward' | 'inverse'
            Direction of the transformation.
        name: str | None
            Optional display name (used by repr).
        spln: callable | None
            Optional precomputed spline approximation of the transform.
        args, kwargs:
            Extra positional/keyword arguments passed to the transform function.
        """
        tfun, tname = parse_transform(transform, direction)
        self.tfun = tfun
        self.tname = tname
        self.direction = direction
        self.args = args
        self.kwargs = kwargs
        self.name = name
        self.spln = spln
    # Substitute the list of supported transform names into the docstring above.
    __init__.__doc__ = __init__.__doc__.format(', '.join(name_transforms.keys()))
    def __repr__(self):
        return repr(self.name)
    def transform(self, x, use_spln=False, **kwargs):
        """
        Apply transform to x

        Parameters
        ----------
        x : float-array-convertible
            Data to be transformed.
            Should support conversion to an array of floats.
        use_spln: bool
            True - transform using the spline specified in self.spln.
            If self.spln is None, set the spline.
            False - transform using self.tfun
        kwargs:
            Keyword arguments to be passed to self.set_spline.
            Only used if use_spln=True & self.spln=None.

        Returns
        -------
        Array of transformed values.
        """
        x = asarray(x, dtype=float)
        if use_spln:
            # Lazily build the spline over the observed data range.
            if self.spln is None:
                self.set_spline(x.min(), x.max(), **kwargs)
            return apply_along_axis(self.spln, 0, x)
        else:
            return self.tfun(x, *self.args, **self.kwargs)
    # Calling the object applies the transform.
    __call__ = transform
    @property
    def inverse(self):
        # The inverse is looked up by canonical name, so it exists only
        # for named transforms.
        if self.tname is None:
            warnings.warn('inverse is supported only for named transforms. Returning None.')
            return None
        else:
            direction = 'forward' if self.direction == 'inverse' else 'inverse'
            ifun = name_transforms[self.tname][direction]
            tinv = self.copy()
            tinv.tfun = ifun
            tinv.direction = direction
            return tinv
    def set_spline(self, xmin, xmax, nx=1000, log_spacing=None, **kwargs):
        """
        Build an interpolating spline of the transform over [xmin, xmax]
        and store it in self.spln.
        """
        if log_spacing is None:
            # Log-like transforms sample better on log-spaced points.
            if self.tname in ['hlog', 'tlog', 'glog']:
                log_spacing = True
            else:
                log_spacing = False
        x_spln = _x_for_spln([xmin, xmax], nx, log_spacing)
        y_spln = self(x_spln)
        spln = InterpolatedUnivariateSpline(x_spln, y_spln, **kwargs)
        self.spln = spln
| |
""" This file contains view functions for Flask-User forms.
:copyright: (c) 2013 by Ling Thio
:author: Ling Thio (ling.thio@gmail.com)
:license: Simplified BSD License, see LICENSE.txt for more details."""
from datetime import datetime
from flask import current_app, flash, redirect, render_template, request, url_for
from flask_login import current_user, login_user, logout_user
try: # Handle Python 2.x and Python 3.x
from urllib.parse import quote # Python 3.x
except ImportError:
from urllib import quote # Python 2.x
from .decorators import confirm_email_required, login_required
from . import emails
from . import signals
from .translations import gettext as _
def _call_or_get(function_or_property):
return function_or_property() if callable(function_or_property) else function_or_property
def confirm_email(token):
    """ Verify email confirmation token and activate the user account."""
    # Verify token
    user_manager = current_app.user_manager
    db_adapter = user_manager.db_adapter
    is_valid, has_expired, object_id = user_manager.verify_token(
        token,
        user_manager.confirm_email_expiration)
    if has_expired:
        flash(_('Your confirmation token has expired.'), 'error')
        return redirect(url_for('user.login'))
    if not is_valid:
        flash(_('Invalid confirmation token.'), 'error')
        return redirect(url_for('user.login'))
    # Confirm email by setting User.confirmed_at=utcnow() or UserEmail.confirmed_at=utcnow()
    user = None
    if db_adapter.UserEmailClass:
        # Multiple-emails-per-user mode: the token carries a UserEmail id.
        user_email = user_manager.get_user_email_by_id(object_id)
        if user_email:
            user_email.confirmed_at = datetime.utcnow()
            user = user_email.user
    else:
        # Single-email mode: the token carries a User id.
        user_email = None
        user = user_manager.get_user_by_id(object_id)
        if user:
            user.confirmed_at = datetime.utcnow()
    if user:
        user.set_active(True)
        db_adapter.commit()
    else:  # pragma: no cover
        flash(_('Invalid confirmation token.'), 'error')
        return redirect(url_for('user.login'))
    # Send email_confirmed signal
    signals.user_confirmed_email.send(current_app._get_current_object(), user=user)
    # Prepare one-time system message
    flash(_('Your email has been confirmed.'), 'success')
    # Auto-login after confirm or redirect to login page
    next = request.args.get('next', _endpoint_url(user_manager.after_confirm_endpoint))
    if user_manager.auto_login_after_confirm:
        return _do_login_user(user, next)  # auto-login
    else:
        return redirect(url_for('user.login')+'?next='+next)  # redirect to login page
@login_required
@confirm_email_required
def change_password():
    """ Prompt for old password and new password and change the user's password."""
    user_manager = current_app.user_manager
    db_adapter = user_manager.db_adapter
    # Initialize form
    form = user_manager.change_password_form(request.form, user=current_user)
    form.next.data = request.args.get('next', _endpoint_url(user_manager.after_change_password_endpoint))  # Place ?next query param in next form field
    # Process valid POST
    if request.method=='POST' and form.validate():
        # Hash password
        hashed_password = user_manager.hash_password(form.new_password.data)
        # Change password
        user_manager.update_password(current_user, hashed_password)
        # Send 'password_changed' email
        if user_manager.enable_email and user_manager.send_password_changed_email:
            emails.send_password_changed_email(current_user)
        # Send password_changed signal
        signals.user_changed_password.send(current_app._get_current_object(), user=current_user)
        # Prepare one-time system message
        flash(_('Your password has been changed successfully.'), 'success')
        # Redirect to 'next' URL
        return redirect(form.next.data)
    # Process GET or invalid POST
    return render_template(user_manager.change_password_template, form=form)
@login_required
@confirm_email_required
def change_username():
    """ Prompt for new username and old password and change the user's username."""
    user_manager = current_app.user_manager
    db_adapter = user_manager.db_adapter
    # Initialize form
    form = user_manager.change_username_form(request.form)
    form.next.data = request.args.get('next', _endpoint_url(user_manager.after_change_username_endpoint))  # Place ?next query param in next form field
    # Process valid POST
    if request.method=='POST' and form.validate():
        new_username = form.new_username.data
        # Change username; the username may live on a separate UserAuth record.
        user_auth = current_user.user_auth if db_adapter.UserAuthClass and hasattr(current_user, 'user_auth') else current_user
        db_adapter.update_object(user_auth, username=new_username)
        db_adapter.commit()
        # Send 'username_changed' email
        if user_manager.enable_email and user_manager.send_username_changed_email:
            emails.send_username_changed_email(current_user)
        # Send username_changed signal
        signals.user_changed_username.send(current_app._get_current_object(), user=current_user)
        # Prepare one-time system message
        flash(_("Your username has been changed to '%(username)s'.", username=new_username), 'success')
        # Redirect to 'next' URL
        return redirect(form.next.data)
    # Process GET or invalid POST
    return render_template(user_manager.change_username_template, form=form)
@login_required
@confirm_email_required
def email_action(id, action):
    """ Perform action 'action' ('delete', 'make-primary' or 'confirm')
    on the UserEmail object with the given 'id'.
    """
    user_manager = current_app.user_manager
    db_adapter = user_manager.db_adapter
    # Retrieve UserEmail by id
    user_email = db_adapter.find_first_object(db_adapter.UserEmailClass, id=id)
    # Users may only change their own UserEmails
    if not user_email or user_email.user_id != int(current_user.get_id()):
        return unauthorized()
    if action=='delete':
        # Primary UserEmail can not be deleted
        if user_email.is_primary:
            return unauthorized()
        # Delete UserEmail
        db_adapter.delete_object(user_email)
        db_adapter.commit()
    elif action=='make-primary':
        # Disable previously primary emails
        user_emails = db_adapter.find_all_objects(db_adapter.UserEmailClass, user_id=int(current_user.get_id()))
        for ue in user_emails:
            if ue.is_primary:
                ue.is_primary = False
        # Enable current primary email
        user_email.is_primary = True
        # Commit
        db_adapter.commit()
    elif action=='confirm':
        # Re-send the confirmation email for this address.
        _send_confirm_email(user_email.user, user_email)
    else:
        # Unknown action name.
        return unauthorized()
    return redirect(url_for('user.manage_emails'))
def forgot_password():
    """Prompt for email and send reset password email."""
    user_manager = current_app.user_manager
    db_adapter = user_manager.db_adapter
    # Initialize form
    forgot_form = user_manager.forgot_password_form(request.form)
    # Process valid POST
    if request.method=='POST' and forgot_form.validate():
        address = forgot_form.email.data
        user, user_email = user_manager.find_user_by_email(address)
        # Only send when the address belongs to a known user.
        if user:
            user_manager.send_reset_password_email(address)
        # NOTE(review): the same message is flashed for unknown addresses
        # too — presumably to avoid leaking which emails exist.
        flash(_("A reset password email has been sent to '%(email)s'. Open that email and follow the instructions to reset your password.", email=address), 'success')
        # Redirect to the login page
        return redirect(_endpoint_url(user_manager.after_forgot_password_endpoint))
    # Process GET or invalid POST
    return render_template(user_manager.forgot_password_template, form=forgot_form)
def login():
    """ Prompt for username/email and password and sign the user in."""
    user_manager = current_app.user_manager
    db_adapter = user_manager.db_adapter
    next = request.args.get('next', _endpoint_url(user_manager.after_login_endpoint))
    reg_next = request.args.get('reg_next', _endpoint_url(user_manager.after_register_endpoint))
    # Immediately redirect already logged in users
    if _call_or_get(current_user.is_authenticated) and user_manager.auto_login_at_login:
        return redirect(next)
    # Initialize form
    login_form = user_manager.login_form(request.form)  # for login.html
    register_form = user_manager.register_form()  # for login_or_register.html
    if request.method!='POST':
        # Seed the hidden redirect fields on a GET.
        login_form.next.data = register_form.next.data = next
        login_form.reg_next.data = register_form.reg_next.data = reg_next
    # Process valid POST
    if request.method=='POST' and login_form.validate():
        # Retrieve User
        user = None
        user_email = None
        if user_manager.enable_username:
            # Find user record by username
            user = user_manager.find_user_by_username(login_form.username.data)
            user_email = None
            # Find primary user_email record
            if user and db_adapter.UserEmailClass:
                user_email = db_adapter.find_first_object(db_adapter.UserEmailClass,
                        user_id=int(user.get_id()),
                        is_primary=True,
                        )
            # Find user record by email (with form.username)
            if not user and user_manager.enable_email:
                user, user_email = user_manager.find_user_by_email(login_form.username.data)
        else:
            # Find user by email (with form.email)
            user, user_email = user_manager.find_user_by_email(login_form.email.data)
        if user:
            # Log user in
            return _do_login_user(user, login_form.next.data, login_form.remember_me.data)
    # Process GET or invalid POST
    return render_template(user_manager.login_template,
            form=login_form,
            login_form=login_form,
            register_form=register_form)
def logout():
    """Sign the current user out and redirect."""
    user_manager = current_app.user_manager
    # Emit the signal while current_user is still the signed-in user.
    signals.user_logged_out.send(current_app._get_current_object(), user=current_user)
    # Flask-Login clears the session.
    logout_user()
    flash(_('You have signed out successfully.'), 'success')
    # Honor ?next=, defaulting to the configured after-logout endpoint.
    destination = request.args.get('next', _endpoint_url(user_manager.after_logout_endpoint))
    return redirect(destination)
@login_required
@confirm_email_required
def manage_emails():
    """List the current user's email addresses and handle the add-email form.

    GET renders the manage-emails page; a valid POST adds the submitted
    address for the current user and redirects back to this view.
    """
    user_manager = current_app.user_manager
    db_adapter = user_manager.db_adapter
    user_emails = db_adapter.find_all_objects(db_adapter.UserEmailClass, user_id=int(current_user.get_id()))
    form = user_manager.add_email_form()
    # Process valid POST request
    if request.method=="POST" and form.validate():
        # Fix: bind the new record to its own name instead of clobbering
        # the `user_emails` list queried above.
        user_email = db_adapter.add_object(db_adapter.UserEmailClass,
                user_id=int(current_user.get_id()),
                email=form.email.data)
        db_adapter.commit()
        return redirect(url_for('user.manage_emails'))
    # Process GET or invalid POST request
    return render_template(user_manager.manage_emails_template,
            user_emails=user_emails,
            form=form,
            )
def register():
    """ Display registration form and create new User.

    Handles optional invitation tokens, splits form fields across the
    configured User/UserEmail/UserAuth classes, sends the 'registered'
    email, and finally redirects or auto-logs-in.
    """
    user_manager = current_app.user_manager
    db_adapter = user_manager.db_adapter
    next = request.args.get('next', _endpoint_url(user_manager.after_login_endpoint))
    reg_next = request.args.get('reg_next', _endpoint_url(user_manager.after_register_endpoint))
    # Initialize form
    login_form = user_manager.login_form()  # for login_or_register.html
    register_form = user_manager.register_form(request.form)  # for register.html
    # invite token used to determine validity of registeree
    invite_token = request.values.get("token")
    # require invite without a token should disallow the user from registering
    if user_manager.require_invitation and not invite_token:
        flash("Registration is invite only", "error")
        return redirect(url_for('user.login'))
    user_invite = None
    if invite_token and db_adapter.UserInvitationClass:
        user_invite = db_adapter.find_first_object(db_adapter.UserInvitationClass, token=invite_token)
        if user_invite:
            register_form.invite_token.data = invite_token
        else:
            flash("Invalid invitation token", "error")
            return redirect(url_for('user.login'))
    if request.method!='POST':
        # Seed hidden redirect fields (and invited email) on a GET.
        login_form.next.data = register_form.next.data = next
        login_form.reg_next.data = register_form.reg_next.data = reg_next
        if user_invite:
            register_form.email.data = user_invite.email
    # Process valid POST
    if request.method=='POST' and register_form.validate():
        # Create a User object using Form fields that have a corresponding User field
        User = db_adapter.UserClass
        user_class_fields = User.__dict__
        user_fields = {}
        # Create a UserEmail object using Form fields that have a corresponding UserEmail field
        if db_adapter.UserEmailClass:
            UserEmail = db_adapter.UserEmailClass
            user_email_class_fields = UserEmail.__dict__
            user_email_fields = {}
        # Create a UserAuth object using Form fields that have a corresponding UserAuth field
        if db_adapter.UserAuthClass:
            UserAuth = db_adapter.UserAuthClass
            user_auth_class_fields = UserAuth.__dict__
            user_auth_fields = {}
        # Enable user account, whichever 'active' attribute the model uses.
        if db_adapter.UserProfileClass:
            if hasattr(db_adapter.UserProfileClass, 'active'):
                user_auth_fields['active'] = True
            elif hasattr(db_adapter.UserProfileClass, 'is_enabled'):
                user_auth_fields['is_enabled'] = True
            else:
                user_auth_fields['is_active'] = True
        else:
            if hasattr(db_adapter.UserClass, 'active'):
                user_fields['active'] = True
            elif hasattr(db_adapter.UserClass, 'is_enabled'):
                user_fields['is_enabled'] = True
            else:
                user_fields['is_active'] = True
        # For all form fields
        for field_name, field_value in register_form.data.items():
            # Hash password field
            if field_name=='password':
                hashed_password = user_manager.hash_password(field_value)
                if db_adapter.UserAuthClass:
                    user_auth_fields['password'] = hashed_password
                else:
                    user_fields['password'] = hashed_password
            # Store corresponding Form fields into the User object and/or UserProfile object
            else:
                if field_name in user_class_fields:
                    user_fields[field_name] = field_value
                if db_adapter.UserEmailClass:
                    if field_name in user_email_class_fields:
                        user_email_fields[field_name] = field_value
                if db_adapter.UserAuthClass:
                    if field_name in user_auth_class_fields:
                        user_auth_fields[field_name] = field_value
        # Add User record using named arguments 'user_fields'
        user = db_adapter.add_object(User, **user_fields)
        if db_adapter.UserProfileClass:
            user_profile = user
        # Add UserEmail record using named arguments 'user_email_fields'
        if db_adapter.UserEmailClass:
            user_email = db_adapter.add_object(UserEmail,
                    user=user,
                    is_primary=True,
                    **user_email_fields)
        else:
            user_email = None
        # Add UserAuth record using named arguments 'user_auth_fields'
        if db_adapter.UserAuthClass:
            user_auth = db_adapter.add_object(UserAuth, **user_auth_fields)
            if db_adapter.UserProfileClass:
                user = user_auth
            else:
                user.user_auth = user_auth
        # Invited users registering with the invited address skip confirmation.
        require_email_confirmation = True
        if user_invite:
            if user_invite.email == register_form.email.data:
                require_email_confirmation = False
                db_adapter.update_object(user, confirmed_at=datetime.utcnow())
        db_adapter.commit()
        # Send 'registered' email and delete new User object if send fails
        if user_manager.send_registered_email:
            try:
                # Send 'registered' email
                _send_registered_email(user, user_email, require_email_confirmation)
            except Exception as e:
                # delete new User object if send fails
                db_adapter.delete_object(user)
                db_adapter.commit()
                raise
        # Send user_registered signal
        signals.user_registered.send(current_app._get_current_object(),
                user=user,
                user_invite=user_invite)
        # Redirect if USER_ENABLE_CONFIRM_EMAIL is set
        if user_manager.enable_confirm_email and require_email_confirmation:
            next = request.args.get('next', _endpoint_url(user_manager.after_register_endpoint))
            return redirect(next)
        # Auto-login after register or redirect to login page
        next = request.args.get('next', _endpoint_url(user_manager.after_confirm_endpoint))
        if user_manager.auto_login_after_register:
            return _do_login_user(user, reg_next)  # auto-login
        else:
            return redirect(url_for('user.login')+'?next='+reg_next)  # redirect to login page
    # Process GET or invalid POST
    return render_template(user_manager.register_template,
            form=register_form,
            login_form=login_form,
            register_form=register_form)
@login_required
def invite():
    """ Allows users to send invitations to register an account """
    user_manager = current_app.user_manager
    db_adapter = user_manager.db_adapter
    next = request.args.get('next',
            _endpoint_url(user_manager.after_invite_endpoint))
    invite_form = user_manager.invite_form(request.form)
    if request.method=='POST' and invite_form.validate():
        email = invite_form.email.data
        User = db_adapter.UserClass
        user_class_fields = User.__dict__
        user_fields = {
            "email": email
        }
        # Refuse to invite an address that already has an account.
        user, user_email = user_manager.find_user_by_email(email)
        if user:
            flash("User with that email has already registered", "error")
            return redirect(url_for('user.invite'))
        else:
            user_invite = db_adapter \
                .add_object(db_adapter.UserInvitationClass, **{
                    "email": email,
                    "invited_by_user_id": current_user.id
                })
        db_adapter.commit()
        # Build the tokenized registration link for the invitee.
        token = user_manager.generate_token(user_invite.id)
        accept_invite_link = url_for('user.register',
                token=token,
                _external=True)
        # Store token
        if hasattr(db_adapter.UserInvitationClass, 'token'):
            user_invite.token = token
            db_adapter.commit()
        try:
            # Send 'invite' email
            emails.send_invite_email(user_invite, accept_invite_link)
        except Exception as e:
            # delete new User object if send fails
            db_adapter.delete_object(user_invite)
            db_adapter.commit()
            raise
        signals \
            .user_sent_invitation \
            .send(current_app._get_current_object(), user_invite=user_invite,
                  form=invite_form)
        flash(_('Invitation has been sent.'), 'success')
        return redirect(next)
    return render_template(user_manager.invite_template, form=invite_form)
def resend_confirm_email():
    """Prompt for email and re-send the email confirmation email."""
    user_manager = current_app.user_manager
    db_adapter = user_manager.db_adapter
    # Initialize form
    form = user_manager.resend_confirm_email_form(request.form)
    # Process valid POST
    if request.method=='POST' and form.validate():
        email = form.email.data
        # Find user by email
        user, user_email = user_manager.find_user_by_email(email)
        if user:
            _send_confirm_email(user, user_email)
        # Redirect to the login page
        return redirect(_endpoint_url(user_manager.after_resend_confirm_email_endpoint))
    # Process GET or invalid POST
    return render_template(user_manager.resend_confirm_email_template, form=form)
def reset_password(token):
    """ Verify the password reset token, Prompt for new password, and set the user's password."""
    # Verify token
    user_manager = current_app.user_manager
    db_adapter = user_manager.db_adapter
    # A signed-in user following a reset link is signed out first.
    if _call_or_get(current_user.is_authenticated):
        logout_user()
    is_valid, has_expired, user_id = user_manager.verify_token(
        token,
        user_manager.reset_password_expiration)
    if has_expired:
        flash(_('Your reset password token has expired.'), 'error')
        return redirect(url_for('user.login'))
    if not is_valid:
        flash(_('Your reset password token is invalid.'), 'error')
        return redirect(url_for('user.login'))
    user = user_manager.get_user_by_id(user_id)
    if user:
        # Avoid re-using old tokens
        if hasattr(user, 'reset_password_token'):
            verified = user.reset_password_token == token
        else:
            verified = True
    if not user or not verified:
        flash(_('Your reset password token is invalid.'), 'error')
        return redirect(_endpoint_url(user_manager.login_endpoint))
    # Mark email as confirmed (a valid reset link proves inbox ownership).
    user_email = emails.get_primary_user_email(user)
    user_email.confirmed_at = datetime.utcnow()
    # Initialize form
    form = user_manager.reset_password_form(request.form, user=user)
    # Process valid POST
    if request.method=='POST' and form.validate():
        # Invalidate the token by clearing the stored token
        if hasattr(user, 'reset_password_token'):
            db_adapter.update_object(user, reset_password_token='')
        # Change password
        hashed_password = user_manager.hash_password(form.new_password.data)
        user_manager.update_password(user, hashed_password)
        # Send 'password_changed' email
        if user_manager.enable_email and user_manager.send_password_changed_email:
            emails.send_password_changed_email(user)
        # Prepare one-time system message
        flash(_("Your password has been reset successfully."), 'success')
        # Auto-login after reset password or redirect to login page
        next = request.args.get('next', _endpoint_url(user_manager.after_reset_password_endpoint))
        if user_manager.auto_login_after_reset_password:
            return _do_login_user(user, next)  # auto-login
        else:
            return redirect(url_for('user.login')+'?next='+next)  # redirect to login page
    # Process GET or invalid POST
    return render_template(user_manager.reset_password_template, form=form)
def unconfirmed():
    """Flash an 'unconfirmed email' message and redirect to the configured endpoint."""
    # Rebuild the URL the user tried to reach for the message.
    target = request.script_root + request.path
    flash(_("You must confirm your email to access '%(url)s'.", url=target), 'error')
    user_manager = current_app.user_manager
    return redirect(_endpoint_url(user_manager.unconfirmed_email_endpoint))
def unauthenticated():
    """Flash a 'must sign in' message and redirect, carrying the original URL as ?next=."""
    full_url = request.url
    flash(_("You must be signed in to access '%(url)s'.", url=full_url), 'error')
    user_manager = current_app.user_manager
    # Quote so the URL survives as a single query-string value.
    return redirect(_endpoint_url(user_manager.unauthenticated_endpoint)+'?next='+quote(full_url))
def unauthorized():
    """Flash a 'no permission' message and redirect to the configured endpoint."""
    target = request.script_root + request.path
    flash(_("You do not have permission to access '%(url)s'.", url=target), 'error')
    user_manager = current_app.user_manager
    return redirect(_endpoint_url(user_manager.unauthorized_endpoint))
@login_required
@confirm_email_required
def user_profile():
    """Render the user-profile page."""
    return render_template(current_app.user_manager.user_profile_template)
def _send_registered_email(user, user_email, require_email_confirmation=True):
    """Send the post-registration email and flash a one-time message.

    Args:
        user: The freshly registered user object.
        user_email: Optional UserEmail record; when None, ``user`` itself
            carries the email address and id.
        require_email_confirmation: When True (and confirm-email is enabled),
            tell the user a confirmation email was sent rather than flashing
            a plain success message.
    """
    user_manager = current_app.user_manager
    # NOTE(review): removed unused local `db_adapter` (fetched but never read).
    # Send 'confirm_email' or 'registered' email
    if user_manager.enable_email and user_manager.enable_confirm_email:
        # Generate confirm email link
        object_id = user_email.id if user_email else int(user.get_id())
        token = user_manager.generate_token(object_id)
        confirm_email_link = url_for('user.confirm_email', token=token, _external=True)
        # Send email
        emails.send_registered_email(user, user_email, confirm_email_link)
    # Prepare one-time system message
    if user_manager.enable_confirm_email and require_email_confirmation:
        email = user_email.email if user_email else user.email
        flash(_('A confirmation email has been sent to %(email)s with instructions to complete your registration.', email=email), 'success')
    else:
        flash(_('You have registered successfully.'), 'success')
def _send_confirm_email(user, user_email):
    """Send a confirm-email message (when enabled) and flash a notice.

    Args:
        user: The user object to confirm.
        user_email: Optional UserEmail record; when None, ``user`` itself
            carries the email address and id.
    """
    user_manager = current_app.user_manager
    # NOTE(review): removed unused local `db_adapter` (fetched but never read).
    # Send 'confirm_email' or 'registered' email
    if user_manager.enable_email and user_manager.enable_confirm_email:
        # Generate confirm email link
        object_id = user_email.id if user_email else int(user.get_id())
        token = user_manager.generate_token(object_id)
        confirm_email_link = url_for('user.confirm_email', token=token, _external=True)
        # Send email
        emails.send_confirm_email_email(user, user_email, confirm_email_link)
    # Prepare one-time system message
    email = user_email.email if user_email else user.email
    flash(_('A confirmation email has been sent to %(email)s with instructions to complete your registration.', email=email), 'success')
def _do_login_user(user, next, remember_me=False):
    """Sign in `user` via Flask-Login and redirect to `next`.

    Runs the pre-login checks (account enabled; email confirmed unless
    login-without-confirmation is allowed), emits the user_logged_in
    signal and flashes a success message. `next` is assumed to be a safe
    redirect target validated by the caller.
    """
    # User must have been authenticated
    if not user: return unauthenticated()
    # Check if user account has been disabled
    if not _call_or_get(user.is_active):
        flash(_('Your account has not been enabled.'), 'error')
        return redirect(url_for('user.login'))
    # Check if user has a confirmed email address
    user_manager = current_app.user_manager
    if user_manager.enable_email and user_manager.enable_confirm_email \
            and not current_app.user_manager.enable_login_without_confirm_email \
            and not user.has_confirmed_email():
        url = url_for('user.resend_confirm_email')
        flash(_('Your email address has not yet been confirmed. Check your email Inbox and Spam folders for the confirmation email or <a href="%(url)s">Re-send confirmation email</a>.', url=url), 'error')
        return redirect(url_for('user.login'))
    # Use Flask-Login to sign in user
    #print('login_user: remember_me=', remember_me)
    login_user(user, remember=remember_me)
    # Send user_logged_in signal
    signals.user_logged_in.send(current_app._get_current_object(), user=user)
    # Prepare one-time system message
    flash(_('You have signed in successfully.'), 'success')
    # Redirect to 'next' URL
    return redirect(next)
def _endpoint_url(endpoint):
    """Return the URL for *endpoint*, falling back to '/' when unset."""
    if not endpoint:
        return '/'
    return url_for(endpoint)
| |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base import serialize
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class SyncListItemTestCase(IntegrationTestCase):
    """Generated integration tests for the Preview Sync SyncListItem resource.

    Each *_request test mocks a 500 response (forcing a TwilioException) and
    then asserts the exact HTTP request the client issued against the mock
    Holodeck transport; each *_response test mocks a canned payload and
    asserts the client deserializes it without error.
    """
    def test_fetch_request(self):
        # Any mocked response status triggers the request; 500 raises.
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .sync_list_items(1).fetch()
        self.holodeck.assert_has_request(Request(
            'get',
            'https://preview.twilio.com/Sync/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Lists/ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Items/1',
        ))
    def test_fetch_response(self):
        self.holodeck.mock(Response(
            200,
            '''
                {
                    "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                    "created_by": "created_by",
                    "data": {},
                    "date_created": "2015-07-30T20:00:00Z",
                    "date_updated": "2015-07-30T20:00:00Z",
                    "index": 100,
                    "list_sid": "ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                    "revision": "revision",
                    "service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                    "url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Items/100"
                }
                '''
        ))
        actual = self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                         .sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                         .sync_list_items(1).fetch()
        self.assertIsNotNone(actual)
    def test_delete_request(self):
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .sync_list_items(1).delete(if_match="if_match")
        # The optional if_match argument must be sent as an If-Match header.
        headers = {'If-Match': "if_match", }
        self.holodeck.assert_has_request(Request(
            'delete',
            'https://preview.twilio.com/Sync/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Lists/ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Items/1',
            headers=headers,
        ))
    def test_delete_response(self):
        # Successful delete returns 204 No Content; the client maps it to True.
        self.holodeck.mock(Response(
            204,
            None,
        ))
        actual = self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                         .sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                         .sync_list_items(1).delete()
        self.assertTrue(actual)
    def test_create_request(self):
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .sync_list_items.create(data={})
        # The data dict is serialized into the form-encoded 'Data' parameter.
        values = {'Data': serialize.object({}), }
        self.holodeck.assert_has_request(Request(
            'post',
            'https://preview.twilio.com/Sync/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Lists/ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Items',
            data=values,
        ))
    def test_create_response(self):
        self.holodeck.mock(Response(
            201,
            '''
                {
                    "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                    "created_by": "created_by",
                    "data": {},
                    "date_created": "2015-07-30T20:00:00Z",
                    "date_updated": "2015-07-30T20:00:00Z",
                    "index": 100,
                    "list_sid": "ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                    "revision": "revision",
                    "service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                    "url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Items/100"
                }
                '''
        ))
        actual = self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                         .sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                         .sync_list_items.create(data={})
        self.assertIsNotNone(actual)
    def test_list_request(self):
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .sync_list_items.list()
        self.holodeck.assert_has_request(Request(
            'get',
            'https://preview.twilio.com/Sync/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Lists/ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Items',
        ))
    def test_read_empty_response(self):
        # A valid page with zero items must still deserialize to a (empty) list.
        self.holodeck.mock(Response(
            200,
            '''
                {
                    "items": [],
                    "meta": {
                        "first_page_url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Items?From=from&Bounds=inclusive&Order=asc&PageSize=50&Page=0",
                        "key": "items",
                        "next_page_url": null,
                        "page": 0,
                        "page_size": 50,
                        "previous_page_url": null,
                        "url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Items?From=from&Bounds=inclusive&Order=asc&PageSize=50&Page=0"
                    }
                }
                '''
        ))
        actual = self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                         .sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                         .sync_list_items.list()
        self.assertIsNotNone(actual)
    def test_read_full_response(self):
        self.holodeck.mock(Response(
            200,
            '''
                {
                    "items": [
                        {
                            "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                            "created_by": "created_by",
                            "data": {},
                            "date_created": "2015-07-30T20:00:00Z",
                            "date_updated": "2015-07-30T20:00:00Z",
                            "index": 100,
                            "list_sid": "ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                            "revision": "revision",
                            "service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                            "url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Items/100"
                        }
                    ],
                    "meta": {
                        "first_page_url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Items?From=from&Bounds=inclusive&Order=asc&PageSize=50&Page=0",
                        "key": "items",
                        "next_page_url": null,
                        "page": 0,
                        "page_size": 50,
                        "previous_page_url": null,
                        "url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Items?From=from&Bounds=inclusive&Order=asc&PageSize=50&Page=0"
                    }
                }
                '''
        ))
        actual = self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                         .sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                         .sync_list_items.list()
        self.assertIsNotNone(actual)
    def test_update_request(self):
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                    .sync_list_items(1).update(data={}, if_match="if_match")
        values = {'Data': serialize.object({}), }
        headers = {'If-Match': "if_match", }
        # Asserted separately: once for the If-Match header, once for the body.
        self.holodeck.assert_has_request(Request(
            'post',
            'https://preview.twilio.com/Sync/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Lists/ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Items/1',
            headers=headers,
        ))
        self.holodeck.assert_has_request(Request(
            'post',
            'https://preview.twilio.com/Sync/Services/ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Lists/ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Items/1',
            data=values,
        ))
    def test_update_response(self):
        self.holodeck.mock(Response(
            200,
            '''
                {
                    "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                    "created_by": "created_by",
                    "data": {},
                    "date_created": "2015-07-30T20:00:00Z",
                    "date_updated": "2015-07-30T20:00:00Z",
                    "index": 100,
                    "list_sid": "ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                    "revision": "revision",
                    "service_sid": "ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                    "url": "https://preview.twilio.com/Sync/Services/ISaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Lists/ESaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Items/100"
                }
                '''
        ))
        actual = self.client.preview.sync.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                         .sync_lists("ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                         .sync_list_items(1).update(data={})
        self.assertIsNotNone(actual)
| |
import sys
try:
from collections.abc import Mapping, MutableMapping
except ImportError:
from collections import Mapping, MutableMapping
PY3 = sys.version_info >= (3, 0)
class HTTPHeaderDict(MutableMapping):
"""
:param headers:
An iterable of field-value pairs. Must not contain multiple field names
when compared case-insensitively.
:param kwargs:
Additional field-value pairs to pass in to ``dict.update``.
A ``dict`` like container for storing HTTP Headers.
Field names are stored and compared case-insensitively in compliance with
RFC 7230. Iteration provides the first case-sensitive key seen for each
case-insensitive pair.
Using ``__setitem__`` syntax overwrites fields that compare equal
case-insensitively in order to maintain ``dict``'s api. For fields that
compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
in a loop.
If multiple fields that are equal case-insensitively are passed to the
constructor or ``.update``, the behavior is undefined and some will be
lost.
Usage::
headers = HTTPHeaderDict()
headers.add('Set-Cookie', 'foo=bar')
headers.add('set-cookie', 'baz=quxx')
headers['content-length'] = '7'
headers['SET-cookie']
> 'foo=bar, baz=quxx'
headers['Content-Length']
> '7'
"""
def __init__(self, headers=None, **kwargs):
super(HTTPHeaderDict, self).__init__()
self._container = {}
if headers is not None:
if isinstance(headers, HTTPHeaderDict):
self._copy_from(headers)
else:
self.extend(headers)
if kwargs:
self.extend(kwargs)
def __setitem__(self, key, val):
self._container[key.lower()] = (key, val)
return self._container[key.lower()]
def __getitem__(self, key):
val = self._container[key.lower()]
return ', '.join(val[1:])
def __delitem__(self, key):
del self._container[key.lower()]
def __contains__(self, key):
return key.lower() in self._container
def __eq__(self, other):
if not isinstance(other, Mapping) and not hasattr(other, 'keys'):
return False
if not isinstance(other, type(self)):
other = type(self)(other)
return (dict((k.lower(), v) for k, v in self.itermerged()) ==
dict((k.lower(), v) for k, v in other.itermerged()))
def __ne__(self, other):
return not self.__eq__(other)
if not PY3: # Python 2
iterkeys = MutableMapping.iterkeys
itervalues = MutableMapping.itervalues
__marker = object()
def __len__(self):
return len(self._container)
def __iter__(self):
# Only provide the originally cased names
for vals in self._container.values():
yield vals[0]
def pop(self, key, default=__marker):
"""
D.pop(k[,d]) -> v, remove specified key and return the
corresponding value.
If key is not found, d is returned if given, otherwise KeyError
is raised.
"""
# Using the MutableMapping function directly fails due to the
# private marker. Using ordinary dict.pop would expose the
# internal structures. So let's reinvent the wheel.
try:
value = self[key]
except KeyError:
if default is self.__marker:
raise
return default
else:
del self[key]
return value
def discard(self, key):
try:
del self[key]
except KeyError:
pass
def add(self, key, val):
"""
Adds a (name, value) pair, doesn't overwrite the value if it already
exists.
Usage::
headers = HTTPHeaderDict(foo='bar')
headers.add('Foo', 'baz')
headers['foo']
> 'bar, baz'
"""
key_lower = key.lower()
new_vals = key, val
# Keep the common case aka no item present as fast as possible
vals = self._container.setdefault(key_lower, new_vals)
if new_vals is not vals:
# new_vals was not inserted, as there was a previous one
if isinstance(vals, list):
# If already several items got inserted, we have a list
vals.append(val)
else:
# vals should be a tuple then, i.e. only one item so far
# Need to convert the tuple to list for further extension
self._container[key_lower] = [vals[0], vals[1], val]
def set(self, key, val):
"""
Sets a header field with the given value, removing
previous values.
Usage::
headers = HTTPHeaderDict(foo='bar')
headers.set('Foo', 'baz')
headers['foo']
> 'baz'
"""
key_lower = key.lower()
new_vals = key, val
# Keep the common case aka no item present as fast as possible
vals = self._container.setdefault(key_lower, new_vals)
if new_vals is not vals:
self._container[key_lower] = [vals[0], vals[1], val]
def extend(self, *args, **kwargs):
"""
Generic import function for any type of header-like object.
Adapted version of MutableMapping.update in order to insert items
with self.add instead of self.__setitem__
"""
if len(args) > 1:
raise TypeError("extend() takes at most 1 positional "
"arguments ({0} given)".format(len(args)))
other = args[0] if len(args) >= 1 else ()
if isinstance(other, HTTPHeaderDict):
for key, val in other.iteritems():
self.add(key, val)
elif isinstance(other, Mapping):
for key in other:
self.add(key, other[key])
elif hasattr(other, "keys"):
for key in other.keys():
self.add(key, other[key])
else:
for key, value in other:
self.add(key, value)
for key, value in kwargs.items():
self.add(key, value)
def getlist(self, key):
"""
Returns a list of all the values for the named field.
Returns an empty list if the key doesn't exist.
"""
try:
vals = self._container[key.lower()]
except KeyError:
return []
else:
if isinstance(vals, tuple):
return [vals[1]]
else:
return vals[1:]
# Backwards compatibility for httplib
getheaders = getlist
getallmatchingheaders = getlist
iget = getlist
def __repr__(self):
return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))
def _copy_from(self, other):
for key in other:
val = other.getlist(key)
if isinstance(val, list):
# Don't need to convert tuples
val = list(val)
self._container[key.lower()] = [key] + val
def copy(self):
clone = type(self)()
clone._copy_from(self)
return clone
def iteritems(self):
"""
Iterate over all header lines, including duplicate ones.
"""
for key in self:
vals = self._container[key.lower()]
for val in vals[1:]:
yield vals[0], val
def itermerged(self):
"""
Iterate over all headers, merging duplicate ones together.
"""
for key in self:
val = self._container[key.lower()]
yield val[0], ', '.join(val[1:])
def items(self):
return list(self.iteritems())
def to_dict(self):
return {key: values for key, values in self.items()}
@classmethod
def from_httplib(cls, message): # Python 2
"""
Read headers from a Python 2 httplib message object.
"""
# python2.7 does not expose a proper API for exporting multiheaders
# efficiently. This function re-reads raw lines from the message
# object and extracts the multiheaders properly.
headers = []
for line in message.headers:
if line.startswith((' ', '\t')):
key, value = headers[-1]
headers[-1] = (key, value + '\r\n' + line.rstrip())
continue
key, value = line.split(':', 1)
headers.append((key, value.strip()))
return cls(headers)
| |
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .common import BaseTest
from c7n.executor import MainThreadExecutor
from c7n.resources.appelb import AppELB, AppELBTargetGroup
class AppELBTest(BaseTest):
    """Tests for the `app-elb` (Application Load Balancer) Custodian resource.

    Each test replays recorded AWS API flight data, runs a policy built from
    an inline dict, and asserts how many load balancers matched.  The
    executor is patched to MainThreadExecutor so the replay is deterministic
    and debuggable.
    """
    def test_appelb_simple(self):
        # Unfiltered policy: every ALB in the flight data should match.
        self.patch(AppELB, 'executor_factory', MainThreadExecutor)
        session_factory = self.replay_flight_data('test_appelb_simple')
        p = self.load_policy({
            'name': 'appelb-simple',
            'resource': 'app-elb'},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 2)
    def test_appelb_simple_filter(self):
        # Value filter on LoadBalancerName narrows to a single ALB.
        self.patch(AppELB, 'executor_factory', MainThreadExecutor)
        session_factory = self.replay_flight_data('test_appelb_simple')
        p = self.load_policy({
            'name': 'appelb-simple-filter',
            'resource': 'app-elb',
            'filters': [
                {'type': 'value',
                 'key': 'LoadBalancerName',
                 'value': 'alb-1'}]},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
    def test_appelb_default_vpc_filter(self):
        self.patch(AppELB, 'executor_factory', MainThreadExecutor)
        session_factory = self.replay_flight_data('test_appelb_default_vpc')
        p = self.load_policy({
            'name': 'appelb-default-vpc',
            'resource': 'app-elb',
            'filters': [{'type': 'default-vpc'}]},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
    def test_appelb_tags_filter(self):
        # Same flight data, two tag predicates: one matches, one doesn't.
        self.patch(AppELB, 'executor_factory', MainThreadExecutor)
        session_factory = self.replay_flight_data('test_appelb_simple')
        p = self.load_policy({
            'name': 'appelb-tags-filter',
            'resource': 'app-elb',
            'filters': [{"tag:KEY1": "VALUE1"}]},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
        p = self.load_policy({
            'name': 'appelb-tags-filter',
            'resource': 'app-elb',
            'filters': [{"tag:KEY1": "VALUE2"}]},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 0)
    def test_appelb_is_https_filter(self):
        # Listener filter with a JMESPath expression counting HTTPS listeners.
        self.patch(AppELB, 'executor_factory', MainThreadExecutor)
        session_factory = self.replay_flight_data('test_appelb_is_https')
        p = self.load_policy({
            'name': 'appelb-is-https-filter',
            'resource': 'app-elb',
            'filters': [
                {'type': 'listener', 'key': "length([?Protocol=='HTTPS'])", 'value': 1, 'op': 'gte'}
            ]},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 0)
    def test_appelb_target_group_filter(self):
        self.patch(AppELB, 'executor_factory', MainThreadExecutor)
        session_factory = self.replay_flight_data('test_appelb_instance_count_non_zero')
        p = self.load_policy({
            'name': 'appelb-target-group-filter',
            'resource': 'app-elb',
            'filters': [
                {'type': 'target-group', 'key': "length([?Protocol=='HTTP'])", 'value': 1, 'op': 'eq'}
            ]},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 2)
    def test_appelb_instance_count_filter_zero(self):
        # ALBs whose target groups have no registered targets at all.
        self.patch(AppELB, 'executor_factory', MainThreadExecutor)
        session_factory = self.replay_flight_data('test_appelb_instance_count_zero')
        p = self.load_policy({
            'name': 'appelb-instance-count-filter-zero',
            'resource': 'app-elb',
            'filters': [
                {'type': 'target-group', 'key': "max([].length(TargetHealthDescriptions))", 'value': 0, 'op': 'eq'}
            ]},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 2)
    def test_appelb_instance_count_filter_non_zero(self):
        self.patch(AppELB, 'executor_factory', MainThreadExecutor)
        session_factory = self.replay_flight_data('test_appelb_instance_count_non_zero')
        p = self.load_policy({
            'name': 'appelb-instance-count-filter-non-zero',
            'resource': 'app-elb',
            'filters': [
                {'type': 'target-group', 'key': "max([].length(TargetHealthDescriptions))", 'value': 0, 'op': 'gt'}
            ]},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 2)
    def test_appelb_add_tag(self):
        # Tag action: policy run is asserted only on match count; the tag call
        # itself is verified by the recorded flight data replay.
        self.patch(AppELB, 'executor_factory', MainThreadExecutor)
        session_factory = self.replay_flight_data('test_appelb_add_tag')
        p = self.load_policy({
            'name': 'appelb-add-tag',
            'resource': 'app-elb',
            'filters': [
                {'type': 'value',
                 'key': 'LoadBalancerName',
                 'value': 'alb-1'}],
            'actions': [
                {'type': 'tag', 'key': 'KEY42', 'value': 'VALUE99'}]
            },
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
    def test_appelb_remove_tag(self):
        self.patch(AppELB, 'executor_factory', MainThreadExecutor)
        session_factory = self.replay_flight_data('test_appelb_remove_tag')
        p = self.load_policy({
            'name': 'appelb-remove-tag',
            'resource': 'app-elb',
            'filters': [
                {'type': 'value',
                 'key': 'LoadBalancerName',
                 'value': 'alb-1'}],
            'actions': [
                {'type': 'remove-tag', 'tags': ['KEY42']}]
            },
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
    def test_appelb_mark_for_delete(self):
        # mark-for-op tags the ALB for deferred deletion in one day.
        self.patch(AppELB, 'executor_factory', MainThreadExecutor)
        session_factory = self.replay_flight_data('test_appelb_mark_for_delete')
        p = self.load_policy({
            'name': 'appelb-mark-for-delete',
            'resource': 'app-elb',
            'filters': [
                {'type': 'value',
                 'key': 'LoadBalancerName',
                 'value': 'alb-1'}],
            'actions': [
                {'type': 'mark-for-op', 'op': 'delete',
                 'tag': 'custodian_next', 'days': 1}]
            },
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
    def test_appelb_delete(self):
        self.patch(AppELB, 'executor_factory', MainThreadExecutor)
        session_factory = self.replay_flight_data('test_appelb_delete')
        p = self.load_policy({
            'name': 'appelb-delete',
            'resource': 'app-elb',
            'filters': [
                {'type': 'value',
                 'key': 'LoadBalancerName',
                 'value': 'alb-2'}],
            'actions': [
                {'type': 'delete'}]},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
class AppELBHealthcheckProtocolMismatchTest(BaseTest):
    """Tests for the `healthcheck-protocol-mismatch` ALB filter: ALBs whose
    target-group health check protocol differs from the listener protocol."""
    def test_appelb_healthcheck_protocol_mismatch_filter_good(self):
        # Flight data where protocols agree: nothing should match.
        self.patch(AppELB, 'executor_factory', MainThreadExecutor)
        session_factory = self.replay_flight_data('test_appelb_healthcheck_protocol_mismatch_good')
        p = self.load_policy({
            'name': 'appelb-healthcheck-protocol-mismatch-good',
            'resource': 'app-elb',
            'filters': ['healthcheck-protocol-mismatch']},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 0)
    def test_appelb_healthcheck_protocol_mismatch_filter_bad(self):
        # Flight data with mismatched protocols: both ALBs should match.
        self.patch(AppELB, 'executor_factory', MainThreadExecutor)
        session_factory = self.replay_flight_data('test_appelb_healthcheck_protocol_mismatch_bad')
        p = self.load_policy({
            'name': 'appelb-healthcheck-protocol-mismatch-bad',
            'resource': 'app-elb',
            'filters': ['healthcheck-protocol-mismatch']},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 2)
class AppELBTargetGroupTest(BaseTest):
    """Tests for the `app-elb-target-group` Custodian resource."""
    def test_appelb_target_group_simple(self):
        # Unfiltered policy: every target group in the flight data matches.
        self.patch(AppELBTargetGroup, 'executor_factory', MainThreadExecutor)
        session_factory = self.replay_flight_data('test_appelb_target_group_simple')
        p = self.load_policy({
            'name': 'appelb-target-group-simple',
            'resource': 'app-elb-target-group'},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 2)
    def test_appelb_target_group_simple_filter(self):
        # Value filter on Port narrows to the HTTPS target group.
        self.patch(AppELBTargetGroup, 'executor_factory', MainThreadExecutor)
        session_factory = self.replay_flight_data('test_appelb_target_group_simple')
        p = self.load_policy({
            'name': 'appelb-target-group-simple-filter',
            'resource': 'app-elb-target-group',
            'filters': [
                {'type': 'value',
                 'key': 'Port',
                 'value': 443}]},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
    def test_appelb_target_group_default_vpc(self):
        self.patch(AppELBTargetGroup, 'executor_factory', MainThreadExecutor)
        session_factory = self.replay_flight_data('test_appelb_target_group_default_vpc')
        p = self.load_policy({
            'name': 'appelb-target-group-default-vpc',
            'resource': 'app-elb-target-group',
            'filters': [{'type': 'default-vpc'}]},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
| |
#!/usr/bin/python2.7
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Template objects which represent data types.
This module contains objects which usable in templates and represent data type
idioms.
"""
__author__ = 'aiuto@google.com (Tony Aiuto)'
from googleapis.codegen import template_objects
class DataType(template_objects.CodeObject):
  """Template object which represents a data type.
  This is the base class for things which might be data type definitions, such
  as Schema objects derived from JSONSchema blocks or primitive types.
  """
  def __init__(self, def_dict, api, parent=None, language_model=None):
    """Construct a DataType.
    Args:
      def_dict: (dict) The discovery dictionary for this element.
      api: (Api) The Api instance which owns this element.
      parent: (CodeObject) The parent of this element.
      language_model: (LanguageModel) The language we are targeting.
        Dynamically defaults to the parent's language model.
    """
    super(DataType, self).__init__(def_dict, api, parent=parent,
                                   language_model=language_model)
    # Raw JSON Schema 'type' value (e.g. 'string', 'object'); exposed via
    # the json_type property below.
    self._json_type = def_dict.get('type')
    # If the schema has an id, that is a good default class name for it.
    schema_id = def_dict.get('id')
    if schema_id:
      self.SetTemplateValue('className', schema_id)
    # Top level primitive and container classes end up as known schema objects,
    # because some languages will want to generate definitions for them. Thus
    # they need a module. ComplexDataType instances might set a different
    # module in their constructor. The check for the api is to facilitate
    # unit tests, which sometimes create types without any API.
    # TODO(user): Do not set _module here. Let it be dynamically found from
    # the parent when needed. But, there is currently a problem with
    # BuildSchemaDefinitions where the top level schemas do not have a parent
    # because we are calling 'self.DataTypeFromJson(def_dict, name)' without
    # the parent parameter. If we add parent there, all hell seems to break.
    # Fix that and remove this.
    if self.api and hasattr(self.api, 'model_module'):
      self._module = self.api.model_module
    # Set some specific annotations as template values.
    annotations = def_dict.get('annotations')
    if annotations:
      required = annotations.get('required')
      self.SetTemplateValue('required_for_methods', required)
  @property
  def json_type(self):
    """Expose the type element from the JSON Schema type definition."""
    return self._json_type
  @property
  def code_type(self):
    """Returns the string representing this datatype."""
    # Prefer an explicit codeType override; fall back to the class name.
    return self.values.get('codeType') or self.values.get('className')
  @property
  def safe_code_type(self):
    """Returns the safe code type representing this datatype."""
    return self.safeClassName or self.code_type
  @property
  def primitive_data_type(self):
    """Returns the language specific primitive representing this datatype."""
    # Base types are not primitives; PrimitiveDataType overrides this.
    return None
  @property
  def class_name(self):
    return self.GetTemplateValue('className')
  @property
  def safeClassName(self):  # pylint: disable=g-bad-name
    """Returns a language appropriate name for this object.
    This property should only be used during template expansion. It is computed
    once, using the LanguageModel in play, and then that value is cached.
    Returns:
      (str) a name for an instance of this object.
    """
    safe_class_name = self.GetTemplateValue('safe_class_name')
    if not safe_class_name:
      # Derive from the wire name; no wire name means no safe name either.
      safe_class_name = self.values.get('wireName')
      if not safe_class_name:
        return None
      if self.language_model:
        safe_class_name = self.language_model.ToSafeClassName(
            safe_class_name, self._api, self._parent)
      # Cache for subsequent template expansions.
      self.SetTemplateValue('safeClassName', safe_class_name)
    return safe_class_name
class PrimitiveDataType(DataType):
  """DataType which represents a "built in" data type.
  Primitive types are those which are provided by the language or one of its
  packages, rather than those defined by the API. A language specific
  generator should annotate PrimitiveDataType objects with a specific codeType
  before using them to generate code.
  """
  def __init__(self, def_dict, api, parent=None):
    """Construct a PrimitiveDataType.
    Args:
      def_dict: (dict) The discovery dictionary for this element.
      api: (Api) The Api instance which owns this element.
      parent: (TemplateObject) The parent of this object.
    """
    super(PrimitiveDataType, self).__init__(def_dict, api, parent=parent)
    # Mark primitives so templates can skip generating definitions for them.
    self.SetTemplateValue('builtIn', True)
    self.SetTemplateValue('isScalar', True)
  @property
  def class_name(self):
    # Primitives have no generated class; use the code type as the name.
    return self.code_type
  @property
  def fullClassName(self):  # pylint: disable=g-bad-name
    """Override the TemplateObject path chaining."""
    return self.code_type
  @property
  def code_type(self):
    """Returns the language specific type representing this datatype."""
    # Precedence: explicit override, then language model mapping, then the
    # raw JSON Schema 'type' value.
    user_override = self.values.get('codeType')
    if user_override:
      return user_override
    if self.language_model:
      s = self.language_model.GetCodeTypeFromDictionary(self._def_dict)
      return s
    return self.values.get('type')
  @property
  def safe_code_type(self):
    """Returns the safe code type representing this datatype."""
    return self.code_type
  @property
  def primitive_data_type(self):
    """Returns the language specific type representing this datatype."""
    if self.language_model:
      s = self.language_model.GetPrimitiveTypeFromDictionary(self._def_dict)
      return s
    return None
  @property
  def json_format(self):
    """Expose the format element from the JSON Schema type definition."""
    return self.values.get('format')
class ComplexDataType(DataType):
  """A DataType which requires a definition: that is, not primitive.
  ComplexDataTypes are structured objects and containers of objects.
  """
  def __init__(self, default_name, def_dict, api, parent=None,
               language_model=None, wire_name=None):
    """Construct a ComplexDataType.
    Args:
      default_name: (str) The name to give this type if there is no 'id' in
        the default dict.
      def_dict: (dict) The discovery dictionary for this element.
      api: (Api) The Api instance which owns this element.
      parent: (CodeObject) The parent of this element.
      language_model: (LanguageModel) The language we are targeting.
        Dynamically defaults to the parent's language model.
      wire_name: (str) The identifier used in the wire protocol for this object.
    Raises:
      ValueError: if there is no identifying name for this object.
    """
    super(ComplexDataType, self).__init__(def_dict, api, parent=parent,
                                          language_model=language_model)
    # Name precedence: schema id, then the caller-supplied default, then the
    # wire name. At least one must exist.
    name = def_dict.get('id') or default_name or wire_name
    if not name:
      raise ValueError(
          'Complex data types must have an id or be assigned a name: %s' %
          def_dict)
    self.SetTemplateValue('wireName', wire_name or name)
  @property
  def code_type(self):
    """Returns the string representing this datatype."""
    return self.values.get('codeType') or self.className
  @property
  def safe_code_type(self):
    """Returns the safe code type representing this datatype."""
    return self.safeClassName or self.code_type
  @property
  def class_name(self):
    return self.values.get('className')
  @property
  def className(self):  # pylint: disable=g-bad-name
    # Prefer the explicit class name; fall back to the language-safe name.
    return self.class_name or self.safeClassName
class ContainerDataType(ComplexDataType):
  """Superclass for all DataTypes which represent containers."""

  def __init__(self, name, base_type, parent=None, wire_name=None):
    """Construct a ContainerDataType.

    Args:
      name: (str) The name to give this type if there is no 'id' in
        the default dict.
      base_type: (DataType) The DataType this container holds.
      parent: (TemplateObject) The parent of this object.
      wire_name: (str) The identifier used in the wire protocol for this
        object.
    """
    # Access to protected _language_model OK here. pylint: disable=protected-access
    super(ContainerDataType, self).__init__(
        name, {}, base_type.api, parent=parent,
        language_model=base_type._language_model,
        wire_name=wire_name)
    self._base_type = base_type
    self.SetTemplateValue('isContainer', True)
    self.SetTemplateValue('baseType', base_type)
    self.SetTemplateValue('builtIn', True)
    # TODO(user): This gets parenting right so language models propagate down.
    # We should invert the computation of code_type so we ask the language
    # model for code type of a primitive.
    # Re-parent primitive element types so they pick up this container's
    # language model chain.
    if isinstance(base_type, PrimitiveDataType):
      self._base_type.SetParent(self)
class ArrayDataType(ContainerDataType):
  """DataType which represents an array of another DataType."""

  def __init__(self, name, base_type, parent=None, wire_name=None):
    """Construct an ArrayDataType.

    Args:
      name: (str) The name to give this type.
      base_type: (DataType) The element type of the array.
      parent: (TemplateObject) The parent of this object.
      wire_name: (str) The identifier used in the wire protocol for this
        object.
    """
    super(ArrayDataType, self).__init__(name, base_type, parent=parent,
                                        wire_name=wire_name)
    self._json_type = 'array'
    self.SetTemplateValue('arrayOf', base_type)

  @property
  def code_type(self):
    """Language specific spelling of 'array of element type'.

    Note: should only be called after the language model is set.
    """
    element = self._base_type
    return self.language_model.ArrayOf(element, element.code_type)

  @property
  def safe_code_type(self):
    """Like code_type, but built from the sanitized element type."""
    element = self._base_type
    return self.language_model.ArrayOf(element, element.safe_code_type)
class MapDataType(ContainerDataType):
  """DataType which represents a map of string to another DataType."""

  def __init__(self, name, base_type, parent=None, wire_name=None):
    """Construct a MapDataType.

    Args:
      name: (str) The name to give this type.
      base_type: (DataType) The value type of the map.
      parent: (TemplateObject) The parent of this object.
      wire_name: (str) The identifier used in the wire protocol for this
        object.
    """
    super(MapDataType, self).__init__(name, base_type, parent=parent,
                                      wire_name=wire_name)
    self._json_type = 'map'
    self.SetTemplateValue('mapOf', base_type)

  @property
  def code_type(self):
    """Language specific spelling of 'map of string to value type'.

    Note: should only be called after the language model is set.
    """
    value_type = self._base_type
    return self.language_model.MapOf(value_type, value_type.code_type)

  @property
  def safe_code_type(self):
    """Like code_type, but built from the sanitized value type."""
    value_type = self._base_type
    return self.language_model.MapOf(value_type, value_type.safe_code_type)
class SchemaReference(DataType):
  """DataType which represents a type alias to named schema.

  Provides a lazy reference to schema by name: resolution happens at
  property-access time, so it is safe before the whole API is parsed.
  """

  def __init__(self, referenced_schema_name, api):
    """Construct a SchemaReference.

    Args:
      referenced_schema_name: (str) The name of the schema we are referencing.
      api: (Api) The Api instance which owns this element.

    Returns:
      SchemaReference
    """
    super(SchemaReference, self).__init__({}, api)
    self._referenced_schema_name = referenced_schema_name
    self.SetTemplateValue('className', referenced_schema_name)
    self.SetTemplateValue('wireName', referenced_schema_name)
    self.SetTemplateValue('reference', True)

  # TODO(user): 20130227
  # I thought there was another way to do this, but I don't remember
  # right now. This feels like something we should do after parsing all
  # the schemas, so that we can resolve in one pass and not worry about
  # loading order.
  @property
  def referenced_schema(self):
    """Returns the concrete schema being referenced by this instance.

    Follows chains of references until a non-reference data type is found.
    """
    data_type = self
    while isinstance(data_type, SchemaReference):
      # pylint: disable=protected-access
      data_type = data_type.api.SchemaByName(data_type._referenced_schema_name)
    return data_type

  @property
  def values(self):
    """Forwards the 'values' property of this object to the referenced object.

    This enables GetTemplateValue called on a Ref to effectively return
    the value for the truly desired schema.

    This may be safely called at any time, but may not produce expected
    results until after the entire API has been parsed. In practice, this
    means that anything done during template expansion is fine.

    Returns:
      dict of values which can be used in template.
    """
    s = self.referenced_schema
    if s:
      return s.values
    # Unresolvable reference: fall back to our own (empty) definition.
    return self._def_dict

  @property
  def code_type(self):
    """Returns the string representing the datatype of this variable."""
    s = self.referenced_schema
    if s:
      return s.code_type
    return self._def_dict.get('codeType') or self._def_dict.get('className')

  @property
  def safe_code_type(self):  # pylint: disable=g-bad-name
    # An unresolvable $ref renders as a visible sentinel rather than crashing.
    if not self.referenced_schema:
      return '<bad $ref>'
    return self.referenced_schema.safe_code_type

  @property
  def parent(self):
    """Returns the parent of the schema I reference."""
    return self.referenced_schema.parent

  @property
  def module(self):
    """Returns the module of the schema I reference."""
    return self.referenced_schema.module

  def __str__(self):
    return '<SchemaReference to %s>' % self.code_type
class Void(PrimitiveDataType):
  """DataType which represents a 'void'.

  Some API methods have no response. To provide some consistency in
  assigning a responseType to these methods, we use the Void data type.
  When referenced in a template, it forwards requests for its code_type
  to a language model specific emitter.
  """

  def __init__(self, api):
    """Construct a Void.

    Args:
      api: (Api) The Api instance which owns this element. Used as the
        parent so the language model can be found at template generation
        time.

    Returns:
      Void
    """
    super(Void, self).__init__({}, api, parent=api)
    self.SetTemplateValue('isVoid', True)

  @property
  def code_type(self):
    """Return the language-specific spelling of 'void'."""
    model = self.language_model
    if model:
      return model.CodeTypeForVoid()
    return 'void'
class Enum(PrimitiveDataType):
  """The definition of an Enum.

  Example enum in discovery.
    "enum": [
        "@comments",
        "@consumption",
        "@liked",
        "@public",
        "@self"
      ],
    "enumDescriptions": [
        "Limit to activities commented on by the user.",
        "Limit to activities to be consumed by the user.",
        "Limit to activities liked by the user.",
        "Limit to public activities posted by the user.",
        "Limit to activities posted by the user."
      ]
  """

  def __init__(self, def_dict, api, wire_name, values, descriptions, parent):
    """Create an enum.

    Args:
      def_dict: (dict) The discovery dictionary for this element.
      api: (Api) The Api which owns this Property.
      wire_name: (str) The identifier used in the wire protocol for this enum.
      values: ([str]) List of possible values. If not provided, use the
        'enum' element from def_dict.
      descriptions: ([str]) List of value descriptions. If not provided, use
        the 'enumDescriptions' element from def_dict.
      parent: (Method) The object owning this enum.
    """
    super(Enum, self).__init__(def_dict, api, parent=parent)
    name = def_dict.get('id') or wire_name
    self.ValidateName(name)
    self.SetTemplateValue('wireName', name)
    self.SetTemplateValue('className',
                          api.ToClassName(name, self, element_type='enum'))
    if values is None:
      values = def_dict.get('enum')
    if descriptions is None:
      descriptions = def_dict.get('enumDescriptions') or []
    self._elements = []
    for i, v in enumerate(values):
      # Sometimes the description list is too short.
      d = descriptions[i] if (i < len(descriptions)) else None
      self._elements.append(
          template_objects.Constant(v, description=d, parent=self))
    self.SetTemplateValue(
        'elements', self._elements,
        meaning='The individual possible values of an Enum data type.')

    # TODO(user): Migrate users away from the enum pairs to 'elements' and
    # delete the rest of this method.
    def FixName(name):
      # Values may not start with a digit and '@'/'-' are not identifier
      # characters in most target languages.
      name = name[0].isdigit() and 'VALUE_' + name or name.lstrip('@')
      return name.upper().replace('-', '_')
    names = [FixName(s) for s in values]

    def FixDescription(desc):
      return self.ValidateAndSanitizeComment(self.StripHTML(desc))
    # BUG FIX: materialize the pairs. Under Python 3, zip() returns a
    # one-shot iterator; a template iterating 'pairs' a second time would
    # silently see an empty sequence.
    pairs = list(zip(names, values, map(FixDescription, descriptions)))
    self.SetTemplateValue('pairs', pairs)

  @property
  def enum_name(self):
    return self.language_model.ApplyPolicy('enum', self,
                                           self.values['wireName'])
def CreatePrimitiveDataType(def_dict, api, wire_name, parent=None):
  """Creates a PrimitiveDataType from a JSON dictionary.

  Creates a primitive built in type or an enum for a blob of json.

  Args:
    def_dict: (dict) The discovery dictionary for this element.
    api: (Api) The Api instance which owns this element.
    wire_name: (str) The identifier used in the wire protocol for this object.
    parent: (TemplateObject) The parent of this object.

  Returns:
    (PrimitiveDataType) A data type.
  """
  # The presence of an 'enum' list decides which concrete type to build.
  if not def_dict.get('enum'):
    return PrimitiveDataType(def_dict, api, parent)
  return Enum(def_dict, api, wire_name, None, None, parent)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
nodeenv
~~~~~~~
Node.js virtual environment
:copyright: (c) 2014 by Eugene Kalinin
:license: BSD, see LICENSE for more details.
"""
import contextlib
import io
import sys
import os
import re
import stat
import logging
import operator
import optparse
import subprocess
import tarfile
import pipes
try: # pragma: no cover (py2 only)
from ConfigParser import SafeConfigParser as ConfigParser
from HTMLParser import HTMLParser
from urllib2 import urlopen
iteritems = operator.methodcaller('iteritems')
except ImportError: # pragma: no cover (py3 only)
from configparser import ConfigParser
from html.parser import HTMLParser
from urllib.request import urlopen
iteritems = operator.methodcaller('items')
from pkg_resources import parse_version
# Package version.
nodeenv_version = '0.13.2'

# Convenience aliases used throughout the module.
join = os.path.join
abspath = os.path.abspath

# Download host; switched to iojs.org when --iojs is requested (see main()).
src_domain = "nodejs.org"

is_PY3 = sys.version_info[0] == 3
if is_PY3:
    # Python 3 sorting has no cmp= keyword; a comparator must be wrapped.
    from functools import cmp_to_key
# ---------------------------------------------------------
# Utils
class Config(object):
    """
    Configuration namespace.

    Class attributes double as option defaults; _load() overwrites them in
    place from ini-style configuration files.
    """
    # Defaults
    node = 'latest'
    npm = 'latest'
    with_npm = False
    jobs = '2'
    without_ssl = False
    debug = False
    profile = False
    make = 'make'
    prebuilt = False

    @classmethod
    def _load(cls, configfiles, verbose=False):
        """
        Load configuration from the given files in reverse order,
        if they exist and have a [nodeenv] section.
        """
        for configfile in reversed(configfiles):
            configfile = os.path.expanduser(configfile)
            if not os.path.exists(configfile):
                continue
            ini_file = ConfigParser()
            ini_file.read(configfile)
            section = "nodeenv"
            if not ini_file.has_section(section):
                continue
            # Only known, public class attributes may be overridden.
            for attr, val in iteritems(vars(cls)):
                if attr.startswith('_') or not \
                        ini_file.has_option(section, attr):
                    continue
                # Preserve the attribute's type: booleans stay booleans.
                if isinstance(val, bool):
                    val = ini_file.getboolean(section, attr)
                else:
                    val = ini_file.get(section, attr)
                if verbose:
                    print('CONFIG {0}: {1} = {2}'.format(
                        os.path.basename(configfile), attr, val))
                setattr(cls, attr, val)

    @classmethod
    def _dump(cls):
        """
        Print defaults for the README.
        """
        print(" [nodeenv]")
        print(" " + "\n ".join(
            "%s = %s" % (k, v) for k, v in sorted(iteritems(vars(cls)))
            if not k.startswith('_')))
# Snapshot of Config's defaults, taken before any config file is loaded.
Config._default = dict(
    (attr, val) for attr, val in iteritems(vars(Config))
    if not attr.startswith('_')
)
def clear_output(out):
    """
    Decode a subprocess byte string and remove all newline characters.
    """
    text = out.decode('utf-8')
    return ''.join(text.split('\n'))
def remove_env_bin_from_path(env, env_bin_dir):
    """
    Remove the bin directory of the current environment from a PATH string.
    """
    needle = env_bin_dir + ':'
    return env.replace(needle, '')
def node_version_from_opt(opt):
    """
    Parse the node version from the optparse options.

    For --node=system the version is obtained by running `node --version`;
    otherwise opt.node is parsed directly.
    """
    if opt.node == 'system':
        out, err = subprocess.Popen(
            ["node", "--version"], stdout=subprocess.PIPE).communicate()
        # Output looks like b'v0.10.26\n'; drop the leading 'v' and newline.
        return parse_version(clear_output(out).replace('v', ''))
    return parse_version(opt.node)
def create_logger():
    """
    Create logger for diagnostic.

    Returns a 'nodeenv' logger that writes bare messages to the console.
    Records logged with extra={'continued': True} are emitted without a
    trailing newline so progress dots can accumulate on one line.
    """
    # create logger
    logger = logging.getLogger("nodeenv")
    logger.setLevel(logging.INFO)

    # monkey patch
    # NOTE(review): this replaces StreamHandler.emit process-wide, so every
    # StreamHandler in the process gains the 'continued' behaviour.
    def emit(self, record):
        msg = self.format(record)
        fs = "%s" if getattr(record, "continued", False) else "%s\n"
        self.stream.write(fs % msg)
        self.flush()
    logging.StreamHandler.emit = emit

    # create console handler and set level to debug
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    # create formatter
    formatter = logging.Formatter(fmt="%(message)s")
    # add formatter to ch
    ch.setFormatter(formatter)
    # add ch to logger
    logger.addHandler(ch)
    return logger


# Module-wide logger instance.
logger = create_logger()
def parse_args(check=True):
    """
    Parses command line arguments.

    Set `check` to False to skip validation checks.

    Returns:
        (options, args) as produced by optparse.
    """
    parser = optparse.OptionParser(
        version=nodeenv_version,
        usage="%prog [OPTIONS] ENV_DIR")
    parser.add_option(
        '-n', '--node', dest='node', metavar='NODE_VER', default=Config.node,
        help='The node.js version to use, e.g., '
        '--node=0.4.3 will use the node-v0.4.3 '
        'to create the new environment. '
        'The default is last stable version (`latest`). '
        'Use `system` to use system-wide node.')
    parser.add_option(
        '-i', '--iojs',
        action='store_true', dest='io', default=False,
        help='Use iojs instead of nodejs.')
    parser.add_option(
        '-j', '--jobs', dest='jobs', default=Config.jobs,
        help='Sets number of parallel commands at node.js compilation. '
        'The default is 2 jobs.')
    parser.add_option(
        '--load-average', dest='load_average',
        help='Sets maximum load average for executing parallel commands '
        'at node.js compilation.')
    parser.add_option(
        '-v', '--verbose',
        action='store_true', dest='verbose', default=False,
        help="Verbose mode")
    parser.add_option(
        '-q', '--quiet',
        action='store_true', dest='quiet', default=False,
        help="Quiet mode")
    parser.add_option(
        '-C', '--config-file', dest='config_file', default=None,
        help="Load a different file than '~/.nodeenvrc'. "
        "Pass an empty string for no config (use built-in defaults).")
    parser.add_option(
        '-r', '--requirements',
        dest='requirements', default='', metavar='FILENAME',
        help='Install all the packages listed in the given requirements file.')
    parser.add_option(
        '--prompt', dest='prompt',
        help='Provides an alternative prompt prefix for this environment')
    parser.add_option(
        '-l', '--list', dest='list',
        action='store_true', default=False,
        help='Lists available node.js versions')
    parser.add_option(
        '--update', dest='update',
        action='store_true', default=False,
        help='Install npm packages from file without node')
    parser.add_option(
        '--without-ssl', dest='without_ssl',
        action='store_true', default=Config.without_ssl,
        help='Build node.js without SSL support')
    parser.add_option(
        '--debug', dest='debug',
        action='store_true', default=Config.debug,
        help='Build debug variant of the node.js')
    parser.add_option(
        '--profile', dest='profile',
        action='store_true', default=Config.profile,
        help='Enable profiling for node.js')
    parser.add_option(
        '--with-npm', dest='with_npm',
        action='store_true', default=Config.with_npm,
        help='Build without installing npm into the new virtual environment. '
        'Required for node.js < 0.6.3. By default, the npm included with '
        'node.js is used.')
    parser.add_option(
        '--npm', dest='npm',
        metavar='NPM_VER', default=Config.npm,
        help='The npm version to use, e.g., '
        '--npm=0.3.18 will use the npm-0.3.18.tgz '
        'tarball to install. '
        'The default is last available version (`latest`).')
    parser.add_option(
        '--no-npm-clean', dest='no_npm_clean',
        action='store_true', default=False,
        help='Skip the npm 0.x cleanup. Cleanup is enabled by default.')
    parser.add_option(
        '--python-virtualenv', '-p', dest='python_virtualenv',
        action='store_true', default=False,
        help='Use current python virtualenv')
    parser.add_option(
        '--clean-src', '-c', dest='clean_src',
        action='store_true', default=False,
        help='Remove "src" directory after installation')
    parser.add_option(
        '--force', dest='force',
        action='store_true', default=False,
        help='Force installation in a pre-existing directory')
    parser.add_option(
        '--make', '-m', dest='make_path',
        metavar='MAKE_PATH',
        help='Path to make command',
        default=Config.make)
    parser.add_option(
        '--prebuilt', dest='prebuilt',
        action='store_true', default=Config.prebuilt,
        help='Install node.js from prebuilt package')
    options, args = parser.parse_args()

    # No -C given: use the default search path; -C '' disables config files.
    if options.config_file is None:
        options.config_file = ["./setup.cfg", "~/.nodeenvrc"]
    elif not options.config_file:
        options.config_file = []
    else:
        # Make sure that explicitly provided files exist
        if not os.path.exists(options.config_file):
            parser.error("Config file '{0}' doesn't exist!".format(
                options.config_file))
        options.config_file = [options.config_file]

    if not check:
        return options, args

    # A destination directory is required unless we only list versions or
    # reuse the current python virtualenv.
    if not options.list and not options.python_virtualenv:
        if not args:
            parser.error('You must provide a DEST_DIR or '
                         'use current python virtualenv')
        if len(args) > 1:
            parser.error('There must be only one argument: DEST_DIR '
                         '(you gave: {0})'.format(' '.join(args)))
    return options, args
def mkdir(path):
    """
    Create directory (including missing parents), logging progress.
    """
    if not os.path.exists(path):
        logger.debug(' * Creating: %s ... ', path, extra=dict(continued=True))
        try:
            os.makedirs(path)
        except OSError as e:
            # BUG FIX: tolerate the directory appearing between the
            # exists() check and makedirs() (TOCTOU race); re-raise
            # any other failure.
            import errno
            if e.errno != errno.EEXIST:
                raise
        logger.debug('done.')
    else:
        logger.debug(' * Directory %s already exists', path)
def writefile(dest, content, overwrite=True, append=False):
    """
    Write *content* (text) to *dest*, honouring overwrite/append semantics.

    - New file: always written.
    - Existing, identical content: left untouched.
    - Existing, different content: skipped unless overwrite; appended
      (wrapped in the prompt-disable markers) when append is set.
    """
    content = content.encode('utf-8')
    if not os.path.exists(dest):
        logger.debug(' * Writing %s ... ', dest, extra=dict(continued=True))
        with open(dest, 'wb') as f:
            f.write(content)
        logger.debug('done.')
        return
    with open(dest, 'rb') as f:
        existing = f.read()
    if existing == content:
        logger.debug(' * Content %s already in place', dest)
        return
    if not overwrite:
        logger.info(' * File %s exists with different content; '
                    ' not overwriting', dest)
        return
    if append:
        logger.info(' * Appending data to %s', dest)
        with open(dest, 'ab') as f:
            f.write(DISABLE_POMPT.encode('utf-8'))
            f.write(content)
            f.write(ENABLE_PROMPT.encode('utf-8'))
        return
    logger.info(' * Overwriting %s with new content', dest)
    with open(dest, 'wb') as f:
        f.write(content)
def callit(cmd, show_stdout=True, in_shell=False,
           cwd=None, extra_env=None):
    """
    Execute cmd line in sub-shell.

    Args:
        cmd: list of command parts; joined into one string when in_shell.
        show_stdout: log each output line at INFO level as it arrives.
        in_shell: run the command through the shell (needed for globs).
        cwd: working directory for the child process.
        extra_env: mapping merged over os.environ for the child.

    Returns:
        (returncode, all_output_lines)

    Raises:
        OSError: if the command exits with a non-zero status.
    """
    all_output = []
    # Build a short, quoted description of the command -- for logging only.
    cmd_parts = []
    for part in cmd:
        if len(part) > 45:
            part = part[:20] + "..." + part[-20:]
        if ' ' in part or '\n' in part or '"' in part or "'" in part:
            part = '"%s"' % part.replace('"', '\\"')
        cmd_parts.append(part)
    cmd_desc = ' '.join(cmd_parts)
    logger.debug(" ** Running command %s" % cmd_desc)

    if in_shell:
        cmd = ' '.join(cmd)

    # output
    stdout = subprocess.PIPE

    # env: only build a modified environment when extras were supplied.
    # (BUG FIX: removed a redundant nested 'if extra_env' check.)
    if extra_env:
        env = os.environ.copy()
        env.update(extra_env)
    else:
        env = None

    # execute
    try:
        proc = subprocess.Popen(
            cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
            cwd=cwd, env=env, shell=in_shell)
    except Exception:
        e = sys.exc_info()[1]
        logger.error("Error %s while executing command %s" % (e, cmd_desc))
        raise

    # Stream the child's combined stdout/stderr line by line.
    stdout = proc.stdout
    while stdout:
        line = stdout.readline()
        if not line:
            break
        line = line.decode('utf-8').rstrip()
        all_output.append(line)
        if show_stdout:
            logger.info(line)
    proc.wait()

    # error handler
    if proc.returncode:
        if show_stdout:
            for s in all_output:
                logger.critical(s)
        raise OSError("Command %s failed with error code %s"
                      % (cmd_desc, proc.returncode))
    return proc.returncode, all_output
def get_node_src_url(version, postfix=''):
    """
    Build the download URL for a node/io.js tarball of *version*.

    Releases newer than 0.5.0 live in a per-version subdirectory of /dist;
    older ones sit directly under /dist.
    """
    node_name = '%s-v%s%s' % (get_binary_prefix(), version, postfix)
    tar_name = '%s.tar.gz' % (node_name)
    if parse_version(version) > parse_version("0.5.0"):
        node_url = 'http://%s/dist/v%s/%s' % (src_domain, version, tar_name)
    else:
        node_url = 'http://%s/dist/%s' % (src_domain, tar_name)
    return node_url
@contextlib.contextmanager
def tarfile_open(*args, **kwargs):
    """Context-manager wrapper for tarfile.open (py26 lacks one natively)."""
    archive = tarfile.open(*args, **kwargs)
    try:
        yield archive
    finally:
        archive.close()
def download_node(node_url, src_dir, env_dir, opt):
    """
    Download source code.

    Fetches *node_url* entirely into memory, then unpacks it under
    *src_dir*.  (env_dir and opt are currently unused here.)

    NOTE(review): tarfile.extractall() on a downloaded archive can write
    outside src_dir via crafted '../' member names (CVE-2007-4559);
    consider validating member paths before extraction.
    """
    tar_contents = io.BytesIO(urlopen(node_url).read())
    with tarfile_open(fileobj=tar_contents) as tarfile_obj:
        tarfile_obj.extractall(src_dir)
    logger.info(')', extra=dict(continued=True))
def get_node_src_url_postfix(opt):
    """
    Return the tarball-name postfix for the requested install flavour.

    Source installs get an empty postfix; prebuilt packages get a
    platform marker such as '-linux-x64'.
    """
    if not opt.prebuilt:
        return ''
    import platform
    postfix_system = platform.system().lower()
    # Map common machine names onto node.js arch labels.  BUG FIX: fall
    # back to the raw machine name (e.g. 'armv6l') instead of raising
    # KeyError on architectures outside the map.
    arches = {'x86_64': 'x64', 'i686': 'x86'}
    machine = platform.machine()
    postfix_arch = arches.get(machine, machine)
    return '-{0}-{1}'.format(postfix_system, postfix_arch)
# ---------------------------------------------------------
# Virtual environment functions
def copy_node_from_prebuilt(env_dir, src_dir):
    """
    Copy prebuilt binaries into environment.

    The glob in the source path needs shell expansion, so the copy runs
    through a shell.
    """
    logger.info('.', extra=dict(continued=True))
    prefix = get_binary_prefix()
    # BUG FIX: env_dir used to be passed positionally into callit's
    # in_shell slot (truthy string -> worked by accident) and the intended
    # cwd was silently dropped.  Pass both explicitly.
    callit(['cp', '-a', src_dir + '/%s-v*/*' % prefix, env_dir],
           show_stdout=True, in_shell=True, cwd=env_dir)
    logger.info('.', extra=dict(continued=True))
def build_node_from_src(env_dir, src_dir, node_src_dir, opt):
    """Configure and build node.js from source, installing into *env_dir*."""
    env = {}
    # Translate the option values into make flags: '--jobs=2', or a bare
    # '--load-average' when the value is an empty string; None means the
    # option was not given and is skipped entirely.
    make_param_names = ['load-average', 'jobs']
    make_param_values = map(
        lambda x: getattr(opt, x.replace('-', '_')),
        make_param_names)
    make_opts = [
        '--{0}={1}'.format(name, value)
        if len(value) > 0 else '--{0}'.format(name)
        for name, value in zip(make_param_names, make_param_values)
        if value is not None
    ]

    if getattr(sys.version_info, 'major', sys.version_info[0]) > 2:
        # Currently, the node.js build scripts are using python2.*,
        # therefore we need to temporarily point python exec to the
        # python 2.* version in this case.
        try:
            _, which_python2_output = callit(
                ['which', 'python2'], opt.verbose, True, node_src_dir, env
            )
            python2_path = which_python2_output[0]
        except (OSError, IndexError):
            raise OSError(
                'Python >=3.0 virtualenv detected, but no python2 '
                'command (required for building node.js) was found'
            )
        logger.debug(' * Temporarily pointing python to %s', python2_path)
        node_tmpbin_dir = join(src_dir, 'tmpbin')
        node_tmpbin_link = join(node_tmpbin_dir, 'python')
        mkdir(node_tmpbin_dir)
        if not os.path.exists(node_tmpbin_link):
            callit(['ln', '-s', python2_path, node_tmpbin_link])
        # Prepend the shim dir so a bare 'python' resolves to python2.
        env['PATH'] = '{}:{}'.format(node_tmpbin_dir,
                                     os.environ.get('PATH', ''))

    conf_cmd = []
    conf_cmd.append('./configure')
    conf_cmd.append('--prefix=%s' % pipes.quote(env_dir))
    if opt.without_ssl:
        conf_cmd.append('--without-ssl')
    if opt.debug:
        conf_cmd.append('--debug')
    if opt.profile:
        conf_cmd.append('--profile')

    make_cmd = opt.make_path

    # configure, make, make install -- each step logs a progress dot.
    callit(conf_cmd, opt.verbose, True, node_src_dir, env)
    logger.info('.', extra=dict(continued=True))
    callit([make_cmd] + make_opts, opt.verbose, True, node_src_dir, env)
    logger.info('.', extra=dict(continued=True))
    callit([make_cmd + ' install'], opt.verbose, True, node_src_dir, env)
def get_binary_prefix():
    """Return the tarball/binary name prefix for the active download host."""
    if src_domain == 'nodejs.org':
        return 'node'
    return 'iojs'
def install_node(env_dir, src_dir, opt):
    """
    Download source code for node.js, unpack it
    and install it in virtual environment.
    """
    prefix = get_binary_prefix()
    logger.info(' * Install %s (%s' % (prefix, opt.node),
                extra=dict(continued=True))
    node_url = get_node_src_url(opt.node, get_node_src_url_postfix(opt))
    node_src_dir = join(src_dir, '%s-v%s' % (prefix, opt.node))
    env_dir = abspath(env_dir)

    # get src if not downloaded yet
    if not os.path.exists(node_src_dir):
        download_node(node_url, src_dir, env_dir, opt)
    logger.info('.', extra=dict(continued=True))

    # Prebuilt packages are just copied; source tarballs are compiled.
    if opt.prebuilt:
        copy_node_from_prebuilt(env_dir, src_dir)
    else:
        build_node_from_src(env_dir, src_dir, node_src_dir, opt)
    logger.info(' done.')
def install_npm(env_dir, src_dir, opt):
    """
    Download source code for npm, unpack it
    and install it in virtual environment.

    Runs the official npm install.sh inside a bash that has sourced
    bin/activate, so npm lands inside env_dir.  The npm_install variable
    selects the npm version for the script.
    """
    logger.info(' * Install npm.js (%s) ... ' % opt.npm,
                extra=dict(continued=True))
    npm_contents = urlopen('https://www.npmjs.org/install.sh').read()
    env = dict(
        os.environ,
        clean='no' if opt.no_npm_clean else 'yes',
        npm_install=opt.npm,
    )
    # The downloaded script is fed to the activated bash via stdin.
    proc = subprocess.Popen(
        (
            'bash', '-c',
            '. {0} && exec bash'.format(
                pipes.quote(join(env_dir, 'bin', 'activate')),
            )
        ),
        env=env,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    out, _ = proc.communicate(npm_contents)
    if opt.verbose:
        logger.info(out)
    logger.info('done.')
def install_packages(env_dir, opt):
    """
    Install node.js packages via npm.

    Reads package names (one per line) from opt.requirements and installs
    each inside the activated environment.
    """
    logger.info(' * Install node.js packages ... ',
                extra=dict(continued=True))
    # BUG FIX: close the requirements file (it was previously leaked via
    # a bare open(...).readlines()).
    with open(opt.requirements) as f:
        packages = [package.strip() for package in f.readlines()]
    activate_path = join(env_dir, 'bin', 'activate')
    # npm >= 1.0 supports global installs; older npm needs install+activate.
    # NOTE(review): the comparison below is lexicographic, not numeric --
    # fine for the version shapes npm uses here, but worth confirming.
    real_npm_ver = opt.npm if opt.npm.count(".") == 2 else opt.npm + ".0"
    if opt.npm == "latest" or real_npm_ver >= "1.0.0":
        cmd = '. ' + pipes.quote(activate_path) + \
            ' && npm install -g %(pack)s'
    else:
        cmd = '. ' + pipes.quote(activate_path) + \
            ' && npm install %(pack)s' + \
            ' && npm activate %(pack)s'

    for package in packages:
        if not package:
            continue
        callit(cmd=[
            cmd % {"pack": package}], show_stdout=opt.verbose, in_shell=True)

    logger.info('done.')
def install_activate(env_dir, opt):
    """
    Install virtual environment activation script.

    Writes bin/activate plus the node shim wrappers, substituting the
    __PLACEHOLDER__ tokens embedded in the script templates.
    """
    files = {'activate': ACTIVATE_SH, 'shim': SHIM}
    if opt.node == "system":
        # With --node=system the 'node' entry itself is a shim script.
        files["node"] = SHIM
    bin_dir = join(env_dir, 'bin')
    mod_dir = join('lib', 'node_modules')
    prompt = opt.prompt or '(%s)' % os.path.basename(os.path.abspath(env_dir))
    # rwxr-xr-x
    mode_0755 = (stat.S_IRWXU | stat.S_IXGRP |
                 stat.S_IRGRP | stat.S_IROTH | stat.S_IXOTH)
    shim_node = join(bin_dir, "node")
    shim_nodejs = join(bin_dir, "nodejs")

    if opt.node == "system":
        # Resolve the system node/nodejs binary, removing our own bin dir
        # from PATH so the shim cannot find itself.
        env = os.environ.copy()
        env.update({'PATH': remove_env_bin_from_path(env['PATH'], bin_dir)})
        for candidate in ("nodejs", "node"):
            which_node_output, _ = subprocess.Popen(
                ["which", candidate],
                stdout=subprocess.PIPE, env=env).communicate()
            shim_node = clear_output(which_node_output)
            if shim_node:
                break
        assert shim_node, "Did not find nodejs or node system executable"

    for name, content in files.items():
        file_path = join(bin_dir, name)
        content = content.replace('__NODE_VIRTUAL_PROMPT__', prompt)
        content = content.replace('__NODE_VIRTUAL_ENV__',
                                  os.path.abspath(env_dir))
        content = content.replace('__SHIM_NODE__', shim_node)
        content = content.replace('__BIN_NAME__', os.path.basename(bin_dir))
        content = content.replace('__MOD_NAME__', mod_dir)
        # if we call in the same environment:
        #   $ nodeenv -p --prebuilt
        #   $ nodeenv -p --node=system
        # we should get `bin/node` not as binary+string.
        # `bin/activate` should be appended if we inside
        # existing python's virtual environment
        need_append = 0 if name in ('node', 'shim') else opt.python_virtualenv
        writefile(file_path, content, append=need_append)
        os.chmod(file_path, mode_0755)

    if not os.path.exists(shim_nodejs):
        os.symlink("node", shim_nodejs)
def create_environment(env_dir, opt):
    """
    Creates a new environment in ``env_dir``.
    """
    if os.path.exists(env_dir) and not opt.python_virtualenv:
        logger.info(' * Environment already exists: %s', env_dir)
        if not opt.force:
            sys.exit(2)
    src_dir = abspath(join(env_dir, 'src'))
    mkdir(src_dir)
    if opt.node != "system":
        install_node(env_dir, src_dir, opt)
    else:
        # System node: only the directory skeleton is needed.
        mkdir(join(env_dir, 'bin'))
        mkdir(join(env_dir, 'lib'))
        mkdir(join(env_dir, 'lib', 'node_modules'))
    # activate script install must be
    # before npm install, npm use activate
    # for install
    install_activate(env_dir, opt)
    # Old node releases (< 0.6.3) shipped without npm; install separately.
    if node_version_from_opt(opt) < parse_version("0.6.3") or opt.with_npm:
        install_npm(env_dir, src_dir, opt)
    if opt.requirements:
        install_packages(env_dir, opt)
    # Cleanup
    if opt.clean_src:
        callit(['rm -rf', pipes.quote(src_dir)], opt.verbose, True, env_dir)
class GetsAHrefs(HTMLParser):
    """HTML parser that collects the href attribute of every <a> tag."""

    def __init__(self):
        # Explicit base-class call: HTMLParser is an old-style class on py2,
        # so super() cannot be used here.
        HTMLParser.__init__(self)
        self.hrefs = []

    def handle_starttag(self, tag, attrs):
        if tag != 'a':
            return
        # Anchors without an href are recorded as empty strings.
        self.hrefs.append(dict(attrs).get('href', ''))
# Matches a dotted three-component version number, e.g. '0.10.26'.
# BUG FIX: raw string -- '\d' in a plain string is an invalid escape
# (DeprecationWarning, and an error on newer Python versions).
VERSION_RE = re.compile(r'\d+\.\d+\.\d+')
def _py2_cmp(a, b):
# -1 = a < b, 0 = eq, 1 = a > b
return (a > b) - (a < b)
def compare_versions(version, other_version):
    """Three-way, component-wise numeric compare of two dotted version strings.

    Shorter versions compare less when all shared components are equal
    (so '1.2' < '1.2.0').
    """
    left = version.split('.')
    right = other_version.split('.')
    # Compare shared components numerically; first difference decides.
    for a_str, b_str in zip(left, right):
        a, b = int(a_str), int(b_str)
        if a != b:
            return 1 if a > b else -1
    # All shared components equal: the longer version wins.
    return (len(left) > len(right)) - (len(left) < len(right))
def get_node_versions():
    """
    Scrape the /dist index of the download host and return every version
    string found, sorted ascending (numeric, component-wise).
    """
    response = urlopen('https://{0}/dist'.format(src_domain))
    href_parser = GetsAHrefs()
    href_parser.feed(response.read().decode('UTF-8'))
    # A set de-duplicates versions that appear in several links.
    versions = set(
        VERSION_RE.search(href).group()
        for href in href_parser.hrefs
        if VERSION_RE.search(href)
    )
    # py3's sorted() lost the cmp= keyword; wrap the comparator instead.
    if is_PY3:
        key_compare = cmp_to_key(compare_versions)
        versions = sorted(versions, key=key_compare)
    else:
        versions = sorted(versions, cmp=compare_versions)
    return versions
def print_node_versions():
    """
    Prints into stdout all available node.js versions, eight per line.
    """
    versions = get_node_versions()
    for start in range(0, len(versions), 8):
        logger.info('\t'.join(versions[start:start + 8]))
def get_last_stable_node_version():
    """
    Return last stable node.js version.

    Scrapes /dist/latest/ and returns the version from the first tarball
    link found (the 'break' stops at the first match).
    NOTE(review): raises IndexError when no link matches, e.g. if the dist
    page layout changes -- confirm whether a clearer error is wanted.
    """
    response = urlopen('https://%s/dist/latest/' % (src_domain))
    href_parser = GetsAHrefs()
    href_parser.feed(response.read().decode('UTF-8'))

    links = []
    pattern = re.compile(r'''%s-v([0-9]+)\.([0-9]+)\.([0-9]+)\.tar\.gz''' % (
        get_binary_prefix()))
    for href in href_parser.hrefs:
        match = pattern.match(href)
        if match:
            version = u'.'.join(match.groups())
            major, minor, revision = map(int, match.groups())
            links.append((version, major, minor, revision))
            break
    return links[-1][0]
def get_env_dir(opt, args):
    """Resolve the target environment directory.

    With --python-virtualenv the current virtualenv's prefix is reused
    (exiting with an error when none is active); otherwise the first
    positional argument is the destination.
    """
    if not opt.python_virtualenv:
        return args[0]
    inside_virtualenv = hasattr(sys, 'real_prefix') or (
        hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix)
    if inside_virtualenv:
        return sys.prefix
    logger.error('No python virtualenv is available')
    sys.exit(2)
def is_installed(name):
    """
    Return True if *name* can be spawned as a command, False otherwise.

    Any OSError other than ENOENT is treated as 'installed' (the command
    exists but failed to start for some other reason).
    """
    import errno
    try:
        # BUG FIX: close devnull (it was leaked before).
        devnull = open(os.devnull)
        try:
            subprocess.Popen([name], stdout=devnull, stderr=devnull)
        finally:
            devnull.close()
    except OSError as e:
        # BUG FIX: use the errno module; the undocumented os.errno alias
        # was removed from modern Python and raised AttributeError here.
        if e.errno == errno.ENOENT:
            return False
    return True
def main():
    """
    Entry point.

    Arguments are parsed twice: once unchecked to locate the config file,
    then again after Config._load so file values become option defaults.
    """
    # quick&dirty way to help update the README
    if "--dump-config-defaults" in sys.argv:
        Config._dump()
        return

    opt, args = parse_args(check=False)
    Config._load(opt.config_file, opt.verbose)
    opt, args = parse_args()

    if opt.io:
        # --iojs switches the download host module-wide.
        global src_domain
        src_domain = "iojs.org"

    # 'latest' (or empty) resolves to the newest stable release online.
    if not opt.node or opt.node.lower() == "latest":
        opt.node = get_last_stable_node_version()

    if opt.list:
        print_node_versions()
    elif opt.update:
        env_dir = get_env_dir(opt, args)
        install_packages(env_dir, opt)
    else:
        env_dir = get_env_dir(opt, args)
        create_environment(env_dir, opt)
# ---------------------------------------------------------
# Shell scripts content

# Prepended when appending to an existing virtualenv activate script so the
# nodeenv prompt logic is skipped (the python prompt is already modified).
# NOTE(review): the name is misspelled ('POMPT') but is referenced as-is in
# writefile(), so it must stay.
DISABLE_POMPT = """
# disable nodeenv's prompt
# (prompt already changed by original virtualenv's script)
# https://github.com/ekalinin/nodeenv/issues/26
NODE_VIRTUAL_ENV_DISABLE_PROMPT=1
"""

# Appended after the environment content to re-enable prompt handling.
ENABLE_PROMPT = """
unset NODE_VIRTUAL_ENV_DISABLE_PROMPT
"""

# Wrapper installed as bin/shim (and bin/node when using the system node);
# the __PLACEHOLDER__ tokens are substituted by install_activate().
SHIM = """#!/usr/bin/env bash
export NODE_PATH=__NODE_VIRTUAL_ENV__/lib/node_modules
export NPM_CONFIG_PREFIX=__NODE_VIRTUAL_ENV__
export npm_config_prefix=__NODE_VIRTUAL_ENV__
exec __SHIM_NODE__ "$@"
"""
ACTIVATE_SH = """
# This file must be used with "source bin/activate" *from bash*
# you cannot run it directly
deactivate_node () {
# reset old environment variables
if [ -n "$_OLD_NODE_VIRTUAL_PATH" ] ; then
PATH="$_OLD_NODE_VIRTUAL_PATH"
export PATH
unset _OLD_NODE_VIRTUAL_PATH
NODE_PATH="$_OLD_NODE_PATH"
export NODE_PATH
unset _OLD_NODE_PATH
NPM_CONFIG_PREFIX="$_OLD_NPM_CONFIG_PREFIX"
npm_config_prefix="$_OLD_npm_config_prefix"
export NPM_CONFIG_PREFIX
export npm_config_prefix
unset _OLD_NPM_CONFIG_PREFIX
unset _OLD_npm_config_prefix
fi
# This should detect bash and zsh, which have a hash command that must
# be called to get it to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
if [ -n "$BASH" -o -n "$ZSH_VERSION" ] ; then
hash -r
fi
if [ -n "$_OLD_NODE_VIRTUAL_PS1" ] ; then
PS1="$_OLD_NODE_VIRTUAL_PS1"
export PS1
unset _OLD_NODE_VIRTUAL_PS1
fi
unset NODE_VIRTUAL_ENV
if [ ! "$1" = "nondestructive" ] ; then
# Self destruct!
unset -f deactivate_node
fi
}
freeze () {
local NPM_VER=`npm -v | cut -d '.' -f 1`
local re="[a-zA-Z0-9\.\-]+@[0-9]+\.[0-9]+\.[0-9]+([\+\-][a-zA-Z0-9\.\-]+)*"
if [ "$NPM_VER" = '0' ]; then
NPM_LIST=`npm list installed active 2>/dev/null | \
cut -d ' ' -f 1 | grep -v npm`
else
local npmls="npm ls -g"
if [ "$1" = "-l" ]; then
npmls="npm ls"
shift
fi
NPM_LIST=$(eval ${npmls} | grep -E '^.{4}\w{1}'| \
grep -o -E "$re"| grep -v npm)
fi
if [ -z "$@" ]; then
echo "$NPM_LIST"
else
echo "$NPM_LIST" > $@
fi
}
# unset irrelavent variables
deactivate_node nondestructive
# find the directory of this script
# http://stackoverflow.com/a/246128
if [ "${BASH_SOURCE}" ] ; then
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
NODE_VIRTUAL_ENV="$(dirname "$DIR")"
else
# dash not movable. fix use case:
# dash -c " . node-env/bin/activate && node -v"
NODE_VIRTUAL_ENV="__NODE_VIRTUAL_ENV__"
fi
# NODE_VIRTUAL_ENV is the parent of the directory where this script is
export NODE_VIRTUAL_ENV
_OLD_NODE_VIRTUAL_PATH="$PATH"
PATH="$NODE_VIRTUAL_ENV/__BIN_NAME__:$PATH"
export PATH
_OLD_NODE_PATH="$NODE_PATH"
NODE_PATH="$NODE_VIRTUAL_ENV/__MOD_NAME__"
export NODE_PATH
_OLD_NPM_CONFIG_PREFIX="$NPM_CONFIG_PREFIX"
_OLD_npm_config_prefix="$npm_config_prefix"
NPM_CONFIG_PREFIX="$NODE_VIRTUAL_ENV"
npm_config_prefix="$NODE_VIRTUAL_ENV"
export NPM_CONFIG_PREFIX
export npm_config_prefix
if [ -z "$NODE_VIRTUAL_ENV_DISABLE_PROMPT" ] ; then
_OLD_NODE_VIRTUAL_PS1="$PS1"
if [ "x__NODE_VIRTUAL_PROMPT__" != x ] ; then
PS1="__NODE_VIRTUAL_PROMPT__$PS1"
else
if [ "`basename \"$NODE_VIRTUAL_ENV\"`" = "__" ] ; then
# special case for Aspen magic directories
# see http://www.zetadev.com/software/aspen/
PS1="[`basename \`dirname \"$NODE_VIRTUAL_ENV\"\``] $PS1"
else
PS1="(`basename \"$NODE_VIRTUAL_ENV\"`)$PS1"
fi
fi
export PS1
fi
# This should detect bash and zsh, which have a hash command that must
# be called to get it to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
if [ -n "$BASH" -o -n "$ZSH_VERSION" ] ; then
hash -r
fi
"""
if __name__ == '__main__':
main()
| |
#!/usr/bin/python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import argparse
import sys
import json
import os.path
import platform
import tarfile
from distutils.util import strtobool
from ftplib import FTP
if sys.version_info[0] < 3:
from urllib2 import urlopen
input = raw_input
else:
from urllib.request import urlopen
# Path of the MiNiFi C++ artifacts below an Apache mirror's root.
MINIFI_SUBFOLDER = '/nifi/nifi-minifi-cpp/'
# closer.cgi returns JSON describing the mirrors nearest to the caller.
APACHE_CLOSER_REPO_JSON_URL = 'https://www.apache.org/dyn/closer.cgi?as_json=1&path=/nifi/nifi-minifi-cpp'
APACHE_MIRROR_LIST = "http://www.apache.org/mirrors/"
def install_package(package_name):
    """Install `package_name` for the running interpreter via pip.

    :param package_name: name of the PyPI package to install
    :return: True when pip reported success, False otherwise.

    BUG FIX: the old code called pip's in-process API (``pip.main`` /
    ``pip._internal.main``), which is unsupported and breaks across pip
    releases (``pip._internal.main`` is a module, not a callable, on
    modern pip, raising TypeError that was not caught). pip's own
    documentation says to run it as a subprocess instead.
    """
    import subprocess  # local import: this module does not import it globally
    try:
        return subprocess.call(
            [sys.executable, "-m", "pip", "install", package_name]) == 0
    except OSError:
        # the interpreter could not be spawned at all
        return False
# The `distro` package provides Linux distribution detection on newer
# Pythons, where platform.linux_distribution() was removed (3.8+).
# If it is missing, try to install it on the fly.
distro_available = False
try:
    import distro
    distro_available = True
except ImportError:
    distro_available = install_package("distro")
def get_distro():
    """Return (name, version, codename)-style platform info.

    macOS gets a fixed triple; on Linux the `distro` package is preferred,
    falling back to platform.linux_distribution(). Any failure yields
    "N/A" placeholders.
    """
    if is_mac():
        return ["osx", "", "darwin"]
    try:
        if distro_available:
            return distro.linux_distribution(full_distribution_name=False)
        return platform.linux_distribution()
    except Exception:
        # platform.linux_distribution() no longer exists on newer Pythons
        # and `distro` may be unavailable — report unknown markers
        return ["N/A", "N/A", "N/A"]
def is_mac():
    """Return True when running on macOS (Darwin kernel)."""
    system_name = platform.system()
    return system_name == "Darwin"
def mapped_distro():
    """Return (distro, release), folding the RHEL family onto 'rhel'."""
    info = get_distro()
    name = info[0].lower()
    codename = info[2].lower()
    rhel_family = ("rhel", "red hat", "centos")
    if any(tag in name for tag in rhel_family):
        return "rhel", codename
    return name, codename
def find_closest_mirror():
    """Ask apache.org's closer.cgi for the nearest mirror.

    :return: the first FTP mirror URL, or '' when the lookup fails.
    """
    try:
        response = urlopen(APACHE_CLOSER_REPO_JSON_URL)
        payload = json.loads(response.read().decode())
        return payload['ftp'][0]
    except Exception:
        print("Failed to find closest mirror, please specify one!")
        return ""
def get_release_and_binaries_from_ftp(host, apache_dir, version=None):
    """List the chosen release and its binaries on an Apache FTP mirror.

    :param host: FTP host of the mirror
    :param apache_dir: mirror's base directory of the Apache tree
    :param version: explicit release name; the latest release when None
    :return: (release_name, [binary file names])

    FIX: the FTP connection is now closed via try/finally — the original
    leaked it whenever listing failed or an unknown version exited early
    (and used the site-provided ``exit`` instead of ``sys.exit``).
    """
    ftp = FTP(host)
    try:
        ftp.login()
        ftp.cwd(apache_dir + MINIFI_SUBFOLDER)
        # "." and ".." contain no digits while release names do — filter on that
        file_list = [name for name in ftp.nlst("")
                     if any(char.isdigit() for char in name)]
        file_list.sort(reverse=True)
        if not version:
            latest_release = file_list[0]
        else:
            if version not in file_list:
                print("The specified version (" + version + ") doesn't exist. Please use one of the following: " + ", ".join(file_list))
                sys.exit(-1)
            latest_release = version
        ftp.cwd("./" + latest_release)
        binaries = [name for name in ftp.nlst("")
                    if any(char.isdigit() for char in name)]
    finally:
        ftp.quit()
    return latest_release, binaries
def download_binary_from_ftp(host, apache_dir, release, binary):
    """Download `binary` of `release` from the mirror into the CWD.

    :param host: FTP host of the mirror
    :param apache_dir: mirror's base directory of the Apache tree
    :param release: release folder name
    :param binary: file name to fetch
    :return: True when the file was fetched completely, False otherwise.
    """
    successful_download = False
    ftp = None
    try:
        ftp = FTP(host)
        ftp.login()
        ftp.cwd(apache_dir + MINIFI_SUBFOLDER + release)
        print("Downloading: ftp://" + host + "/" + MINIFI_SUBFOLDER + release + "/" + binary)
        with open(os.path.join(os.getcwd(), binary), "wb") as targetfile:
            ftp.retrbinary("RETR " + binary, targetfile.write)
        successful_download = True
    except Exception:
        print("Failed to download binary")
    finally:
        # BUG FIX: if FTP(host) itself raised, `ftp` was unbound and the
        # original's bare `ftp.quit()` crashed with NameError here instead
        # of returning False; quit() may also raise on a dead connection.
        if ftp is not None:
            try:
                ftp.quit()
            except Exception:
                pass
    return successful_download
def main(args):
    """Find, download and extract a MiNiFi binary matching this system.

    :param args: argparse namespace with `mirror` and `version` attributes
    :return: 0 on success, 1 when skipped or no match, -1 on failure
    """
    print(get_distro())
    binaries = []
    try:
        local_repo = args.mirror if args.mirror else find_closest_mirror()
        print(local_repo)
        # NOTE(review): `dir` shadows the builtin; kept because later calls
        # and the FTP URL reuse these exact names
        host, dir = local_repo.replace('ftp://', '').split('/', 1)
        latest_release, binaries = get_release_and_binaries_from_ftp(host, dir, args.version if args.version else None)
    except Exception:
        print("Failed to get binaries from Apache mirror")
        return -1
    # mapped_distro() is loop-invariant: evaluate it once, not per binary
    distro, release = mapped_distro()
    matching_binaries = []
    for binary in binaries:
        if release and release in binary:
            matching_binaries.append(binary)
        elif distro and distro in binary:
            matching_binaries.append(binary)
    if not matching_binaries:
        print("No compatible binary found, MiNiFi needs to be compiled locally")
        return 1
    invalid_input = True
    download = None
    selected_binary = None
    if len(matching_binaries) == 1:
        print("A binary in Apache repo seems to match your system: " + matching_binaries[0])
        while invalid_input:
            try:
                download = strtobool(input("Would you like to download? [y/n]"))
                invalid_input = False
                if download:
                    selected_binary = matching_binaries[0]
            except Exception:
                # unparsable answer: ask again
                pass
    else:
        print("The following binaries in Apache repo seem to match your system: ")
        for i, item in enumerate(matching_binaries):
            print(str(i + 1) + " - " + item)
        print()
        while invalid_input:
            try:
                user_input = input("Please select one to download (1 to " + str(len(matching_binaries)) + ") or \"s\" to skip and compile locally\n")
                # BUG FIX: str.lower() returns a new string; the original
                # discarded the result, so an upper-case "S" never matched
                user_input = user_input.lower()
                if user_input == "s":
                    invalid_input = False
                    download = False
                    break
                idx = int(user_input) - 1
                if idx < 0:
                    continue
                selected_binary = matching_binaries[idx]
                download = True
                invalid_input = False
            except Exception:
                # non-numeric / out-of-range selection: re-prompt
                pass
    if not download:
        return 1
    if not download_binary_from_ftp(host, dir, latest_release, selected_binary):
        return -1
    try:
        with tarfile.open(os.path.join(os.getcwd(), selected_binary), "r:gz") as tar:
            # NOTE(review): extractall() trusts member paths of the
            # downloaded archive; consider validating members first
            tar.extractall()
    except Exception:
        print("Failed to extract tar file")
        return -1
    print("Successfully downloaded and extracted MiNiFi")
    return 0
# CLI entry point: parse the optional mirror/version overrides and run.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Download latest MiNiFi release")
    parser.add_argument("-m", "--mirror", dest="mirror", help="user-specified apache mirror")
    parser.add_argument("-v", "--version", dest="version", help="user-specified version to be downloaded")
    args = parser.parse_args()
    main(args)
| |
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
"""
- LICENCE
The MIT License (MIT)
Copyright (c) 2016 Eleftherios Anagnostopoulos for Ericsson AB (EU FP7 CityPulse Project)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
- DESCRIPTION OF DOCUMENTS
-- MongoDB Database Documents:
address_document: {
'_id', 'name', 'node_id', 'point': {'longitude', 'latitude'}
}
bus_line_document: {
'_id', 'bus_line_id', 'bus_stops': [{'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}}]
}
bus_stop_document: {
'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}
}
bus_stop_waypoints_document: {
'_id', 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'waypoints': [[edge_object_id]]
}
bus_vehicle_document: {
'_id', 'bus_vehicle_id', 'maximum_capacity',
'routes': [{'starting_datetime', 'ending_datetime', 'timetable_id'}]
}
detailed_bus_stop_waypoints_document: {
'_id', 'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'waypoints': [[edge_document]]
}
edge_document: {
'_id', 'starting_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'ending_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'max_speed', 'road_type', 'way_id', 'traffic_density'
}
node_document: {
'_id', 'osm_id', 'tags', 'point': {'longitude', 'latitude'}
}
point_document: {
'_id', 'osm_id', 'point': {'longitude', 'latitude'}
}
timetable_document: {
'_id', 'timetable_id', 'bus_line_id', 'bus_vehicle_id',
'timetable_entries': [{
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'departure_datetime', 'arrival_datetime', 'number_of_onboarding_passengers',
'number_of_deboarding_passengers', 'number_of_current_passengers',
'route': {
'total_distance', 'total_time', 'node_osm_ids', 'points', 'edges',
'distances_from_starting_node', 'times_from_starting_node',
'distances_from_previous_node', 'times_from_previous_node'
}
}],
'travel_requests': [{
'_id', 'client_id', 'bus_line_id',
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'departure_datetime', 'arrival_datetime',
'starting_timetable_entry_index', 'ending_timetable_entry_index'
}]
}
traffic_event_document: {
'_id', 'event_id', 'event_type', 'event_level', 'point': {'longitude', 'latitude'}, 'datetime'
}
travel_request_document: {
'_id', 'client_id', 'bus_line_id',
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'departure_datetime', 'arrival_datetime',
'starting_timetable_entry_index', 'ending_timetable_entry_index'
}
way_document: {
'_id', 'osm_id', 'tags', 'references'
}
-- Route Generator Responses:
get_route_between_two_bus_stops: {
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'route': {
'total_distance', 'total_time', 'node_osm_ids', 'points', 'edges',
'distances_from_starting_node', 'times_from_starting_node',
'distances_from_previous_node', 'times_from_previous_node'
}
}
get_route_between_multiple_bus_stops: [{
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'route': {
'total_distance', 'total_time', 'node_osm_ids', 'points', 'edges',
'distances_from_starting_node', 'times_from_starting_node',
'distances_from_previous_node', 'times_from_previous_node'
}
}]
get_waypoints_between_two_bus_stops: {
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'waypoints': [[{
'_id', 'starting_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'ending_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'max_speed', 'road_type', 'way_id', 'traffic_density'
}]]
}
get_waypoints_between_multiple_bus_stops: [{
'starting_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'ending_bus_stop': {'_id', 'osm_id', 'name', 'point': {'longitude', 'latitude'}},
'waypoints': [[{
'_id', 'starting_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'ending_node': {'osm_id', 'point': {'longitude', 'latitude'}},
'max_speed', 'road_type', 'way_id', 'traffic_density'
}]]
}]
"""
import time
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from src.common.logger import log
from src.look_ahead.bus_vehicle_handler import BusVehicleHandler
__author__ = 'Eleftherios Anagnostopoulos'
__email__ = 'eanagnostopoulos@hotmail.com'
# BUG FIX: the original list was missing the comma between the two
# strings, so implicit string concatenation silently merged both
# contributors into a single credits entry.
__credits__ = [
    'Azadeh Bararsani (Senior Researcher at Ericsson AB) - email: azadeh.bararsani@ericsson.com',
    'Aneta Vulgarakis Feljan (Senior Researcher at Ericsson AB) - email: aneta.vulgarakis@ericsson.com'
]
class BusVehicleHandlerTester(object):
    """Exercises BusVehicleHandler operations with timing and logging.

    Every test_* method delegates to _run_timed(), which brackets the
    underlying BusVehicleHandler call with 'starting'/'finished' log lines
    and records the wall-clock duration — replacing ten copies of the
    same boilerplate in the original.
    """

    def __init__(self):
        self.module_name = 'bus_vehicle_handler_tester'
        self.log_type = 'INFO'
        self.bus_vehicle_handler = self._run_timed(
            'initialize_bus_vehicle_handler', BusVehicleHandler
        )

    def _run_timed(self, label, operation, **kwargs):
        """Run operation(**kwargs) between 'starting'/'finished' log lines.

        Keeps self.log_message / self.start_time / self.elapsed_time
        updated on the instance, exactly as the inlined original did.

        :param label: operation name used as the log-message prefix
        :param operation: callable to execute and time
        :return: whatever the callable returns
        """
        self.log_message = label + ': starting'
        log(module_name=self.module_name, log_type=self.log_type, log_message=self.log_message)
        self.start_time = time.time()
        result = operation(**kwargs)
        self.elapsed_time = time.time() - self.start_time
        self.log_message = label + ': finished - elapsed_time = ' \
                           + str(self.elapsed_time) + ' sec'
        log(module_name=self.module_name, log_type=self.log_type, log_message=self.log_message)
        return result

    def test_clear_bus_vehicle_documents_collection(self):
        """
        Delete all the documents of the BusVehicleDocuments collection.
        :return: number_of_deleted_documents: int
        """
        return self._run_timed(
            'test_clear_bus_vehicle_documents_collection',
            self.bus_vehicle_handler.clear_bus_vehicle_documents_collection
        )

    def test_delete_bus_vehicle_document(self, object_id=None, bus_vehicle_id=None):
        """
        Delete a bus_vehicle_document.
        :param object_id: ObjectId
        :param bus_vehicle_id: int
        :return: True if the document was successfully deleted, otherwise False.
        """
        return self._run_timed(
            'test_delete_bus_vehicle_document',
            self.bus_vehicle_handler.delete_bus_vehicle_document,
            object_id=object_id,
            bus_vehicle_id=bus_vehicle_id
        )

    def test_delete_bus_vehicle_documents(self, object_ids=None, bus_vehicle_ids=None):
        """
        Delete multiple bus_vehicle_documents.
        :param object_ids: [ObjectId]
        :param bus_vehicle_ids: [int]
        :return: number_of_deleted_documents: int
        """
        return self._run_timed(
            'test_delete_bus_vehicle_documents',
            self.bus_vehicle_handler.delete_bus_vehicle_documents,
            object_ids=object_ids,
            bus_vehicle_ids=bus_vehicle_ids
        )

    def test_generate_bus_vehicle_document(self, maximum_capacity):
        """
        Generate a new bus_vehicle_document.
        :param maximum_capacity: int
        :return: new_object_id: ObjectId
        """
        return self._run_timed(
            'test_generate_bus_vehicle_document',
            self.bus_vehicle_handler.generate_bus_vehicle_document,
            maximum_capacity=maximum_capacity
        )

    def test_generate_bus_vehicle_documents(self, maximum_capacity, number_of_bus_vehicle_documents):
        """
        Generate multiple bus_vehicle_documents.
        :param maximum_capacity: int
        :param number_of_bus_vehicle_documents: int
        :return: new_object_ids: [ObjectIds]
        """
        return self._run_timed(
            'test_generate_bus_vehicle_documents',
            self.bus_vehicle_handler.generate_bus_vehicle_documents,
            maximum_capacity=maximum_capacity,
            number_of_bus_vehicle_documents=number_of_bus_vehicle_documents
        )

    def test_insert_bus_vehicle_document(self, bus_vehicle_document=None, bus_vehicle_id=None,
                                         maximum_capacity=None, routes=None):
        """
        Insert a new bus_vehicle_document or update, if it already exists in the database.
        :param bus_vehicle_document
        :param bus_vehicle_id: int
        :param maximum_capacity: int
        :param routes: [{'starting_datetime', 'ending_datetime', 'timetable_id'}]
        :return: new_object_id: ObjectId
        """
        return self._run_timed(
            'test_insert_bus_vehicle_document',
            self.bus_vehicle_handler.insert_bus_vehicle_document,
            bus_vehicle_document=bus_vehicle_document,
            bus_vehicle_id=bus_vehicle_id,
            maximum_capacity=maximum_capacity,
            routes=routes
        )

    def test_insert_bus_vehicle_documents(self, bus_vehicle_documents, insert_many=False):
        """
        Insert multiple bus_vehicle_documents or update existing ones.
        :param bus_vehicle_documents:
        :param insert_many: bool
        :return: new_object_ids: [ObjectId]
        """
        return self._run_timed(
            'test_insert_bus_vehicle_documents',
            self.bus_vehicle_handler.insert_bus_vehicle_documents,
            bus_vehicle_documents=bus_vehicle_documents,
            insert_many=insert_many
        )

    def test_print_bus_vehicle_document(self, object_id=None, bus_vehicle_id=None):
        """
        Print a bus_vehicle_document.
        :param object_id: ObjectId
        :param bus_vehicle_id: int
        :return: None
        """
        self._run_timed(
            'test_print_bus_vehicle_document',
            self.bus_vehicle_handler.print_bus_vehicle_document,
            object_id=object_id,
            bus_vehicle_id=bus_vehicle_id
        )

    def test_print_bus_vehicle_documents(self, object_ids=None, bus_vehicle_ids=None, counter=None):
        """
        Print multiple bus_vehicle_documents.
        :param object_ids: [ObjectId]
        :param bus_vehicle_ids: [int]
        :param counter: int
        :return: None
        """
        self._run_timed(
            'test_print_bus_vehicle_documents',
            self.bus_vehicle_handler.print_bus_vehicle_documents,
            object_ids=object_ids,
            bus_vehicle_ids=bus_vehicle_ids,
            counter=counter
        )
# Interactive driver: show a menu in a loop and dispatch to the matching
# test method. NOTE(review): uses raw_input, so this entry point is
# Python 2 only (consistent with the file's shebang).
if __name__ == '__main__':
    bus_vehicle_handler_tester = BusVehicleHandlerTester()
    while True:
        # brief pause so pending log output appears before the prompt
        time.sleep(0.01)
        selection = raw_input(
            '\n0. exit'
            '\n1. test_clear_bus_vehicle_documents_collection'
            '\n2. test_delete_bus_vehicle_document'
            '\n3. test_delete_bus_vehicle_documents'
            '\n4. test_generate_bus_vehicle_document'
            '\n5. test_generate_bus_vehicle_documents'
            '\n6. test_insert_bus_vehicle_document'
            '\n7. test_insert_bus_vehicle_documents'
            '\n8. test_print_bus_vehicle_document'
            '\n9. test_print_bus_vehicle_documents'
            '\nSelection: '
        )
        # 0. exit
        if selection == '0':
            break
        # 1. test_clear_bus_vehicle_documents_collection
        elif selection == '1':
            bus_vehicle_handler_tester.test_clear_bus_vehicle_documents_collection()
        # 2. test_delete_bus_vehicle_document
        elif selection == '2':
            bus_vehicle_id = int(
                raw_input(
                    '\n2. test_delete_bus_vehicle_document'
                    '\nbus_vehicle_id: '
                )
            )
            bus_vehicle_handler_tester.test_delete_bus_vehicle_document(
                object_id=None,
                bus_vehicle_id=bus_vehicle_id
            )
        # 3. test_delete_bus_vehicle_documents
        elif selection == '3':
            # NOTE(review): always deletes with an empty id list —
            # presumably an unfinished stub; confirm intended behavior
            bus_vehicle_ids = []
            bus_vehicle_handler_tester.test_delete_bus_vehicle_documents(
                object_ids=None,
                bus_vehicle_ids=bus_vehicle_ids
            )
        # 4. test_generate_bus_vehicle_document
        elif selection == '4':
            maximum_capacity = int(
                raw_input(
                    '\n4. test_generate_bus_vehicle_document'
                    '\nmaximum_capacity: '
                )
            )
            bus_vehicle_handler_tester.test_generate_bus_vehicle_document(
                maximum_capacity=maximum_capacity
            )
        # 5. test_generate_bus_vehicle_documents
        elif selection == '5':
            maximum_capacity = int(
                raw_input(
                    '\n5. test_generate_bus_vehicle_documents'
                    '\nmaximum_capacity: '
                )
            )
            number_of_bus_vehicle_documents = int(
                raw_input(
                    '\nnumber_of_bus_vehicle_documents: '
                )
            )
            bus_vehicle_handler_tester.test_generate_bus_vehicle_documents(
                maximum_capacity=maximum_capacity,
                number_of_bus_vehicle_documents=number_of_bus_vehicle_documents
            )
        # 6. test_insert_bus_vehicle_document
        elif selection == '6':
            # not wired into the interactive menu yet
            pass
        # 7. test_insert_bus_vehicle_documents
        elif selection == '7':
            pass
        # 8. test_print_bus_vehicle_document
        elif selection == '8':
            bus_vehicle_id = int(
                raw_input(
                    '\n8. test_print_bus_vehicle_document'
                    '\nbus_vehicle_id: '
                )
            )
            bus_vehicle_handler_tester.test_print_bus_vehicle_document(
                object_id=None,
                bus_vehicle_id=bus_vehicle_id
            )
        # 9. test_print_bus_vehicle_documents
        elif selection == '9':
            bus_vehicle_handler_tester.test_print_bus_vehicle_documents()
        # any other input: just show the menu again
        else:
            pass
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''General Python client for SCAN service, built on top of PyScanClient
library, which uses a RESTful web service to manage and proceed scan jobs.
The scan server host and port could be configured in configuration file, e.g.:
phantasy.ini.
The scan server was developed at SNS, its nightly built binary could be found
at: https://ics-web.sns.ornl.gov/css/nightly/ and source code is managed on
github: https://github.com/ControlSystemStudio/cs-studio/tree/master/applications/scan/scan-plugins/org.csstudio.scan.server
The PyScanClient source code is managed at github:
https://github.com/PythonScanClient/PyScanClient
'''
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
import re
from urlparse import urlparse
import scan
from scan.client.scanclient import ScanClient
from scan.commands import Set, Loop, Delay, Log, Comment
from scan.client.logdata import createTable
import logging
_LOGGER = logging.getLogger(__name__)
# Compatibility shim so isinstance checks against `basestring` work on
# both interpreter lines.
try:
    # Python 2.X
    basestring
except NameError:
    # Python 3.X
    basestring = str
class BaseScanClient(scan.ScanClient):
    # class-wide auto-increment used to mint default scan names ('Scan-###')
    __NAME_ID = 1
def __init__(self, url=None, **kws):
    """Create a scan client bound to *url* (default http://localhost:4810).

    :param url: scan server URL; host and port are parsed out of it
    :param kws: extra keyword options forwarded to _post_init()
    """
    self._url = None
    # NOTE(review): `self.url = ...` goes through the url.setter property,
    # which already parses host/port and calls scan.ScanClient.__init__;
    # the code below then parses and initializes a second time.
    if url is None:
        self.url = 'http://localhost:4810'
        host, port = 'localhost', 4810
    else:
        self.url = url
        # splitting on ':' yields ['http(s)', '//host', 'port']; slashes are
        # stripped from each piece. NOTE(review): port stays a *string* here
        # while the default branch uses int 4810 — confirm ScanClient
        # accepts both. Also assumes the URL always has an explicit port.
        _, host, port = [s.strip('/') for s in re.split('[:]', self.url)]
    self._host, self._port = host, port
    scan.ScanClient.__init__(self, host=host, port=port)
    self._post_init(**kws)
@property
def url(self):
    """str: URL of scan server, e.g. http://127.0.0.1:4810.

    Assign through the setter to validate the scheme, re-parse
    host/port and re-initialize the underlying ScanClient.
    """
    return self._url
@property
def host(self):
    """str: Scan server address, e.g. 127.0.0.1."""
    # backed by _host; assigning via the setter re-initializes the client
    return self._host
@host.setter
def host(self, host):
    """Point the client at a new server address.

    None or an unchanged value is a no-op; otherwise the underlying
    scan.ScanClient is re-initialized against the new host.
    """
    if host is None or host == self.host:
        return
    self._host = host
    scan.ScanClient.__init__(self, host=self.host, port=self.port)
@property
def port(self):
    """int: Scan server port, e.g. 4810."""
    # NOTE(review): despite the docstring this holds a *string* when the
    # value was parsed from a URL (see __init__/url.setter) — confirm
    # scan.ScanClient tolerates both types.
    return self._port
@port.setter
def port(self, port):
    """Rebind the client to a new server port.

    None or an unchanged value is a no-op; otherwise the underlying
    scan.ScanClient is re-initialized against the new port.
    """
    if port is None or port == self.port:
        return
    self._port = port
    scan.ScanClient.__init__(self, host=self.host, port=self.port)
@url.setter
def url(self, url):
    """Validate *url*, re-parse host/port and reconnect.

    :raises TypeError: when the scheme is not http/https
    """
    # an unchanged (non-None) URL is a no-op
    if url is not None and url == self.url:
        pass
    else:
        urlp = urlparse(url)
        if urlp.scheme in ['http', 'https']:
            self._url = urlp.geturl()
            # ':' split gives ['http(s)', '//host', 'port']; slashes are
            # stripped. NOTE(review): assumes an explicit port is present —
            # 'http://host' would unpack to two items and raise ValueError.
            _, host, port = [s.strip('/') for s in re.split('[:]', self._url)]
            self._host, self._port = host, port
            scan.ScanClient.__init__(self, host=self.host, port=self.port)
        else:
            raise TypeError("Invalid URL.")
@property
def name(self):
    """str: Name of the scan task, 'Scan-###' by default, '###' is an
    auto-incremental integer, ranging from 001-999."""
    # NOTE(review): the counter is not actually capped at 999 — the format
    # spec '03d' is only a minimum width, so larger values simply widen.
    return self._name
@name.setter
def name(self, name):
    """Set the task name; auto-generate 'Scan-###' when *name* is None."""
    if name is not None:
        self._name = name
        return
    self._name = "Scan-{0:03d}".format(BaseScanClient.__NAME_ID)
    BaseScanClient.__NAME_ID += 1
@property
def n_sample(self):
    """int: Counter of DAQ for every *device_set* updating, 1 by default.
    """
    # the setter normalizes None to 1 and coerces numerics to int
    return self._n_sample
@n_sample.setter
def n_sample(self, n):
    """Set the per-update DAQ sample count; None means the default of 1.

    Numeric values are truncated to int; anything else raises TypeError.
    """
    if n is None:
        self._n_sample = 1
        return
    # `long` exists because this module still targets Python 2
    if not isinstance(n, (int, float, long)):
        raise TypeError("Input should be an integer.")
    self._n_sample = int(n)
def _post_init(self, **kws):
    """Hook for keyword-driven initialization; currently a no-op.

    NOTE(review): the commented-out lines below suggest the intended
    behavior (applying name/host/port/n_sample keywords) — confirm
    before enabling them.
    """
    pass
    # self.name = kws.get('name', None)
    # self.host = kws.get('host', None)
    # self.port = kws.get('port', None)
    # self.n_sample = kws.get('n_sample', None)
def __repr__(self):
    """Return a readable representation including the server URL.

    BUG FIX: the original body was ``pass``, so ``repr(client)`` raised
    ``TypeError: __repr__ returned non-string (type NoneType)``;
    __repr__ must return a str.
    """
    return "{0}(url={1!r})".format(type(self).__name__, self._url)
def saveData(self):
    """Save data.

    NOTE(review): not implemented yet — currently a no-op placeholder.
    """
    pass
def scan2d(self, device1, device2, meas_dev, **kwds):
    """ Perform a 2-D alignment scan, it checks the readback within given tolerance,
    or waits callback completion if readback is `False`, for each setting.
    If original setting is provided, it will restore to this point after scan finished.
    If there is any error, it will try again, then abort.

    acceptable kwds key words:

    - title: job title for a scan, "phyutil 2D Scan" by default
    - orig1: original settings for `device1`, default None.
    - readback1: `False` to not check any readback,
      `True` to wait for readback from the 'device1',
      or name of specific device to check for readback.
    - tolerance1: Tolerance when checking numeric `readback`, 0 by default.
    - orig2: original settings for `device2`, default None.
    - readback2: `False` to not check any readback,
      `True` to wait for readback from the 'device2',
      or name of specific device to check for readback.
    - tolerance2: Tolerance when checking numeric `readback`, 0 by default.
    - timeout: Timeout in seconds, used for `completion` and `readback` check, 5.0 by default.
    - completion: wait for put-callback completion on each Set/Loop, False by default.
    - errhandler: error handler forwarded to the scan commands, None by default.
    - ramping: ramping `device` to start at beginning, and ramp back to original after finish.
      If orig is not given, then ignore since not know where to go.
      False by default.
      `False` to directly jump to start for the 'device',
      `True` to ramp to start with same step for the 'device',
    - delay: delay in seconds, 5.0 by default.
    - wait: whether wait until done, `True` by default
    - samples: how many points taken for each measurement device, 1 by default
    - compress: how to compress data if multiple samples are taken, None by default.
      Has to be:
      `None`: no compress, and keep all data as it is;
      `average`: take an average (not implemented yet).

    :param device1: first dimension information with format [Device name, start, stop, step]
    :param device2: second dimension information with format [Device name, start, stop, step]
    :param meas_dev: device(s) to measure and log at every set point
    :return: scan task id assigned by the scan server
    :raise RuntimeError: if the device specs are malformed or an
        unsupported ``compress`` option is given.
    """
    # Both device specs must be 4-element sequences: [name, start, stop, step].
    if not isinstance(device1, (list, tuple)) or len(device1) != 4 or \
            not isinstance(device2, (list, tuple)) or len(device2) != 4:
        raise RuntimeError("Scan parameters are not sufficient.")
    if not isinstance(device1[0], basestring):
        raise Exception("Expecting device1 name, got '%s'" % str(device1[0]))
    else:
        # Ensure device is NOT unicode object until
        # it is supported by PyScanClient library.
        device1[0] = str(device1[0])
    if not isinstance(device2[0], basestring):
        raise Exception("Expecting device2 name, got '%s'" % str(device2[0]))
    else:
        # Ensure device is NOT unicode object until
        # it is supported by PyScanClient library.
        device2[0] = str(device2[0])
    comments = kwds.get("title", "phyutil 2D Scan")
    orig1 = kwds.get("orig1", None)
    readback1 = kwds.get("readback1", False)
    tolerance1 = kwds.get("tolerance1", 0.0)
    orig2 = kwds.get("orig2", None)
    readback2 = kwds.get("readback2", False)
    tolerance2 = kwds.get("tolerance2", 0.0)
    timeout = kwds.get("timeout", 5.0)
    ramping = kwds.get("ramping", False)
    delay = kwds.get("delay", 5.0)
    samples = int(kwds.get("samples", 1))
    wait = kwds.get('wait', True)
    compress = kwds.get("compress", None)
    completion = kwds.get("completion", False)
    errhandler = kwds.get('errhandler', None)
    if compress is not None:
        # TODO add support to compress multiple samples and compress.lower not in ["average"]:
        raise RuntimeError("Compress algorithm is not support yet.")
    scan_cmds = []
    # prepare scan comments
    scan_cmds.append(Comment(comments))
    # ramp to start point if needed
    if orig1 is not None and ramping:
        # slow ramping to the start point for scan;
        # the step sign is chosen so the ramp moves toward the scan start
        if orig1 < device1[1]:
            scan_cmds.append(Loop(device1[0], orig1, device1[1],
                                  abs(device1[3]), [Delay(delay)],
                                  completion=completion,
                                  readback=readback1, tolerance=tolerance1,
                                  timeout=timeout, errhandler=errhandler))
        else:
            scan_cmds.append(Loop(device1[0], orig1, device1[1],
                                  -abs(device1[3]), [Delay(delay)],
                                  completion=completion,
                                  readback=readback1, tolerance=tolerance1,
                                  timeout=timeout, errhandler=errhandler))
    # ramp to start point if needed
    if orig2 is not None and ramping:
        # slow ramping to the start point for scan
        if orig2 < device2[1]:
            scan_cmds.append(Loop(device2[0], orig2, device2[1],
                                  abs(device2[3]), [Delay(delay)],
                                  completion=completion,
                                  readback=readback2, tolerance=tolerance2,
                                  timeout=timeout, errhandler=errhandler))
        else:
            scan_cmds.append(Loop(device2[0], orig2, device2[1],
                                  -abs(device2[3]), [Delay(delay)],
                                  completion=completion,
                                  readback=readback2, tolerance=tolerance2,
                                  timeout=timeout, errhandler=errhandler))
    # confirm start point
    scan_cmds.append(Set(device1[0], device1[1], completion=completion,
                         readback=readback1, tolerance=tolerance1,
                         timeout=timeout, errhandler=errhandler))
    scan_cmds.append(Set(device2[0], device2[1], completion=completion,
                         readback=readback2, tolerance=tolerance2,
                         timeout=timeout, errhandler=errhandler))
    # real scan: outer loop over device1, inner loop over device2,
    # logging device1, device2 and all measurement devices at each point.
    # NOTE(review): the inner Loop passes no timeout/errhandler -- confirm
    # that the PyScanClient defaults are acceptable here.
    if samples == 1:
        scan_cmds.append(Loop(device1[0], device1[1], device1[2], device1[3],
                              [Loop(device2[0], device2[1], device2[2], device2[3],
                                    [Delay(delay),
                                     Log([device1[0], device2[0]] + list(meas_dev))
                                     ],
                                    completion=completion,
                                    readback=readback2, tolerance=tolerance2,
                                    ),
                               ],
                              completion=completion,
                              readback=readback1, tolerance=tolerance1,
                              timeout=timeout, errhandler=errhandler))
    else:
        # take `samples` readings per set point via a server-local
        # counter variable ('loc://i(0)')
        scan_cmds.append(Loop(device1[0], device1[1], device1[2], device1[3],
                              [Loop(device2[0], device2[1], device2[2], device2[3],
                                    [Loop('loc://i(0)', 1, samples, 1,
                                          [Delay(delay), Log([device1[0], device2[0]] + list(meas_dev))])
                                     ],
                                    completion=completion,
                                    readback=readback2, tolerance=tolerance2,
                                    ),
                               ],
                              completion=completion,
                              readback=readback1, tolerance=tolerance1,
                              timeout=timeout, errhandler=errhandler))
    # ramp back to original setting
    if orig1 is not None and ramping:
        # slow ramping to the start point for scan
        if device1[2] < orig1:
            scan_cmds.append(Loop(device1[0], device1[2], orig1,
                                  abs(device1[3]), [Delay(delay)],
                                  completion=completion,
                                  readback=readback1, tolerance=tolerance1,
                                  timeout=timeout, errhandler=errhandler))
        else:
            scan_cmds.append(Loop(device1[0], device1[2], orig1,
                                  -abs(device1[3]), [Delay(delay)],
                                  completion=completion,
                                  readback=readback1, tolerance=tolerance1,
                                  timeout=timeout, errhandler=errhandler))
    # ramp back to original setting
    if orig2 is not None and ramping:
        # slow ramping to the start point for scan
        if device2[2] < orig2:
            scan_cmds.append(Loop(device2[0], device2[2], orig2,
                                  abs(device2[3]), [Delay(delay)],
                                  completion=completion,
                                  readback=readback2, tolerance=tolerance2,
                                  timeout=timeout, errhandler=errhandler))
        else:
            scan_cmds.append(Loop(device2[0], device2[2], orig2,
                                  -abs(device2[3]), [Delay(delay)],
                                  completion=completion,
                                  readback=readback2, tolerance=tolerance2,
                                  timeout=timeout, errhandler=errhandler))
    # confirm original setting
    if orig1 is not None:
        scan_cmds.append(Set(device1[0], orig1, completion=completion,
                             readback=readback1, tolerance=tolerance1,
                             timeout=timeout, errhandler=errhandler))
    if orig2 is not None:
        scan_cmds.append(Set(device2[0], orig2, completion=completion,
                             readback=readback2, tolerance=tolerance2,
                             timeout=timeout, errhandler=errhandler))
    # lazily connect to the scan server on first use
    if self.scanclient is None:
        self._connectscanserver()
    scid = self.scanclient.submit(scan_cmds, name=comments)
    if wait:
        self.scanclient.waitUntilDone(scid)
    return scid
| |
"""
Calculates mold growth indication from temperature and humidity.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.mold_indicator/
"""
import logging
import math
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
import homeassistant.util as util
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import track_state_change
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT, TEMP_CELSIUS, TEMP_FAHRENHEIT, CONF_NAME)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Default friendly name of the sensor entity.
DEFAULT_NAME = 'Mold Indicator'

# Configuration keys for the three source sensors and the calibration factor.
CONF_INDOOR_TEMP = 'indoor_temp_sensor'
CONF_OUTDOOR_TEMP = 'outdoor_temp_sensor'
CONF_INDOOR_HUMIDITY = 'indoor_humidity_sensor'
CONF_CALIBRATION_FACTOR = 'calibration_factor'

# Magnus-formula coefficients used for the dew-point approximation
# (temperatures in degrees Celsius).
MAGNUS_K2 = 17.62
MAGNUS_K3 = 243.12

# Names of the extra state attributes exposed by the sensor.
ATTR_DEWPOINT = 'Dewpoint'
ATTR_CRITICAL_TEMP = 'Est. Crit. Temp'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_INDOOR_TEMP): cv.entity_id,
    vol.Required(CONF_OUTDOOR_TEMP): cv.entity_id,
    vol.Required(CONF_INDOOR_HUMIDITY): cv.entity_id,
    vol.Optional(CONF_CALIBRATION_FACTOR): vol.Coerce(float),
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup MoldIndicator sensor."""
    add_devices([MoldIndicator(
        hass,
        config.get(CONF_NAME, DEFAULT_NAME),
        config.get(CONF_INDOOR_TEMP),
        config.get(CONF_OUTDOOR_TEMP),
        config.get(CONF_INDOOR_HUMIDITY),
        config.get(CONF_CALIBRATION_FACTOR))])
# pylint: disable=too-many-instance-attributes
class MoldIndicator(Entity):
    """Represents a MoldIndication sensor.

    Estimates the relative humidity at the coldest spot of the building
    envelope ("critical temperature") from indoor temperature/humidity and
    outdoor temperature using the Magnus dew-point approximation. A value
    close to 100% indicates that condensation (and therefore mold) is
    likely at that spot.
    """

    # pylint: disable=too-many-arguments
    def __init__(self, hass, name, indoor_temp_sensor, outdoor_temp_sensor,
                 indoor_humidity_sensor, calib_factor):
        """Initialize the sensor and subscribe to the source sensors."""
        self._state = None
        self._name = name
        self._indoor_temp_sensor = indoor_temp_sensor
        self._indoor_humidity_sensor = indoor_humidity_sensor
        self._outdoor_temp_sensor = outdoor_temp_sensor
        self._calib_factor = calib_factor
        self._is_metric = hass.config.units.is_metric
        # All intermediate values are kept in Celsius / percent; they stay
        # None until the corresponding source sensor reports a valid value.
        self._dewpoint = None
        self._indoor_temp = None
        self._outdoor_temp = None
        self._indoor_hum = None
        self._crit_temp = None

        track_state_change(hass, indoor_temp_sensor, self._sensor_changed)
        track_state_change(hass, outdoor_temp_sensor, self._sensor_changed)
        track_state_change(hass, indoor_humidity_sensor, self._sensor_changed)

        # Read initial state so a value is available right after setup.
        indoor_temp = hass.states.get(indoor_temp_sensor)
        outdoor_temp = hass.states.get(outdoor_temp_sensor)
        indoor_hum = hass.states.get(indoor_humidity_sensor)

        if indoor_temp:
            self._indoor_temp = MoldIndicator._update_temp_sensor(indoor_temp)
        if outdoor_temp:
            self._outdoor_temp = MoldIndicator._update_temp_sensor(
                outdoor_temp)
        if indoor_hum:
            self._indoor_hum = MoldIndicator._update_hum_sensor(indoor_hum)

        self.update()

    @staticmethod
    def _update_temp_sensor(state):
        """Parse a temperature sensor state; return Celsius or None."""
        unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
        temp = util.convert(state.state, float)
        if temp is None:
            _LOGGER.error('Unable to parse sensor temperature: %s',
                          state.state)
            return None

        # convert to celsius if necessary
        if unit == TEMP_FAHRENHEIT:
            return util.temperature.fahrenheit_to_celsius(temp)
        elif unit == TEMP_CELSIUS:
            return temp
        else:
            _LOGGER.error("Temp sensor has unsupported unit: %s"
                          " (allowed: %s, %s)",
                          unit, TEMP_CELSIUS, TEMP_FAHRENHEIT)
        return None

    @staticmethod
    def _update_hum_sensor(state):
        """Parse a humidity sensor state; return percent or None.

        Out-of-range or wrongly-united values are logged but still
        returned, preserving the original best-effort behavior.
        """
        unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
        hum = util.convert(state.state, float)
        if hum is None:
            _LOGGER.error('Unable to parse sensor humidity: %s',
                          state.state)
            return None

        if unit != '%':
            _LOGGER.error("Humidity sensor has unsupported unit: %s %s",
                          unit, " (allowed: %)")
        if hum > 100 or hum < 0:
            _LOGGER.error("Humidity sensor out of range: %s %s", hum,
                          " (allowed: 0-100%)")
        return hum

    def update(self):
        """Calculate latest state."""
        # All three inputs are required; skip until they have reported.
        if None in (self._indoor_temp, self._indoor_hum, self._outdoor_temp):
            return

        # re-calculate dewpoint and mold indicator
        self._calc_dewpoint()
        self._calc_moldindicator()

    def _sensor_changed(self, entity_id, old_state, new_state):
        """Handle a state change of any of the three source sensors."""
        if new_state is None:
            return

        if entity_id == self._indoor_temp_sensor:
            self._indoor_temp = MoldIndicator._update_temp_sensor(new_state)
        elif entity_id == self._outdoor_temp_sensor:
            self._outdoor_temp = MoldIndicator._update_temp_sensor(new_state)
        elif entity_id == self._indoor_humidity_sensor:
            self._indoor_hum = MoldIndicator._update_hum_sensor(new_state)

        self.update()
        self.update_ha_state()

    def _calc_dewpoint(self):
        """Calculate the dewpoint for the indoor air (Magnus formula)."""
        alpha = MAGNUS_K2 * self._indoor_temp / (MAGNUS_K3 + self._indoor_temp)
        beta = MAGNUS_K2 * MAGNUS_K3 / (MAGNUS_K3 + self._indoor_temp)

        if self._indoor_hum == 0:
            # log(0) is undefined; assume a very low dewpoint instead.
            self._dewpoint = -50
        else:
            self._dewpoint = \
                MAGNUS_K3 * (alpha + math.log(self._indoor_hum / 100.0)) / \
                (beta - math.log(self._indoor_hum / 100.0))
        _LOGGER.debug("Dewpoint: %f " + TEMP_CELSIUS, self._dewpoint)

    def _calc_moldindicator(self):
        """Calculate the humidity at the (cold) calibration point."""
        if None in (self._dewpoint, self._calib_factor) or \
           self._calib_factor == 0:
            _LOGGER.debug("Invalid inputs - dewpoint: %s,"
                          " calibration-factor: %s",
                          self._dewpoint, self._calib_factor)
            self._state = None
            return

        # first calculate the approximate temperature at the calibration point
        self._crit_temp = \
            self._outdoor_temp + (self._indoor_temp - self._outdoor_temp) / \
            self._calib_factor

        _LOGGER.debug("Estimated Critical Temperature: %f " +
                      TEMP_CELSIUS, self._crit_temp)

        # Then calculate the humidity at this point (inverse Magnus)
        alpha = MAGNUS_K2 * self._crit_temp / (MAGNUS_K3 + self._crit_temp)
        beta = MAGNUS_K2 * MAGNUS_K3 / (MAGNUS_K3 + self._crit_temp)

        crit_humidity = \
            math.exp(
                (self._dewpoint * beta - MAGNUS_K3 * alpha) /
                (self._dewpoint + MAGNUS_K3)) * 100.0

        # clamp to 0-100% and format as an integer string
        if crit_humidity > 100:
            self._state = '100'
        elif crit_humidity < 0:
            self._state = '0'
        else:
            self._state = '{0:d}'.format(int(crit_humidity))

        _LOGGER.debug('Mold indicator humidity: %s ', self._state)

    @property
    def should_poll(self):
        """No polling needed; updates are pushed via state-change events."""
        return False

    @property
    def name(self):
        """Return the name."""
        return self._name

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return '%'

    @property
    def state(self):
        """Return the state of the entity."""
        return self._state

    @property
    def state_attributes(self):
        """Return the state attributes.

        Bug fix: before the first complete update, ``_dewpoint`` and
        ``_crit_temp`` are None; unconditionally converting them to
        Fahrenheit crashed with a TypeError on non-metric installations.
        """
        if self._is_metric:
            return {
                ATTR_DEWPOINT: self._dewpoint,
                ATTR_CRITICAL_TEMP: self._crit_temp,
            }

        dewpoint = (
            util.temperature.celsius_to_fahrenheit(self._dewpoint)
            if self._dewpoint is not None else None)
        crit_temp = (
            util.temperature.celsius_to_fahrenheit(self._crit_temp)
            if self._crit_temp is not None else None)
        return {
            ATTR_DEWPOINT: dewpoint,
            ATTR_CRITICAL_TEMP: crit_temp,
        }
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Dense Bayesian layer using KL-divergence based variational inference.
@@DenseReparameterization
@@DenseLocalReparameterization
@@DenseFlipout
@@dense_reparameterization
@@dense_local_reparameterization
@@dense_flipout
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.bayesflow.python.ops import layers_util
from tensorflow.contrib.distributions.python.ops import independent as independent_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base as layers_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import standard_ops
from tensorflow.python.ops.distributions import kullback_leibler as kl_lib
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.ops.distributions import util as distribution_util
# Public API: the three variational dense layer classes and their
# functional counterparts.
__all__ = [
    "DenseReparameterization",
    "DenseLocalReparameterization",
    "DenseFlipout",
    "dense_reparameterization",
    "dense_local_reparameterization",
    "dense_flipout",
]
class _DenseVariational(layers_lib.Layer):
  """Abstract densely-connected class (private, used as implementation base).

  This layer implements the Bayesian variational inference analogue to
  a dense layer by assuming the `kernel` and/or the `bias` are drawn
  from distributions. By default, the layer implements a stochastic
  forward pass via sampling from the kernel and bias posteriors,

  ```none
  kernel, bias ~ posterior
  outputs = activation(matmul(inputs, kernel) + bias)
  ```

  The arguments permit separate specification of the surrogate posterior
  (`q(W|x)`), prior (`p(W)`), and divergence for both the `kernel` and `bias`
  distributions.

  Subclasses must implement `_apply_variational_kernel`, which defines how
  the (stochastic) kernel is applied to the inputs.

  Args:
    units: Integer or Long, dimensionality of the output space.
    activation: Activation function (`callable`). Set it to None to maintain a
      linear activation.
    activity_regularizer: Regularizer function for the output.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    kernel_posterior_fn: Python `callable` which creates
      `tf.distributions.Distribution` instance representing the surrogate
      posterior of the `kernel` parameter. Default value:
      `default_mean_field_normal_fn()`.
    kernel_posterior_tensor_fn: Python `callable` which takes a
      `tf.distributions.Distribution` instance and returns a representative
      value. Default value: `lambda d: d.sample()`.
    kernel_prior_fn: Python `callable` which creates `tf.distributions`
      instance. See `default_mean_field_normal_fn` docstring for required
      parameter signature.
      Default value: `tf.distributions.Normal(loc=0., scale=1.)`.
    kernel_divergence_fn: Python `callable` which takes the surrogate posterior
      distribution, prior distribution and random variate sample(s) from the
      surrogate posterior and computes or approximates the KL divergence. The
      distributions are `tf.distributions.Distribution`-like instances and the
      sample is a `Tensor`.
    bias_posterior_fn: Python `callable` which creates
      `tf.distributions.Distribution` instance representing the surrogate
      posterior of the `bias` parameter. Default value:
      `default_mean_field_normal_fn(is_singular=True)` (which creates an
      instance of `tf.distributions.Deterministic`).
    bias_posterior_tensor_fn: Python `callable` which takes a
      `tf.distributions.Distribution` instance and returns a representative
      value. Default value: `lambda d: d.sample()`.
    bias_prior_fn: Python `callable` which creates `tf.distributions` instance.
      See `default_mean_field_normal_fn` docstring for required parameter
      signature. Default value: `None` (no prior, no variational inference)
    bias_divergence_fn: Python `callable` which takes the surrogate posterior
      distribution, prior distribution and random variate sample(s) from the
      surrogate posterior and computes or approximates the KL divergence. The
      distributions are `tf.distributions.Distribution`-like instances and the
      sample is a `Tensor`.
    name: Python `str`, the name of the layer. Layers with the same name will
      share `tf.Variable`s, but to avoid mistakes we require `reuse=True` in
      such cases.
    reuse: Python `bool`, whether to reuse the `tf.Variable`s of a previous
      layer by the same name.

  Properties:
    units: Python integer, dimensionality of the output space.
    activation: Activation function (`callable`).
    activity_regularizer: Regularizer function for the output.
    kernel_posterior_fn: `callable` returning posterior.
    kernel_posterior_tensor_fn: `callable` operating on posterior.
    kernel_prior_fn: `callable` returning prior.
    kernel_divergence_fn: `callable` returning divergence.
    bias_posterior_fn: `callable` returning posterior.
    bias_posterior_tensor_fn: `callable` operating on posterior.
    bias_prior_fn: `callable` returning prior.
    bias_divergence_fn: `callable` returning divergence.
  """

  def __init__(
      self,
      units,
      activation=None,
      activity_regularizer=None,
      trainable=True,
      kernel_posterior_fn=layers_util.default_mean_field_normal_fn(),
      kernel_posterior_tensor_fn=lambda d: d.sample(),
      kernel_prior_fn=lambda dtype, *args: normal_lib.Normal(  # pylint: disable=g-long-lambda
          loc=dtype.as_numpy_dtype(0.), scale=dtype.as_numpy_dtype(1.)),
      kernel_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
      bias_posterior_fn=layers_util.default_mean_field_normal_fn(is_singular=True),  # pylint: disable=line-too-long
      bias_posterior_tensor_fn=lambda d: d.sample(),
      bias_prior_fn=None,
      bias_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
      name=None,
      **kwargs):
    super(_DenseVariational, self).__init__(
        trainable=trainable,
        name=name,
        activity_regularizer=activity_regularizer,
        **kwargs)
    self.units = units
    self.activation = activation
    self.input_spec = layers_lib.InputSpec(min_ndim=2)
    self.kernel_posterior_fn = kernel_posterior_fn
    self.kernel_posterior_tensor_fn = kernel_posterior_tensor_fn
    self.kernel_prior_fn = kernel_prior_fn
    self.kernel_divergence_fn = kernel_divergence_fn
    self.bias_posterior_fn = bias_posterior_fn
    self.bias_posterior_tensor_fn = bias_posterior_tensor_fn
    self.bias_prior_fn = bias_prior_fn
    self.bias_divergence_fn = bias_divergence_fn

  def build(self, input_shape):
    """Create posterior/prior distributions for kernel and bias."""
    input_shape = tensor_shape.TensorShape(input_shape)
    in_size = input_shape.with_rank_at_least(2)[-1].value
    if in_size is None:
      raise ValueError("The last dimension of the inputs to `Dense` "
                       "should be defined. Found `None`.")
    # NOTE(review): __init__ assigns self.input_spec but build assigns
    # self._input_spec -- confirm the base Layer exposes `input_spec`
    # as a property backed by `_input_spec` in this TF version.
    self._input_spec = layers_lib.InputSpec(min_ndim=2, axes={-1: in_size})
    dtype = dtypes.as_dtype(self.dtype)

    # Must have a posterior kernel.
    self.kernel_posterior = self.kernel_posterior_fn(
        dtype, [in_size, self.units], "kernel_posterior",
        self.trainable, self.add_variable)

    if self.kernel_prior_fn is None:
      self.kernel_prior = None
    else:
      self.kernel_prior = self.kernel_prior_fn(
          dtype, [in_size, self.units], "kernel_prior",
          self.trainable, self.add_variable)
    self._built_kernel_divergence = False

    if self.bias_posterior_fn is None:
      self.bias_posterior = None
    else:
      self.bias_posterior = self.bias_posterior_fn(
          dtype, [self.units], "bias_posterior",
          self.trainable, self.add_variable)

    if self.bias_prior_fn is None:
      self.bias_prior = None
    else:
      self.bias_prior = self.bias_prior_fn(
          dtype, [self.units], "bias_prior",
          self.trainable, self.add_variable)
    self._built_bias_divergence = False

    self.built = True

  def call(self, inputs):
    inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)

    outputs = self._apply_variational_kernel(inputs)
    outputs = self._apply_variational_bias(outputs)
    if self.activation is not None:
      outputs = self.activation(outputs)  # pylint: disable=not-callable

    # Register the KL penalties as layer losses exactly once; unwrap
    # `Independent` so the divergence is computed on the base distribution.
    if not self._built_kernel_divergence:
      kernel_posterior = self.kernel_posterior
      kernel_prior = self.kernel_prior
      if isinstance(self.kernel_posterior, independent_lib.Independent):
        kernel_posterior = kernel_posterior.distribution
      if isinstance(self.kernel_prior, independent_lib.Independent):
        kernel_prior = kernel_prior.distribution
      self._apply_divergence(self.kernel_divergence_fn,
                             kernel_posterior,
                             kernel_prior,
                             self.kernel_posterior_tensor,
                             name="divergence_kernel")
      self._built_kernel_divergence = True
    if not self._built_bias_divergence:
      bias_posterior = self.bias_posterior
      bias_prior = self.bias_prior
      if isinstance(self.bias_posterior, independent_lib.Independent):
        bias_posterior = bias_posterior.distribution
      if isinstance(self.bias_prior, independent_lib.Independent):
        bias_prior = bias_prior.distribution
      self._apply_divergence(self.bias_divergence_fn,
                             bias_posterior,
                             bias_prior,
                             self.bias_posterior_tensor,
                             name="divergence_bias")
      self._built_bias_divergence = True

    return outputs

  def _apply_variational_bias(self, inputs):
    if self.bias_posterior is None:
      self.bias_posterior_tensor = None
      return inputs
    self.bias_posterior_tensor = self.bias_posterior_tensor_fn(
        self.bias_posterior)
    return nn.bias_add(inputs, self.bias_posterior_tensor)

  def _apply_divergence(self, divergence_fn, posterior, prior,
                        posterior_tensor, name):
    # Nothing to penalize when the divergence or either distribution is
    # absent (e.g. bias_prior_fn=None disables bias variational inference).
    # (A dead `divergence = None` store before the early return was removed.)
    if (divergence_fn is None or
        posterior is None or
        prior is None):
      return
    divergence = standard_ops.identity(
        divergence_fn(
            posterior, prior, posterior_tensor),
        name=name)
    self.add_loss(divergence)

  def _matmul(self, inputs, kernel):
    if inputs.shape.ndims <= 2:
      return standard_ops.matmul(inputs, kernel)
    # To handle broadcasting, we must use `tensordot`.
    return standard_ops.tensordot(inputs, kernel, axes=[[-1], [0]])

  def _compute_output_shape(self, input_shape):
    input_shape = tensor_shape.TensorShape(input_shape).with_rank_at_least(2)
    if input_shape[-1].value is None:
      raise ValueError(
          "The innermost dimension of input_shape must be defined, "
          "but saw: {}".format(input_shape))
    return input_shape[:-1].concatenate(self.units)
class DenseReparameterization(_DenseVariational):
  """Densely-connected layer class with reparameterization estimator.

  This layer implements the Bayesian variational inference analogue to
  a dense layer by assuming the `kernel` and/or the `bias` are drawn
  from distributions. By default, the layer implements a stochastic
  forward pass via sampling from the kernel and bias posteriors,

  ```none
  kernel, bias ~ posterior
  outputs = activation(matmul(inputs, kernel) + bias)
  ```

  The arguments permit separate specification of the surrogate posterior
  (`q(W|x)`), prior (`p(W)`), and divergence for both the `kernel` and `bias`
  distributions.

  Args:
    units: Integer or Long, dimensionality of the output space.
    activation: Activation function (`callable`). Set it to None to maintain a
      linear activation.
    activity_regularizer: Regularizer function for the output.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    kernel_posterior_fn: Python `callable` which creates
      `tf.distributions.Distribution` instance representing the surrogate
      posterior of the `kernel` parameter. Default value:
      `default_mean_field_normal_fn()`.
    kernel_posterior_tensor_fn: Python `callable` which takes a
      `tf.distributions.Distribution` instance and returns a representative
      value. Default value: `lambda d: d.sample()`.
    kernel_prior_fn: Python `callable` which creates `tf.distributions`
      instance. See `default_mean_field_normal_fn` docstring for required
      parameter signature.
      Default value: `tf.distributions.Normal(loc=0., scale=1.)`.
    kernel_divergence_fn: Python `callable` which takes the surrogate posterior
      distribution, prior distribution and random variate sample(s) from the
      surrogate posterior and computes or approximates the KL divergence. The
      distributions are `tf.distributions.Distribution`-like instances and the
      sample is a `Tensor`.
    bias_posterior_fn: Python `callable` which creates
      `tf.distributions.Distribution` instance representing the surrogate
      posterior of the `bias` parameter. Default value:
      `default_mean_field_normal_fn(is_singular=True)` (which creates an
      instance of `tf.distributions.Deterministic`).
    bias_posterior_tensor_fn: Python `callable` which takes a
      `tf.distributions.Distribution` instance and returns a representative
      value. Default value: `lambda d: d.sample()`.
    bias_prior_fn: Python `callable` which creates `tf.distributions` instance.
      See `default_mean_field_normal_fn` docstring for required parameter
      signature. Default value: `None` (no prior, no variational inference)
    bias_divergence_fn: Python `callable` which takes the surrogate posterior
      distribution, prior distribution and random variate sample(s) from the
      surrogate posterior and computes or approximates the KL divergence. The
      distributions are `tf.distributions.Distribution`-like instances and the
      sample is a `Tensor`.
    name: Python `str`, the name of the layer. Layers with the same name will
      share `tf.Variable`s, but to avoid mistakes we require `reuse=True` in
      such cases.
    reuse: Python `bool`, whether to reuse the `tf.Variable`s of a previous
      layer by the same name.

  Properties:
    units: Python integer, dimensionality of the output space.
    activation: Activation function (`callable`).
    activity_regularizer: Regularizer function for the output.
    kernel_posterior_fn: `callable` returning posterior.
    kernel_posterior_tensor_fn: `callable` operating on posterior.
    kernel_prior_fn: `callable` returning prior.
    kernel_divergence_fn: `callable` returning divergence.
    bias_posterior_fn: `callable` returning posterior.
    bias_posterior_tensor_fn: `callable` operating on posterior.
    bias_prior_fn: `callable` returning prior.
    bias_divergence_fn: `callable` returning divergence.

  #### Examples

  We illustrate a Bayesian neural network with [variational inference](
  https://en.wikipedia.org/wiki/Variational_Bayesian_methods),
  assuming a dataset of `features` and `labels`.

  ```python
  tfp = tf.contrib.bayesflow

  net = tfp.layers.DenseReparameterization(
      512, activation=tf.nn.relu)(features)
  logits = tfp.layers.DenseReparameterization(10)(net)
  neg_log_likelihood = tf.nn.softmax_cross_entropy_with_logits(
      labels=labels, logits=logits)
  kl = sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
  loss = neg_log_likelihood + kl
  train_op = tf.train.AdamOptimizer().minimize(loss)
  ```

  It uses reparameterization gradients to minimize the
  Kullback-Leibler divergence up to a constant, also known as the
  negative Evidence Lower Bound. It consists of the sum of two terms:
  the expected negative log-likelihood, which we approximate via
  Monte Carlo; and the KL divergence, which is added via regularizer
  terms which are arguments to the layer.
  """

  def __init__(
      self,
      units,
      activation=None,
      activity_regularizer=None,
      trainable=True,
      kernel_posterior_fn=layers_util.default_mean_field_normal_fn(),
      kernel_posterior_tensor_fn=lambda d: d.sample(),
      kernel_prior_fn=lambda dtype, *args: normal_lib.Normal(  # pylint: disable=g-long-lambda
          loc=dtype.as_numpy_dtype(0.), scale=dtype.as_numpy_dtype(1.)),
      kernel_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
      bias_posterior_fn=layers_util.default_mean_field_normal_fn(
          is_singular=True),
      bias_posterior_tensor_fn=lambda d: d.sample(),
      bias_prior_fn=None,
      bias_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
      name=None,
      **kwargs):
    # Pure pass-through to the abstract base; this subclass only defines
    # how the sampled kernel is applied (see _apply_variational_kernel).
    super(DenseReparameterization, self).__init__(
        units=units,
        activation=activation,
        activity_regularizer=activity_regularizer,
        trainable=trainable,
        kernel_posterior_fn=kernel_posterior_fn,
        kernel_posterior_tensor_fn=kernel_posterior_tensor_fn,
        kernel_prior_fn=kernel_prior_fn,
        kernel_divergence_fn=kernel_divergence_fn,
        bias_posterior_fn=bias_posterior_fn,
        bias_posterior_tensor_fn=bias_posterior_tensor_fn,
        bias_prior_fn=bias_prior_fn,
        bias_divergence_fn=bias_divergence_fn,
        name=name,
        **kwargs)

  def _apply_variational_kernel(self, inputs):
    # Straightforward reparameterization: draw one kernel sample from the
    # posterior and use it for the whole batch.
    self.kernel_posterior_tensor = self.kernel_posterior_tensor_fn(
        self.kernel_posterior)
    # The local-reparameterization-specific fields are unused here.
    self.kernel_posterior_affine = None
    self.kernel_posterior_affine_tensor = None
    return self._matmul(inputs, self.kernel_posterior_tensor)
def dense_reparameterization(
    inputs,
    units,
    activation=None,
    activity_regularizer=None,
    trainable=True,
    kernel_posterior_fn=layers_util.default_mean_field_normal_fn(),
    kernel_posterior_tensor_fn=lambda d: d.sample(),
    kernel_prior_fn=lambda dtype, *args: normal_lib.Normal(  # pylint: disable=g-long-lambda
        loc=dtype.as_numpy_dtype(0.), scale=dtype.as_numpy_dtype(1.)),
    kernel_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
    bias_posterior_fn=layers_util.default_mean_field_normal_fn(is_singular=True),  # pylint: disable=line-too-long
    bias_posterior_tensor_fn=lambda d: d.sample(),
    bias_prior_fn=None,
    bias_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
    name=None,
    reuse=None):
  """Densely-connected layer with reparameterization estimator.

  Functional interface to `DenseReparameterization`. The `kernel` and
  (by default) the `bias` are drawn from surrogate posterior
  distributions, giving the stochastic forward pass

  ```none
  kernel, bias ~ posterior
  outputs = activation(matmul(inputs, kernel) + bias)
  ```

  The arguments permit separate specification of the surrogate posterior
  (`q(W|x)`), prior (`p(W)`), and divergence for both the `kernel` and
  `bias` distributions.

  Args:
    inputs: Tensor input.
    units: Integer or Long, dimensionality of the output space.
    activation: Activation function (`callable`); `None` keeps a linear
      activation.
    activity_regularizer: Regularizer function for the output.
    trainable: Boolean; if `True`, also add variables to the graph
      collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    kernel_posterior_fn: Python `callable` creating the kernel's surrogate
      posterior distribution. Default: `default_mean_field_normal_fn()`.
    kernel_posterior_tensor_fn: Python `callable` mapping the kernel
      posterior to a representative value. Default: `lambda d: d.sample()`.
    kernel_prior_fn: Python `callable` creating the kernel prior
      distribution. Default: `tf.distributions.Normal(loc=0., scale=1.)`.
    kernel_divergence_fn: Python `callable` taking the surrogate
      posterior, the prior and a posterior sample, returning the (exact
      or approximate) KL divergence.
    bias_posterior_fn: Python `callable` creating the bias posterior.
      Default: `default_mean_field_normal_fn(is_singular=True)` (a
      `tf.distributions.Deterministic` instance).
    bias_posterior_tensor_fn: Python `callable` mapping the bias posterior
      to a representative value. Default: `lambda d: d.sample()`.
    bias_prior_fn: Python `callable` creating the bias prior, or `None`
      for no prior (no variational inference on the bias).
    bias_divergence_fn: Python `callable` computing the bias KL
      divergence, with the same signature as `kernel_divergence_fn`.
    name: Python `str`, the name of the layer. Layers with the same name
      share `tf.Variable`s, which requires `reuse=True`.
    reuse: Python `bool`, whether to reuse the `tf.Variable`s of a
      previous layer by the same name.

  Returns:
    output: `Tensor` representing the affine transformed input under a
      random draw from the surrogate posterior distribution.

  #### Examples

  ```python
  tfp = tf.contrib.bayesflow

  net = tfp.layers.dense_reparameterization(
      features, 512, activation=tf.nn.relu)
  logits = tfp.layers.dense_reparameterization(net, 10)
  neg_log_likelihood = tf.nn.softmax_cross_entropy_with_logits(
      labels=labels, logits=logits)
  kl = sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
  loss = neg_log_likelihood + kl
  train_op = tf.train.AdamOptimizer().minimize(loss)
  ```

  It uses reparameterization gradients to minimize the Kullback-Leibler
  divergence up to a constant, also known as the negative Evidence Lower
  Bound: the expected negative log-likelihood (approximated via Monte
  Carlo) plus the KL divergence, added via regularizer terms.
  """
  # Collect the layer configuration once, then build and apply the layer.
  layer_kwargs = dict(
      activation=activation,
      activity_regularizer=activity_regularizer,
      trainable=trainable,
      kernel_posterior_fn=kernel_posterior_fn,
      kernel_posterior_tensor_fn=kernel_posterior_tensor_fn,
      kernel_prior_fn=kernel_prior_fn,
      kernel_divergence_fn=kernel_divergence_fn,
      bias_posterior_fn=bias_posterior_fn,
      bias_posterior_tensor_fn=bias_posterior_tensor_fn,
      bias_prior_fn=bias_prior_fn,
      bias_divergence_fn=bias_divergence_fn,
      name=name,
      dtype=inputs.dtype.base_dtype,
      _scope=name,
      _reuse=reuse)
  layer = DenseReparameterization(units, **layer_kwargs)
  return layer.apply(inputs)
class DenseLocalReparameterization(_DenseVariational):
  """Densely-connected layer class with local reparameterization estimator.

  Bayesian variational analogue to a dense layer: the `kernel` and/or the
  `bias` are drawn from distributions, giving the stochastic forward pass

  ```none
  kernel, bias ~ posterior
  outputs = activation(matmul(inputs, kernel) + bias)
  ```

  Rather than sampling the kernel itself, this estimator pushes a normal
  kernel posterior through the affine transform analytically and samples
  the induced distribution over `matmul(inputs, kernel)` ("local"
  reparameterization).

  Constructor arguments mirror `_DenseVariational`: separate `callable`s
  specify the surrogate posterior (`q(W|x)`), prior (`p(W)`),
  representative-tensor function and divergence for both the `kernel`
  and the `bias`. Layers with the same `name` share `tf.Variable`s,
  which requires `reuse=True`.

  #### Examples

  ```python
  tfp = tf.contrib.bayesflow

  net = tfp.layers.DenseLocalReparameterization(
      512, activation=tf.nn.relu)(features)
  logits = tfp.layers.DenseLocalReparameterization(10)(net)
  neg_log_likelihood = tf.nn.softmax_cross_entropy_with_logits(
      labels=labels, logits=logits)
  kl = sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
  loss = neg_log_likelihood + kl
  train_op = tf.train.AdamOptimizer().minimize(loss)
  ```

  It uses local reparameterization gradients to minimize the
  Kullback-Leibler divergence up to a constant, also known as the
  negative Evidence Lower Bound: the expected negative log-likelihood
  (approximated via Monte Carlo) plus the KL divergence, added via
  regularizer terms which are arguments to the layer.
  """

  def __init__(
      self,
      units,
      activation=None,
      activity_regularizer=None,
      trainable=True,
      kernel_posterior_fn=layers_util.default_mean_field_normal_fn(),
      kernel_posterior_tensor_fn=lambda d: d.sample(),
      kernel_prior_fn=lambda dtype, *args: normal_lib.Normal(  # pylint: disable=g-long-lambda
          loc=dtype.as_numpy_dtype(0.), scale=dtype.as_numpy_dtype(1.)),
      kernel_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
      bias_posterior_fn=layers_util.default_mean_field_normal_fn(
          is_singular=True),
      bias_posterior_tensor_fn=lambda d: d.sample(),
      bias_prior_fn=None,
      bias_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
      name=None,
      **kwargs):
    # All configuration is handled by the shared variational base class.
    super(DenseLocalReparameterization, self).__init__(
        units=units,
        activation=activation,
        activity_regularizer=activity_regularizer,
        trainable=trainable,
        kernel_posterior_fn=kernel_posterior_fn,
        kernel_posterior_tensor_fn=kernel_posterior_tensor_fn,
        kernel_prior_fn=kernel_prior_fn,
        kernel_divergence_fn=kernel_divergence_fn,
        bias_posterior_fn=bias_posterior_fn,
        bias_posterior_tensor_fn=bias_posterior_tensor_fn,
        bias_prior_fn=bias_prior_fn,
        bias_divergence_fn=bias_divergence_fn,
        name=name,
        **kwargs)

  def _apply_variational_kernel(self, inputs):
    """Sample `matmul(inputs, kernel)` from its analytically-derived normal."""
    posterior = self.kernel_posterior
    if (not isinstance(posterior, independent_lib.Independent) or
        not isinstance(posterior.distribution, normal_lib.Normal)):
      raise TypeError(
          "`DenseLocalReparameterization` requires "
          "`kernel_posterior_fn` produce an instance of "
          "`tf.distributions.Independent(tf.distributions.Normal)` "
          "(saw: \"{}\").".format(type(posterior).__name__))
    # For an elementwise-normal kernel, matmul(inputs, kernel) is normal
    # with mean matmul(inputs, loc) and variance matmul(inputs^2, scale^2).
    affine_loc = self._matmul(inputs, posterior.distribution.loc)
    affine_scale = standard_ops.sqrt(self._matmul(
        standard_ops.square(inputs),
        standard_ops.square(posterior.distribution.scale)))
    self.kernel_posterior_affine = normal_lib.Normal(
        loc=affine_loc, scale=affine_scale)
    self.kernel_posterior_affine_tensor = (
        self.kernel_posterior_tensor_fn(self.kernel_posterior_affine))
    # No kernel sample is drawn directly under this estimator.
    self.kernel_posterior_tensor = None
    return self.kernel_posterior_affine_tensor
def dense_local_reparameterization(
    inputs,
    units,
    activation=None,
    activity_regularizer=None,
    trainable=True,
    kernel_posterior_fn=layers_util.default_mean_field_normal_fn(),
    kernel_posterior_tensor_fn=lambda d: d.sample(),
    kernel_prior_fn=lambda dtype, *args: normal_lib.Normal(  # pylint: disable=g-long-lambda
        loc=dtype.as_numpy_dtype(0.), scale=dtype.as_numpy_dtype(1.)),
    kernel_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
    bias_posterior_fn=layers_util.default_mean_field_normal_fn(
        is_singular=True),
    bias_posterior_tensor_fn=lambda d: d.sample(),
    bias_prior_fn=None,
    bias_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
    name=None,
    reuse=None):
  """Densely-connected layer with local reparameterization estimator.

  Functional interface to `DenseLocalReparameterization`. The `kernel`
  and (by default) the `bias` are drawn from surrogate posterior
  distributions, giving the stochastic forward pass

  ```none
  kernel, bias ~ posterior
  outputs = activation(matmul(inputs, kernel) + bias)
  ```

  The arguments permit separate specification of the surrogate posterior
  (`q(W|x)`), prior (`p(W)`), and divergence for both the `kernel` and
  `bias` distributions.

  Args:
    inputs: Tensor input.
    units: Integer or Long, dimensionality of the output space.
    activation: Activation function (`callable`); `None` keeps a linear
      activation.
    activity_regularizer: Regularizer function for the output.
    trainable: Boolean; if `True`, also add variables to the graph
      collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    kernel_posterior_fn: Python `callable` creating the kernel's surrogate
      posterior distribution. Default: `default_mean_field_normal_fn()`.
    kernel_posterior_tensor_fn: Python `callable` mapping the kernel
      posterior to a representative value. Default: `lambda d: d.sample()`.
    kernel_prior_fn: Python `callable` creating the kernel prior
      distribution. Default: `tf.distributions.Normal(loc=0., scale=1.)`.
    kernel_divergence_fn: Python `callable` taking the surrogate
      posterior, the prior and a posterior sample, returning the (exact
      or approximate) KL divergence.
    bias_posterior_fn: Python `callable` creating the bias posterior.
      Default: `default_mean_field_normal_fn(is_singular=True)` (a
      `tf.distributions.Deterministic` instance).
    bias_posterior_tensor_fn: Python `callable` mapping the bias posterior
      to a representative value. Default: `lambda d: d.sample()`.
    bias_prior_fn: Python `callable` creating the bias prior, or `None`
      for no prior (no variational inference on the bias).
    bias_divergence_fn: Python `callable` computing the bias KL
      divergence, with the same signature as `kernel_divergence_fn`.
    name: Python `str`, the name of the layer. Layers with the same name
      share `tf.Variable`s, which requires `reuse=True`.
    reuse: Python `bool`, whether to reuse the `tf.Variable`s of a
      previous layer by the same name.

  Returns:
    output: `Tensor` representing the affine transformed input under a
      random draw from the surrogate posterior distribution.

  #### Examples

  ```python
  tfp = tf.contrib.bayesflow

  net = tfp.layers.dense_local_reparameterization(
      features, 512, activation=tf.nn.relu)
  logits = tfp.layers.dense_local_reparameterization(net, 10)
  neg_log_likelihood = tf.nn.softmax_cross_entropy_with_logits(
      labels=labels, logits=logits)
  kl = sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
  loss = neg_log_likelihood + kl
  train_op = tf.train.AdamOptimizer().minimize(loss)
  ```

  It uses local reparameterization gradients to minimize the
  Kullback-Leibler divergence up to a constant, also known as the
  negative Evidence Lower Bound: the expected negative log-likelihood
  (approximated via Monte Carlo) plus the KL divergence, added via
  regularizer terms.
  """
  # Collect the layer configuration once, then build and apply the layer.
  layer_kwargs = dict(
      activation=activation,
      activity_regularizer=activity_regularizer,
      trainable=trainable,
      kernel_posterior_fn=kernel_posterior_fn,
      kernel_posterior_tensor_fn=kernel_posterior_tensor_fn,
      kernel_prior_fn=kernel_prior_fn,
      kernel_divergence_fn=kernel_divergence_fn,
      bias_posterior_fn=bias_posterior_fn,
      bias_posterior_tensor_fn=bias_posterior_tensor_fn,
      bias_prior_fn=bias_prior_fn,
      bias_divergence_fn=bias_divergence_fn,
      name=name,
      dtype=inputs.dtype.base_dtype,
      _scope=name,
      _reuse=reuse)
  layer = DenseLocalReparameterization(units, **layer_kwargs)
  return layer.apply(inputs)
class DenseFlipout(_DenseVariational):
  """Densely-connected layer class with Flipout estimator.

  Bayesian variational analogue to a dense layer: the `kernel` and/or the
  `bias` are drawn from distributions, giving the stochastic forward pass

  ```none
  kernel, bias ~ posterior
  outputs = activation(matmul(inputs, kernel) + bias)
  ```

  The kernel perturbation is decorrelated across examples by multiplying
  the zero-mean posterior noise with independent random sign flips on the
  input and output sides (the Flipout estimator); `seed` controls the
  sign-flip random number generator.

  Constructor arguments mirror `_DenseVariational`: separate `callable`s
  specify the surrogate posterior (`q(W|x)`), prior (`p(W)`),
  representative-tensor function and divergence for both the `kernel`
  and the `bias`. Layers with the same `name` share `tf.Variable`s,
  which requires `reuse=True`.

  #### Examples

  ```python
  tfp = tf.contrib.bayesflow

  net = tfp.layers.DenseFlipout(
      512, activation=tf.nn.relu)(features)
  logits = tfp.layers.DenseFlipout(10)(net)
  neg_log_likelihood = tf.nn.softmax_cross_entropy_with_logits(
      labels=labels, logits=logits)
  kl = sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
  loss = neg_log_likelihood + kl
  train_op = tf.train.AdamOptimizer().minimize(loss)
  ```

  It uses the Flipout gradient estimator to minimize the
  Kullback-Leibler divergence up to a constant, also known as the
  negative Evidence Lower Bound: the expected negative log-likelihood
  (approximated via Monte Carlo) plus the KL divergence, added via
  regularizer terms which are arguments to the layer.
  """

  def __init__(
      self,
      units,
      activation=None,
      activity_regularizer=None,
      trainable=True,
      kernel_posterior_fn=layers_util.default_mean_field_normal_fn(),
      kernel_posterior_tensor_fn=lambda d: d.sample(),
      kernel_prior_fn=lambda dtype, *args: normal_lib.Normal(  # pylint: disable=g-long-lambda
          loc=dtype.as_numpy_dtype(0.), scale=dtype.as_numpy_dtype(1.)),
      kernel_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
      bias_posterior_fn=layers_util.default_mean_field_normal_fn(
          is_singular=True),
      bias_posterior_tensor_fn=lambda d: d.sample(),
      bias_prior_fn=None,
      bias_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
      seed=None,
      name=None,
      **kwargs):
    # Shared configuration is handled by the variational base class;
    # only the sign-flip seed is specific to Flipout.
    super(DenseFlipout, self).__init__(
        units=units,
        activation=activation,
        activity_regularizer=activity_regularizer,
        trainable=trainable,
        kernel_posterior_fn=kernel_posterior_fn,
        kernel_posterior_tensor_fn=kernel_posterior_tensor_fn,
        kernel_prior_fn=kernel_prior_fn,
        kernel_divergence_fn=kernel_divergence_fn,
        bias_posterior_fn=bias_posterior_fn,
        bias_posterior_tensor_fn=bias_posterior_tensor_fn,
        bias_prior_fn=bias_prior_fn,
        bias_divergence_fn=bias_divergence_fn,
        name=name,
        **kwargs)
    self.seed = seed

  def _apply_variational_kernel(self, inputs):
    """Apply the kernel's posterior mean plus a Flipout noise perturbation."""
    posterior = self.kernel_posterior
    if (not isinstance(posterior, independent_lib.Independent) or
        not isinstance(posterior.distribution, normal_lib.Normal)):
      raise TypeError(
          "`DenseFlipout` requires "
          "`kernel_posterior_fn` produce an instance of "
          "`tf.distributions.Independent(tf.distributions.Normal)` "
          "(saw: \"{}\").".format(type(posterior).__name__))
    # Zero-mean noise distribution sharing the posterior's scale.
    self.kernel_posterior_affine = normal_lib.Normal(
        loc=array_ops.zeros_like(posterior.distribution.loc),
        scale=posterior.distribution.scale)
    self.kernel_posterior_affine_tensor = (
        self.kernel_posterior_tensor_fn(self.kernel_posterior_affine))
    # No kernel sample is drawn directly under this estimator.
    self.kernel_posterior_tensor = None
    input_shape = array_ops.shape(inputs)
    batch_shape = input_shape[:-1]
    # Independent +/-1 flips on the input and output sides decorrelate
    # the shared noise sample across examples in the batch.
    sign_input = random_sign(input_shape, dtype=inputs.dtype, seed=self.seed)
    output_shape = array_ops.concat(
        [batch_shape, array_ops.expand_dims(self.units, 0)], 0)
    sign_output = random_sign(
        output_shape,
        dtype=inputs.dtype,
        seed=distribution_util.gen_new_seed(self.seed, salt="dense_flipout"))
    perturbation = self._matmul(
        inputs * sign_input,
        self.kernel_posterior_affine_tensor) * sign_output
    return self._matmul(inputs, posterior.distribution.loc) + perturbation
def dense_flipout(
    inputs,
    units,
    activation=None,
    activity_regularizer=None,
    trainable=True,
    kernel_posterior_fn=layers_util.default_mean_field_normal_fn(),
    kernel_posterior_tensor_fn=lambda d: d.sample(),
    kernel_prior_fn=lambda dtype, *args: normal_lib.Normal(  # pylint: disable=g-long-lambda
        loc=dtype.as_numpy_dtype(0.), scale=dtype.as_numpy_dtype(1.)),
    kernel_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
    bias_posterior_fn=layers_util.default_mean_field_normal_fn(
        is_singular=True),
    bias_posterior_tensor_fn=lambda d: d.sample(),
    bias_prior_fn=None,
    bias_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p),
    seed=None,
    name=None,
    reuse=None):
  """Densely-connected layer with Flipout estimator.

  Functional interface to `DenseFlipout`. The `kernel` and (by default)
  the `bias` are drawn from surrogate posterior distributions, giving
  the stochastic forward pass

  ```none
  kernel, bias ~ posterior
  outputs = activation(matmul(inputs, kernel) + bias)
  ```

  The arguments permit separate specification of the surrogate posterior
  (`q(W|x)`), prior (`p(W)`), and divergence for both the `kernel` and
  `bias` distributions.

  Args:
    inputs: Tensor input.
    units: Integer or Long, dimensionality of the output space.
    activation: Activation function (`callable`); `None` keeps a linear
      activation.
    activity_regularizer: Regularizer function for the output.
    trainable: Boolean; if `True`, also add variables to the graph
      collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    kernel_posterior_fn: Python `callable` creating the kernel's surrogate
      posterior distribution. Default: `default_mean_field_normal_fn()`.
    kernel_posterior_tensor_fn: Python `callable` mapping the kernel
      posterior to a representative value. Default: `lambda d: d.sample()`.
    kernel_prior_fn: Python `callable` creating the kernel prior
      distribution. Default: `tf.distributions.Normal(loc=0., scale=1.)`.
    kernel_divergence_fn: Python `callable` taking the surrogate
      posterior, the prior and a posterior sample, returning the (exact
      or approximate) KL divergence.
    bias_posterior_fn: Python `callable` creating the bias posterior.
      Default: `default_mean_field_normal_fn(is_singular=True)` (a
      `tf.distributions.Deterministic` instance).
    bias_posterior_tensor_fn: Python `callable` mapping the bias posterior
      to a representative value. Default: `lambda d: d.sample()`.
    bias_prior_fn: Python `callable` creating the bias prior, or `None`
      for no prior (no variational inference on the bias).
    bias_divergence_fn: Python `callable` computing the bias KL
      divergence, with the same signature as `kernel_divergence_fn`.
    seed: Python scalar `int` which initializes the random number
      generator. Default value: `None` (i.e., use global seed).
    name: Python `str`, the name of the layer. Layers with the same name
      share `tf.Variable`s, which requires `reuse=True`.
    reuse: Python `bool`, whether to reuse the `tf.Variable`s of a
      previous layer by the same name.

  Returns:
    output: `Tensor` representing the affine transformed input under a
      random draw from the surrogate posterior distribution.

  #### Examples

  ```python
  tfp = tf.contrib.bayesflow

  net = tfp.layers.dense_flipout(
      features, 512, activation=tf.nn.relu)
  logits = tfp.layers.dense_flipout(net, 10)
  neg_log_likelihood = tf.nn.softmax_cross_entropy_with_logits(
      labels=labels, logits=logits)
  kl = sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
  loss = neg_log_likelihood + kl
  train_op = tf.train.AdamOptimizer().minimize(loss)
  ```

  It uses the Flipout gradient estimator to minimize the
  Kullback-Leibler divergence up to a constant, also known as the
  negative Evidence Lower Bound: the expected negative log-likelihood
  (approximated via Monte Carlo) plus the KL divergence, added via
  regularizer terms.
  """
  # Collect the layer configuration once, then build and apply the layer.
  layer_kwargs = dict(
      activation=activation,
      activity_regularizer=activity_regularizer,
      trainable=trainable,
      kernel_posterior_fn=kernel_posterior_fn,
      kernel_posterior_tensor_fn=kernel_posterior_tensor_fn,
      kernel_prior_fn=kernel_prior_fn,
      kernel_divergence_fn=kernel_divergence_fn,
      bias_posterior_fn=bias_posterior_fn,
      bias_posterior_tensor_fn=bias_posterior_tensor_fn,
      bias_prior_fn=bias_prior_fn,
      bias_divergence_fn=bias_divergence_fn,
      seed=seed,
      name=name,
      dtype=inputs.dtype.base_dtype,
      _scope=name,
      _reuse=reuse)
  layer = DenseFlipout(units, **layer_kwargs)
  return layer.apply(inputs)
def random_sign(shape, dtype=dtypes.float32, seed=None):
  """Sample Rademacher variates, i.e., uniform draws from {-1, +1}."""
  # Draw integers in {0, 1} uniformly, then map 0 -> -1 and 1 -> +1.
  bernoulli = random_ops.random_uniform(
      shape, minval=0, maxval=2, dtype=dtypes.int32, seed=seed)
  return math_ops.cast(bernoulli * 2 - 1, dtype)
| |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import logging
from monty.json import MSONable
from monty.io import zopen
from pymatgen.core import Molecule
from .utils import read_table_pattern, read_pattern, lower_and_check_unique
# Classes for reading/manipulating/writing QChem output files.
__author__ = "Brandon Wood, Samuel Blau, Shyam Dwaraknath, Julian Self"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "0.1"
__email__ = "b.wood@berkeley.edu"
__credits__ = "Xiaohui Qu"
logger = logging.getLogger(__name__)
class QCInput(MSONable):
"""
An object representing a QChem input file. QCInput attributes represent different sections of a QChem input file.
    To add a new section one needs to modify __init__, __str__, from_string and add staticmethods
to read and write the new section i.e. section_template and read_section. By design, there is very little (or no)
    checking that input parameters conform to the appropriate QChem format, this responsibility lands on the user or a
separate error handling software.
Args:
molecule (pymatgen Molecule object or "read"):
Input molecule. molecule can be set as either a pymatgen Molecule object or as the str "read".
"read" can be used in multi_job QChem input files where the molecule is read in from the
previous calculation.
rem (dict):
A dictionary of all the input parameters for the rem section of QChem input file.
Ex. rem = {'method': 'rimp2', 'basis': '6-31*G++' ... }
opt (dict of lists):
A dictionary of opt sections, where each opt section is a key and the corresponding
            values are a list of strings. Strings must be formatted as instructed by the QChem manual.
The different opt sections are: CONSTRAINT, FIXED, DUMMY, and CONNECT
Ex. opt = {"CONSTRAINT": ["tors 2 3 4 5 25.0", "tors 2 5 7 9 80.0"], "FIXED": ["2 XY"]}
"""
def __init__(self, molecule, rem, opt=None, pcm=None, solvent=None, smx=None):
self.molecule = molecule
self.rem = lower_and_check_unique(rem)
self.opt = opt
self.pcm = lower_and_check_unique(pcm)
self.solvent = lower_and_check_unique(solvent)
self.smx = lower_and_check_unique(smx)
# Make sure molecule is valid: either the string "read" or a pymatgen molecule object
if isinstance(self.molecule, str):
self.molecule = self.molecule.lower()
if self.molecule != "read":
raise ValueError(
'The only acceptable text value for molecule is "read"')
elif not isinstance(self.molecule, Molecule):
raise ValueError(
"The molecule must either be the string 'read' or be a pymatgen Molecule object"
)
# Make sure rem is valid:
# - Has a basis
# - Has a method or DFT exchange functional
# - Has a valid job_type or jobtype
valid_job_types = [
"opt", "optimization", "sp", "freq", "frequency", "nmr"
]
if "basis" not in self.rem:
raise ValueError("The rem dictionary must contain a 'basis' entry")
if "method" not in self.rem:
if "exchange" not in self.rem:
raise ValueError(
"The rem dictionary must contain either a 'method' entry or an 'exchange' entry"
)
if "job_type" not in self.rem:
raise ValueError(
"The rem dictionary must contain a 'job_type' entry")
if self.rem.get("job_type").lower() not in valid_job_types:
raise ValueError(
"The rem dictionary must contain a valid 'job_type' entry")
# Still to do:
# - Check that the method or functional is valid
# - Check that basis is valid
# - Check that basis is defined for all species in the molecule
# - Validity checks specific to job type?
# - Check OPT and PCM sections?
def __str__(self):
combined_list = []
# molecule section
combined_list.append(self.molecule_template(self.molecule))
combined_list.append("")
# rem section
combined_list.append(self.rem_template(self.rem))
combined_list.append("")
# opt section
if self.opt:
combined_list.append(self.opt_template(self.opt))
combined_list.append("")
# pcm section
if self.pcm:
combined_list.append(self.pcm_template(self.pcm))
combined_list.append("")
# solvent section
if self.solvent:
combined_list.append(self.solvent_template(self.solvent))
combined_list.append("")
if self.smx:
combined_list.append(self.smx_template(self.smx))
combined_list.append("")
return '\n'.join(combined_list)
@staticmethod
def multi_job_string(job_list):
multi_job_string = str()
for i, job_i in enumerate(job_list):
if i < len(job_list) - 1:
multi_job_string += job_i.__str__() + "\n@@@\n\n"
else:
multi_job_string += job_i.__str__()
return multi_job_string
@classmethod
def from_string(cls, string):
sections = cls.find_sections(string)
molecule = cls.read_molecule(string)
rem = cls.read_rem(string)
# only molecule and rem are necessary everything else is checked
opt = None
pcm = None
solvent = None
smx=None
if "opt" in sections:
opt = cls.read_opt(string)
if "pcm" in sections:
pcm = cls.read_pcm(string)
if "solvent" in sections:
solvent = cls.read_solvent(string)
if "smx" in sections:
smx = cls.read_smx(string)
return cls(molecule, rem, opt=opt, pcm=pcm, solvent=solvent, smx=smx)
def write_file(self, filename):
with zopen(filename, 'wt') as f:
f.write(self.__str__())
@staticmethod
def write_multi_job_file(job_list, filename):
with zopen(filename, 'wt') as f:
f.write(QCInput.multi_job_string(job_list))
@staticmethod
def from_file(filename):
with zopen(filename, 'rt') as f:
return QCInput.from_string(f.read())
@classmethod
def from_multi_jobs_file(cls, filename):
# returns a list of QCInput objects
with zopen(filename, 'rt') as f:
# the delimiter between QChem jobs is @@@
multi_job_strings = f.read().split("@@@")
# list of individual QChem jobs
input_list = [cls.from_string(i) for i in multi_job_strings]
return input_list
@staticmethod
def molecule_template(molecule):
# todo: add ghost atoms
mol_list = []
mol_list.append("$molecule")
if isinstance(molecule, str):
if molecule == "read":
mol_list.append(" read")
else:
raise ValueError('The only acceptable text value for molecule is "read"')
else:
mol_list.append(" {charge} {spin_mult}".format(
charge=int(molecule.charge),
spin_mult=molecule.spin_multiplicity))
for site in molecule.sites:
mol_list.append(
" {atom} {x: .10f} {y: .10f} {z: .10f}".format(
atom=site.species_string, x=site.x, y=site.y,
z=site.z))
mol_list.append("$end")
return '\n'.join(mol_list)
@staticmethod
def rem_template(rem):
rem_list = []
rem_list.append("$rem")
for key, value in rem.items():
rem_list.append(" {key} = {value}".format(key=key, value=value))
rem_list.append("$end")
return '\n'.join(rem_list)
@staticmethod
def opt_template(opt):
opt_list = []
opt_list.append("$opt")
# loops over all opt sections
for key, value in opt.items():
opt_list.append("{section}".format(section=key))
# loops over all values within the section
for i in value:
opt_list.append(" {val}".format(val=i))
opt_list.append("END{section}".format(section=key))
opt_list.append("")
# this deletes the empty space after the last section
del opt_list[-1]
opt_list.append("$end")
return '\n'.join(opt_list)
@staticmethod
def pcm_template(pcm):
pcm_list = []
pcm_list.append("$pcm")
for key, value in pcm.items():
pcm_list.append(" {key} {value}".format(key=key, value=value))
pcm_list.append("$end")
return '\n'.join(pcm_list)
@staticmethod
def solvent_template(solvent):
solvent_list = []
solvent_list.append("$solvent")
for key, value in solvent.items():
solvent_list.append(" {key} {value}".format(
key=key, value=value))
solvent_list.append("$end")
return '\n'.join(solvent_list)
@staticmethod
def smx_template(smx):
smx_list = []
smx_list.append("$smx")
for key, value in smx.items():
smx_list.append(" {key} {value}".format(
key=key, value=value))
smx_list.append("$end")
return '\n'.join(smx_list)
@staticmethod
def find_sections(string):
patterns = {"sections": r"^\s*?\$([a-z]+)", "multiple_jobs": r"(@@@)"}
matches = read_pattern(string, patterns)
# list of the sections present
sections = [val[0] for val in matches["sections"]]
# remove end from sections
sections = [sec for sec in sections if sec != 'end']
# this error should be replaced by a multi job read function when it is added
if "multiple_jobs" in matches.keys():
raise ValueError(
"Output file contains multiple qchem jobs please parse separately"
)
if "molecule" not in sections:
raise ValueError("Output file does not contain a molecule section")
if "rem" not in sections:
raise ValueError("Output file does not contain a rem section")
return sections
@staticmethod
def read_molecule(string):
charge = None
spin_mult = None
patterns = {
"read": r"^\s*\$molecule\n\s*(read)",
"charge": r"^\s*\$molecule\n\s*((?:\-)*\d+)\s+\d",
"spin_mult": r"^\s*\$molecule\n\s*\d+\s*(\d)"
}
matches = read_pattern(string, patterns)
if "read" in matches.keys():
return "read"
if "charge" in matches.keys():
charge = float(matches["charge"][0][0])
if "spin_mult" in matches.keys():
spin_mult = int(matches["spin_mult"][0][0])
header = r"^\s*\$molecule\n\s*(?:\-)*\d+\s*\d"
row = r"\s*((?i)[a-z]+)\s+([\d\-\.]+)\s+([\d\-\.]+)\s+([\d\-\.]+)"
footer = r"^\$end"
mol_table = read_table_pattern(
string,
header_pattern=header,
row_pattern=row,
footer_pattern=footer)
species = [val[0] for val in mol_table[0]]
coords = [[float(val[1]), float(val[2]),
float(val[3])] for val in mol_table[0]]
mol = Molecule(
species=species,
coords=coords,
charge=charge,
spin_multiplicity=spin_mult)
return mol
@staticmethod
def read_rem(string):
header = r"^\s*\$rem"
row = r"\s*([a-zA-Z\_]+)\s*=?\s*(\S+)"
footer = r"^\s*\$end"
rem_table = read_table_pattern(
string,
header_pattern=header,
row_pattern=row,
footer_pattern=footer)
rem = {key: val for key, val in rem_table[0]}
return rem
@staticmethod
def read_opt(string):
patterns = {
"CONSTRAINT": r"^\s*CONSTRAINT",
"FIXED": r"^\s*FIXED",
"DUMMY": r"^\s*DUMMY",
"CONNECT": r"^\s*CONNECT"
}
opt_matches = read_pattern(string, patterns)
opt_sections = [key for key in opt_matches.keys()]
opt = {}
if "CONSTRAINT" in opt_sections:
c_header = r"^\s*CONSTRAINT\n"
c_row = r"(\w.*)\n"
c_footer = r"^\s*ENDCONSTRAINT\n"
c_table = read_table_pattern(
string,
header_pattern=c_header,
row_pattern=c_row,
footer_pattern=c_footer)
opt["CONSTRAINT"] = [val[0] for val in c_table[0]]
if "FIXED" in opt_sections:
f_header = r"^\s*FIXED\n"
f_row = r"(\w.*)\n"
f_footer = r"^\s*ENDFIXED\n"
f_table = read_table_pattern(
string,
header_pattern=f_header,
row_pattern=f_row,
footer_pattern=f_footer)
opt["FIXED"] = [val[0] for val in f_table[0]]
if "DUMMY" in opt_sections:
d_header = r"^\s*DUMMY\n"
d_row = r"(\w.*)\n"
d_footer = r"^\s*ENDDUMMY\n"
d_table = read_table_pattern(
string,
header_pattern=d_header,
row_pattern=d_row,
footer_pattern=d_footer)
opt["DUMMY"] = [val[0] for val in d_table[0]]
if "CONNECT" in opt_sections:
cc_header = r"^\s*CONNECT\n"
cc_row = r"(\w.*)\n"
cc_footer = r"^\s*ENDCONNECT\n"
cc_table = read_table_pattern(
string,
header_pattern=cc_header,
row_pattern=cc_row,
footer_pattern=cc_footer)
opt["CONNECT"] = [val[0] for val in cc_table[0]]
return opt
@staticmethod
def read_pcm(string):
header = r"^\s*\$pcm"
row = r"\s*([a-zA-Z\_]+)\s+(\S+)"
footer = r"^\s*\$end"
pcm_table = read_table_pattern(
string,
header_pattern=header,
row_pattern=row,
footer_pattern=footer)
if pcm_table == []:
print(
"No valid PCM inputs found. Note that there should be no '=' chracters in PCM input lines."
)
return {}
else:
pcm = {key: val for key, val in pcm_table[0]}
return pcm
@staticmethod
def read_solvent(string):
header = r"^\s*\$solvent"
row = r"\s*([a-zA-Z\_]+)\s+(\S+)"
footer = r"^\s*\$end"
solvent_table = read_table_pattern(
string,
header_pattern=header,
row_pattern=row,
footer_pattern=footer)
if solvent_table == []:
print(
"No valid solvent inputs found. Note that there should be no '=' chracters in solvent input lines."
)
return {}
else:
solvent = {key: val for key, val in solvent_table[0]}
return solvent
@staticmethod
def read_smx(string):
header = r"^\s*\$smx"
row = r"\s*([a-zA-Z\_]+)\s+(\S+)"
footer = r"^\s*\$end"
smx_table = read_table_pattern(
string,
header_pattern=header,
row_pattern=row,
footer_pattern=footer)
if smx_table == []:
print(
"No valid smx inputs found. Note that there should be no '=' chracters in smx input lines."
)
return {}
else:
smx = {key: val for key, val in smx_table[0]}
return smx
| |
from functools import partial
import threading
from PyQt5.QtCore import Qt, pyqtSignal, QRegExp
from PyQt5.QtGui import QRegExpValidator
from PyQt5.QtWidgets import (QVBoxLayout, QLabel, QGridLayout, QPushButton,
QHBoxLayout, QButtonGroup, QGroupBox,
QTextEdit, QLineEdit, QRadioButton, QCheckBox, QWidget,
QMessageBox, QFileDialog, QSlider, QTabWidget)
from electrum_ltc.gui.qt.util import (WindowModalDialog, WWLabel, Buttons, CancelButton,
OkButton, CloseButton, getOpenFileName)
from electrum_ltc.i18n import _
from electrum_ltc.plugin import hook
from electrum_ltc.util import bh2u
from ..hw_wallet.qt import QtHandlerBase, QtPluginBase
from ..hw_wallet.plugin import only_hook_if_libraries_available
from .safe_t import SafeTPlugin, TIM_NEW, TIM_RECOVER, TIM_MNEMONIC
# User-facing, translatable help/warning strings shown by the dialogs below.
PASSPHRASE_HELP_SHORT =_(
    "Passphrases allow you to access new wallets, each "
    "hidden behind a particular case-sensitive passphrase.")
# Extended variant: the short help plus wallet-per-passphrase guidance.
PASSPHRASE_HELP = PASSPHRASE_HELP_SHORT + " " + _(
    "You need to create a separate Electrum wallet for each passphrase "
    "you use as they each generate different addresses. Changing "
    "your passphrase does not lose other wallets, each is still "
    "accessible behind its own passphrase.")
# Shown in the wizard/settings when PIN protection is off.
RECOMMEND_PIN = _(
    "You should enable PIN protection. Your PIN is the only protection "
    "for your litecoins if your device is lost or stolen.")
# Red warning displayed next to the passphrase checkbox/button.
PASSPHRASE_NOT_PIN = _(
    "If you forget a passphrase you will be unable to access any "
    "litecoins in the wallet behind it. A passphrase is not a PIN. "
    "Only change this if you are sure you understand it.")
class QtHandler(QtHandlerBase):
    """Qt GUI handler for the Safe-T device: shows the PIN-matrix dialog.

    ``get_pin`` may be called from a non-GUI thread; it emits ``pin_signal``
    so ``pin_dialog`` runs on the GUI thread, then blocks on ``self.done``
    until the dialog has stored the user's response.
    """
    # (msg, show_strength) -- marshals the PIN request onto the GUI thread.
    pin_signal = pyqtSignal(object, object)
    def __init__(self, win, pin_matrix_widget_class, device):
        super(QtHandler, self).__init__(win, device)
        self.pin_signal.connect(self.pin_dialog)
        self.pin_matrix_widget_class = pin_matrix_widget_class
    def get_pin(self, msg, *, show_strength=True):
        """Block the calling thread until the user has entered a PIN.

        NOTE(review): self.done is presumably a threading.Event provided by
        QtHandlerBase -- confirm against the base class.
        """
        self.done.clear()
        self.pin_signal.emit(msg, show_strength)
        self.done.wait()
        return self.response
    def pin_dialog(self, msg, show_strength):
        """Show the modal PIN-matrix dialog and store the result in self.response."""
        # Needed e.g. when resetting a device
        self.clear_dialog()
        dialog = WindowModalDialog(self.top_level_window(), _("Enter PIN"))
        matrix = self.pin_matrix_widget_class(show_strength)
        vbox = QVBoxLayout()
        vbox.addWidget(QLabel(msg))
        vbox.addWidget(matrix)
        vbox.addLayout(Buttons(CancelButton(dialog), OkButton(dialog)))
        dialog.setLayout(vbox)
        dialog.exec_()
        self.response = str(matrix.get_value())
        # Unblock the thread waiting in get_pin().
        self.done.set()
class QtPlugin(QtPluginBase):
    """Qt glue shared by Safe-T-style hardware-wallet plugins.

    Derived classes must provide the following class-static variables:
    ``icon_file`` and ``pin_matrix_widget_class``.
    """
    @only_hook_if_libraries_available
    @hook
    def receive_menu(self, menu, addrs, wallet):
        # Add a "Show on <device>" action for a single selected address.
        if len(addrs) != 1:
            return
        for keystore in wallet.get_keystores():
            if type(keystore) == self.keystore_class:
                # Bind the loop's keystore as a default argument so each
                # action targets its own device.
                def show_address(keystore=keystore):
                    keystore.thread.add(partial(self.show_address, wallet, addrs[0], keystore))
                device_name = "{} ({})".format(self.device, keystore.label)
                menu.addAction(_("Show on {}").format(device_name), show_address)
    def show_settings_dialog(self, window, keystore):
        """Choose a device on the keystore thread, then open SettingsDialog."""
        def connect():
            device_id = self.choose_device(window, keystore)
            return device_id
        def show_dialog(device_id):
            if device_id:
                SettingsDialog(window, self, keystore, device_id).exec_()
        keystore.thread.add(connect, on_success=show_dialog)
    def request_safe_t_init_settings(self, wizard, method, device):
        """Show the device-initialization wizard page and collect its settings.

        Returns:
            (item, label, pin, use_passphrase): for TIM_NEW/TIM_RECOVER,
            *item* is the checked seed-length index and *pin* a bool;
            otherwise *item* is the cleaned mnemonic/xprv text and *pin*
            the entered PIN digits as a string.
        """
        vbox = QVBoxLayout()
        next_enabled = True
        label = QLabel(_("Enter a label to name your device:"))
        name = QLineEdit()
        hl = QHBoxLayout()
        hl.addWidget(label)
        hl.addWidget(name)
        hl.addStretch(1)
        vbox.addLayout(hl)
        def clean_text(widget):
            # Collapse whitespace runs to single spaces.
            text = widget.toPlainText().strip()
            return ' '.join(text.split())
        if method in [TIM_NEW, TIM_RECOVER]:
            gb = QGroupBox()
            hbox1 = QHBoxLayout()
            gb.setLayout(hbox1)
            vbox.addWidget(gb)
            gb.setTitle(_("Select your seed length:"))
            bg = QButtonGroup()
            for i, count in enumerate([12, 18, 24]):
                rb = QRadioButton(gb)
                rb.setText(_("{:d} words").format(count))
                bg.addButton(rb)
                bg.setId(rb, i)
                hbox1.addWidget(rb)
                # Every button is checked as it is added, so the last
                # option (24 words) ends up selected by default.
                rb.setChecked(True)
            cb_pin = QCheckBox(_('Enable PIN protection'))
            cb_pin.setChecked(True)
        else:
            text = QTextEdit()
            text.setMaximumHeight(60)
            if method == TIM_MNEMONIC:
                msg = _("Enter your BIP39 mnemonic:")
            else:
                msg = _("Enter the master private key beginning with xprv:")
                def set_enabled():
                    from electrum_ltc.bip32 import is_xprv
                    # Only allow "next" once the entered text is a valid xprv.
                    wizard.next_button.setEnabled(is_xprv(clean_text(text)))
                text.textChanged.connect(set_enabled)
                next_enabled = False
            vbox.addWidget(QLabel(msg))
            vbox.addWidget(text)
        pin = QLineEdit()
        pin.setValidator(QRegExpValidator(QRegExp('[1-9]{0,9}')))
        pin.setMaximumWidth(100)
        hbox_pin = QHBoxLayout()
        hbox_pin.addWidget(QLabel(_("Enter your PIN (digits 1-9):")))
        hbox_pin.addWidget(pin)
        hbox_pin.addStretch(1)
        if method in [TIM_NEW, TIM_RECOVER]:
            vbox.addWidget(WWLabel(RECOMMEND_PIN))
            vbox.addWidget(cb_pin)
        else:
            vbox.addLayout(hbox_pin)
        passphrase_msg = WWLabel(PASSPHRASE_HELP_SHORT)
        passphrase_warning = WWLabel(PASSPHRASE_NOT_PIN)
        passphrase_warning.setStyleSheet("color: red")
        cb_phrase = QCheckBox(_('Enable passphrases'))
        cb_phrase.setChecked(False)
        vbox.addWidget(passphrase_msg)
        vbox.addWidget(passphrase_warning)
        vbox.addWidget(cb_phrase)
        # Blocks until the user advances the wizard page.
        wizard.exec_layout(vbox, next_enabled=next_enabled)
        if method in [TIM_NEW, TIM_RECOVER]:
            item = bg.checkedId()
            pin = cb_pin.isChecked()
        else:
            item = ' '.join(str(clean_text(text)).split())
            # Rebinds `pin` from the QLineEdit to its text.
            pin = str(pin.text())
        return (item, name.text(), pin, cb_phrase.isChecked())
class Plugin(SafeTPlugin, QtPlugin):
    """Safe-T plugin combined with its Qt GUI behaviour."""
    icon_unpaired = "safe-t_unpaired.png"
    icon_paired = "safe-t.png"
    def create_handler(self, window):
        """Return a QtHandler bound to *window* for this device."""
        return QtHandler(window, self.pin_matrix_widget_class(), self.device)
    @classmethod
    def pin_matrix_widget_class(cls):
        """Lazily import and return the PIN-matrix widget class.

        Imported here rather than at module level so the plugin module can
        be loaded even when the optional ``safetlib`` dependency is missing.
        (Fix: the classmethod receiver was misleadingly named ``self``.)
        """
        from safetlib.qt.pinmatrix import PinMatrixWidget
        return PinMatrixWidget
class SettingsDialog(WindowModalDialog):
    '''This dialog doesn't require a device be paired with a wallet.
    We want users to be able to wipe a device even if they've forgotten
    their PIN.'''
    def __init__(self, window, plugin, keystore, device_id):
        """Build the three-tab (Information / Settings / Advanced) dialog.

        Args:
            window: parent wallet window (used for the balance check on wipe).
            plugin: hardware-wallet plugin (provides device_manager()).
            keystore: keystore whose handler/thread talk to the device.
            device_id: id used for devmgr.client_by_id lookups.
        """
        title = _("{} Settings").format(plugin.device)
        super(SettingsDialog, self).__init__(window, title)
        self.setMaximumWidth(540)
        devmgr = plugin.device_manager()
        config = devmgr.config
        handler = keystore.handler
        thread = keystore.thread
        # Homescreen image dimensions (width x height in pixels).
        hs_cols, hs_rows = (128, 64)
        def invoke_client(method, *args, **kw_args):
            # Run client.<method>(*args, **kw_args) on the keystore thread;
            # on success the dialog is refreshed from the returned features
            # via update(). method=None just refreshes.
            unpair_after = kw_args.pop('unpair_after', False)
            def task():
                client = devmgr.client_by_id(device_id)
                if not client:
                    raise RuntimeError("Device not connected")
                if method:
                    getattr(client, method)(*args, **kw_args)
                if unpair_after:
                    devmgr.unpair_id(device_id)
                return client.features
            thread.add(task, on_success=update)
        def update(features):
            # Refresh every widget from the device's feature report.
            self.features = features
            set_label_enabled()
            if features.bootloader_hash:
                bl_hash = bh2u(features.bootloader_hash)
                # Split the 64-char hex hash over two lines for display.
                bl_hash = "\n".join([bl_hash[:32], bl_hash[32:]])
            else:
                bl_hash = "N/A"
            noyes = [_("No"), _("Yes")]
            endis = [_("Enable Passphrases"), _("Disable Passphrases")]
            disen = [_("Disabled"), _("Enabled")]
            setchange = [_("Set a PIN"), _("Change PIN")]
            version = "%d.%d.%d" % (features.major_version,
                                    features.minor_version,
                                    features.patch_version)
            device_label.setText(features.label)
            pin_set_label.setText(noyes[features.pin_protection])
            passphrases_label.setText(disen[features.passphrase_protection])
            bl_hash_label.setText(bl_hash)
            label_edit.setText(features.label)
            device_id_label.setText(features.device_id)
            initialized_label.setText(noyes[features.initialized])
            version_label.setText(version)
            clear_pin_button.setVisible(features.pin_protection)
            clear_pin_warning.setVisible(features.pin_protection)
            pin_button.setText(setchange[features.pin_protection])
            pin_msg.setVisible(not features.pin_protection)
            passphrase_button.setText(endis[features.passphrase_protection])
            language_label.setText(features.language)
        def set_label_enabled():
            # "Apply" only enabled when the edited label differs from the device's.
            label_apply.setEnabled(label_edit.text() != self.features.label)
        def rename():
            invoke_client('change_label', label_edit.text())
        def toggle_passphrase():
            # Confirm, then toggle; disabling passphrases unpairs the client.
            title = _("Confirm Toggle Passphrase Protection")
            currently_enabled = self.features.passphrase_protection
            if currently_enabled:
                msg = _("After disabling passphrases, you can only pair this "
                        "Electrum wallet if it had an empty passphrase. "
                        "If its passphrase was not empty, you will need to "
                        "create a new wallet with the install wizard. You "
                        "can use this wallet again at any time by re-enabling "
                        "passphrases and entering its passphrase.")
            else:
                msg = _("Your current Electrum wallet can only be used with "
                        "an empty passphrase. You must create a separate "
                        "wallet with the install wizard for other passphrases "
                        "as each one generates a new set of addresses.")
            msg += "\n\n" + _("Are you sure you want to proceed?")
            if not self.question(msg, title=title):
                return
            invoke_client('toggle_passphrase', unpair_after=currently_enabled)
        def change_homescreen():
            filename = getOpenFileName(
                parent=self,
                title=_("Choose Homescreen"),
                config=config,
            )
            if not filename:
                return  # user cancelled
            if filename.endswith('.toif'):
                img = open(filename, 'rb').read()
                if img[:8] != b'TOIf\x90\x00\x90\x00':
                    handler.show_error('File is not a TOIF file with size of 144x144')
                    return
            else:
                from PIL import Image  # FIXME
                im = Image.open(filename)
                if im.size != (128, 64):
                    handler.show_error('Image must be 128 x 64 pixels')
                    return
                # Convert to 1-bit and pack into a 1024-byte bitmap,
                # MSB-first within each byte, row-major.
                im = im.convert('1')
                pix = im.load()
                img = bytearray(1024)
                for j in range(64):
                    for i in range(128):
                        if pix[i, j]:
                            o = (i + j * 128)
                            img[o // 8] |= (1 << (7 - o % 8))
                img = bytes(img)
            invoke_client('change_homescreen', img)
        def clear_homescreen():
            invoke_client('change_homescreen', b'\x00')
        def set_pin():
            invoke_client('set_pin', remove=False)
        def clear_pin():
            invoke_client('set_pin', remove=True)
        def wipe_device():
            # Extra confirmation when the paired wallet still holds funds.
            wallet = window.wallet
            if wallet and sum(wallet.get_balance()):
                title = _("Confirm Device Wipe")
                msg = _("Are you SURE you want to wipe the device?\n"
                        "Your wallet still has litecoins in it!")
                if not self.question(msg, title=title,
                                     icon=QMessageBox.Critical):
                    return
            invoke_client('wipe_device', unpair_after=True)
        def slider_moved():
            mins = timeout_slider.sliderPosition()
            timeout_minutes.setText(_("{:2d} minutes").format(mins))
        def slider_released():
            # Slider position is minutes; the config stores seconds.
            config.set_session_timeout(timeout_slider.sliderPosition() * 60)
        # Information tab
        info_tab = QWidget()
        info_layout = QVBoxLayout(info_tab)
        info_glayout = QGridLayout()
        info_glayout.setColumnStretch(2, 1)
        device_label = QLabel()
        pin_set_label = QLabel()
        passphrases_label = QLabel()
        version_label = QLabel()
        device_id_label = QLabel()
        bl_hash_label = QLabel()
        bl_hash_label.setWordWrap(True)
        language_label = QLabel()
        initialized_label = QLabel()
        rows = [
            (_("Device Label"), device_label),
            (_("PIN set"), pin_set_label),
            (_("Passphrases"), passphrases_label),
            (_("Firmware Version"), version_label),
            (_("Device ID"), device_id_label),
            (_("Bootloader Hash"), bl_hash_label),
            (_("Language"), language_label),
            (_("Initialized"), initialized_label),
        ]
        for row_num, (label, widget) in enumerate(rows):
            info_glayout.addWidget(QLabel(label), row_num, 0)
            info_glayout.addWidget(widget, row_num, 1)
        info_layout.addLayout(info_glayout)
        # Settings tab
        settings_tab = QWidget()
        settings_layout = QVBoxLayout(settings_tab)
        settings_glayout = QGridLayout()
        # Settings tab - Label
        label_msg = QLabel(_("Name this {}. If you have multiple devices "
                             "their labels help distinguish them.")
                           .format(plugin.device))
        label_msg.setWordWrap(True)
        label_label = QLabel(_("Device Label"))
        label_edit = QLineEdit()
        label_edit.setMinimumWidth(150)
        label_edit.setMaxLength(plugin.MAX_LABEL_LEN)
        label_apply = QPushButton(_("Apply"))
        label_apply.clicked.connect(rename)
        label_edit.textChanged.connect(set_label_enabled)
        settings_glayout.addWidget(label_label, 0, 0)
        settings_glayout.addWidget(label_edit, 0, 1, 1, 2)
        settings_glayout.addWidget(label_apply, 0, 3)
        settings_glayout.addWidget(label_msg, 1, 1, 1, -1)
        # Settings tab - PIN
        pin_label = QLabel(_("PIN Protection"))
        pin_button = QPushButton()
        pin_button.clicked.connect(set_pin)
        settings_glayout.addWidget(pin_label, 2, 0)
        settings_glayout.addWidget(pin_button, 2, 1)
        pin_msg = QLabel(_("PIN protection is strongly recommended. "
                           "A PIN is your only protection against someone "
                           "stealing your litecoins if they obtain physical "
                           "access to your {}.").format(plugin.device))
        pin_msg.setWordWrap(True)
        pin_msg.setStyleSheet("color: red")
        settings_glayout.addWidget(pin_msg, 3, 1, 1, -1)
        # Settings tab - Homescreen
        homescreen_label = QLabel(_("Homescreen"))
        homescreen_change_button = QPushButton(_("Change..."))
        homescreen_clear_button = QPushButton(_("Reset"))
        homescreen_change_button.clicked.connect(change_homescreen)
        try:
            import PIL
        except ImportError:
            homescreen_change_button.setDisabled(True)
            homescreen_change_button.setToolTip(
                _("Required package 'PIL' is not available - Please install it.")
            )
        homescreen_clear_button.clicked.connect(clear_homescreen)
        homescreen_msg = QLabel(_("You can set the homescreen on your "
                                  "device to personalize it. You must "
                                  "choose a {} x {} monochrome black and "
                                  "white image.").format(hs_cols, hs_rows))
        homescreen_msg.setWordWrap(True)
        settings_glayout.addWidget(homescreen_label, 4, 0)
        settings_glayout.addWidget(homescreen_change_button, 4, 1)
        settings_glayout.addWidget(homescreen_clear_button, 4, 2)
        settings_glayout.addWidget(homescreen_msg, 5, 1, 1, -1)
        # Settings tab - Session Timeout
        timeout_label = QLabel(_("Session Timeout"))
        timeout_minutes = QLabel()
        timeout_slider = QSlider(Qt.Horizontal)
        timeout_slider.setRange(1, 60)
        timeout_slider.setSingleStep(1)
        timeout_slider.setTickInterval(5)
        timeout_slider.setTickPosition(QSlider.TicksBelow)
        timeout_slider.setTracking(True)
        timeout_msg = QLabel(
            _("Clear the session after the specified period "
              "of inactivity. Once a session has timed out, "
              "your PIN and passphrase (if enabled) must be "
              "re-entered to use the device."))
        timeout_msg.setWordWrap(True)
        timeout_slider.setSliderPosition(config.get_session_timeout() // 60)
        slider_moved()
        timeout_slider.valueChanged.connect(slider_moved)
        timeout_slider.sliderReleased.connect(slider_released)
        settings_glayout.addWidget(timeout_label, 6, 0)
        settings_glayout.addWidget(timeout_slider, 6, 1, 1, 3)
        settings_glayout.addWidget(timeout_minutes, 6, 4)
        settings_glayout.addWidget(timeout_msg, 7, 1, 1, -1)
        settings_layout.addLayout(settings_glayout)
        settings_layout.addStretch(1)
        # Advanced tab
        advanced_tab = QWidget()
        advanced_layout = QVBoxLayout(advanced_tab)
        advanced_glayout = QGridLayout()
        # Advanced tab - clear PIN
        clear_pin_button = QPushButton(_("Disable PIN"))
        clear_pin_button.clicked.connect(clear_pin)
        clear_pin_warning = QLabel(
            _("If you disable your PIN, anyone with physical access to your "
              "{} device can spend your litecoins.").format(plugin.device))
        clear_pin_warning.setWordWrap(True)
        clear_pin_warning.setStyleSheet("color: red")
        advanced_glayout.addWidget(clear_pin_button, 0, 2)
        advanced_glayout.addWidget(clear_pin_warning, 1, 0, 1, 5)
        # Advanced tab - toggle passphrase protection
        passphrase_button = QPushButton()
        passphrase_button.clicked.connect(toggle_passphrase)
        passphrase_msg = WWLabel(PASSPHRASE_HELP)
        passphrase_warning = WWLabel(PASSPHRASE_NOT_PIN)
        passphrase_warning.setStyleSheet("color: red")
        advanced_glayout.addWidget(passphrase_button, 3, 2)
        advanced_glayout.addWidget(passphrase_msg, 4, 0, 1, 5)
        advanced_glayout.addWidget(passphrase_warning, 5, 0, 1, 5)
        # Advanced tab - wipe device
        wipe_device_button = QPushButton(_("Wipe Device"))
        wipe_device_button.clicked.connect(wipe_device)
        wipe_device_msg = QLabel(
            _("Wipe the device, removing all data from it. The firmware "
              "is left unchanged."))
        wipe_device_msg.setWordWrap(True)
        wipe_device_warning = QLabel(
            _("Only wipe a device if you have the recovery seed written down "
              "and the device wallet(s) are empty, otherwise the litecoins "
              "will be lost forever."))
        wipe_device_warning.setWordWrap(True)
        wipe_device_warning.setStyleSheet("color: red")
        advanced_glayout.addWidget(wipe_device_button, 6, 2)
        advanced_glayout.addWidget(wipe_device_msg, 7, 0, 1, 5)
        advanced_glayout.addWidget(wipe_device_warning, 8, 0, 1, 5)
        advanced_layout.addLayout(advanced_glayout)
        advanced_layout.addStretch(1)
        tabs = QTabWidget(self)
        tabs.addTab(info_tab, _("Information"))
        tabs.addTab(settings_tab, _("Settings"))
        tabs.addTab(advanced_tab, _("Advanced"))
        dialog_vbox = QVBoxLayout(self)
        dialog_vbox.addWidget(tabs)
        dialog_vbox.addLayout(Buttons(CloseButton(self)))
        # Update information: method=None just fetches features into update().
        invoke_client(None)
| |
import numpy as np
import pandas as pd
import datetime
import matplotlib as mpl
import matplotlib.pyplot as plt
import pytest
@pytest.fixture(scope="session", autouse=True)
def remove_pandas_unit_conversion():
    """Restore matplotlib's own datetime converters for the whole session.

    Prior to pandas 1.0, pandas registered its own datetime converters,
    which are less powerful than what matplotlib added in 2.2, and seaborn
    relies on that functionality.
    https://github.com/matplotlib/matplotlib/pull/9779
    https://github.com/pandas-dev/pandas/issues/27036
    """
    for datetime_type in (np.datetime64, datetime.date, datetime.datetime):
        mpl.units.registry[datetime_type] = mpl.dates.DateConverter()
@pytest.fixture(autouse=True)
def close_figs():
    """Close every open matplotlib figure once each test finishes."""
    try:
        yield
    finally:
        plt.close("all")
@pytest.fixture(autouse=True)
def random_seed():
    """Reseed numpy's global RNG identically before every test."""
    np.random.seed(sum(map(ord, "seaborn random global")))
@pytest.fixture()
def rng():
    """Provide a deterministically seeded RandomState for a test."""
    return np.random.RandomState(sum(map(ord, "seaborn random object")))
@pytest.fixture
def wide_df(rng):
    """Wide-form DataFrame: three float columns over a named Int64Index."""
    col_names = list("abc")
    idx = pd.Int64Index(np.arange(10, 50, 2), name="wide_index")
    data = rng.normal(size=(len(idx), len(col_names)))
    return pd.DataFrame(data, index=idx, columns=col_names)
@pytest.fixture
def wide_array(wide_df):
    """Wide-form data as a bare 2d array.

    (wide_df.to_numpy() would be cleaner but requires pandas >= 0.24.)
    """
    return np.asarray(wide_df)
@pytest.fixture
def flat_series(rng):
    """Named Series of 20 normals over a named Int64Index."""
    idx = pd.Int64Index(np.arange(10, 30), name="t")
    return pd.Series(rng.normal(size=20), idx, name="s")
@pytest.fixture
def flat_array(flat_series):
    """Flat data as a bare 1d array.

    (flat_series.to_numpy() would be cleaner but requires pandas >= 0.24.)
    """
    return np.asarray(flat_series)
@pytest.fixture
def flat_list(flat_series):
    """Flat data as a plain Python list.

    (flat_series.to_list() would be cleaner but requires pandas >= 0.24.)
    """
    return flat_series.tolist()
@pytest.fixture(params=["series", "array", "list"])
def flat_data(rng, request):
    """Flat data delivered as a Series, array, or list, per the param."""
    idx = pd.Int64Index(np.arange(10, 30), name="t")
    series = pd.Series(rng.normal(size=20), idx, name="s")
    kind = request.param
    if kind == "series":
        return series
    if kind == "array":
        try:
            return series.to_numpy()  # Requires pandas >= 0.24
        except AttributeError:
            return np.asarray(series)
    if kind == "list":
        try:
            return series.to_list()  # Requires pandas >= 0.24
        except AttributeError:
            return series.tolist()
@pytest.fixture
def wide_list_of_series(rng):
    """Two Series of unequal length with partially overlapping indices."""
    series_a = pd.Series(rng.normal(size=20), np.arange(20), name="a")
    series_b = pd.Series(rng.normal(size=10), np.arange(5, 15), name="b")
    return [series_a, series_b]
@pytest.fixture
def wide_list_of_arrays(wide_list_of_series):
    """Wide-form list data as bare arrays (to_numpy needs pandas >= 0.24)."""
    return list(map(np.asarray, wide_list_of_series))
@pytest.fixture
def wide_list_of_lists(wide_list_of_series):
    """Wide-form list data as plain lists (to_list needs pandas >= 0.24)."""
    return [series.tolist() for series in wide_list_of_series]
@pytest.fixture
def wide_dict_of_series(wide_list_of_series):
    """Wide-form data as a name -> Series mapping."""
    return {series.name: series for series in wide_list_of_series}
@pytest.fixture
def wide_dict_of_arrays(wide_list_of_series):
    """Wide-form data as a name -> array mapping (to_numpy needs pandas >= 0.24)."""
    return {series.name: np.asarray(series) for series in wide_list_of_series}
@pytest.fixture
def wide_dict_of_lists(wide_list_of_series):
    """Wide-form data as a name -> list mapping (to_list needs pandas >= 0.24)."""
    return {series.name: series.tolist() for series in wide_list_of_series}
@pytest.fixture
def long_df(rng):
    """Long-form DataFrame mixing numeric, string, datetime, and categorical
    columns, plus categorical/string copies of some of them."""
    n = 100
    df = pd.DataFrame(dict(
        x=rng.uniform(0, 20, n).round().astype("int"),
        y=rng.normal(size=n),
        a=rng.choice(list("abc"), n),
        b=rng.choice(list("mnop"), n),
        # BUG FIX: the weights were passed positionally as the third argument,
        # but RandomState.choice's third positional parameter is `replace`,
        # not `p` -- so the intended 30/70 weighting was silently ignored.
        c=rng.choice([0, 1], n, p=[.3, .7]),
        t=rng.choice(np.arange("2004-07-30", "2007-07-30", dtype="datetime64[Y]"), n),
        s=rng.choice([2, 4, 8], n),
        f=rng.choice([0.2, 0.3], n),
    ))
    # Categorical copy of "a" with a rotated (non-alphabetical) category order.
    a_cat = df["a"].astype("category")
    new_categories = np.roll(a_cat.cat.categories, 1)
    df["a_cat"] = a_cat.cat.reorder_categories(new_categories)
    df["s_cat"] = df["s"].astype("category")
    df["s_str"] = df["s"].astype(str)
    return df
@pytest.fixture
def long_dict(long_df):
    """long_df as a nested dict: column -> {index: value}."""
    return long_df.to_dict()
@pytest.fixture
def repeated_df(rng):
    """DataFrame where each x value appears once per "unit" u (two units)."""
    n = 100
    half = n // 2
    return pd.DataFrame(dict(
        x=np.tile(np.arange(half), 2),
        y=rng.normal(size=n),
        a=rng.choice(list("abc"), n),
        u=np.repeat(np.arange(2), half),
    ))
@pytest.fixture
def missing_df(rng, long_df):
    """Copy of long_df with 10 random entries in every column set to NaN."""
    df = long_df.copy()
    for col_name in df:
        nan_idx = rng.permutation(df.index)[:10]
        df.loc[nan_idx, col_name] = np.nan
    return df
@pytest.fixture
def null_series():
    """A 20-element float Series containing only missing values."""
    return pd.Series(index=np.arange(20), dtype='float64')
| |
import os
from copy import copy
from decimal import Decimal
from django.utils.unittest import TestCase
from django.contrib.gis.gdal import DataSource, OGRException
from django.contrib.gis.tests.utils import mysql
from django.contrib.gis.utils.layermapping import LayerMapping, LayerMapError, InvalidDecimal, MissingForeignKey
from models import \
City, County, CountyFeat, Interstate, ICity1, ICity2, Invalid, State, \
city_mapping, co_mapping, cofeat_mapping, inter_mapping
# Paths to the test shapefiles under the sibling "data" directory.
shp_path = os.path.realpath(os.path.join(os.path.dirname(__file__), os.pardir, 'data'))
city_shp = os.path.join(shp_path, 'cities', 'cities.shp')
co_shp = os.path.join(shp_path, 'counties', 'counties.shp')
inter_shp = os.path.join(shp_path, 'interstates', 'interstates.shp')
invalid_shp = os.path.join(shp_path, 'invalid', 'emptypoints.shp')
# Dictionaries to hold what's expected in the county shapefile.
NAMES  = ['Bexar', 'Galveston', 'Harris', 'Honolulu', 'Pueblo']
NUMS   = [1, 2, 1, 19, 1] # Number of polygons for each.
STATES = ['Texas', 'Texas', 'Texas', 'Hawaii', 'Colorado']
class LayerMapTest(TestCase):
    """Exercises LayerMapping, the utility that imports vector data
    (shapefiles) into GeoDjango models."""
    def test01_init(self):
        "Testing LayerMapping initialization."
        # Model field that does not exist.
        bad1 = copy(city_mapping)
        bad1['foobar'] = 'FooField'
        # Shapefile field that does not exist.
        bad2 = copy(city_mapping)
        bad2['name'] = 'Nombre'
        # Nonexistent geographic field type.
        bad3 = copy(city_mapping)
        bad3['point'] = 'CURVE'
        # Incrementing through the bad mapping dictionaries and
        # ensuring that a LayerMapError is raised.
        for bad_map in (bad1, bad2, bad3):
            try:
                lm = LayerMapping(City, city_shp, bad_map)
            except LayerMapError:
                pass
            else:
                self.fail('Expected a LayerMapError.')
        # A LookupError should be thrown for bogus encodings.
        try:
            lm = LayerMapping(City, city_shp, city_mapping, encoding='foobar')
        except LookupError:
            pass
        else:
            self.fail('Expected a LookupError')
    def test02_simple_layermap(self):
        "Test LayerMapping import of a simple point shapefile."
        # Setting up for the LayerMapping.
        lm = LayerMapping(City, city_shp, city_mapping)
        lm.save()
        # There should be three cities in the shape file.
        self.assertEqual(3, City.objects.count())
        # Opening up the shapefile, and verifying the values in each
        # of the features made it to the model.
        ds = DataSource(city_shp)
        layer = ds[0]
        for feat in layer:
            city = City.objects.get(name=feat['Name'].value)
            self.assertEqual(feat['Population'].value, city.population)
            self.assertEqual(Decimal(str(feat['Density'])), city.density)
            self.assertEqual(feat['Created'].value, city.dt)
            # Comparing the geometries (to 6 decimal places).
            pnt1, pnt2 = feat.geom, city.point
            self.assertAlmostEqual(pnt1.x, pnt2.x, 6)
            self.assertAlmostEqual(pnt1.y, pnt2.y, 6)
    def test03_layermap_strict(self):
        "Testing the `strict` keyword, and import of a LineString shapefile."
        # When the `strict` keyword is set an error encountered will force
        # the importation to stop.
        try:
            lm = LayerMapping(Interstate, inter_shp, inter_mapping)
            lm.save(silent=True, strict=True)
        except InvalidDecimal:
            # No transactions for geoms on MySQL; delete added features.
            if mysql: Interstate.objects.all().delete()
        else:
            self.fail('Should have failed on strict import with invalid decimal values.')
        # This LayerMapping should work b/c `strict` is not set.
        lm = LayerMapping(Interstate, inter_shp, inter_mapping)
        lm.save(silent=True)
        # Two interstate should have imported correctly.
        self.assertEqual(2, Interstate.objects.count())
        # Verifying the values in the layer w/the model.
        ds = DataSource(inter_shp)
        # Only the first two features of this shapefile are valid.
        valid_feats = ds[0][:2]
        for feat in valid_feats:
            istate = Interstate.objects.get(name=feat['Name'].value)
            if feat.fid == 0:
                self.assertEqual(Decimal(str(feat['Length'])), istate.length)
            elif feat.fid == 1:
                # Everything but the first two decimal digits were truncated,
                # because the Interstate model's `length` field has decimal_places=2.
                self.assertAlmostEqual(feat.get('Length'), float(istate.length), 2)
            # Geometry vertices should match to 6 decimal places.
            for p1, p2 in zip(feat.geom, istate.path):
                self.assertAlmostEqual(p1[0], p2[0], 6)
                self.assertAlmostEqual(p1[1], p2[1], 6)
    def county_helper(self, county_feat=True):
        "Helper function for ensuring the integrity of the mapped County models."
        # NAMES/NUMS/STATES are the parallel expectation lists defined at
        # module level.
        for name, n, st in zip(NAMES, NUMS, STATES):
            # Should only be one record b/c of `unique` keyword.
            c = County.objects.get(name=name)
            self.assertEqual(n, len(c.mpoly))
            self.assertEqual(st, c.state.name) # Checking ForeignKey mapping.
            # Multiple records because `unique` was not set.
            if county_feat:
                qs = CountyFeat.objects.filter(name=name)
                self.assertEqual(n, qs.count())
    def test04_layermap_unique_multigeometry_fk(self):
        "Testing the `unique`, and `transform`, geometry collection conversion, and ForeignKey mappings."
        # All the following should work.
        try:
            # Telling LayerMapping that we want no transformations performed on the data.
            lm = LayerMapping(County, co_shp, co_mapping, transform=False)
            # Specifying the source spatial reference system via the `source_srs` keyword.
            lm = LayerMapping(County, co_shp, co_mapping, source_srs=4269)
            lm = LayerMapping(County, co_shp, co_mapping, source_srs='NAD83')
            # Unique may take tuple or string parameters.
            for arg in ('name', ('name', 'mpoly')):
                lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique=arg)
        except:
            self.fail('No exception should be raised for proper use of keywords.')
        # Testing invalid params for the `unique` keyword.
        for e, arg in ((TypeError, 5.0), (ValueError, 'foobar'), (ValueError, ('name', 'mpolygon'))):
            self.assertRaises(e, LayerMapping, County, co_shp, co_mapping, transform=False, unique=arg)
        # No source reference system defined in the shapefile, should raise an error.
        if not mysql:
            self.assertRaises(LayerMapError, LayerMapping, County, co_shp, co_mapping)
        # Passing in invalid ForeignKey mapping parameters -- must be a dictionary
        # mapping for the model the ForeignKey points to.
        bad_fk_map1 = copy(co_mapping); bad_fk_map1['state'] = 'name'
        bad_fk_map2 = copy(co_mapping); bad_fk_map2['state'] = {'nombre' : 'State'}
        self.assertRaises(TypeError, LayerMapping, County, co_shp, bad_fk_map1, transform=False)
        self.assertRaises(LayerMapError, LayerMapping, County, co_shp, bad_fk_map2, transform=False)
        # There exist no State models for the ForeignKey mapping to work -- should raise
        # a MissingForeignKey exception (this error would be ignored if the `strict`
        # keyword is not set).
        lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')
        self.assertRaises(MissingForeignKey, lm.save, silent=True, strict=True)
        # Now creating the state models so the ForeignKey mapping may work.
        co, hi, tx = State(name='Colorado'), State(name='Hawaii'), State(name='Texas')
        co.save(), hi.save(), tx.save()
        # If a mapping is specified as a collection, all OGR fields that
        # are not collections will be converted into them. For example,
        # a Point column would be converted to MultiPoint. Other things being done
        # w/the keyword args:
        # `transform=False`: Specifies that no transform is to be done; this
        # has the effect of ignoring the spatial reference check (because the
        # county shapefile does not have implicit spatial reference info).
        #
        # `unique='name'`: Creates models on the condition that they have
        # unique county names; geometries from each feature however will be
        # appended to the geometry collection of the unique model. Thus,
        # all of the various islands in Honolulu county will be in in one
        # database record with a MULTIPOLYGON type.
        lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')
        lm.save(silent=True, strict=True)
        # A reference that doesn't use the unique keyword; a new database record will
        # created for each polygon.
        lm = LayerMapping(CountyFeat, co_shp, cofeat_mapping, transform=False)
        lm.save(silent=True, strict=True)
        # The county helper is called to ensure integrity of County models.
        self.county_helper()
    def test05_test_fid_range_step(self):
        "Tests the `fid_range` keyword and the `step` keyword of .save()."
        # Function for clearing out all the counties before testing.
        def clear_counties(): County.objects.all().delete()
        # Initializing the LayerMapping object to use in these tests.
        lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')
        # Bad feature id ranges should raise a type error.
        clear_counties()
        bad_ranges = (5.0, 'foo', co_shp)
        for bad in bad_ranges:
            self.assertRaises(TypeError, lm.save, fid_range=bad)
        # Step keyword should not be allowed w/`fid_range`.
        fr = (3, 5) # layer[3:5]
        self.assertRaises(LayerMapError, lm.save, fid_range=fr, step=10)
        lm.save(fid_range=fr)
        # Features IDs 3 & 4 are for Galveston County, Texas -- only
        # one model is returned because the `unique` keyword was set.
        qs = County.objects.all()
        self.assertEqual(1, qs.count())
        self.assertEqual('Galveston', qs[0].name)
        # Features IDs 5 and beyond for Honolulu County, Hawaii, and
        # FID 0 is for Pueblo County, Colorado.
        clear_counties()
        lm.save(fid_range=slice(5, None), silent=True, strict=True) # layer[5:]
        lm.save(fid_range=slice(None, 1), silent=True, strict=True) # layer[:1]
        # Only Pueblo & Honolulu counties should be present because of
        # the `unique` keyword. Have to set `order_by` on this QuerySet
        # or else MySQL will return a different ordering than the other dbs.
        qs = County.objects.order_by('name')
        self.assertEqual(2, qs.count())
        hi, co = tuple(qs)
        hi_idx, co_idx = tuple(map(NAMES.index, ('Honolulu', 'Pueblo')))
        self.assertEqual('Pueblo', co.name); self.assertEqual(NUMS[co_idx], len(co.mpoly))
        self.assertEqual('Honolulu', hi.name); self.assertEqual(NUMS[hi_idx], len(hi.mpoly))
        # Testing the `step` keyword -- should get the same counties
        # regardless of we use a step that divides equally, that is odd,
        # or that is larger than the dataset.
        for st in (4,7,1000):
            clear_counties()
            lm.save(step=st, strict=True)
            self.county_helper(county_feat=False)
    def test06_model_inheritance(self):
        "Tests LayerMapping on inherited models.  See #12093."
        icity_mapping = {'name' : 'Name',
                         'population' : 'Population',
                         'density' : 'Density',
                         'point' : 'POINT',
                         'dt' : 'Created',
                         }
        # Parent model has geometry field.
        lm1 = LayerMapping(ICity1, city_shp, icity_mapping)
        lm1.save()
        # Grandparent has geometry field.
        lm2 = LayerMapping(ICity2, city_shp, icity_mapping)
        lm2.save()
        # ICity1 rows include the ICity2 children (multi-table inheritance),
        # hence 6 vs 3.
        self.assertEqual(6, ICity1.objects.count())
        self.assertEqual(3, ICity2.objects.count())
    def test07_invalid_layer(self):
        "Tests LayerMapping on invalid geometries.  See #15378."
        invalid_mapping = {'point': 'POINT'}
        lm = LayerMapping(Invalid, invalid_shp, invalid_mapping,
                          source_srs=4326)
        lm.save(silent=True)
| |
"""
Find intermediate evalutation results in assert statements through builtin AST.
"""
import ast
import sys
import _pytest._code
import py
from _pytest.assertion import util
u = py.builtin._totext
class AssertionError(util.BuiltinAssertionError):
    """AssertionError replacement that computes a helpful failure message.

    When raised with no arguments, the offending ``assert`` source line is
    located via the caller's frame and reinterpreted to show intermediate
    values; the resulting explanation is stored in ``self.msg``.
    """
    def __init__(self, *args):
        util.BuiltinAssertionError.__init__(self, *args)
        if args:
            # on Python2.6 we get len(args)==2 for: assert 0, (x,y)
            # on Python2.7 and above we always get len(args) == 1
            # with args[0] being the (x,y) tuple.
            if len(args) > 1:
                toprint = args
            else:
                toprint = args[0]
            try:
                self.msg = u(toprint)
            except Exception:
                # The message object's __repr__/__str__ raised; fall back to
                # a placeholder so the assertion itself still reports.
                self.msg = u(
                    "<[broken __repr__] %s at %0xd>"
                    % (toprint.__class__, id(toprint)))
        else:
            # No message given: reconstruct the assert statement's source
            # from the frame that raised (one level up from __init__).
            f = _pytest._code.Frame(sys._getframe(1))
            try:
                source = f.code.fullsource
                if source is not None:
                    try:
                        source = source.getstatement(f.lineno, assertion=True)
                    except IndexError:
                        source = None
                    else:
                        source = str(source.deindent()).strip()
            except py.error.ENOENT:
                source = None
                # this can also occur during reinterpretation, when the
                # co_filename is set to "<run>".
            if source:
                self.msg = reinterpret(source, f, should_fail=True)
            else:
                self.msg = "<could not determine information>"
            if not self.args:
                self.args = (self.msg,)
# On Python 3, make the replacement class display as the builtin
# AssertionError in tracebacks.
if sys.version_info > (3, 0):
    AssertionError.__module__ = "builtins"
if sys.platform.startswith("java"):
    # Jython's ast nodes are not proper subclasses of ast.expr/ast.stmt,
    # so classify nodes by an explicit whitelist of class names instead.
    # See http://bugs.jython.org/issue1497
    _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict",
              "ListComp", "GeneratorExp", "Yield", "Compare", "Call",
              "Repr", "Num", "Str", "Attribute", "Subscript", "Name",
              "List", "Tuple")
    _stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign",
              "AugAssign", "Print", "For", "While", "If", "With", "Raise",
              "TryExcept", "TryFinally", "Assert", "Import", "ImportFrom",
              "Exec", "Global", "Expr", "Pass", "Break", "Continue")
    _expr_nodes = set(getattr(ast, name) for name in _exprs)
    _stmt_nodes = set(getattr(ast, name) for name in _stmts)
    def _is_ast_expr(node):
        # True if *node* is an expression node (whitelist membership).
        return node.__class__ in _expr_nodes
    def _is_ast_stmt(node):
        # True if *node* is a statement node (whitelist membership).
        return node.__class__ in _stmt_nodes
else:
    def _is_ast_expr(node):
        # CPython: expression nodes subclass ast.expr.
        return isinstance(node, ast.expr)
    def _is_ast_stmt(node):
        # CPython: statement nodes subclass ast.stmt.
        return isinstance(node, ast.stmt)
try:
    _Starred = ast.Starred
except AttributeError:
    # Python 2. Define a dummy class so isinstance() will always be False.
    class _Starred(object): pass
class Failure(Exception):
    """Raised when evaluating a sub-expression of an assert fails.

    Records an *explanation* of the failing sub-expression together with a
    snapshot of the exception currently being handled (``sys.exc_info()``),
    so callers can report the underlying error type and value.
    """
    def __init__(self, explanation=""):
        self.explanation = explanation
        # Capture the active exception (type, value, traceback), if any.
        self.cause = sys.exc_info()
def reinterpret(source, frame, should_fail=False):
    """Re-evaluate assert *source* within *frame* to explain its failure.

    Returns the formatted failure explanation, or - when *should_fail* is
    set but the re-run did not fail - a canned message suggesting the
    assertion is not deterministic.
    """
    tree = ast.parse(source)
    interpreter = DebugInterpreter(frame)
    try:
        interpreter.visit(tree)
    except Failure:
        failure = sys.exc_info()[1]
        return getfailure(failure)
    if should_fail:
        return ("(assertion failed, but when it was re-run for "
                "printing intermediate values, it did not fail.  Suggestions: "
                "compute assert expression before the assert or use --assert=plain)")
def run(offending_line, frame=None):
    """Reinterpret *offending_line*, defaulting *frame* to the caller's."""
    target = frame if frame is not None else _pytest._code.Frame(sys._getframe(1))
    return reinterpret(offending_line, target)
def getfailure(e):
    """Format a Failure *e* into a printable one-string error message."""
    explanation = util.format_explanation(e.explanation)
    exc_type, exc_value = e.cause[0], e.cause[1]
    # Append the underlying exception value to the first explanation line.
    if str(exc_value):
        first, sep, rest = explanation.partition('\n')
        explanation = "%s << %s%s%s" % (first, exc_value, sep, rest)
    text = "%s: %s" % (exc_type.__name__, explanation)
    # Drop the redundant "AssertionError: " prefix (16 characters) when the
    # explanation itself already starts with "assert ".
    if text.startswith('AssertionError: assert '):
        text = text[16:]
    return text
# Maps AST binary/comparison operator node classes to their source-level
# symbols, used when rebuilding expression text for explanations.
operator_map = {
    ast.BitOr : "|",
    ast.BitXor : "^",
    ast.BitAnd : "&",
    ast.LShift : "<<",
    ast.RShift : ">>",
    ast.Add : "+",
    ast.Sub : "-",
    ast.Mult : "*",
    ast.Div : "/",
    ast.FloorDiv : "//",
    ast.Mod : "%",
    ast.Eq : "==",
    ast.NotEq : "!=",
    ast.Lt : "<",
    ast.LtE : "<=",
    ast.Gt : ">",
    ast.GtE : ">=",
    ast.Pow : "**",
    ast.Is : "is",
    ast.IsNot : "is not",
    ast.In : "in",
    ast.NotIn : "not in"
}
# Maps AST unary operator node classes to a "%s" format pattern for the
# operand's explanation.
unary_map = {
    ast.Not : "not %s",
    ast.Invert : "~%s",
    ast.USub : "-%s",
    ast.UAdd : "+%s"
}
class DebugInterpreter(ast.NodeVisitor):
    """Interpret AST nodes to glean useful debugging information.

    Each visit_* method evaluates its node inside the captured frame and
    returns an ``(explanation, result)`` pair; evaluation errors are
    converted to Failure exceptions carrying the explanation so far.
    """
    def __init__(self, frame):
        # Frame in which sub-expressions are (re-)evaluated.
        self.frame = frame
    def generic_visit(self, node):
        # Fallback when we don't have a special implementation.
        if _is_ast_expr(node):
            mod = ast.Expression(node)
            co = self._compile(mod)
            try:
                result = self.frame.eval(co)
            except Exception:
                raise Failure()
            explanation = self.frame.repr(result)
            return explanation, result
        elif _is_ast_stmt(node):
            mod = ast.Module([node])
            co = self._compile(mod, "exec")
            try:
                self.frame.exec_(co)
            except Exception:
                raise Failure()
            # Statements yield no value and no explanation.
            return None, None
        else:
            raise AssertionError("can't handle %s" %(node,))
    def _compile(self, source, mode="eval"):
        # Compile *source* (an AST or string) for evaluation in the frame.
        return compile(source, "<assertion interpretation>", mode)
    def visit_Expr(self, expr):
        return self.visit(expr.value)
    def visit_Module(self, mod):
        for stmt in mod.body:
            self.visit(stmt)
    def visit_Name(self, name):
        explanation, result = self.generic_visit(name)
        # See if the name is local.
        source = "%r in locals() is not globals()" % (name.id,)
        co = self._compile(source)
        try:
            local = self.frame.eval(co)
        except Exception:
            # have to assume it isn't
            local = None
        # For non-local names just show the name itself, not its repr.
        if local is None or not self.frame.is_true(local):
            return name.id, result
        return explanation, result
    def visit_Compare(self, comp):
        left = comp.left
        left_explanation, left_result = self.visit(left)
        # Chained comparisons (a < b < c) are evaluated pairwise, stopping
        # at the first falsy link, mirroring Python semantics.
        for op, next_op in zip(comp.ops, comp.comparators):
            next_explanation, next_result = self.visit(next_op)
            op_symbol = operator_map[op.__class__]
            explanation = "%s %s %s" % (left_explanation, op_symbol,
                                        next_explanation)
            source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,)
            co = self._compile(source)
            try:
                result = self.frame.eval(co, __exprinfo_left=left_result,
                                         __exprinfo_right=next_result)
            except Exception:
                raise Failure(explanation)
            try:
                if not self.frame.is_true(result):
                    break
            except KeyboardInterrupt:
                raise
            except:
                break
            left_explanation, left_result = next_explanation, next_result
        # Let a registered comparison hook provide a richer explanation.
        if util._reprcompare is not None:
            res = util._reprcompare(op_symbol, left_result, next_result)
            if res:
                explanation = res
        return explanation, result
    def visit_BoolOp(self, boolop):
        is_or = isinstance(boolop.op, ast.Or)
        explanations = []
        # Short-circuit exactly like `and`/`or` do.
        for operand in boolop.values:
            explanation, result = self.visit(operand)
            explanations.append(explanation)
            if result == is_or:
                break
        name = is_or and " or " or " and "
        explanation = "(" + name.join(explanations) + ")"
        return explanation, result
    def visit_UnaryOp(self, unary):
        pattern = unary_map[unary.op.__class__]
        operand_explanation, operand_result = self.visit(unary.operand)
        explanation = pattern % (operand_explanation,)
        co = self._compile(pattern % ("__exprinfo_expr",))
        try:
            result = self.frame.eval(co, __exprinfo_expr=operand_result)
        except Exception:
            raise Failure(explanation)
        return explanation, result
    def visit_BinOp(self, binop):
        left_explanation, left_result = self.visit(binop.left)
        right_explanation, right_result = self.visit(binop.right)
        symbol = operator_map[binop.op.__class__]
        explanation = "(%s %s %s)" % (left_explanation, symbol,
                                      right_explanation)
        source = "__exprinfo_left %s __exprinfo_right" % (symbol,)
        co = self._compile(source)
        try:
            result = self.frame.eval(co, __exprinfo_left=left_result,
                                     __exprinfo_right=right_result)
        except Exception:
            raise Failure(explanation)
        return explanation, result
    def visit_Call(self, call):
        func_explanation, func = self.visit(call.func)
        arg_explanations = []
        # Arguments are bound to synthetic __exprinfo_* names so the call
        # can be re-executed with the already-computed values.
        ns = {"__exprinfo_func" : func}
        arguments = []
        for arg in call.args:
            arg_explanation, arg_result = self.visit(arg)
            if isinstance(arg, _Starred):
                arg_name = "__exprinfo_star"
                ns[arg_name] = arg_result
                arguments.append("*%s" % (arg_name,))
                arg_explanations.append("*%s" % (arg_explanation,))
            else:
                arg_name = "__exprinfo_%s" % (len(ns),)
                ns[arg_name] = arg_result
                arguments.append(arg_name)
                arg_explanations.append(arg_explanation)
        for keyword in call.keywords:
            arg_explanation, arg_result = self.visit(keyword.value)
            if keyword.arg:
                arg_name = "__exprinfo_%s" % (len(ns),)
                keyword_source = "%s=%%s" % (keyword.arg)
                arguments.append(keyword_source % (arg_name,))
                arg_explanations.append(keyword_source % (arg_explanation,))
            else:
                arg_name = "__exprinfo_kwds"
                arguments.append("**%s" % (arg_name,))
                arg_explanations.append("**%s" % (arg_explanation,))
            ns[arg_name] = arg_result
        # Legacy *args/**kwargs attributes (pre-3.5 Call nodes).
        if getattr(call, 'starargs', None):
            arg_explanation, arg_result = self.visit(call.starargs)
            arg_name = "__exprinfo_star"
            ns[arg_name] = arg_result
            arguments.append("*%s" % (arg_name,))
            arg_explanations.append("*%s" % (arg_explanation,))
        if getattr(call, 'kwargs', None):
            arg_explanation, arg_result = self.visit(call.kwargs)
            arg_name = "__exprinfo_kwds"
            ns[arg_name] = arg_result
            arguments.append("**%s" % (arg_name,))
            arg_explanations.append("**%s" % (arg_explanation,))
        args_explained = ", ".join(arg_explanations)
        explanation = "%s(%s)" % (func_explanation, args_explained)
        args = ", ".join(arguments)
        source = "__exprinfo_func(%s)" % (args,)
        co = self._compile(source)
        try:
            result = self.frame.eval(co, **ns)
        except Exception:
            raise Failure(explanation)
        pattern = "%s\n{%s = %s\n}"
        rep = self.frame.repr(result)
        explanation = pattern % (rep, rep, explanation)
        return explanation, result
    def _is_builtin_name(self, name):
        # True when *name* resolves neither locally nor globally, i.e. it
        # must come from the builtins.
        pattern = "%r not in globals() and %r not in locals()"
        source = pattern % (name.id, name.id)
        co = self._compile(source)
        try:
            return self.frame.eval(co)
        except Exception:
            return False
    def visit_Attribute(self, attr):
        if not isinstance(attr.ctx, ast.Load):
            return self.generic_visit(attr)
        source_explanation, source_result = self.visit(attr.value)
        explanation = "%s.%s" % (source_explanation, attr.attr)
        source = "__exprinfo_expr.%s" % (attr.attr,)
        co = self._compile(source)
        try:
            try:
                result = self.frame.eval(co, __exprinfo_expr=source_result)
            except AttributeError:
                # Maybe the attribute name needs to be mangled?
                if not attr.attr.startswith("__") or attr.attr.endswith("__"):
                    raise
                source = "getattr(__exprinfo_expr.__class__, '__name__', '')"
                co = self._compile(source)
                class_name = self.frame.eval(co, __exprinfo_expr=source_result)
                mangled_attr = "_" + class_name +  attr.attr
                source = "__exprinfo_expr.%s" % (mangled_attr,)
                co = self._compile(source)
                result = self.frame.eval(co, __exprinfo_expr=source_result)
        except Exception:
            raise Failure(explanation)
        explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result),
                                              self.frame.repr(result),
                                              source_explanation, attr.attr)
        # Check if the attr is from an instance.
        source = "%r in getattr(__exprinfo_expr, '__dict__', {})"
        source = source % (attr.attr,)
        co = self._compile(source)
        try:
            from_instance = self.frame.eval(co, __exprinfo_expr=source_result)
        except Exception:
            from_instance = None
        if from_instance is None or self.frame.is_true(from_instance):
            rep = self.frame.repr(result)
            pattern = "%s\n{%s = %s\n}"
            explanation = pattern % (rep, rep, explanation)
        return explanation, result
    def visit_Assert(self, assrt):
        test_explanation, test_result = self.visit(assrt.test)
        explanation = "assert %s" % (test_explanation,)
        if not self.frame.is_true(test_result):
            try:
                raise util.BuiltinAssertionError
            except Exception:
                raise Failure(explanation)
        return explanation, test_result
    def visit_Assign(self, assign):
        value_explanation, value_result = self.visit(assign.value)
        explanation = "... = %s" % (value_explanation,)
        # Rebuild the assignment with the pre-computed value bound to a
        # synthetic name, then execute it so the targets get assigned.
        name = ast.Name("__exprinfo_expr", ast.Load(),
                        lineno=assign.value.lineno,
                        col_offset=assign.value.col_offset)
        new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno,
                                col_offset=assign.col_offset)
        mod = ast.Module([new_assign])
        co = self._compile(mod, "exec")
        try:
            self.frame.exec_(co, __exprinfo_expr=value_result)
        except Exception:
            raise Failure(explanation)
        return explanation, value_result
| |
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import sys
vizdtestdir = sys.path[0]
import urllib2
import xmltodict
import json
import requests
import socket
from lxml import etree
from opserver.introspect_util import *
from opserver_results import *
from opserver.opserver_util import OpServerUtils
class VerificationOpsSrv (IntrospectUtilBase):
    """Client-side helper for querying a Contrail OpServer (analytics API).

    Wraps the REST endpoints under /analytics/ and returns either parsed
    dicts or Op*Result wrapper objects.
    """
    def __init__(self, ip, port=8081):
        super(VerificationOpsSrv, self).__init__(ip, port)
    def get_ops_vm(self, vm='default-virtual-machine'):
        """Fetch the UVE for a virtual machine, wrapped in OpVMResult."""
        vm_dict = self.dict_get('analytics/virtual-machine/' + vm)
        return OpVMResult(vm_dict)
    def get_ops_vn(self, vn='default-virtual-network'):
        """Fetch the UVE for a virtual network; None on failure."""
        res = None
        try:
            vn_dict = self.dict_get('analytics/virtual-network/' + vn)
            res = OpVNResult(vn_dict)
        except Exception as e:
            print e
        finally:
            # NOTE: return inside finally -- always returns res (None if
            # the fetch failed).
            return res
    def get_ops_collector(self, col=None):
        """Fetch the UVE for a collector (defaults to this host's name)."""
        if (col is None):
            col = socket.gethostname()
        res = None
        try:
            #import pdb; pdb.set_trace()
            col_dict = self.dict_get('analytics/collector/' + col)
            res = OpCollectorResult(col_dict)
        except Exception as e:
            print e
        finally:
            return res
    def send_tracebuffer_req(self, src, mod, instance, buf_name):
        """Request that a generator sends the named trace buffer."""
        return self.dict_get('analytics/send-tracebuffer/%s/%s/%s/%s' \
                             % (src, mod, instance, buf_name))
    def get_table_column_values(self, table, col_name):
        """List the distinct values of one column of an analytics table."""
        return self.dict_get('analytics/table/%s/column-values/%s' \
                             % (table, col_name))
    def uve_query(self, query):
        """Perform a raw GET against the /analytics/uves/ endpoint."""
        return self.dict_get('analytics/uves/%s' % query)
    def post_uve_request(self, table, json_body):
        """POST a UVE request; returns the parsed reply or None on error."""
        url = 'http://%s:%s/analytics/uves/%s' % (self._ip, str(self._port), table)
        try:
            res = OpServerUtils.post_url_http(url, json_body, sync=True)
            res = json.loads(res)
        except Exception as e:
            print 'Error: POST uve request: %s' % str(e)
            return None
        else:
            return res
    # end post_uve_request
    def get_alarms(self, query):
        """Perform a raw GET against the /analytics/alarms/ endpoint."""
        return self.dict_get('analytics/alarms/%s' % query)
    # end get_alarms
    def post_alarm_request(self, table, json_body):
        """POST an alarm request; returns the parsed reply or None on error."""
        url = 'http://%s:%s/analytics/alarms/%s' % (self._ip, str(self._port), table)
        try:
            res = OpServerUtils.post_url_http(url, json_body, sync=True)
            res = json.loads(res)
        except Exception as e:
            print 'Error: POST alarm request: %s' % str(e)
            return None
        else:
            return res
    # end post_alarm_request
    def get_redis_uve_info(self):
        """Fetch redis UVE info via the Sandesh introspect XML interface."""
        path = 'Snh_RedisUVERequest'
        xpath = '/RedisUVEResponse/redis_uve_info'
        p = self.dict_get(path, XmlDrv)
        return EtreeToDict(xpath).get_all_entry(p)
    def post_query_json(self, json_str, sync=True):
        '''
        this module is to support raw query given in json format
        '''
        res = None
        try:
            flows_url = OpServerUtils.opserver_query_url(self._ip, str(self._port))
            print flows_url
            print "query is: ", json_str
            res = []
            resp = OpServerUtils.post_url_http(flows_url, json_str, sync)
            if sync:
                if resp is not None:
                    res = json.loads(resp)
                    res = res['value']
            else:
                # Async: poll for the result using the returned query id.
                if resp is not None:
                    resp = json.loads(resp)
                    qid = resp['href'].rsplit('/', 1)[1]
                    result = OpServerUtils.get_query_result(self._ip, str(self._port), qid, 30)
                    for item in result:
                        res.append(item)
        except Exception as e:
            print str(e)
        finally:
            return res
    def post_purge_query_json(self, json_str, sync=True):
        '''
        this module is to support raw purge query given in json format
        '''
        res = None
        try:
            purge_request_url = \
                OpServerUtils.opserver_database_purge_query_url(
                    self._ip, str(self._port))
            print purge_request_url
            print "query is: ", json_str
            resp = OpServerUtils.post_url_http(
                       purge_request_url, json_str, sync)
            if resp is not None:
                res = json.loads(resp)
                res = res['status']
        except Exception as e:
            print str(e)
        finally:
            return res
    def post_query(self, table, start_time=None, end_time=None,
                   select_fields=None, where_clause=None,
                   sort_fields=None, sort=None, limit=None,
                   filter=None, sync=True,dir=None):
        """Build and POST a structured analytics query; list result or None."""
        res = None
        try:
            flows_url = OpServerUtils.opserver_query_url(
                self._ip, str(self._port))
            print flows_url
            query_dict = OpServerUtils.get_query_dict(
                table, start_time, end_time,
                select_fields,
                where_clause,
                sort_fields, sort, limit, filter, dir)
            print json.dumps(query_dict)
            res = []
            resp = OpServerUtils.post_url_http(
                flows_url, json.dumps(query_dict), sync)
            if sync:
                if resp is not None:
                    res = json.loads(resp)
                    res = res['value']
            else:
                # Async: poll for the result using the returned query id.
                if resp is not None:
                    resp = json.loads(resp)
                    qid = resp['href'].rsplit('/', 1)[1]
                    result = OpServerUtils.get_query_result(
                        self._ip, str(self._port), qid, 30)
                    for item in result:
                        res.append(item)
        except Exception as e:
            print str(e)
        finally:
            return res
if __name__ == '__main__':
    # Manual smoke test: queries a local OpServer and prints results. The
    # triple-quoted string literals below are no-op expression statements
    # that document sample expected output for each call.
    vns = VerificationOpsSrv('127.0.0.1')
    vn = vns.get_ops_vn(vn='abc-corp:vn02')
    print "*** Verify VN Cfg ***"
    print vn.get_attr('Config', 'attached_policies', 'abc-default-policy')
    '''
    [{u'vnp_major': u'10', u'vnp_name': u'abc-default-policy',
      u'vnp_minor': u'50'}]
    '''
    print vn.get_attr('Config', 'connected_networks')
    '''
    [u'abc-corp:vn04']
    '''
    print vn.get_attr('Config', 'total_interfaces')
    '''
    10
    '''
    print vn.get_attr('Config', 'total_acl_rules')
    '''
    60
    '''
    print "*** Verify VN Agt ***"
    print vn.get_attr('Agent', 'total_acl_rules')
    '''
    55
    '''
    print vn.get_attr('Agent', 'in_tpkts')
    '''
    240
    '''
    print vn.get_attr('Agent', 'in_stats', 'abc-corp:map-reduce-02')
    '''
    [{u'bytes': u'7200', u'other_vn': u'abc-corp:map-reduce-02',
      u'tpkts': u'60'}]
    '''
    vm = vns.get_ops_vm(vm='abc-corp:vm-web-fe01')
    print "*** Verify VM Cfg ***"
    print vm.get_attr('Config', 'vrouter')
    '''
    rack01-host04
    '''
    print vm.get_attr('Config', 'attached_groups')
    '''
    [u'abc-grp01']
    '''
    print vm.get_attr('Config', 'interface_list', 'abc-corp:vn-fe')
    '''
    [{u'virtual_network': u'abc-corp:vn-fe', u'ip_address': u'10.1.1.2',
      u'floating_ips': [u'67.1.1.2', u'67.1.1.3']}]
    '''
    print "*** Verify VM Agt ***"
    print vm.get_attr('Agent', 'vrouter')
    '''
    rack01-host04
    '''
    print vm.get_attr('Agent', 'attached_groups')
    '''
    [u'abc-grp01']
    '''
    print vm.get_attr('Agent', 'interface_list')
    '''
    [{u'in_bytes': u'1000', u'out_bytes': u'10000',
      u'floating_ips': [u'67.1.1.2', u'67.1.1.3'],
      u'out_pkts': u'20', u'virtual_network': u'abc-corp:vn-fe',
      u'in_pkts': u'5', u'ip_address': u'10.1.1.2'}]
    '''
    col = vns.get_ops_collector()
    print col.get_attr('Analytics', 'generator_infos')
    '''
    [{u'gen_attr': {u'http_port': u'8089', u'in_clear': u'false',
                    u'pid': u'57160', u'connects': u'1', u'clears': u'1',
                    u'resets': u'0'},
      u'source': u'sa-nc-mfg-30.static.jnpr.net',
      u'msgtype_stats': {u'SandeshStats':
                         [{u'bytes': u'1363005',
                           u'messages': u'431',
                           u'message_type': u'CollectorInfo'}]},
      u'module_id': u'Collector'},
     {u'gen_attr': {u'http_port': u'0', u'in_clear': u'false',
                    u'pid': u'0', u'connects': u'1', u'clears': u'0',
                    u'resets': u'0'},
      u'source': u'sa-nc-mfg-30.static.jnpr.net', u'msgtype_stats': {},
      u'module_id': u'OpServer'},
     {u'gen_attr': {u'http_port': u'8091', u'in_clear': u'false',
                    u'pid': u'57200', u'connects': u'2', u'clears': u'2',
                    u'resets': u'1'},
      u'source': u'sa-nc-mfg-30.static.jnpr.net',
      u'msgtype_stats': {u'SandeshStats': [{u'bytes': u'16771',
                                            u'messages': u'66',
                                            u'message_type': u'QELog'},
                                           {u'bytes': u'12912',
                                            u'messages': u'32',
                                            u'message_type': u'QEQueryLog'}]},
      u'module_id': u'QueryEngine'}]
    '''
    print col.get_attr('Analytics', 'generator_infos',
                       [('module_id', 'OpServer'),
                        ('source', "sa-nc-mfg-30.static.jnpr.net")])
    '''
    [{u'gen_attr': {u'http_port': u'0', u'in_clear': u'false', u'pid': u'0',
                    u'connects': u'1', u'clears': u'0', u'resets': u'0'},
      u'source': u'sa-nc-mfg-30.static.jnpr.net', u'msgtype_stats': {},
      u'module_id': u'OpServer'}]
    '''
    print col.get_attr('Analytics', 'cpu_info')
    '''
    {u'num_cpu': u'4', u'cpu_share': u'0.00833056',
     u'meminfo': {u'virt': u'2559582208', u'peakvirt': u'2559582208',
                  u'res': u'2805760'}}
    '''
| |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs perf tests.
Our buildbot infrastructure requires each slave to run steps serially.
This is sub-optimal for android, where these steps can run independently on
multiple connected devices.
The buildbots will run this script multiple times per cycle:
- First: all steps listed in --steps in will be executed in parallel using all
connected devices. Step results will be pickled to disk. Each step has a unique
name. The result code will be ignored if the step name is listed in
--flaky-steps.
The buildbot will treat this step as a regular step, and will not process any
graph data.
- Then, with -print-step STEP_NAME: at this stage, we'll simply print the file
with the step results previously saved. The buildbot will then process the graph
data accordingly.
The JSON steps file contains a dictionary in the format:
{ "version": int,
"steps": {
"foo": {
"device_affinity": int,
"cmd": "script_to_execute foo"
},
"bar": {
"device_affinity": int,
"cmd": "script_to_execute bar"
}
}
}
The JSON flaky steps file contains a list with step names which results should
be ignored:
[
"step_name_foo",
"step_name_bar"
]
Note that script_to_execute necessarily have to take at least the following
option:
--device: the serial number to be passed to all adb commands.
"""
import collections
import datetime
import json
import logging
import os
import pickle
import shutil
import sys
import tempfile
import threading
import time
from pylib import cmd_helper
from pylib import constants
from pylib import forwarder
from pylib.base import base_test_result
from pylib.base import base_test_runner
from pylib.device import battery_utils
from pylib.device import device_errors
def GetPersistedResult(test_name):
  """Load the pickled result of a previously executed step.

  Args:
    test_name: name of the step whose persisted result to load.

  Returns:
    The unpickled result, or None if no result file exists.
  """
  file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name)
  if not os.path.exists(file_name):
    logging.error('File not found %s', file_name)
    return None
  # open() replaces the deprecated Python-2-only file() builtin; binary
  # mode is the correct mode for pickle data, and pickle.load() streams
  # from the file instead of reading the whole thing into memory first.
  with open(file_name, 'rb') as f:
    return pickle.load(f)
def OutputJsonList(json_input, json_output):
  """Write a JSON summary of step names, affinities and timings.

  Args:
    json_input: path to the JSON steps file (format in module docstring).
    json_output: path to write a list of
        {test, device_affinity[, total_time]} dicts.

  Returns:
    0 (success exit code).
  """
  # open() replaces the deprecated file() builtin; items() replaces the
  # Python-2-only iteritems() (works on both Python 2 and 3).
  with open(json_input, 'r') as i:
    all_steps = json.load(i)
  step_values = []
  for step_name, step_config in all_steps['steps'].items():
    data = {'test': step_name, 'device_affinity': step_config['device_affinity']}
    persisted_result = GetPersistedResult(step_name)
    if persisted_result:
      data['total_time'] = persisted_result['total_time']
    step_values.append(data)
  with open(json_output, 'w') as o:
    o.write(json.dumps(step_values))
  return 0
def PrintTestOutput(test_name, json_file_name=None):
  """Helper method to print the output of previously executed test_name.

  Args:
    test_name: name of the test that has been previously executed.
    json_file_name: name of the file to output chartjson data to.

  Returns:
    exit code generated by the test step.
  """
  persisted_result = GetPersistedResult(test_name)
  if not persisted_result:
    return 1
  logging.info('*' * 80)
  logging.info('Output from:')
  logging.info(persisted_result['cmd'])
  logging.info('*' * 80)
  # Emit the captured step output to stdout so it lands in the buildbot log.
  print persisted_result['output']
  if json_file_name:
    # file() is the Python-2 builtin alias for open().
    with file(json_file_name, 'w') as f:
      f.write(persisted_result['chartjson'])
  return persisted_result['exit_code']
def PrintSummary(test_names):
  """Log a per-device timing summary for the given test steps.

  Args:
    test_names: iterable of step names whose persisted results to summarize.
  """
  logging.info('*' * 80)
  logging.info('Sharding summary')
  device_total_time = collections.defaultdict(int)
  for test_name in test_names:
    file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name)
    if not os.path.exists(file_name):
      logging.info('%s : No status file found', test_name)
      continue
    # open() in binary mode: pickle requires bytes, and file() is Python 2
    # only.
    with open(file_name, 'rb') as f:
      result = pickle.loads(f.read())
    logging.info('%s : exit_code=%d in %d secs at %s',
                 result['name'], result['exit_code'], result['total_time'],
                 result['device'])
    device_total_time[result['device']] += result['total_time']
  # items() works on both Python 2 and 3; iteritems() is Python 2 only.
  for device, device_time in device_total_time.items():
    logging.info('Total for device %s : %d secs', device, device_time)
  logging.info('Total steps time: %d secs', sum(device_total_time.values()))
class _HeartBeatLogger(object):
# How often to print the heartbeat on flush().
_PRINT_INTERVAL = 30.0
def __init__(self):
"""A file-like class for keeping the buildbot alive."""
self._len = 0
self._tick = time.time()
self._stopped = threading.Event()
self._timer = threading.Thread(target=self._runner)
self._timer.start()
def _runner(self):
while not self._stopped.is_set():
self.flush()
self._stopped.wait(_HeartBeatLogger._PRINT_INTERVAL)
def write(self, data):
self._len += len(data)
def flush(self):
now = time.time()
if now - self._tick >= _HeartBeatLogger._PRINT_INTERVAL:
self._tick = now
print '--single-step output length %d' % self._len
sys.stdout.flush()
def stop(self):
self._stopped.set()
class TestRunner(base_test_runner.BaseTestRunner):
  """Runs affinity-sharded perf test steps on one device, persisting results."""

  def __init__(self, test_options, device, shard_index, max_shard, tests,
               flaky_tests):
    """A TestRunner instance runs a perf test on a single device.

    Args:
      test_options: A PerfOptions object.
      device: Device to run the tests.
      shard_index: the index of this device.
      max_shard: the maximum shard index.
      tests: a dict mapping test_name to command.
      flaky_tests: a list of flaky test_name.
    """
    super(TestRunner, self).__init__(device, None)
    self._options = test_options
    self._shard_index = shard_index
    self._max_shard = max_shard
    self._tests = tests
    self._flaky_tests = flaky_tests
    # Temporary directory holding chartjson output; created lazily in
    # _LaunchPerfTest and removed by _CleanupOutputDirectory.
    self._output_dir = None
    self._device_battery = battery_utils.BatteryUtils(self.device)

  @staticmethod
  def _IsBetter(result):
    """Return True when |result| should replace the persisted one.

    A run is kept if it succeeded, if no previous result exists, or if its
    actual exit code improves on (is lower than) the stored one.
    """
    if result['actual_exit_code'] == 0:
      return True
    pickled = os.path.join(constants.PERF_OUTPUT_DIR,
                           result['name'])
    if not os.path.exists(pickled):
      return True
    with file(pickled, 'r') as f:
      previous = pickle.loads(f.read())
    return result['actual_exit_code'] < previous['actual_exit_code']

  @staticmethod
  def _SaveResult(result):
    """Pickle |result| into PERF_OUTPUT_DIR when it beats the stored run."""
    if TestRunner._IsBetter(result):
      with file(os.path.join(constants.PERF_OUTPUT_DIR,
                             result['name']), 'w') as f:
        f.write(pickle.dumps(result))

  def _CheckDeviceAffinity(self, test_name):
    """Returns True if test_name has affinity for this shard."""
    affinity = (self._tests['steps'][test_name]['device_affinity'] %
                self._max_shard)
    if self._shard_index == affinity:
      return True
    # NOTE(review): the message labels affinity/shard_index as
    # "affinity"/"device", which reads oddly — confirm intended wording.
    logging.info('Skipping %s on %s (affinity is %s, device is %s)',
                 test_name, self.device_serial, affinity, self._shard_index)
    return False

  def _CleanupOutputDirectory(self):
    # Best-effort removal of the temporary chartjson directory.
    if self._output_dir:
      shutil.rmtree(self._output_dir, ignore_errors=True)
      self._output_dir = None

  def _ReadChartjsonOutput(self):
    """Return the chartjson results written by the step, or '' on failure."""
    if not self._output_dir:
      return ''
    json_output_path = os.path.join(self._output_dir, 'results-chart.json')
    try:
      with open(json_output_path) as f:
        return f.read()
    except IOError:
      logging.exception('Exception when reading chartjson.')
      logging.error('This usually means that telemetry did not run, so it could'
                    ' not generate the file. Please check the device running'
                    ' the test.')
      return ''

  def _LaunchPerfTest(self, test_name):
    """Runs a perf test.

    Args:
      test_name: the name of the test to be executed.

    Returns:
      A tuple containing (Output, base_test_result.ResultType)
    """
    if not self._CheckDeviceAffinity(test_name):
      return '', base_test_result.ResultType.PASS

    # Reset forwarded ports and adbd so the step starts from a clean slate;
    # failures here are logged but not fatal.
    try:
      logging.warning('Unmapping device ports')
      forwarder.Forwarder.UnmapAllDevicePorts(self.device)
      self.device.old_interface.RestartAdbdOnDevice()
    except Exception as e:
      logging.error('Exception when tearing down device %s', e)

    # Every step command receives the serial of this shard's device.
    cmd = ('%s --device %s' %
           (self._tests['steps'][test_name]['cmd'],
            self.device_serial))

    if self._options.collect_chartjson_data:
      self._output_dir = tempfile.mkdtemp()
      cmd = cmd + ' --output-dir=%s' % self._output_dir

    logging.info(
        'temperature: %s (0.1 C)',
        str(self._device_battery.GetBatteryInfo().get('temperature')))
    if self._options.max_battery_temp:
      # Wait for the battery to cool down before running the benchmark.
      self._device_battery.LetBatteryCoolToTemperature(
          self._options.max_battery_temp)

    logging.info('%s : %s', test_name, cmd)
    start_time = datetime.datetime.now()

    timeout = self._tests['steps'][test_name].get('timeout', 5400)
    if self._options.no_timeout:
      timeout = None
    logging.info('Timeout for %s test: %s', test_name, timeout)
    full_cmd = cmd
    if self._options.dry_run:
      full_cmd = 'echo %s' % cmd

    logfile = sys.stdout
    if self._options.single_step:
      # Just print a heart-beat so that the outer buildbot scripts won't timeout
      # without response.
      logfile = _HeartBeatLogger()
    cwd = os.path.abspath(constants.DIR_SOURCE_ROOT)
    if full_cmd.startswith('src/'):
      # Commands addressed relative to 'src/' run from the parent checkout.
      cwd = os.path.abspath(os.path.join(constants.DIR_SOURCE_ROOT, os.pardir))
    try:
      exit_code, output = cmd_helper.GetCmdStatusAndOutputWithTimeout(
          full_cmd, timeout, cwd=cwd, shell=True, logfile=logfile)
      json_output = self._ReadChartjsonOutput()
    except cmd_helper.TimeoutError as e:
      exit_code = -1
      output = str(e)
      json_output = ''
    finally:
      self._CleanupOutputDirectory()
      if self._options.single_step:
        logfile.stop()
    end_time = datetime.datetime.now()
    if exit_code is None:
      exit_code = -1
    logging.info('%s : exit_code=%d in %d secs at %s',
                 test_name, exit_code, (end_time - start_time).seconds,
                 self.device_serial)

    if exit_code == 0:
      result_type = base_test_result.ResultType.PASS
    else:
      result_type = base_test_result.ResultType.FAIL
      # Since perf tests use device affinity, give the device a chance to
      # recover if it is offline after a failure. Otherwise, the master sharder
      # will remove it from the pool and future tests on this device will fail.
      try:
        self.device.WaitUntilFullyBooted(timeout=120)
      except device_errors.CommandTimeoutError as e:
        logging.error('Device failed to return after %s: %s' % (test_name, e))

    actual_exit_code = exit_code
    if test_name in self._flaky_tests:
      # The exit_code is used at the second stage when printing the
      # test output. If the test is flaky, force to "0" to get that step green
      # whilst still gathering data to the perf dashboards.
      # The result_type is used by the test_dispatcher to retry the test.
      exit_code = 0

    persisted_result = {
        'name': test_name,
        'output': output,
        'chartjson': json_output,
        'exit_code': exit_code,
        'actual_exit_code': actual_exit_code,
        'result_type': result_type,
        'total_time': (end_time - start_time).seconds,
        'device': self.device_serial,
        'cmd': cmd,
    }
    self._SaveResult(persisted_result)

    return (output, result_type)

  def RunTest(self, test_name):
    """Run a perf test on the device.

    Args:
      test_name: String to use for logging the test result.

    Returns:
      A tuple of (TestRunResults, retry).
    """
    _, result_type = self._LaunchPerfTest(test_name)
    results = base_test_result.TestRunResults()
    results.AddResult(base_test_result.BaseTestResult(test_name, result_type))
    retry = None
    if not results.DidRunPass():
      retry = test_name
    return results, retry
| |
# coding=utf-8
# Copyright 2017 The DLT2T Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for trainer binary."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
# Dependency imports
from DLT2T import models # pylint: disable=unused-import
from DLT2T.data_generators import all_problems # pylint: disable=unused-import
from DLT2T.utils import data_reader
from DLT2T.utils import decoding
from DLT2T.utils import devices
from DLT2T.utils import input_fn_builder
from DLT2T.utils import model_builder
from DLT2T.utils import registry
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.python import debug
flags = tf.flags
FLAGS = flags.FLAGS

flags.DEFINE_bool("registry_help", False,
                  "If True, logs the contents of the registry and exits.")
flags.DEFINE_bool("tfdbg", False,
                  "If True, use the TF debugger CLI on train/eval.")
flags.DEFINE_bool("export_saved_model", False,
                  "Whether to export a SavedModel for serving.")
flags.DEFINE_bool("dbgprofile", False,
                  "If True, record the timeline for chrome://tracing/.")
flags.DEFINE_string("model", "", "Which model to use.")
flags.DEFINE_string("hparams_set", "", "Which parameters to use.")
flags.DEFINE_string("hparams_range", "", "Parameters range.")
flags.DEFINE_string(
    "hparams", "",
    """A comma-separated list of `name=value` hyperparameter values. This flag
    is used to override hyperparameter settings either when manually selecting
    hyperparameters or when using Vizier. If a hyperparameter setting is
    specified by this flag then it must be a valid hyperparameter name for the
    model.""")
flags.DEFINE_string("problems", "", "Dash separated list of problems to "
                    "solve.")
flags.DEFINE_string("data_dir", None, "Directory with training data.")
# Help text typo fixed: "pretrian_B2A" -> "pretrain_B2A".
flags.DEFINE_string("train_mode", None,
                    "Train mode: pretrain_A2B, pretrain_B2A, or dual.")
flags.DEFINE_string("infer_mode", None, "Infer mode: A2B or B2A.")
flags.DEFINE_integer("train_steps", 250000,
                     "The number of steps to run training for.")
flags.DEFINE_bool("eval_run_autoregressive", False,
                  "Run eval autoregressively where we condition on previous"
                  "generated output instead of the actual target.")
flags.DEFINE_bool("eval_use_test_set", False,
                  "Whether to use the '-test' data for EVAL (and PREDICT).")
flags.DEFINE_integer("keep_checkpoint_max", 20,
                     "How many recent checkpoints to keep.")
flags.DEFINE_bool("experimental_optimize_placement", False,
                  "Optimize ops placement with experimental session options.")
flags.DEFINE_integer("keep_checkpoint_every_n_hours", 10000,
                     "Number of hours between each checkpoint to be saved. "
                     "The default value 10,000 hours effectively disables it.")
flags.DEFINE_integer("save_checkpoints_secs", 0,
                     "Save checkpoints every this many seconds. "
                     "Default=0 means let tensorflow.contrib.learn.python.learn"
                     " decide, which is currently set to 600 = 10 minutes.")
flags.DEFINE_bool("log_device_placement", False,
                  "Whether to log device placement.")

# Distributed training flags
flags.DEFINE_integer("local_eval_frequency", 2000,
                     "Run evaluation every this steps during local training.")
flags.DEFINE_bool("locally_shard_to_cpu", False,
                  "Use CPU as a sharding device running locally. This allows "
                  "to test sharded model construction on a machine with 1 GPU.")
flags.DEFINE_bool("daisy_chain_variables", True,
                  "copy variables around in a daisy chain")
flags.DEFINE_bool("sync", False, "Sync compute on PS.")
flags.DEFINE_string("worker_job", "/job:localhost", "name of worker job")
flags.DEFINE_integer("worker_gpu", 1, "How many GPUs to use.")
flags.DEFINE_integer("worker_replicas", 1, "How many workers to use.")
flags.DEFINE_integer("worker_id", 0, "Which worker task are we.")
flags.DEFINE_float("worker_gpu_memory_fraction", 0.95,
                   "Fraction of GPU memory to allocate.")
flags.DEFINE_integer("ps_gpu", 0, "How many GPUs to use per ps.")
flags.DEFINE_string("gpu_order", "", "Optional order for daisy-chaining gpus."
                    " e.g. \"1 3 2 4\"")
flags.DEFINE_string("ps_job", "/job:ps", "name of ps job")
flags.DEFINE_integer("ps_replicas", 0, "How many ps replicas.")

# Decoding flags
flags.DEFINE_string(
    "decode_hparams", "",
    "Comma-separated list of name=value pairs to control decode behavior. "
    "See decoding.decode_hparams for defaults.")
def make_experiment_fn(data_dir, model_name, train_steps, eval_steps):
  """Build an experiment_fn for learn_runner by closing over the settings.

  The returned callable has the (run_config, hparams) signature that
  learn_runner expects and forwards everything to create_experiment.
  """
  def experiment_fn(run_config, hparams):
    # Only run_config and hparams vary per call; the rest is fixed here.
    return create_experiment(
        data_dir,
        model_name=model_name,
        train_steps=train_steps,
        eval_steps=eval_steps,
        hparams=hparams,
        run_config=run_config)

  return experiment_fn
def create_experiment(data_dir, model_name, train_steps, eval_steps, hparams,
                      run_config):
  """Create Experiment.

  Builds the estimator and input functions, wires up the optional tfdbg and
  profiler hooks and an optional SavedModel export strategy, and returns a
  tf.contrib.learn.Experiment.

  Args:
    data_dir: directory containing the training data.
    model_name: registry name of the model to build.
    train_steps: number of training steps.
    eval_steps: number of evaluation steps.
    hparams: hyperparameters object passed through to the components.
    run_config: tf.contrib.learn RunConfig for the estimator.

  Returns:
    A tf.contrib.learn.Experiment.
  """
  estimator, input_fns = create_experiment_components(
      data_dir=data_dir,
      model_name=model_name,
      hparams=hparams,
      run_config=run_config)
  train_monitors = []
  eval_hooks = []
  if FLAGS.tfdbg:
    # Attach the interactive TF debugger CLI to both training and eval.
    hook = debug.LocalCLIDebugHook()
    train_monitors.append(hook)
    eval_hooks.append(hook)
  if FLAGS.dbgprofile:
    # Recorded traces can be visualized with chrome://tracing/
    # The memory/tensor lifetime is also profiled
    train_monitors.append(
        tf.contrib.hooks.ProfilerHook(
            save_steps=10,
            output_dir=run_config.model_dir,
            show_dataflow=True,
            show_memory=True,))
  optional_kwargs = {}
  if FLAGS.export_saved_model:
    # SavedModel export only supports a single problem per run.
    assert len(hparams.problem_instances) == 1
    problem = hparams.problem_instances[0]
    optional_kwargs["export_strategies"] = [
        make_export_strategy(problem, hparams)
    ]
  return tf.contrib.learn.Experiment(
      estimator=estimator,
      train_input_fn=input_fns[tf.estimator.ModeKeys.TRAIN],
      eval_input_fn=input_fns[tf.estimator.ModeKeys.EVAL],
      train_steps=train_steps,
      eval_steps=eval_steps,
      min_eval_frequency=FLAGS.local_eval_frequency,
      train_monitors=train_monitors,
      eval_hooks=eval_hooks,
      **optional_kwargs)
def make_export_strategy(problem, hparams):
  """Return a SavedModel export strategy for the given problem/hparams."""
  def serving_input_fn():
    # Deferred so the serving input fn is constructed at export time.
    return data_reader.serving_input_fn(problem, hparams)

  return tf.contrib.learn.make_export_strategy(serving_input_fn, as_text=True)
def create_experiment_components(data_dir, model_name, hparams, run_config):
  """Constructs and returns Estimator and train/eval input functions."""
  tf.logging.info("Creating experiment, storing model files in %s",
                  run_config.model_dir)

  # Resolve the --problems spec into hparams.problems/problem_instances.
  add_problem_hparams(hparams, FLAGS.problems)

  # hparams batch_size is used as minibatch size instead of tokens in batch
  batch_size = (hparams.use_fixed_batch_size and hparams.batch_size) or None
  num_datashards = devices.data_parallelism().n

  # Training input pipeline, sharded across workers.
  train_input_fn = input_fn_builder.build_input_fn(
      mode=tf.estimator.ModeKeys.TRAIN,
      train_mode=FLAGS.train_mode,
      infer_mode=FLAGS.infer_mode,
      hparams=hparams,
      data_dir=data_dir,
      num_datashards=num_datashards,
      worker_replicas=FLAGS.worker_replicas,
      worker_id=FLAGS.worker_id,
      batch_size=batch_size)

  # Eval pipeline; optionally reads the held-out "-test" split.
  eval_input_fn = input_fn_builder.build_input_fn(
      mode=tf.estimator.ModeKeys.EVAL,
      train_mode=FLAGS.train_mode,
      infer_mode=FLAGS.infer_mode,
      hparams=hparams,
      data_dir=data_dir,
      num_datashards=num_datashards,
      worker_replicas=FLAGS.worker_replicas,
      worker_id=FLAGS.worker_id,
      dataset_split="test" if FLAGS.eval_use_test_set else None)

  model_fn = model_builder.build_model_fn(
      model_name,
      train_mode=FLAGS.train_mode,
      infer_mode=FLAGS.infer_mode,
      problem_names=FLAGS.problems.split("-"),
      train_steps=FLAGS.train_steps,
      worker_id=FLAGS.worker_id,
      worker_replicas=FLAGS.worker_replicas,
      eval_run_autoregressive=FLAGS.eval_run_autoregressive,
      decode_hparams=decoding.decode_hparams(FLAGS.decode_hparams))

  estimator = tf.estimator.Estimator(
      model_fn=model_fn,
      model_dir=run_config.model_dir,
      params=hparams,
      config=run_config)

  return estimator, {
      tf.estimator.ModeKeys.TRAIN: train_input_fn,
      tf.estimator.ModeKeys.EVAL: eval_input_fn
  }
def log_registry():
  """Dump the registry contents and exit when --registry_help is set."""
  if not FLAGS.registry_help:
    return
  tf.logging.info(registry.help_string())
  sys.exit(0)
def add_problem_hparams(hparams, problems):
  """Resolve a dash-separated problem spec and attach hparams for each."""
  hparams.problems = []
  hparams.problem_instances = []
  for problem_name in problems.split("-"):
    try:
      problem = registry.problem(problem_name)
    except LookupError:
      # Build a readable message listing every registered problem name.
      message_lines = (
          ["%s not in the set of supported problems:" % problem_name] +
          sorted(registry.list_problems()))
      raise LookupError("\n * ".join(message_lines))

    p_hparams = problem.get_hparams(hparams)
    hparams.problem_instances.append(problem)
    hparams.problems.append(p_hparams)
def save_metadata(output_dir, hparams):
  """Saves FLAGS and hparams to output_dir."""
  # Save FLAGS in txt file
  if hasattr(FLAGS, "flags_into_string"):
    # Newer absl-style FLAGS: serialize everything, and separately serialize
    # just the flags declared by this module.
    flags_str = FLAGS.flags_into_string()
    t2t_flags_str = "\n".join([
        "--%s=%s" % (f.name, f.value)
        for f in FLAGS.flags_by_module_dict()[
            "DLT2T.utils.trainer_utils"]
    ])
  else:
    # Older FLAGS object: fall back to the raw __flags dict; no per-module
    # breakdown is available in that case.
    flags_dict = FLAGS.__dict__["__flags"]
    flags_str = "\n".join(
        ["--%s=%s" % (name, str(f)) for (name, f) in flags_dict.items()])
    t2t_flags_str = None

  flags_txt = os.path.join(output_dir, "flags.txt")
  with tf.gfile.Open(flags_txt, "w") as f:
    f.write(flags_str)

  if t2t_flags_str:
    t2t_flags_txt = os.path.join(output_dir, "flags_t2t.txt")
    with tf.gfile.Open(t2t_flags_txt, "w") as f:
      f.write(t2t_flags_str)

  # Save hparams as hparams.json
  hparams_fname = os.path.join(output_dir, "hparams.json")
  with tf.gfile.Open(hparams_fname, "w") as f:
    f.write(hparams.to_json())
def create_hparams(params_id, data_dir, passed_hparams=None):
  """Returns hyperparameters, including any flag value overrides.

  If the hparams FLAG is set, then it will use any values specified in
  hparams to override any individually-set hyperparameter. This logic
  allows tuners to override hyperparameter settings to find optimal values.

  Args:
    params_id: which set of parameters to choose (must be in _PARAMS above).
    data_dir: the directory containing the training data.
    passed_hparams: command-line overrides for some hparams.

  Returns:
    The hyperparameters as a tf.contrib.training.HParams object.
  """
  hparams = registry.hparams(params_id)()
  hparams.add_hparam("data_dir", data_dir)
  # Command line flags override any of the preceding hyperparameter values.
  if passed_hparams:
    return hparams.parse(passed_hparams)
  return hparams
def create_run_config(output_dir):
  """Assemble the tf.contrib.learn RunConfig from the command-line flags."""
  return tf.contrib.learn.RunConfig(
      model_dir=output_dir,
      master=FLAGS.master,
      gpu_memory_fraction=FLAGS.worker_gpu_memory_fraction,
      session_config=session_config(),
      keep_checkpoint_max=FLAGS.keep_checkpoint_max,
      keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours,
      save_checkpoints_secs=FLAGS.save_checkpoints_secs)
def run(data_dir, model, output_dir, train_steps, eval_steps, schedule):
  """Runs an Estimator locally or distributed.

  Args:
    data_dir: The directory the data can be found in.
    model: The name of the model to use.
    output_dir: The directory to store outputs in.
    train_steps: The number of steps to run training for.
    eval_steps: The number of steps to run evaluation for.
    schedule: (str) The schedule to run. The value here must
      be the name of one of Experiment's methods.
  """
  exp_fn = make_experiment_fn(
      data_dir=data_dir,
      model_name=model,
      train_steps=train_steps,
      eval_steps=eval_steps)

  # Create hparams and run_config
  run_config = create_run_config(output_dir)
  hparams = create_hparams(
      FLAGS.hparams_set, data_dir, passed_hparams=FLAGS.hparams)

  # Only the chief worker writes the flags/hparams metadata files.
  if is_chief():
    save_metadata(output_dir, hparams)

  learn_runner.run(
      experiment_fn=exp_fn,
      schedule=schedule,
      run_config=run_config,
      hparams=hparams)
def validate_flags():
  """Fail fast on missing required flags; default --output_dir with a warning."""
  required = [
      (FLAGS.model, "Must specify a model with --model."),
      (FLAGS.problems, "Must specify a set of problems with --problems."),
      (FLAGS.hparams_set or FLAGS.hparams_range,
       "Must specify either --hparams_set or --hparams_range."),
      (FLAGS.schedule, "Must specify --schedule."),
  ]
  for value, message in required:
    if not value:
      raise ValueError(message)
  if not FLAGS.output_dir:
    FLAGS.output_dir = "/tmp/DLT2T"
    tf.logging.warning("It is strongly recommended to specify --output_dir. "
                       "Using default output_dir=%s.", FLAGS.output_dir)
def is_chief():
  """True only for worker 0 when running one of the training schedules."""
  if FLAGS.worker_id != 0:
    return False
  return FLAGS.schedule in ("train", "train_and_evaluate")
def session_config():
  """The TensorFlow Session config to use."""
  # L1 optimizations with function inlining disabled.
  graph_options = tf.GraphOptions(optimizer_options=tf.OptimizerOptions(
      opt_level=tf.OptimizerOptions.L1, do_function_inlining=False))

  if FLAGS.experimental_optimize_placement:
    # Opt into the experimental graph rewriter (layout/pruning/constfold);
    # this replaces the graph_options built above.
    rewrite_options = tf.RewriterConfig(optimize_tensor_layout=True)
    rewrite_options.optimizers.append("pruning")
    rewrite_options.optimizers.append("constfold")
    rewrite_options.optimizers.append("layout")
    graph_options = tf.GraphOptions(
        rewrite_options=rewrite_options, infer_shapes=True)

  gpu_options = tf.GPUOptions(
      per_process_gpu_memory_fraction=FLAGS.worker_gpu_memory_fraction)

  config = tf.ConfigProto(
      allow_soft_placement=True,
      graph_options=graph_options,
      gpu_options=gpu_options,
      log_device_placement=FLAGS.log_device_placement)
  return config
| |
#!/usr/local/bin/python
"""
cryptography_utilities.py
@author Elliot and Erica
"""
import random
BYTE_LENGTH = 8
def decimal_to_binary(decimal):
    """Convert an integer into a binary string. E.g. 5 -> '101'."""
    return '{0:b}'.format(decimal)
def binary_to_decimal(binary):
    """Parse a string of bits as an unsigned base-2 integer ('101' -> 5)."""
    return int(binary, base=2)
def hex_to_binary(hex):
    """Convert a hexadecimal string into a binary string. E.g. 'A1'
    -> '10100001'.

    Leading zero bits are not preserved (the result is unpadded).
    """
    as_decimal = int(hex, 16)
    return format(as_decimal, 'b')
def string_to_binary(string):
    """Coerce a string of text into a binary string."""
    # Each character becomes its zero-padded 8-bit code point.
    return ''.join(format(ord(char), '08b') for char in string)
def binary_to_string(binary):
    """Coerce a binary string into a string of text.

    Consumes the input in 8-bit chunks, converting each to its character.
    """
    # range() instead of the Python 2-only xrange(); iteration is identical.
    return ''.join([chr(int(binary[place:place + 8], 2))
                    for place in range(0, len(binary), 8)])
def file_to_binary(path):
    """Open a file and dump the contents into a binary string."""
    with open(path, 'r') as handle:
        contents = handle.read()
    return string_to_binary(contents)
def binary_to_file(text, path):
    """Write a binary string into a file as ASCII text."""
    decoded = binary_to_string(text)
    with open(path, 'w') as handle:
        handle.write(decoded)
def left_pad(string, size):
    """Add zeros to the front of a string to reach a certain length.

    Note: str.zfill is sign-aware; for the unsigned bit strings used in this
    module it behaves as a plain left pad.
    """
    return string.zfill(size)
def right_pad(string, size):
    """Add zeros to the end of a string to reach a certain length."""
    missing = size - len(string)
    return string + '0' * missing
def wrap_bits_left(binary, amount):
    """Rotate the bits of a binary string to the left, wrapping around.
    E.g. wrap_bits_left('1011', 1) -> '0111'.
    """
    if not binary:
        return ''
    # Normalizing into [0, len) makes two slices equivalent to the rotation.
    shift = amount % len(binary)
    return binary[shift:] + binary[:shift]
def wrap_bits_right(binary, amount):
    """Rotate the bits of a binary string to the right, wrapping around.
    E.g. wrap_bits_right('1011', 1) -> '1101'.
    """
    if not binary:
        return ''
    # A right rotation by `amount` is a left rotation by the complement.
    shift = (-amount) % len(binary)
    return binary[shift:] + binary[:shift]
def shift_bits_left(binary, amount):
    """Add a specific number of zeroes to the end of a binary string."""
    kept = binary[amount:]
    # Zero-fill on the right back up to the original width.
    return kept + '0' * (len(binary) - len(kept))
def shift_bits_right(binary, amount):
    """Move the bits of a binary string to the right while maintaining
    the sign bit on the left.

    Fixed: amount == 0 now returns the string unchanged; previously
    binary[:-0] evaluated to the empty string.
    """
    if amount == 0:
        return binary
    sign_bit = binary[0]
    return (sign_bit * amount) + binary[:-amount]
def bitwise_operation(operation, binaries):
    """Generally apply a function to a list of binary strings. The
    operation should take two bit characters as input and output a
    single bit character.

    The output length equals the shortest input string.
    """
    from functools import reduce  # reduce is not a builtin on Python 3
    final_length = min(map(len, binaries))
    return ''.join(reduce(operation,
                          [binary[index] for binary in binaries])
                   for index in range(final_length))
def bitwise_xor(*binaries):
    """XOR any number of binary strings together; the result is as long
    as the shortest input.
    """
    def exclusive_or(left, right):
        differ = int(left) != int(right)
        return '1' if differ else '0'

    return bitwise_operation(exclusive_or, binaries)
def bitwise_and(*binaries):
    """AND any number of binary strings together; the result is as long
    as the shortest input.
    """
    def conjunction(left, right):
        both = int(left) & int(right)
        return '1' if both else '0'

    return bitwise_operation(conjunction, binaries)
def bitwise_or(*binaries):
    """OR any number of binary strings together; the result is as long
    as the shortest input.
    """
    def disjunction(left, right):
        either = int(left) | int(right)
        return '1' if either else '0'

    return bitwise_operation(disjunction, binaries)
def bitwise_not(binary):
    """Perform a unary NOT operation on the bits of a binary string."""
    # '0' maps to '1'; anything else (i.e. '1') maps to '0'.
    complement = {'0': '1'}
    return ''.join(complement.get(bit, '0') for bit in binary)
def pad_plaintext(text, block_size=64):
    """Make the length of the text evenly divisible by the block size by
    potentially padding with zeroes. The last byte of the result denotes
    the number of bytes added.
    """
    padding_amount = block_size - (len(text) % block_size)
    # Floor division keeps the value an int on Python 3 as well; a bare '/'
    # would produce a float there and break decimal_to_binary/left_pad.
    return text + left_pad(decimal_to_binary(padding_amount // BYTE_LENGTH),
                           padding_amount)
def unpad_plaintext(text):
    """Remove padding bits. The last byte of the text should indicate
    the number of bits to get rid of.
    """
    final_byte = text[-BYTE_LENGTH:]
    padding_bits = binary_to_decimal(final_byte) * BYTE_LENGTH
    return text[:-padding_bits]
def block_split(text, block_size=64):
    """Divide a string into a list of substrings.

    PRECONDITION: text % block_size == 0"""
    # range() instead of the Python 2-only xrange(); iteration is identical.
    return [text[index:index + block_size]
            for index in range(0, len(text), block_size)]
def rotate(list, places):
    """Shift the elements in a list. A positive place will move the list
    to the left, a negative place to the right."""
    head = list[places:]
    tail = list[:places]
    return head + tail
def fermat_test(n):
    """Statistically test the primality of a number using the Fermat
    algorithm (base 2).
    """
    # pow(2, n - 1, n) is modular exponentiation: it avoids materializing
    # the astronomically large 2**(n-1) the original expression computed.
    return pow(2, n - 1, n) == 1
def miller_rabin_test(n):
    """Statistically test the primality of a number using the Miller-Rabin
    algorithm (a single round with witness base 2).
    """
    # Factor n - 1 as 2**k * m with m odd.
    k, m = 0, n - 1
    while m % 2 == 0:
        k += 1
        m //= 2  # floor division: '/' would yield a float on Python 3
    # Modular pow keeps intermediates below n**2 instead of building 2**m.
    b = pow(2, m, n)
    if b == n - 1 or b == 1:
        return True
    b = pow(b, 2, n)
    if b == n - 1:
        return True
    for _ in range(2, k):
        b = pow(b, 2, n)
        if b == n - 1:
            return True
        if b == 1:
            return False
    return False
def primep(n):
    """Combine both the Fermat and Miller-Rabin primality tests into a
    single function. The predicate should indicate primality with a high
    likelihood of success.
    """
    if not fermat_test(n):
        return False
    return miller_rabin_test(n)
def random_number(bits):
    """Generate a random integer that will cleanly fit in a number of bits."""
    # Locals renamed so the max/min builtins are no longer shadowed.
    lower = 2 ** (bits - 1)
    upper = 2 ** bits - 1
    return random.randint(lower, upper)
def random_prime(bits):
    """Generate a random prime that will cleanly fit in a number of bits."""
    while True:
        candidate = random_number(bits)
        # Cheaply reject multiples of 2 and 3 before the full test.
        if candidate % 2 == 0 or candidate % 3 == 0:
            continue
        if primep(candidate):
            return candidate
def coprimep(x, y):
    """Return True if x and y are coprime, i.e. their gcd is 1."""
    return gcd(x, y) == 1
def gcd(a, b):
    """Greatest common divisor of a and b computed with the Euclidean
    algorithm.
    """
    # Iterative form of the classic recurrence gcd(a, b) = gcd(b, a % b).
    while b:
        a, b = b, a % b
    return a
def extended_gcd(a, b):
    """Extended Euclidean algorithm. Provides a tuple of (g, x, y)
    from ax + by = gcd(a, b).
    """
    # Iterative formulation: track remainder and Bezout coefficients in
    # lockstep instead of recursing.
    old_r, r = a, b
    old_s, s = 1, 0
    old_t, t = 0, 1
    while r:
        q = old_r // r
        old_r, r = r, old_r - q * r
        old_s, s = s, old_s - q * s
        old_t, t = t, old_t - q * t
    return (old_r, old_s, old_t)
def modular_inverse(x, modulus):
    """Compute x^-1 (mod modulus).

    The Bezout coefficient from extended_gcd may be negative; reducing it
    into [0, modulus) yields the canonical residue (the unreduced value was
    still a valid inverse, just possibly negative). Only meaningful when
    gcd(x, modulus) == 1.
    """
    return extended_gcd(x, modulus)[1] % modulus
def modular_sqrt(x, modulus):
    """Compute sqrt(x) (mod modulus). The modulus must be a prime
    number (and this shortcut formula requires modulus % 4 == 3).
    """
    # Floor division: '/' would make the exponent a float on Python 3.
    potential_sqrt = pow(x, (modulus + 1) // 4, modulus)
    # Verify the candidate with modular pow instead of squaring in full.
    if pow(potential_sqrt, 2, modulus) == x % modulus:
        return int(potential_sqrt)
    raise AssertionError('Composite modulus')
def random_relative_prime(prime, bits):
    """Find a number relatively prime (gcd of 1) number randomly."""
    lower = 2 ** (bits - 1)
    upper = 2 ** bits - 1
    while True:
        candidate = random.randint(lower, upper)
        if gcd(candidate, prime) == 1:
            return candidate
def group_exponentiation(base, power, n):
    """Raise base to power modulo n via square-and-multiply.

    Note: despite the original docstring, this is plain modular integer
    exponentiation, not GF(2^8) field arithmetic.
    """
    bits = format(power, 'b')
    result = 1
    for bit_index, bit in zip(range(len(bits) - 1, -1, -1), bits):
        if bit == '1':
            # pow(..., n) reduces each factor modulo n, and reducing the
            # running product keeps intermediates small; the original built
            # base**(2**bit_index) in full before reducing.
            result = result * pow(base, 2 ** bit_index, n) % n
    return result % n
| |
# Copyright 2009 Shikhar Bhushan
# Copyright 2011 Leonidas Poulopoulos
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module is a thin layer of abstraction around the library.
It exposes all core functionality.
"""
from ncclient import capabilities
from ncclient import operations
from ncclient import transport
import six
import logging
from ncclient.xml_ import *
logger = logging.getLogger('ncclient.manager')

# Maps method names exposed on Manager to the RPC class implementing them;
# OpExecutor turns each entry into a Manager method.
OPERATIONS = {
    "get": operations.Get,
    "get_config": operations.GetConfig,
    "get_schema": operations.GetSchema,
    "dispatch": operations.Dispatch,
    "edit_config": operations.EditConfig,
    "copy_config": operations.CopyConfig,
    "validate": operations.Validate,
    "commit": operations.Commit,
    "discard_changes": operations.DiscardChanges,
    "delete_config": operations.DeleteConfig,
    "lock": operations.Lock,
    "unlock": operations.Unlock,
    "create_subscription": operations.CreateSubscription,
    "close_session": operations.CloseSession,
    "kill_session": operations.KillSession,
    "poweroff_machine": operations.PoweroffMachine,
    "reboot_machine": operations.RebootMachine,
}

"""
Dictionary of base method names and corresponding :class:`~ncclient.operations.RPC`
subclasses. It is used to lookup operations, e.g. `get_config` is mapped to
:class:`~ncclient.operations.GetConfig`. It is thus possible to add additional
operations to the :class:`Manager` API.
"""

# Populated by connect_ssh/connect_ioproc from the device handler; consumed
# by OpExecutor.__call__ to add vendor-specific Manager methods.
VENDOR_OPERATIONS = {}
def make_device_handler(device_params):
    """
    Create a device handler object that provides device specific parameters and
    functions, which are called in various places throughout our code.
    If no device_params are defined or the "name" in the parameter dict is not
    known then a default handler will be returned.
    """
    params = device_params if device_params is not None else {}
    device_name = params.get("name", "default")
    # Device handlers live in "ncclient.devices.<devicename>" as a class
    # named "<Devicename>DeviceHandler" (first letter capitalized).
    module_name = "ncclient.devices.%s" % device_name
    class_name = "%sDeviceHandler" % device_name.capitalize()
    root_module = __import__(module_name)
    handler_module = getattr(getattr(root_module, "devices"), device_name)
    handler_class = getattr(handler_module, class_name)
    return handler_class(params)
def connect_ssh(*args, **kwds):
    """
    Initialize a :class:`Manager` over the SSH transport.
    For documentation of arguments see :meth:`ncclient.transport.SSHSession.connect`.

    The underlying :class:`ncclient.transport.SSHSession` is created with
    :data:`CAPABILITIES`. It is first instructed to
    :meth:`~ncclient.transport.SSHSession.load_known_hosts` and then
    all the provided arguments are passed directly to its implementation
    of :meth:`~ncclient.transport.SSHSession.connect`.

    To invoke advanced vendor related operation add device_params =
    {'name':'<vendor_alias>'} in connection paramerers. For the time,
    'junos' and 'nexus' are supported for Juniper and Cisco Nexus respectively.
    """
    # Extract device parameter dict, if it was passed into this function. Need to
    # remove it from kwds, since the session.connect() doesn't like extra stuff in
    # there.
    if "device_params" in kwds:
        device_params = kwds["device_params"]
        del kwds["device_params"]
    else:
        device_params = None
    device_handler = make_device_handler(device_params)
    device_handler.add_additional_ssh_connect_params(kwds)
    # Vendor operations become Manager methods via OpExecutor.__call__.
    global VENDOR_OPERATIONS
    VENDOR_OPERATIONS.update(device_handler.add_additional_operations())
    session = transport.SSHSession(device_handler)
    # Host-key verification defaults to on unless explicitly disabled.
    if "hostkey_verify" not in kwds or kwds["hostkey_verify"]:
        session.load_known_hosts()
    try:
        session.connect(*args, **kwds)
    except Exception as ex:
        # Close the transport (if it came up) before re-raising the error.
        if session.transport:
            session.close()
        raise
    return Manager(session, device_handler, **kwds)
def connect_ioproc(*args, **kwds):
    """
    Initialize a :class:`Manager` over a third-party IOProc transport.

    The transport implementation is looked up by vendor name in
    ``ncclient.transport.third_party.<name>.ioproc``; consequently
    ``device_params`` with a ``'name'`` key is mandatory.

    :raises ValueError: if ``device_params`` is missing or has no
        ``'name'`` key.
    """
    device_params = kwds.pop("device_params", None)
    # Bug fix: previously the third-party module was only imported when
    # device_params was supplied, leaving `third_party_import` undefined
    # otherwise and crashing later with an opaque NameError. Fail fast with
    # a clear error instead.
    if not device_params or 'name' not in device_params:
        raise ValueError(
            "connect_ioproc requires device_params with a 'name' key to "
            "locate the third-party transport module")
    import_string = 'ncclient.transport.third_party.'
    import_string += device_params['name'] + '.ioproc'
    third_party_import = __import__(import_string, fromlist=['IOProc'])
    device_handler = make_device_handler(device_params)
    global VENDOR_OPERATIONS
    VENDOR_OPERATIONS.update(device_handler.add_additional_operations())
    session = third_party_import.IOProc(device_handler)
    session.connect()
    return Manager(session, device_handler, **kwds)
def connect(*args, **kwds):
    """
    Dispatch to the appropriate transport-specific connect function.

    A local Junos connection (host 'localhost' with
    ``device_params={'name': 'junos', 'local': True}``) goes over the
    IOProc transport; everything else uses SSH.
    """
    if "host" not in kwds:
        return None
    device_params = kwds.get('device_params', {})
    local_junos = (kwds["host"] == 'localhost'
                   and device_params.get('name') == 'junos'
                   and device_params.get('local'))
    if local_junos:
        return connect_ioproc(*args, **kwds)
    return connect_ssh(*args, **kwds)
class OpExecutor(type):
    """Metaclass that exposes every registered NETCONF operation as a
    Manager method.

    At class-creation time each entry of the OPERATIONS registry is wrapped
    so that ``manager.<op_name>(...)`` dispatches to
    ``manager.execute(op_cls, ...)``. Vendor-specific operations
    (VENDOR_OPERATIONS, populated by the device handler during connect) are
    attached in __call__, i.e. every time a Manager is instantiated, so they
    reflect the most recent connect().
    """

    def __new__(cls, name, bases, attrs):
        def make_wrapper(op_cls):
            # Closure factory binds op_cls per iteration; a bare inner
            # function in the loop would late-bind to the last operation.
            def wrapper(self, *args, **kwds):
                return self.execute(op_cls, *args, **kwds)
            wrapper.__doc__ = op_cls.request.__doc__
            return wrapper
        for op_name, op_cls in six.iteritems(OPERATIONS):
            attrs[op_name] = make_wrapper(op_cls)
        return super(OpExecutor, cls).__new__(cls, name, bases, attrs)

    def __call__(cls, *args, **kwargs):
        # Runs on Manager(...) instantiation: graft any vendor operations
        # registered since class creation onto the class object itself.
        def make_wrapper(op_cls):
            def wrapper(self, *args, **kwds):
                return self.execute(op_cls, *args, **kwds)
            wrapper.__doc__ = op_cls.request.__doc__
            return wrapper
        if VENDOR_OPERATIONS:
            for op_name, op_cls in six.iteritems(VENDOR_OPERATIONS):
                setattr(cls, op_name, make_wrapper(op_cls))
        return super(OpExecutor, cls).__call__(*args, **kwargs)
class Manager(six.with_metaclass(OpExecutor, object)):
    """
    For details on the expected behavior of the operations and their
    parameters refer to :rfc:`4741`.

    Manager instances are also context managers so you can use it like this::

        with manager.connect("host") as m:
            # do your stuff

    ... or like this::

        m = manager.connect("host")
        try:
            # do your stuff
        finally:
            m.close_session()
    """

    # __metaclass__ = OpExecutor

    def __init__(self, session, device_handler, timeout=30, *args, **kwargs):
        # Transport session, already connected by the connect_* helpers.
        self._session = session
        # Operations run synchronously unless async_mode is enabled.
        self._async_mode = False
        self._timeout = timeout
        self._raise_mode = operations.RaiseMode.ALL
        self._device_handler = device_handler

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close_session()
        # Returning False lets any in-flight exception propagate.
        return False

    def __set_timeout(self, timeout):
        self._timeout = timeout

    def __set_async_mode(self, mode):
        self._async_mode = mode

    def __set_raise_mode(self, mode):
        assert(mode in (operations.RaiseMode.NONE, operations.RaiseMode.ERRORS, operations.RaiseMode.ALL))
        self._raise_mode = mode

    def execute(self, cls, *args, **kwds):
        # Instantiate the operation class with the manager's current
        # mode/timeout settings and run its request().
        # NOTE(review): 'async' became a reserved word in Python 3.7, which
        # makes this call a SyntaxError on modern interpreters; upstream
        # ncclient renamed the parameter. Left unchanged here because the
        # operation classes' signatures are outside this file.
        return cls(self._session,
                   device_handler=self._device_handler,
                   async=self._async_mode,
                   timeout=self._timeout,
                   raise_mode=self._raise_mode).request(*args, **kwds)

    def locked(self, target):
        """Returns a context manager for a lock on a datastore, where
        *target* is the name of the configuration datastore to lock, e.g.::

            with m.locked("running"):
                # do your stuff

        ... instead of::

            m.lock("running")
            try:
                # do your stuff
            finally:
                m.unlock("running")
        """
        return operations.LockContext(self._session, self._device_handler, target)

    def scp(self):
        return self._session.scp()

    def session(self):
        raise NotImplementedError

    def __getattr__(self, method):
        """Parse args/kwargs correctly in order to build XML element"""
        # Fallback for RPCs without a registered operation class: the
        # attribute name becomes the element name ('get_config' ->
        # 'get-config') and each positional arg becomes a child element.
        def _missing(*args, **kwargs):
            m = method.replace('_', '-')
            root = new_ele(m)
            if args:
                for arg in args:
                    sub_ele(root, arg)
            r = self.rpc(root)
            return r
        return _missing

    def take_notification(self, block=True, timeout=None):
        """Attempt to retrieve one notification from the queue of received
        notifications.

        If block is True, the call will wait until a notification is
        received.

        If timeout is a number greater than 0, the call will wait that
        many seconds to receive a notification before timing out.

        If there is no notification available when block is False or
        when the timeout has elapsed, None will be returned.

        Otherwise a :class:`~ncclient.operations.notify.Notification`
        object will be returned.
        """
        return self._session.take_notification(block, timeout)

    @property
    def client_capabilities(self):
        """:class:`~ncclient.capabilities.Capabilities` object representing
        the client's capabilities."""
        return self._session._client_capabilities

    @property
    def server_capabilities(self):
        """:class:`~ncclient.capabilities.Capabilities` object representing
        the server's capabilities."""
        return self._session._server_capabilities

    @property
    def channel_id(self):
        return self._session._channel_id

    @property
    def channel_name(self):
        return self._session._channel_name

    @property
    def session_id(self):
        """`session-id` assigned by the NETCONF server."""
        return self._session.id

    @property
    def connected(self):
        """Whether currently connected to the NETCONF server."""
        return self._session.connected

    # Property objects are created at class scope so the name-mangled
    # private setters above can be referenced directly.
    async_mode = property(fget=lambda self: self._async_mode,
                          fset=__set_async_mode)
    """Specify whether operations are executed asynchronously (`True`) or
    synchronously (`False`) (the default)."""

    timeout = property(fget=lambda self: self._timeout, fset=__set_timeout)
    """Specify the timeout for synchronous RPC requests."""

    raise_mode = property(fget=lambda self: self._raise_mode,
                          fset=__set_raise_mode)
    """Specify which errors are raised as :exc:`~ncclient.operations.RPCError`
    exceptions. Valid values are the constants defined in
    :class:`~ncclient.operations.RaiseMode`.
    The default value is :attr:`~ncclient.operations.RaiseMode.ALL`."""
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Generate MXNet implementation of CapsNet
Reference 1: https://www.cs.toronto.edu/~fritz/absps/transauto6.pdf
Reference 2: https://arxiv.org/pdf/1710.09829.pdf
"""
import os
import re
import gzip
import struct
import numpy as np
import scipy.ndimage as ndi
import mxnet as mx
from capsulelayers import primary_caps, CapsuleLayer
from mxboard import SummaryWriter
def margin_loss(y_true, y_pred):
    """Margin loss from the CapsNet paper (Sabour et al., 2017).

    Penalizes the true-class capsule for length below 0.9 and every other
    capsule for length above 0.1 (down-weighted by 0.5).
    """
    present = y_true * mx.sym.square(mx.sym.maximum(0., 0.9 - y_pred))
    absent = 0.5 * (1 - y_true) * mx.sym.square(mx.sym.maximum(0., y_pred - 0.1))
    # Sum per-class terms for each sample, then average over the batch.
    return mx.sym.mean(data=mx.sym.sum(present + absent, 1))
def capsnet(batch_size, n_class, num_routing, recon_loss_weight):
    """Create the CapsNet symbol graph.

    :param batch_size: per-device batch size (symbol shapes are fixed to it)
    :param n_class: number of digit classes (10 for MNIST)
    :param num_routing: dynamic-routing iterations in the capsule layer
    :param recon_loss_weight: weight of the reconstruction (decoder) loss
        relative to the margin loss
    :return: symbol group of (gradient-blocked capsule lengths, total loss,
        gradient-blocked reconstruction error)
    """
    # data.shape = [batch_size, 1, 28, 28]
    data = mx.sym.Variable('data')
    input_shape = (1, 28, 28)
    # Conv2D layer
    # net.shape = [batch_size, 256, 20, 20]
    conv1 = mx.sym.Convolution(data=data,
                               num_filter=256,
                               kernel=(9, 9),
                               layout='NCHW',
                               name='conv1')
    conv1 = mx.sym.Activation(data=conv1, act_type='relu', name='conv1_act')
    # net.shape = [batch_size, 256, 6, 6]
    primarycaps = primary_caps(data=conv1,
                               dim_vector=8,
                               n_channels=32,
                               kernel=(9, 9),
                               strides=[2, 2],
                               name='primarycaps')
    # Shape inference acts as an early sanity check of the graph.
    primarycaps.infer_shape(data=(batch_size, 1, 28, 28))
    # CapsuleLayer
    kernel_initializer = mx.init.Xavier(rnd_type='uniform', factor_type='avg', magnitude=3)
    bias_initializer = mx.init.Zero()
    digitcaps = CapsuleLayer(num_capsule=10,
                             dim_vector=16,
                             batch_size=batch_size,
                             kernel_initializer=kernel_initializer,
                             bias_initializer=bias_initializer,
                             num_routing=num_routing)(primarycaps)
    # out_caps : (batch_size, 10) -- capsule vector lengths used as scores.
    out_caps = mx.sym.sqrt(data=mx.sym.sum(mx.sym.square(digitcaps), 2))
    out_caps.infer_shape(data=(batch_size, 1, 28, 28))
    y = mx.sym.Variable('softmax_label', shape=(batch_size,))
    y_onehot = mx.sym.one_hot(y, n_class)
    y_reshaped = mx.sym.Reshape(data=y_onehot, shape=(batch_size, -4, n_class, -1))
    y_reshaped.infer_shape(softmax_label=(batch_size,))
    # inputs_masked : (batch_size, 16)
    # Select only the true class's capsule as the decoder input.
    inputs_masked = mx.sym.linalg_gemm2(y_reshaped, digitcaps, transpose_a=True)
    inputs_masked = mx.sym.Reshape(data=inputs_masked, shape=(-3, 0))
    # Reconstruction decoder: two hidden FC layers, sigmoid pixel output.
    x_recon = mx.sym.FullyConnected(data=inputs_masked, num_hidden=512, name='x_recon')
    x_recon = mx.sym.Activation(data=x_recon, act_type='relu', name='x_recon_act')
    x_recon = mx.sym.FullyConnected(data=x_recon, num_hidden=1024, name='x_recon2')
    x_recon = mx.sym.Activation(data=x_recon, act_type='relu', name='x_recon_act2')
    x_recon = mx.sym.FullyConnected(data=x_recon, num_hidden=np.prod(input_shape), name='x_recon3')
    x_recon = mx.sym.Activation(data=x_recon, act_type='sigmoid', name='x_recon_act3')
    data_flatten = mx.sym.flatten(data=data)
    squared_error = mx.sym.square(x_recon-data_flatten)
    recon_error = mx.sym.mean(squared_error)
    # BlockGrad exposes the reconstruction error as a metric output without
    # adding a second gradient path (it already contributes via `loss`).
    recon_error_stopped = recon_error
    recon_error_stopped = mx.sym.BlockGrad(recon_error_stopped)
    loss = mx.symbol.MakeLoss((1-recon_loss_weight)*margin_loss(y_onehot, out_caps)+recon_loss_weight*recon_error)
    out_caps_blocked = out_caps
    out_caps_blocked = mx.sym.BlockGrad(out_caps_blocked)
    return mx.sym.Group([out_caps_blocked, loss, recon_error_stopped])
def download_data(url, force_download=False):
    """Return the local filename for *url*, downloading only when needed.

    The file is stored in the current directory under the URL's last path
    component; an existing copy is reused unless *force_download* is set.
    """
    fname = url.rsplit("/", 1)[-1]
    if not os.path.exists(fname) or force_download:
        mx.test_utils.download(url, fname)
    return fname
def read_data(label_url, image_url):
    """Download and parse a gzipped IDX label/image file pair (MNIST format).

    :param label_url: URL of the gzipped IDX1 label file
    :param image_url: URL of the gzipped IDX3 image file
    :return: tuple ``(label, image)`` where ``label`` is an int8 vector of
        length N and ``image`` is a uint8 array of shape (N, rows, cols)
    """
    with gzip.open(download_data(label_url)) as flbl:
        # IDX1 header: magic number and item count, big-endian uint32s.
        magic, num = struct.unpack(">II", flbl.read(8))
        label = np.frombuffer(flbl.read(), dtype=np.int8)
    with gzip.open(download_data(image_url), 'rb') as fimg:
        # IDX3 header adds row/column dimensions.
        magic, num, rows, cols = struct.unpack(">IIII", fimg.read(16))
        image = np.frombuffer(fimg.read(), dtype=np.uint8)
        # Bug fix: the original called np.reshape(image, len(label),
        # (rows, cols)) -- an invalid argument list -- and discarded the
        # result, leaving `image` 1-D so to4d() downstream would fail.
        image = image.reshape(len(label), rows, cols)
    return label, image
def to4d(img):
    """Scale pixel bytes to [0, 1] float32 shaped (N, 1, 28, 28)."""
    scaled = img.astype(np.float32) / 255
    return scaled.reshape(scaled.shape[0], 1, 28, 28)
class LossMetric(mx.metric.EvalMetric):
    """Evaluate the loss function.

    Tracks running accuracy, total loss and reconstruction loss across an
    epoch, plus per-batch counterparts used for progress logging.
    """

    def __init__(self, batch_size, num_gpus):
        super(LossMetric, self).__init__('LossMetric')
        self.batch_size = batch_size
        self.num_gpu = num_gpus
        # Epoch-level accumulators.
        self.sum_metric = 0
        self.num_inst = 0
        self.loss = 0.0
        # Most recent batch values, reported by get_batch_log().
        self.batch_sum_metric = 0
        self.batch_num_inst = 0
        self.batch_loss = 0.0
        self.recon_loss = 0.0
        self.n_batch = 0

    def update(self, labels, preds):
        """Update the hyper-parameters and loss of CapsNet"""
        # preds[0]: capsule lengths (class scores), preds[1]: total loss,
        # preds[2]: reconstruction error -- the output order of capsnet().
        batch_sum_metric = 0
        batch_num_inst = 0
        for label, pred_outcaps in zip(labels[0], preds[0]):
            label_np = int(label.asnumpy())
            pred_label = int(np.argmax(pred_outcaps.asnumpy()))
            batch_sum_metric += int(label_np == pred_label)
            batch_num_inst += 1
        batch_loss = preds[1].asnumpy()
        recon_loss = preds[2].asnumpy()
        self.sum_metric += batch_sum_metric
        self.num_inst += batch_num_inst
        self.loss += batch_loss
        self.recon_loss += recon_loss
        self.batch_sum_metric = batch_sum_metric
        self.batch_num_inst = batch_num_inst
        self.batch_loss = batch_loss
        self.n_batch += 1

    def get_name_value(self):
        # Epoch accuracy plus per-batch means of the two losses. The loss
        # accumulators hold numpy values, so the means are array-valued.
        acc = float(self.sum_metric)/float(self.num_inst)
        mean_loss = self.loss / float(self.n_batch)
        mean_recon_loss = self.recon_loss / float(self.n_batch)
        return acc, mean_loss, mean_recon_loss

    def get_batch_log(self, n_batch):
        # Print the latest batch's accuracy/loss, then clear batch counters.
        print("n_batch :"+str(n_batch)+" batch_acc:" +
              str(float(self.batch_sum_metric) / float(self.batch_num_inst)) +
              ' batch_loss:' + str(float(self.batch_loss)/float(self.batch_num_inst)))
        self.batch_sum_metric = 0
        self.batch_num_inst = 0
        self.batch_loss = 0.0

    def reset(self):
        # NOTE(review): only epoch-level accumulators are cleared here; the
        # batch_* fields are reset by get_batch_log() itself -- presumably
        # intentional, confirm against the training loop before changing.
        self.sum_metric = 0
        self.num_inst = 0
        self.loss = 0.0
        self.recon_loss = 0.0
        self.n_batch = 0
class SimpleLRScheduler(mx.lr_scheduler.LRScheduler):
    """A simple lr schedule that simply return `dynamic_lr`. We will set `dynamic_lr`
    dynamically based on performance on the validation set.
    """

    def __init__(self, learning_rate=0.001):
        super(SimpleLRScheduler, self).__init__()
        # Mutated externally by do_training() to implement per-epoch decay.
        self.learning_rate = learning_rate

    def __call__(self, num_update):
        # `num_update` is deliberately ignored; the current externally-set
        # rate is always returned.
        return self.learning_rate
def do_training(num_epoch, optimizer, kvstore, learning_rate, model_prefix, decay):
    """Perform CapsNet training.

    NOTE(review): relies on module-level globals created in the __main__
    block (`args`, `module`, `train_iter`, `val_iter`, `loss_metric`);
    it is not callable as a standalone function.

    :param num_epoch: number of epochs to train
    :param optimizer: optimizer name understood by MXNet (e.g. 'adam')
    :param kvstore: kvstore spec for gradient aggregation (e.g. 'device')
    :param learning_rate: initial learning rate
    :param model_prefix: checkpoint filename prefix
    :param decay: per-epoch multiplicative learning-rate decay factor
    """
    summary_writer = SummaryWriter(args.tblog_dir)
    # The scheduler just echoes `learning_rate`; decay is applied manually
    # at the end of each epoch by mutating the attribute below.
    lr_scheduler = SimpleLRScheduler(learning_rate)
    optimizer_params = {'lr_scheduler': lr_scheduler}
    module.init_params()
    module.init_optimizer(kvstore=kvstore,
                          optimizer=optimizer,
                          optimizer_params=optimizer_params)
    n_epoch = 0
    while True:
        if n_epoch >= num_epoch:
            break
        train_iter.reset()
        val_iter.reset()
        loss_metric.reset()
        # Training pass: forward/backward, update, accumulate metrics.
        for n_batch, data_batch in enumerate(train_iter):
            module.forward_backward(data_batch)
            module.update()
            module.update_metric(loss_metric, data_batch.label)
            loss_metric.get_batch_log(n_batch)
        train_acc, train_loss, train_recon_err = loss_metric.get_name_value()
        loss_metric.reset()
        # Validation pass: forward only, no parameter updates.
        for n_batch, data_batch in enumerate(val_iter):
            module.forward(data_batch)
            module.update_metric(loss_metric, data_batch.label)
            loss_metric.get_batch_log(n_batch)
        val_acc, val_loss, val_recon_err = loss_metric.get_name_value()
        summary_writer.add_scalar('train_acc', train_acc, n_epoch)
        summary_writer.add_scalar('train_loss', train_loss, n_epoch)
        summary_writer.add_scalar('train_recon_err', train_recon_err, n_epoch)
        summary_writer.add_scalar('val_acc', val_acc, n_epoch)
        summary_writer.add_scalar('val_loss', val_loss, n_epoch)
        summary_writer.add_scalar('val_recon_err', val_recon_err, n_epoch)
        print('Epoch[%d] train acc: %.4f loss: %.6f recon_err: %.6f' % (n_epoch, train_acc, train_loss,
                                                                        train_recon_err))
        print('Epoch[%d] val acc: %.4f loss: %.6f recon_err: %.6f' % (n_epoch, val_acc, val_loss, val_recon_err))
        print('SAVE CHECKPOINT')
        module.save_checkpoint(prefix=model_prefix, epoch=n_epoch)
        n_epoch += 1
        # Exponential decay applied out-of-band via the scheduler attribute.
        lr_scheduler.learning_rate = learning_rate * (decay ** n_epoch)
def apply_transform(x, transform_matrix, fill_mode='nearest', cval=0.):
    """Apply a 3x3 homogeneous affine transform to a channel-first array.

    Each channel (axis 0) is transformed independently with nearest-neighbor
    interpolation (order=0); out-of-bounds pixels are filled per *fill_mode*.
    """
    # Split the homogeneous matrix into its linear part and translation.
    linear_part = transform_matrix[:2, :2]
    translation = transform_matrix[:2, 2]
    transformed = [
        ndi.affine_transform(channel,
                             linear_part,
                             translation,
                             order=0,
                             mode=fill_mode,
                             cval=cval)
        for channel in x
    ]
    return np.stack(transformed, axis=0)
def random_shift(x, width_shift_fraction, height_shift_fraction):
    """Translate a channel-first image by a random fraction of its size.

    Shift amounts are drawn uniformly from +/- the given fractions of the
    image height/width; pixels shifted in from outside use 'nearest' fill.
    """
    tx = np.random.uniform(-height_shift_fraction, height_shift_fraction) * x.shape[2]
    ty = np.random.uniform(-width_shift_fraction, width_shift_fraction) * x.shape[1]
    # Homogeneous 3x3 translation matrix consumed by apply_transform.
    shift_matrix = np.array([[1, 0, tx],
                             [0, 1, ty],
                             [0, 0, 1]])
    return apply_transform(x, shift_matrix, 'nearest')
def _shuffle(data, idx):
    """Return (name, ndarray) pairs with rows permuted according to *idx*."""
    return [(name, mx.ndarray.array(arr.asnumpy()[idx], arr.context))
            for name, arr in data]
class MNISTCustomIter(mx.io.NDArrayIter):
    """Create custom iterator of mnist dataset.

    Extends NDArrayIter with per-epoch shuffling and, for training batches,
    a random-shift augmentation.

    NOTE(review): __init__ does not call NDArrayIter.__init__, yet the
    methods below use inherited state (`idx`, `num_data`,
    `last_batch_handle`, `iter_next`, `getdata`/`getlabel`/`getpad`) --
    presumably initialized elsewhere by the base class machinery; confirm
    before reusing this iterator outside this script.
    """

    def __init__(self, data, label, batch_size, shuffle):
        # Intentionally bypasses the parent initializer; see class NOTE.
        self.data = data
        self.label = label
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.cursor = None

    def reset(self):
        """Reset the cursor to the start and reshuffle training data."""
        # shuffle data
        if self.is_train:
            np.random.shuffle(self.idx)
            self.data = _shuffle(self.data, self.idx)
            self.label = _shuffle(self.label, self.idx)
        # Park the cursor one batch before the first sample so that the
        # first iter_next() advances onto batch 0.
        if self.last_batch_handle == 'roll_over' and self.cursor > self.num_data:
            self.cursor = -self.batch_size + (self.cursor % self.num_data) % self.batch_size
        else:
            self.cursor = -self.batch_size

    def set_is_train(self, is_train):
        """Set training flag"""
        self.is_train = is_train

    def next(self):
        """Generate next of iterator"""
        if self.iter_next():
            if self.is_train:
                # Augment each training image with a small random shift.
                data_raw_list = self.getdata()
                data_shifted = []
                for data_raw in data_raw_list[0]:
                    data_shifted.append(random_shift(data_raw.asnumpy(), 0.1, 0.1))
                return mx.io.DataBatch(data=[mx.nd.array(data_shifted)], label=self.getlabel(),
                                       pad=self.getpad(), index=None)
            else:
                return mx.io.DataBatch(data=self.getdata(), label=self.getlabel(), pad=self.getpad(), index=None)
        else:
            raise StopIteration
if __name__ == "__main__":
    # Read mnist data set
    path = 'http://yann.lecun.com/exdb/mnist/'
    (train_lbl, train_img) = read_data(path + 'train-labels-idx1-ubyte.gz', path + 'train-images-idx3-ubyte.gz')
    (val_lbl, val_img) = read_data(path + 't10k-labels-idx1-ubyte.gz', path + 't10k-images-idx3-ubyte.gz')
    # set batch size
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', default=100, type=int)
    parser.add_argument('--devices', default='gpu0', type=str)
    parser.add_argument('--num_epoch', default=100, type=int)
    parser.add_argument('--lr', default=0.001, type=float)
    parser.add_argument('--num_routing', default=3, type=int)
    parser.add_argument('--model_prefix', default='capsnet', type=str)
    parser.add_argument('--decay', default=0.9, type=float)
    parser.add_argument('--tblog_dir', default='tblog', type=str)
    parser.add_argument('--recon_loss_weight', default=0.392, type=float)
    args = parser.parse_args()
    # Echo the effective configuration for reproducibility.
    for k, v in sorted(vars(args).items()):
        print("{0}: {1}".format(k, v))
    # Parse --devices (e.g. 'gpu0,gpu1' or 'cpu') into MXNet contexts.
    contexts = re.split(r'\W+', args.devices)
    for i, ctx in enumerate(contexts):
        if ctx[:3] == 'gpu':
            contexts[i] = mx.context.gpu(int(ctx[3:]))
        else:
            contexts[i] = mx.context.cpu()
    # `num_gpu` is really the device count (CPU contexts included).
    num_gpu = len(contexts)
    if args.batch_size % num_gpu != 0:
        raise Exception('num_gpu should be positive divisor of batch_size')
    # generate train_iter, val_iter
    train_iter = MNISTCustomIter(data=to4d(train_img), label=train_lbl, batch_size=int(args.batch_size), shuffle=True)
    train_iter.set_is_train(True)
    val_iter = MNISTCustomIter(data=to4d(val_img), label=val_lbl, batch_size=int(args.batch_size), shuffle=True)
    val_iter.set_is_train(False)
    # define capsnet; each device processes batch_size/num_gpu samples
    final_net = capsnet(batch_size=int(args.batch_size/num_gpu),
                        n_class=10,
                        num_routing=args.num_routing,
                        recon_loss_weight=args.recon_loss_weight)
    # set metric
    loss_metric = LossMetric(args.batch_size/num_gpu, 1)
    # run model
    module = mx.mod.Module(symbol=final_net, context=contexts, data_names=('data',), label_names=('softmax_label',))
    module.bind(data_shapes=train_iter.provide_data,
                label_shapes=val_iter.provide_label,
                for_training=True)
    do_training(num_epoch=args.num_epoch, optimizer='adam', kvstore='device', learning_rate=args.lr,
                model_prefix=args.model_prefix, decay=args.decay)
| |
"""Test the VLC media player Telnet config flow."""
from __future__ import annotations
from typing import Any
from unittest.mock import patch
from aiovlc.exceptions import AuthError, ConnectError
import pytest
from homeassistant import config_entries
from homeassistant.components.vlc_telnet.const import DOMAIN
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from tests.common import MockConfigEntry
# mypy: allow-untyped-calls
@pytest.mark.parametrize(
    "input_data, entry_data",
    [
        # Fully specified connection info is stored verbatim.
        (
            {
                "password": "test-password",
                "host": "1.1.1.1",
                "port": 8888,
            },
            {
                "password": "test-password",
                "host": "1.1.1.1",
                "port": 8888,
            },
        ),
        # Omitted host/port fall back to the config-flow defaults.
        (
            {
                "password": "test-password",
            },
            {
                "password": "test-password",
                "host": "localhost",
                "port": 4212,
            },
        ),
    ],
)
async def test_user_flow(
    hass: HomeAssistant, input_data: dict[str, Any], entry_data: dict[str, Any]
) -> None:
    """Test successful user flow."""
    # Starting the flow without input must first show the form.
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == RESULT_TYPE_FORM
    assert result["errors"] is None
    # Patch out the VLC telnet client so no real connection is attempted,
    # and patch setup so we can assert the entry is set up exactly once.
    with patch("homeassistant.components.vlc_telnet.config_flow.Client.connect"), patch(
        "homeassistant.components.vlc_telnet.config_flow.Client.login"
    ), patch(
        "homeassistant.components.vlc_telnet.config_flow.Client.disconnect"
    ), patch(
        "homeassistant.components.vlc_telnet.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            input_data,
        )
        await hass.async_block_till_done()
    # The created entry is titled by host and stores the resolved data.
    assert result["type"] == RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == entry_data["host"]
    assert result["data"] == entry_data
    assert len(mock_setup_entry.mock_calls) == 1
async def test_import_flow(hass: HomeAssistant) -> None:
    """Test successful import flow."""
    test_data = {
        "password": "test-password",
        "host": "1.1.1.1",
        "port": 8888,
        "name": "custom name",
    }
    # Patch out the VLC telnet client so no real connection is attempted.
    with patch("homeassistant.components.vlc_telnet.config_flow.Client.connect"), patch(
        "homeassistant.components.vlc_telnet.config_flow.Client.login"
    ), patch(
        "homeassistant.components.vlc_telnet.config_flow.Client.disconnect"
    ), patch(
        "homeassistant.components.vlc_telnet.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data=test_data,
        )
        await hass.async_block_till_done()
    # Imported YAML config creates the entry directly, titled by "name".
    assert result["type"] == RESULT_TYPE_CREATE_ENTRY
    assert result["title"] == test_data["name"]
    assert result["data"] == test_data
    assert len(mock_setup_entry.mock_calls) == 1
@pytest.mark.parametrize(
    "source", [config_entries.SOURCE_USER, config_entries.SOURCE_IMPORT]
)
async def test_abort_already_configured(hass: HomeAssistant, source: str) -> None:
    """Test we handle already configured host."""
    # Pre-register an entry for the same host so the new flow collides.
    data = {
        "password": "test-password",
        "host": "1.1.1.1",
        "port": 8888,
        "name": "custom name",
    }
    MockConfigEntry(domain=DOMAIN, data=data).add_to_hass(hass)

    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": source},
        data=data,
    )

    # Both user and import flows must abort rather than duplicate the entry.
    assert result["type"] == RESULT_TYPE_ABORT
    assert result["reason"] == "already_configured"
@pytest.mark.parametrize(
    "source", [config_entries.SOURCE_USER, config_entries.SOURCE_IMPORT]
)
@pytest.mark.parametrize(
    "error, connect_side_effect, login_side_effect",
    [
        # Each client failure mode maps to a distinct form error key.
        ("invalid_auth", None, AuthError),
        ("cannot_connect", ConnectError, None),
        ("unknown", Exception, None),
    ],
)
async def test_errors(
    hass: HomeAssistant,
    error: str,
    connect_side_effect: Exception | None,
    login_side_effect: Exception | None,
    source: str,
) -> None:
    """Test we handle form errors."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": source}
    )
    # Make the fake client fail at connect or login time.
    with patch(
        "homeassistant.components.vlc_telnet.config_flow.Client.connect",
        side_effect=connect_side_effect,
    ), patch(
        "homeassistant.components.vlc_telnet.config_flow.Client.login",
        side_effect=login_side_effect,
    ), patch(
        "homeassistant.components.vlc_telnet.config_flow.Client.disconnect"
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {"password": "test-password"},
        )
    # The flow re-renders the form with the mapped error, not an abort.
    assert result2["type"] == RESULT_TYPE_FORM
    assert result2["errors"] == {"base": error}
async def test_reauth_flow(hass: HomeAssistant) -> None:
    """Test successful reauth flow."""
    entry_data = {
        "password": "old-password",
        "host": "1.1.1.1",
        "port": 8888,
        "name": "custom name",
    }
    entry = MockConfigEntry(domain=DOMAIN, data=entry_data)
    entry.add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={
            "source": config_entries.SOURCE_REAUTH,
            "entry_id": entry.entry_id,
            "unique_id": entry.unique_id,
        },
        data=entry_data,
    )
    with patch("homeassistant.components.vlc_telnet.config_flow.Client.connect"), patch(
        "homeassistant.components.vlc_telnet.config_flow.Client.login"
    ), patch(
        "homeassistant.components.vlc_telnet.config_flow.Client.disconnect"
    ), patch(
        "homeassistant.components.vlc_telnet.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {"password": "new-password"},
        )
        await hass.async_block_till_done()
    # Reauth aborts the flow and updates the stored password in place.
    assert result["type"] == RESULT_TYPE_ABORT
    assert result["reason"] == "reauth_successful"
    assert len(mock_setup_entry.mock_calls) == 1
    assert dict(entry.data) == {**entry_data, "password": "new-password"}
@pytest.mark.parametrize(
    "error, connect_side_effect, login_side_effect",
    [
        # Each client failure mode maps to a distinct form error key.
        ("invalid_auth", None, AuthError),
        ("cannot_connect", ConnectError, None),
        ("unknown", Exception, None),
    ],
)
async def test_reauth_errors(
    hass: HomeAssistant,
    error: str,
    connect_side_effect: Exception | None,
    login_side_effect: Exception | None,
) -> None:
    """Test we handle reauth errors."""
    entry_data = {
        "password": "old-password",
        "host": "1.1.1.1",
        "port": 8888,
        "name": "custom name",
    }
    entry = MockConfigEntry(domain=DOMAIN, data=entry_data)
    entry.add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={
            "source": config_entries.SOURCE_REAUTH,
            "entry_id": entry.entry_id,
            "unique_id": entry.unique_id,
        },
        data=entry_data,
    )
    # Make the fake client fail at connect or login time.
    with patch(
        "homeassistant.components.vlc_telnet.config_flow.Client.connect",
        side_effect=connect_side_effect,
    ), patch(
        "homeassistant.components.vlc_telnet.config_flow.Client.login",
        side_effect=login_side_effect,
    ), patch(
        "homeassistant.components.vlc_telnet.config_flow.Client.disconnect"
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {"password": "test-password"},
        )
    # The reauth form is re-shown with the mapped error.
    assert result2["type"] == RESULT_TYPE_FORM
    assert result2["errors"] == {"base": error}
async def test_hassio_flow(hass: HomeAssistant) -> None:
    """Test successful hassio flow."""
    with patch("homeassistant.components.vlc_telnet.config_flow.Client.connect"), patch(
        "homeassistant.components.vlc_telnet.config_flow.Client.login"
    ), patch(
        "homeassistant.components.vlc_telnet.config_flow.Client.disconnect"
    ), patch(
        "homeassistant.components.vlc_telnet.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        test_data = {
            "password": "test-password",
            "host": "1.1.1.1",
            "port": 8888,
            "name": "custom name",
            "addon": "vlc",
        }
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_HASSIO},
            data=test_data,
        )
        await hass.async_block_till_done()
        # Hassio discovery first shows a confirmation form...
        assert result["type"] == RESULT_TYPE_FORM
        # ...and submitting it (no extra input needed) creates the entry.
        result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
        assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
        assert result2["title"] == test_data["name"]
        assert result2["data"] == test_data
        assert len(mock_setup_entry.mock_calls) == 1
async def test_hassio_already_configured(hass: HomeAssistant) -> None:
    """Test successful hassio flow."""
    data = {
        "password": "test-password",
        "host": "1.1.1.1",
        "port": 8888,
        "name": "custom name",
        "addon": "vlc",
    }
    # The hassio discovery flow uses a fixed "hassio" unique id, so a second
    # discovery for an already-registered add-on must abort.
    MockConfigEntry(domain=DOMAIN, data=data, unique_id="hassio").add_to_hass(hass)

    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": config_entries.SOURCE_HASSIO},
        data=data,
    )
    await hass.async_block_till_done()

    assert result["type"] == RESULT_TYPE_ABORT
@pytest.mark.parametrize(
    "error, connect_side_effect, login_side_effect",
    [
        # Each client failure mode maps to a distinct abort reason.
        ("invalid_auth", None, AuthError),
        ("cannot_connect", ConnectError, None),
        ("unknown", Exception, None),
    ],
)
async def test_hassio_errors(
    hass: HomeAssistant,
    error: str,
    connect_side_effect: Exception | None,
    login_side_effect: Exception | None,
) -> None:
    """Test we handle hassio errors."""
    # Make the fake client fail at connect or login time.
    with patch(
        "homeassistant.components.vlc_telnet.config_flow.Client.connect",
        side_effect=connect_side_effect,
    ), patch(
        "homeassistant.components.vlc_telnet.config_flow.Client.login",
        side_effect=login_side_effect,
    ), patch(
        "homeassistant.components.vlc_telnet.config_flow.Client.disconnect"
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_HASSIO},
            data={
                "password": "test-password",
                "host": "1.1.1.1",
                "port": 8888,
                "name": "custom name",
                "addon": "vlc",
            },
        )
        await hass.async_block_till_done()
        assert result["type"] == RESULT_TYPE_FORM
        # Unlike the user flow, hassio discovery aborts with the error reason.
        result2 = await hass.config_entries.flow.async_configure(result["flow_id"], {})
        assert result2["type"] == RESULT_TYPE_ABORT
        assert result2["reason"] == error
| |
# -*- test-case-name: xquotient.test.test_inbox -*-
import itertools
from datetime import timedelta
from zope.interface import implements
from twisted.python.components import registerAdapter
from twisted.internet import defer
from twisted.internet.task import coiterate
from nevow import tags as T, inevow, athena
from nevow.page import renderer
from nevow.athena import expose, LiveElement
from axiom.item import Item, transacted, declareLegacyItem
from axiom import tags
from axiom import attributes
from axiom.upgrade import registerUpgrader, registerAttributeCopyingUpgrader
from axiom.dependency import dependsOn, installOn, installedOn, _DependencyConnector
from xmantissa import ixmantissa, webnav, people, webtheme
from xmantissa.webapp import PrivateApplication
from xmantissa.fragmentutils import dictFillSlots
from xmantissa.publicresource import getLoader
from xmantissa.scrolltable import Scrollable, ScrollableView
from xquotient import renderers, spam
from xquotient.filter import Focus
from xquotient.exmess import (Message, getMessageSources, MailboxSelector,
MessageActions, ActionlessMessageDetail)
from xquotient.exmess import (READ_STATUS, UNREAD_STATUS, CLEAN_STATUS,
INBOX_STATUS, ARCHIVE_STATUS, DEFERRED_STATUS,
OUTBOX_STATUS, BOUNCED_STATUS, SENT_STATUS,
SPAM_STATUS, TRASH_STATUS, DRAFT_STATUS,
FOCUS_STATUS)
from xquotient.compose import Composer, ComposeFragment
from xquotient.mail import MessageSource, DeliveryAgent
from xquotient.quotientapp import QuotientPreferenceCollection, MessageDisplayPreferenceCollection
# Views that the user may select. Every entry except u'all' is a message
# status constant from xquotient.exmess; u'all' is a pseudo-view that is
# mapped onto CLEAN_STATUS when building a MailboxSelector.
VIEWS = [FOCUS_STATUS, INBOX_STATUS, ARCHIVE_STATUS, u'all', DEFERRED_STATUS,
         DRAFT_STATUS, OUTBOX_STATUS, BOUNCED_STATUS, SENT_STATUS, SPAM_STATUS,
         TRASH_STATUS]

# The subset of all views that should use 'touch-once' message order; oldest
# messages first
TOUCH_ONCE_VIEWS = [INBOX_STATUS, FOCUS_STATUS]
def _viewSelectionToMailboxSelector(store, viewSelection):
    """
    Convert a 'view selection' object, sent from the client, into a
    MailboxSelector object which will be used to view the mailbox.

    @param store: an L{axiom.store.Store} that contains some messages.

    @param viewSelection: a dictionary with 4 keys: 'view', 'tag', 'person',
    'account'. This dictionary represents the selections that users have
    made in the 4-section 'complexity 3' filtering UI. Each key may have a
    string value, or None. If the value is None, the user has selected
    'All' for that key in the UI; if the value is a string, the user has
    selected that string.

    @return: a L{MailboxSelector} object.
    """
    view = viewSelection[u"view"]
    tag = viewSelection[u"tag"]
    personWebID = viewSelection[u"person"]
    account = viewSelection[u"account"]

    selector = MailboxSelector(store)
    selector.setLimit(None)
    # Touch-once views present oldest messages first; everything else is
    # newest-first.
    if view in TOUCH_ONCE_VIEWS:
        selector.setOldestFirst()
    else:
        selector.setNewestFirst()
    # A 'view' is really a message status, and the names line up; the one
    # exception is the u'all' pseudo-view, which maps to the CLEAN status.
    selector.refineByStatus(CLEAN_STATUS if view == u'all' else view)
    if tag is not None:
        selector.refineByTag(tag)
    if account is not None:
        selector.refineBySource(account)
    if personWebID is not None:
        selector.refineByPerson(
            ixmantissa.IWebTranslator(store).fromWebID(personWebID))
    return selector
class Inbox(Item):
    """
    Navigable powerup that anchors the mailbox user interface.
    """
    implements(ixmantissa.INavigableElement)

    typeName = 'quotient_inbox'
    schemaVersion = 6
    powerupInterfaces = (ixmantissa.INavigableElement,)

    # Items this powerup requires; the dependency system installs them
    # alongside the inbox itself.
    privateApplication = dependsOn(PrivateApplication)
    messageSource = dependsOn(MessageSource)
    quotientPrefs = dependsOn(QuotientPreferenceCollection)
    deliveryAgent = dependsOn(DeliveryAgent)
    messageDisplayPrefs = dependsOn(MessageDisplayPreferenceCollection)
    filter = dependsOn(spam.Filter)
    focus = dependsOn(Focus)

    # uiComplexity should be an integer between 1 and 3, where 1 is the least
    # complex and 3 is the most complex. the value of this attribute
    # determines what portions of the inbox UI will be visible each time it is
    # loaded (and so should be updated each time the user changes the setting)
    uiComplexity = attributes.integer(default=1)

    # showMoreDetail is a boolean which indicates whether messages should be
    # loaded with the "More Detail" pane expanded.
    showMoreDetail = attributes.boolean(default=False)
    def __init__(self, **kw):
        # Pure pass-through; axiom.item.Item consumes all attribute keywords.
        super(Inbox, self).__init__(**kw)
def getTabs(self):
return [webnav.Tab('Mail', self.storeID, 0.75, children=
[webnav.Tab('Inbox', self.storeID, 0.4)],
authoritative=True)]
def getPeople(self):
"""
Find all of the people in C{self.store}, excluding
L{people.Organizer.storeOwnerPerson} if there is an L{people.Organizer}
in our store.
@return: some people.
@rtype: iterable of L{people.Person}.
"""
organizer = self.store.findUnique(people.Organizer, default=None)
if organizer is None:
return iter(())
return iter(self.store.query(
people.Person, sort=people.Person.name.ascending))
def getBaseComparison(self, viewSelection):
"""
Return an IComparison to be used as the basic restriction for a view
onto the mailbox with the given parameters.
@param viewSelection: a dictionary with 4 keys: 'view', 'tag', person',
'account'. This dictionary represents the selections that users have
made in the 4-section 'complexity 3' filtering UI. Each key may have a
string value, or None. If the value is None, the user has selected
'All' for that key in the UI; if the value is a string, the user has
selected that string.
@return: an IComparison which can be used to generate a query for
messages matching the selection represented by the viewSelection
criterea.
"""
return _viewSelectionToMailboxSelector(self.store,
viewSelection)._getComparison()
def getComparisonForBatchType(self, batchType, viewSelection):
"""
Return an IComparison to be used as the restriction for a particular
batch of messages from a view onto the mailbox with the given
parameters.
"""
sq = _viewSelectionToMailboxSelector(self.store, viewSelection)
if batchType in (UNREAD_STATUS, READ_STATUS):
sq.refineByStatus(batchType)
return sq._getComparison()
def messagesForBatchType(self, batchType, viewSelection, exclude=()):
"""
Return an iterable of L{exmess.Message} items which belong to the
specified batch.
@param batchType: A string defining a particular batch. For example,
C{"read"} or C{"unread"}.
@param exclude: messages to exclude from the batch selection.
defaults to no messages.
@type exclude: iterable of L{xquotient.exmess.Message}
@rtype: iterable
"""
it = self.store.query(
Message,
self.getComparisonForBatchType(
batchType, viewSelection)).paginate()
exclude = set(m.storeID for m in exclude)
return itertools.ifilter(lambda m: m.storeID not in exclude, it)
def action_archive(self, message):
"""
Move the given message to the archive.
"""
message.archive()
def action_unarchive(self, message):
"""
Move the given message out of the archive.
"""
message.unarchive()
def action_delete(self, message):
"""
Move the given message to the trash.
"""
message.moveToTrash()
def action_undelete(self, message):
"""
Move the given message out of the trash.
"""
message.removeFromTrash()
def action_defer(self, message, days, hours, minutes):
"""
Change the state of the given message to Deferred and schedule it to
be changed back after the given interval has elapsed.
"""
return message.deferFor(timedelta(days=days, hours=hours, minutes=minutes))
def action_trainSpam(self, message):
"""
Train the message filter using the given message as an example of
spam.
"""
message.trainSpam()
def action_trainHam(self, message):
"""
Train the message filter using the given message as an example of
ham.
"""
message.trainClean()
def _getActionMethod(self, actionName):
return getattr(self, 'action_' + actionName)
def _performManyAct(self, action, args, messages, D):
"""
Call C{action} on each message in C{messages}, passing the keyword
arguments C{args}, and calling back the deferred C{D} with the number
of read and unread messages when done
@param action: the action to call
@type action: function
@param args: extra arguments to pass to the action
@type args: C{dict}
@param messages: the messages to act on
@type messages: iterable of L{xquotient.exmess.Message}
@param D: the deferred to call when we're done
@type D: L{twisted.internet.defer.Deferred}
@return: deferred firing with pair of (read count, unread count)
@type: L{twisted.internet.defer.Deferred}
"""
readCount = 0
i = -1
for message in messages:
if message.read:
readCount += 1
yield action(message, **args)
i += 1
D.callback((readCount, i+1-readCount))
def performMany(self, actionName, messages, args=None,
scheduler=coiterate):
"""
Perform the action with name C{actionName} on the messages in
C{messages}, passing C{args} as extra arguments to the action method
@param actionName: name of an action, e.g. "archive".
@type actionName: C{str}
@param messages: the messages to act on
@type messages: iterable of L{xquotient.exmess.Message}
@param args: extra arguments to pass to the action method
@type args: None or a C{dict}
@param scheduler: callable which takes an iterator of deferreds and
consumes them appropriately. expected to return a deferred.
@type scheduler: callable
@return: the number of affected messages which have been read and the
number of affected messages which haven't
@rtype: pair
"""
if args is None:
args = {}
action = self._getActionMethod(actionName)
D = defer.Deferred()
coopDeferred = scheduler(self._performManyAct(action, args, messages, D))
coopDeferred.addErrback(D.errback)
return D
def upgradeInbox1to2(oldInbox):
    """
    Create the extra state tracking items necessary for efficiently determining
    distinct source addresses.
    """
    newInbox = oldInbox.upgradeVersion(
        'quotient_inbox', 1, 2,
        installedOn=oldInbox.installedOn,
        uiComplexity=oldInbox.uiComplexity)
    return newInbox
registerUpgrader(upgradeInbox1to2, 'quotient_inbox', 1, 2)

# Schema of Inbox version 2, so Axiom can load legacy items for upgrading.
declareLegacyItem(Inbox.typeName, 2,
                  dict(installedOn=attributes.reference(),
                       uiComplexity=attributes.integer(),
                       catalog=attributes.reference()))
# 2 -> 3 only added the showMoreDetail attribute; a plain attribute copy
# (with the new attribute taking its default) is sufficient.
registerAttributeCopyingUpgrader(Inbox, 2, 3)
# Schema of Inbox version 3.
declareLegacyItem(Inbox.typeName, 3,
                  dict(installedOn=attributes.reference(),
                       catalog=attributes.reference(),
                       uiComplexity=attributes.integer(),
                       showMoreDetail=attributes.boolean()))
def inbox3to4(old):
    """
    Copy over all attributes except for 'installedOn' and 'catalog', which
    have been deleted.
    To avoid triggering an Axiom bug where installOn will load the Inbox
    instance being upgraded and re-entrantly run its remaining upgraders,
    rely on inbox4to5 to set the 'filter' attribute which was added in this
    version of the schema either to a L{xquotient.spam.Filter} that exists
    in the store, or to a new one.
    """
    # The PrivateApplication might not have been upgraded yet. If not, look
    # backward through older schema versions to try to find it. Axiom makes no
    # guarantees about the order in which upgraders are run (not even that it
    # will be the same order for two different upgrade runs).
    from xmantissa.webapp import PrivateApplicationV2, PrivateApplicationV3
    privAppTypes = [
        PrivateApplication, PrivateApplicationV3, PrivateApplicationV2]
    for privAppType in privAppTypes:
        privapp = old.store.findFirst(privAppType)
        if privapp is not None:
            break
    else:
        # Nominally an error! But not all of the upgrader tests create a
        # realistic database (ie, they don't create a PrivateApplication). So
        # cannot treat this as an error. -exarkun
        # (In that case privapp is left bound to None by the final findFirst
        # call above and is stored as the reference below.)
        pass
    new = old.upgradeVersion(
        Inbox.typeName, 3, 4,
        privateApplication=privapp,
        messageSource=old.store.findOrCreate(MessageSource),
        quotientPrefs=old.store.findOrCreate(QuotientPreferenceCollection),
        deliveryAgent=old.store.findOrCreate(DeliveryAgent),
        messageDisplayPrefs=old.store.findOrCreate(MessageDisplayPreferenceCollection),
        uiComplexity=old.uiComplexity,
        showMoreDetail=old.showMoreDetail)
    return new
registerUpgrader(inbox3to4, Inbox.typeName, 3, 4)

# Schema of Inbox version 4.
declareLegacyItem(Inbox.typeName, 4,
                  dict(privateApplication=attributes.reference(),
                       scheduler=attributes.reference(),
                       messageSource=attributes.reference(),
                       quotientPrefs=attributes.reference(),
                       deliveryAgent=attributes.reference(),
                       messageDisplayPrefs=attributes.reference(),
                       uiComplexity=attributes.integer(),
                       showMoreDetail=attributes.boolean(),
                       filter=attributes.reference()))
def inbox4to5(old):
    """
    Copy over all attributes and add a reference to a newly created Focus item.
    Focus did not exist prior to the addition of this dependency, so there is
    no way one could exist in the store of an existing Inbox.
    Additionally, find or create a spam.Filter.  See inbox3to4.
    """
    focus = Focus(store=old.store)
    new = old.upgradeVersion(
        Inbox.typeName, 4, 5,
        privateApplication=old.privateApplication,
        messageSource=old.messageSource,
        quotientPrefs=old.quotientPrefs,
        deliveryAgent=old.deliveryAgent,
        messageDisplayPrefs=old.messageDisplayPrefs,
        uiComplexity=old.uiComplexity,
        showMoreDetail=old.showMoreDetail)
    src = old.store.findUnique(MessageSource)
    if installedOn(src) is None:
        #MessageSource was created in pre-dependency-system days
        _DependencyConnector(installee=src, target=old.store,
                             explicitlyInstalled=True,
                             store=old.store)
    # filter and focus are assigned after upgradeVersion; see inbox3to4 for
    # why the filter reference could not be set during the 3 -> 4 upgrade.
    filter = new.store.findFirst(spam.Filter, default=None)
    if filter is None:
        filter = spam.Filter(store=new.store)
    new.filter = filter
    new.focus = focus
    return new
registerUpgrader(inbox4to5, Inbox.typeName, 4, 5)

# Schema of Inbox version 5.
declareLegacyItem(Inbox.typeName, 5,
                  dict(privateApplication=attributes.reference(),
                       scheduler=attributes.reference(),
                       messageSource=attributes.reference(),
                       quotientPrefs=attributes.reference(),
                       deliveryAgent=attributes.reference(),
                       messageDisplayPrefs=attributes.reference(),
                       uiComplexity=attributes.integer(),
                       showMoreDetail=attributes.boolean(),
                       filter=attributes.reference(),
                       focus=attributes.reference()))
def inbox5to6(old):
    """
    Copy over all attributes except C{scheduler}.
    """
    new = old.upgradeVersion(
        Inbox.typeName, 5, 6,
        privateApplication=old.privateApplication,
        messageSource=old.messageSource,
        quotientPrefs=old.quotientPrefs,
        messageDisplayPrefs=old.messageDisplayPrefs,
        deliveryAgent=old.deliveryAgent,
        uiComplexity=old.uiComplexity,
        showMoreDetail=old.showMoreDetail,
        filter=old.filter,
        focus=old.focus)
    # If the old item was original schema version 5 in the database, focus and
    # filter have already been installed, because the 4 to 5 upgrader used to
    # install them. However, now that 5 is not the newest version of Inbox, it
    # cannot do that. Only the upgrader to the newest version can. So do it
    # here, instead, if it is necessary (which is when the original schema
    # version was older than 5).
    if installedOn(new.filter) is None:
        installOn(new.filter, new.store)
    if installedOn(new.focus) is None:
        installOn(new.focus, new.store)
    return new
registerUpgrader(inbox5to6, Inbox.typeName, 5, 6)
class MailboxScrollingFragment(Scrollable, ScrollableView, LiveElement):
    """
    Specialized ScrollingFragment which supports client-side requests to alter
    the query constraints.
    """
    jsClass = u'Quotient.Mailbox.ScrollingWidget'
    def __init__(self, store):
        # Columns the client-side scrolling widget can display/sort; newest
        # messages (receivedWhen descending) come first by default.
        Scrollable.__init__(self, ixmantissa.IWebTranslator(store, None),
                            columns=(Message.sender,
                                     Message.senderDisplay,
                                     Message.recipient,
                                     Message.subject,
                                     Message.receivedWhen,
                                     Message.read,
                                     Message.sentWhen,
                                     Message.attachments,
                                     Message.everDeferred),
                            defaultSortColumn=Message.receivedWhen,
                            defaultSortAscending=False)
        LiveElement.__init__(self)
        self.store = store
        # Start out showing the unfiltered inbox view.
        self.setViewSelection({u"view": "inbox", u"tag": None, u"person": None, u"account": None})
    def getInitialArguments(self):
        # Arguments handed to the client-side widget constructor.
        return [self.getTableMetadata(self.viewSelection)]
    def setViewSelection(self, viewSelection):
        """
        Replace the current view selection and rebuild the corresponding
        MailboxSelector used by performQuery/performCount.  Keys arrive from
        the browser as unicode and are normalized to byte strings.
        """
        self.viewSelection = dict(
            (k.encode('ascii'), v)
            for (k, v)
            in viewSelection.iteritems())
        self.statusQuery = _viewSelectionToMailboxSelector(
            self.store, viewSelection)
    def getTableMetadata(self, viewSelection):
        # Remotely exposed: update the selection, then defer to the base
        # implementation for the metadata itself.
        self.setViewSelection(viewSelection)
        return super(MailboxScrollingFragment, self).getTableMetadata()
    expose(getTableMetadata)
    def performQuery(self, rangeBegin, rangeEnd):
        """
        This scrolling fragment should perform queries using MailboxSelector, not
        the normal store query machinery, because it is more efficient.
        @param rangeBegin: an integer, the start of the range to retrieve.
        @param rangeEnd: an integer, the end of the range to retrieve.
        """
        return self.statusQuery.offsetQuery(rangeBegin, rangeEnd-rangeBegin)
    def performCount(self):
        """
        This scrolling fragment should perform counts using MailboxSelector, not the
        normal store query machinery, because it is more efficient.
        NB: it isn't actually more efficient.  But it could at least be changed
        to be.
        """
        return self.statusQuery.count()
    def requestRowRange(self, viewSelection, firstRow, lastRow):
        # Remotely exposed: like the base requestRowRange, but takes the view
        # selection so the client can change filters and fetch in one call.
        self.setViewSelection(viewSelection)
        return super(MailboxScrollingFragment, self).requestRowRange(
            firstRow, lastRow)
    expose(requestRowRange)
    def requestCurrentSize(self, viewSelection=None):
        # Remotely exposed: the view selection argument is optional here.
        if viewSelection is not None:
            self.setViewSelection(viewSelection)
        return super(MailboxScrollingFragment, self).requestCurrentSize()
    expose(requestCurrentSize)
class InboxScreen(webtheme.ThemedElement, renderers.ButtonRenderingMixin):
    """
    Renderer for a mailbox of email.
    @ivar store: The L{axiom.store.Store} containing the state this instance
    renders.
    @ivar inbox: The L{Inbox} which serves as the model for this view.
    @ivar messageDetailFragmentFactory: the class which should be used to
    render L{xquotient.exmess.Message} objects.  Defaults to
    L{ActionlessMessageDetail}
    """
    implements(ixmantissa.INavigableFragment)
    fragmentName = 'inbox'
    live = 'athena'
    title = ''
    jsClass = u'Quotient.Mailbox.Controller'
    translator = None
    # A dictionary mapping view parameters to their current state. Valid keys
    # in this dictionary are:
    #
    # view - mapped to one of "all", "trash", "sent", "spam", "deferred", or "inbox"
    # tag - mapped to a tag name or None
    # person - mapped to a person name or None
    # account - mapped to an account name or None
    viewSelection = None
    def __init__(self, inbox):
        athena.LiveElement.__init__(self)
        self.translator = ixmantissa.IWebTranslator(inbox.store)
        self.store = inbox.store
        self.inbox = inbox
        # Default to the unfiltered inbox view.
        self.viewSelection = {
            "view": "inbox",
            "tag": None,
            "person": None,
            "account": None}
        self.scrollingFragment = self._createScrollingFragment()
        self.scrollingFragment.setFragmentParent(self)
    def _createScrollingFragment(self):
        """
        Create a Fragment which will display a mailbox.
        """
        f = MailboxScrollingFragment(self.store)
        f.docFactory = getLoader(f.fragmentName)
        return f
    def getInitialArguments(self):
        """
        Return the initial view complexity for the mailbox.
        """
        return (self.inbox.uiComplexity,)
    messageDetailFragmentFactory = ActionlessMessageDetail
    def _messageFragment(self, message):
        """
        Return a fragment which will render C{message}
        @param message: the message to render
        @type message: L{xquotient.exmess.Message}
        @rtype: L{messageDetailFragmentFactory}
        """
        f = self.messageDetailFragmentFactory(message)
        f.setFragmentParent(self)
        return f
    def _currentAsFragment(self, currentMessage):
        # Render the current message, or the empty string when there is none.
        if currentMessage is None:
            return ''
        return self._messageFragment(currentMessage)
    def messageActions(self, request, tag):
        """
        Renderer which returns a fragment which renders actions for the inbox
        @rtype: L{MessageActions}
        """
        f = MessageActions()
        f.setFragmentParent(self)
        return f
    renderer(messageActions)
    def scroller(self, request, tag):
        # Renderer: the message-list scrolling widget created in __init__.
        return self.scrollingFragment
    renderer(scroller)
    def getUserTagNames(self):
        """
        Return an alphabetically sorted list of unique tag names as unicode
        strings.
        """
        names = list(self.inbox.store.findOrCreate(tags.Catalog).tagNames())
        names.sort()
        return names
    def viewPane(self, request, tag):
        """
        Renderer for one collapsible filtering pane; the tag's 'name' and
        'renderer' attributes select the pane title and the renderer used for
        its body, and the presence of an 'open' attribute selects the
        initially-expanded pattern.
        """
        attrs = tag.attributes
        iq = inevow.IQ(self.docFactory)
        if 'open' in attrs:
            paneBodyPattern = 'open-pane-body'
        else:
            paneBodyPattern = 'pane-body'
        paneBodyPattern = iq.onePattern(paneBodyPattern)
        return dictFillSlots(iq.onePattern('view-pane'),
                             {'name': attrs['name'],
                              'pane-body': paneBodyPattern.fillSlots(
                                  'renderer', T.directive(attrs['renderer']))})
    renderer(viewPane)
    def personChooser(self, request, tag):
        """
        Renderer for the person-filter chooser; 'all' plus one option per
        person, with the currently selected person highlighted.
        """
        select = inevow.IQ(self.docFactory).onePattern('personChooser')
        option = inevow.IQ(select).patternGenerator('personChoice')
        selectedOption = inevow.IQ(select).patternGenerator('selectedPersonChoice')
        for person in [None] + list(self.inbox.getPeople()):
            if person == self.viewSelection["person"]:
                p = selectedOption
            else:
                p = option
            if person:
                name = person.getDisplayName()
                key = self.translator.toWebID(person)
            else:
                name = key = 'all'
            opt = p().fillSlots(
                'personName', name).fillSlots(
                'personKey', key)
            select[opt]
        return select
    renderer(personChooser)
    # This is the largest unread count allowed. Counts larger than this will
    # not be reported, to save on database work. This is, I hope, a temporary
    # feature which will be replaced once counts can be done truly efficiently,
    # by saving the intended results in the DB.
    countLimit = 1000
    def getUnreadMessageCount(self, viewSelection):
        """
        @return: number of unread messages in current view, capped at
        C{countLimit}
        """
        sq = _viewSelectionToMailboxSelector(self.inbox.store, viewSelection)
        sq.refineByStatus(UNREAD_STATUS)
        sq.setLimit(self.countLimit)
        lsq = sq.count()
        return lsq
    def mailViewChooser(self, request, tag):
        """
        Renderer for the view chooser (inbox/all/trash/...), each view with
        its unread-message count, in VIEWS order.
        """
        select = inevow.IQ(self.docFactory).onePattern('mailViewChooser')
        option = inevow.IQ(select).patternGenerator('mailViewChoice')
        selectedOption = inevow.IQ(select).patternGenerator('selectedMailViewChoice')
        counts = self.mailViewCounts()
        counts = sorted(counts.iteritems(), key=lambda (v, c): VIEWS.index(v))
        curview = self.viewSelection["view"]
        for (view, count) in counts:
            if view == curview:
                p = selectedOption
            else:
                p = option
            select[p().fillSlots(
                'mailViewName', view.title()).fillSlots(
                'count', count)]
        return select
    renderer(mailViewChooser)
    def tagChooser(self, request, tag):
        """
        Renderer for the tag-filter chooser; 'all' plus one option per tag.
        """
        select = inevow.IQ(self.docFactory).onePattern('tagChooser')
        option = inevow.IQ(select).patternGenerator('tagChoice')
        selectedOption = inevow.IQ(select).patternGenerator('selectedTagChoice')
        for tag in [None] + self.getUserTagNames():
            if tag == self.viewSelection["tag"]:
                p = selectedOption
            else:
                p = option
            opt = p().fillSlots('tagName', tag or 'all')
            select[opt]
        return select
    renderer(tagChooser)
    def _accountNames(self):
        return getMessageSources(self.inbox.store)
    def accountChooser(self, request, tag):
        """
        Renderer for the account-filter chooser; 'all' plus one option per
        message source.
        """
        select = inevow.IQ(self.docFactory).onePattern('accountChooser')
        option = inevow.IQ(select).patternGenerator('accountChoice')
        selectedOption = inevow.IQ(select).patternGenerator('selectedAccountChoice')
        for acc in [None] + list(self._accountNames()):
            if acc == self.viewSelection["account"]:
                p = selectedOption
            else:
                p = option
            opt = p().fillSlots('accountName', acc or 'all')
            select[opt]
        return select
    renderer(accountChooser)
    def head(self):
        return None
    # remote methods
    def setComplexity(self, n):
        # Remotely exposed: persist the user's chosen UI complexity (1-3).
        self.inbox.uiComplexity = n
    expose(setComplexity)
    setComplexity = transacted(setComplexity)
    def fastForward(self, viewSelection, webID):
        """
        Retrieve message detail information for the specified message and mark
        that message as read.
        NOTE(review): the viewSelection argument is accepted but unused here,
        and no look-ahead information for a next message is computed --
        confirm against the client-side caller.
        """
        currentMessage = self.translator.fromWebID(webID)
        currentMessage.markRead()
        return self._messageFragment(currentMessage)
    expose(fastForward)
    fastForward = transacted(fastForward)
    def mailViewCounts(self):
        # Compute the unread count for every view, holding the other
        # selection criteria (tag/person/account) fixed.
        counts = {}
        viewSelection = dict(self.viewSelection)
        for v in VIEWS:
            viewSelection["view"] = v
            counts[v] = self.getUnreadMessageCount(viewSelection)
        return counts
    def _messagePreview(self, msg):
        # Minimal preview data sent to the client; None when no message.
        if msg is not None:
            return {
                u'subject': msg.subject}
        return None
    def actOnMessageIdentifierList(self, action, messageIdentifiers, extraArguments=None):
        """
        Perform an action on list of messages specified by their web
        identifier.
        @type action: C{unicode}
        @param action: The name of the action to perform.  This may be any
        string which can be prepended with C{'action_'} to name a method
        defined on this class.
        @type messageIdentifiers: C{list} of C{unicode}
        @param messageIdentifiers: A list of web IDs for messages on which to act.
        @type extraArguments: C{None} or C{dict}
        @param extraArguments: Additional keyword arguments to pass on to the
        action handler.
        """
        msgs = map(self.translator.fromWebID, messageIdentifiers)
        if extraArguments is not None:
            # Keyword argument names must be native strings, not unicode.
            extraArguments = dict((k.encode('ascii'), v)
                                  for (k, v) in extraArguments.iteritems())
        return self.inbox.performMany(action, msgs, args=extraArguments)
    expose(actOnMessageIdentifierList)
    def actOnMessageBatch(self, action, viewSelection, batchType, include,
                          exclude, extraArguments=None):
        """
        Perform an action on a set of messages defined by a common
        characteristic or which are specifically included but not specifically
        excluded.
        """
        msgs = self.inbox.messagesForBatchType(
            batchType, viewSelection,
            exclude=[self.translator.fromWebID(webID)
                     for webID in exclude])
        msgs = itertools.chain(
            msgs,
            (self.translator.fromWebID(webID) for webID in include))
        if extraArguments is not None:
            extraArguments = dict((k.encode('ascii'), v)
                                  for (k, v) in extraArguments.iteritems())
        return self.inbox.performMany(action, msgs, args=extraArguments)
    expose(actOnMessageBatch)
    def getComposer(self):
        """
        Return an inline L{xquotient.compose.ComposeFragment} instance with
        empty to address, subject, message body and attachments
        """
        f = ComposeFragment(
            self.inbox.store.findUnique(Composer),
            recipients=None,
            subject=u'',
            messageBody=u'',
            attachments=(),
            inline=True)
        f.setFragmentParent(self)
        f.docFactory = getLoader(f.fragmentName)
        return f
    expose(getComposer)
# Render an Inbox with an InboxScreen wherever an INavigableFragment is
# requested for one.
registerAdapter(InboxScreen, Inbox, ixmantissa.INavigableFragment)
| |
"""
Ridge regression
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Reuben Fletcher-Costin <reuben.fletchercostin@gmail.com>
# Fabian Pedregosa <fabian@fseoane.net>
# Michael Eickenberg <michael.eickenberg@nsup.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from .sag import sag_solver
from .sag_fast import get_max_squared_sum
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import check_X_y
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
    """Solve the primal ridge normal equations with a direct symmetric
    solve: w = inv(X^t X + alpha*Id) * X.T y.

    ``alpha`` is an array with one penalty per target; when all penalties
    are equal a single factorization solves every target at once.
    Raises scipy.linalg.LinAlgError when the system is singular (the
    caller falls back to the SVD solver in that case).
    NOTE(review): ``sym_pos`` was removed from scipy.linalg.solve in
    SciPy 1.11 in favor of ``assume_a='pos'`` -- confirm the supported
    SciPy range before upgrading.
    """
    # w = inv(X^t X + alpha*Id) * X.T y
    n_samples, n_features = X.shape
    n_targets = y.shape[1]
    A = safe_sparse_dot(X.T, X, dense_output=True)
    Xy = safe_sparse_dot(X.T, y, dense_output=True)
    one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
    if one_alpha:
        # Single shared penalty: add alpha to the diagonal once and solve
        # all targets with one factorization (A may be overwritten).
        A.flat[::n_features + 1] += alpha[0]
        return linalg.solve(A, Xy, sym_pos=True,
                            overwrite_a=True).T
    else:
        # One penalty per target: add/remove each alpha on the diagonal in
        # turn, keeping A intact between targets (overwrite_a=False).
        coefs = np.empty([n_targets, n_features])
        for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
            A.flat[::n_features + 1] += current_alpha
            coef[:] = linalg.solve(A, target, sym_pos=True,
                                   overwrite_a=False).ravel()
            A.flat[::n_features + 1] -= current_alpha
        return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
    """Solve the dual (kernel) ridge system with a direct symmetric solve:
    dual_coef = inv(K + alpha*Id) y.

    ``K`` may be a user-supplied precomputed kernel, so unless ``copy`` is
    True it is modified in place and restored before returning.  Sample
    weights are folded in as K <- diag(sw) K diag(sw), y <- diag(sw) y,
    and unfolded from the returned dual coefficients.
    NOTE(review): ``sym_pos`` was removed from scipy.linalg.solve in
    SciPy 1.11 in favor of ``assume_a='pos'`` -- confirm the supported
    SciPy range before upgrading.
    """
    # dual_coef = inv(X X^t + alpha*Id) y
    n_samples = K.shape[0]
    n_targets = y.shape[1]
    if copy:
        K = K.copy()
    alpha = np.atleast_1d(alpha)
    one_alpha = (alpha == alpha[0]).all()
    # isinstance is checked first so the `in` test never sees an ndarray
    # (whose comparison result would be ambiguous).
    has_sw = isinstance(sample_weight, np.ndarray) \
        or sample_weight not in [1.0, None]
    if has_sw:
        # Unlike other solvers, we need to support sample_weight directly
        # because K might be a pre-computed kernel.
        sw = np.sqrt(np.atleast_1d(sample_weight))
        y = y * sw[:, np.newaxis]
        K *= np.outer(sw, sw)
    if one_alpha:
        # Only one penalty, we can solve multi-target problems in one time.
        K.flat[::n_samples + 1] += alpha[0]
        try:
            # Note: we must use overwrite_a=False in order to be able to
            # use the fall-back solution below in case a LinAlgError
            # is raised
            dual_coef = linalg.solve(K, y, sym_pos=True,
                                     overwrite_a=False)
        except np.linalg.LinAlgError:
            warnings.warn("Singular matrix in solving dual problem. Using "
                          "least-squares solution instead.")
            dual_coef = linalg.lstsq(K, y)[0]
        # K is expensive to compute and store in memory so change it back in
        # case it was user-given.
        K.flat[::n_samples + 1] -= alpha[0]
        if has_sw:
            # Undo the sample-weight scaling on the dual coefficients.
            dual_coef *= sw[:, np.newaxis]
        return dual_coef
    else:
        # One penalty per target. We need to solve each target separately.
        dual_coefs = np.empty([n_targets, n_samples])
        for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
            K.flat[::n_samples + 1] += current_alpha
            dual_coef[:] = linalg.solve(K, target, sym_pos=True,
                                        overwrite_a=False).ravel()
            K.flat[::n_samples + 1] -= current_alpha
        if has_sw:
            dual_coefs *= sw[np.newaxis, :]
        return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
                     max_iter=None, tol=1e-3, verbose=0, random_state=None,
                     return_n_iter=False):
    """Solve the ridge equation by the method of normal equations.

    Read more in the :ref:`User Guide <ridge_regression>`.

    Parameters
    ----------
    X : {array-like, sparse matrix, LinearOperator},
        shape = [n_samples, n_features]
        Training data

    y : array-like, shape = [n_samples] or [n_samples, n_targets]
        Target values

    alpha : {float, array-like},
        shape = [n_targets] if array-like
        The l_2 penalty to be used. If an array is passed, penalties are
        assumed to be specific to targets

    max_iter : int, optional
        Maximum number of iterations for conjugate gradient solver.
        For 'sparse_cg' and 'lsqr' solvers, the default value is determined
        by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.

    sample_weight : float or numpy array of shape [n_samples]
        Individual weights for each sample. If sample_weight is not None and
        solver='auto', the solver will be set to 'cholesky'.

    solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
        Solver to use in the computational routines:

        - 'auto' chooses the solver automatically based on the type of data.

        - 'svd' uses a Singular Value Decomposition of X to compute the Ridge
          coefficients. More stable for singular matrices than
          'cholesky'.

        - 'cholesky' uses the standard scipy.linalg.solve function to
          obtain a closed-form solution via a Cholesky decomposition of
          dot(X.T, X)

        - 'sparse_cg' uses the conjugate gradient solver as found in
          scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
          more appropriate than 'cholesky' for large-scale data
          (possibility to set `tol` and `max_iter`).

        - 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
          in old scipy versions. It also uses an iterative procedure.

        - 'sag' uses a Stochastic Average Gradient descent. It also uses an
          iterative procedure, and is often faster than other solvers when
          both n_samples and n_features are large. Note that 'sag' fast
          convergence is only guaranteed on features with approximately the
          same scale. You can preprocess the data with a scaler from
          sklearn.preprocessing.

        All last four solvers support both dense and sparse data.

    tol : float
        Precision of the solution.

    verbose : int
        Verbosity level. Setting verbose > 0 will display additional
        information depending on the solver used.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data. Used in 'sag' solver.

    return_n_iter : boolean, default False
        If True, the method also returns `n_iter`, the actual number of
        iteration performed by the solver.

    Returns
    -------
    coef : array, shape = [n_features] or [n_targets, n_features]
        Weight vector(s).

    n_iter : int, optional
        The actual number of iteration performed by the solver.
        Only returned if `return_n_iter` is True.

    Notes
    -----
    This function won't compute the intercept.
    """
    # SAG needs X and y columns to be C-contiguous and np.float64
    if solver == 'sag':
        X = check_array(X, accept_sparse=['csr'],
                        dtype=np.float64, order='C')
        y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')
    else:
        X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                        dtype=np.float64)
        y = check_array(y, dtype='numeric', ensure_2d=False)
    check_consistent_length(X, y)

    n_samples, n_features = X.shape

    if y.ndim > 2:
        raise ValueError("Target y has the wrong shape %s" % str(y.shape))

    # Internally solvers always work on 2d y; remember to flatten the
    # coefficients again when a 1d y was passed.
    ravel = False
    if y.ndim == 1:
        y = y.reshape(-1, 1)
        ravel = True

    n_samples_, n_targets = y.shape

    if n_samples != n_samples_:
        raise ValueError("Number of samples in X and y does not correspond:"
                         " %d != %d" % (n_samples, n_samples_))

    has_sw = sample_weight is not None

    if solver == 'auto':
        # cholesky if it's a dense array and cg in any other case
        if not sparse.issparse(X) or has_sw:
            solver = 'cholesky'
        else:
            solver = 'sparse_cg'

    elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
        warnings.warn("""lsqr not available on this machine, falling back
                      to sparse_cg.""")
        solver = 'sparse_cg'

    if has_sw:
        if np.atleast_1d(sample_weight).ndim > 1:
            raise ValueError("Sample weights must be 1D array or scalar")

        if solver != 'sag':
            # SAG supports sample_weight directly. For other solvers,
            # we implement sample_weight via a simple rescaling.
            X, y = _rescale_data(X, y, sample_weight)

    # There should be either 1 or n_targets penalties
    alpha = np.asarray(alpha).ravel()
    if alpha.size not in [1, n_targets]:
        raise ValueError("Number of targets and number of penalties "
                         "do not correspond: %d != %d"
                         % (alpha.size, n_targets))

    if alpha.size == 1 and n_targets > 1:
        alpha = np.repeat(alpha, n_targets)

    if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag'):
        raise ValueError('Solver %s not understood' % solver)

    n_iter = None
    if solver == 'sparse_cg':
        coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)

    elif solver == 'lsqr':
        coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)

    elif solver == 'cholesky':
        if n_features > n_samples:
            # Fewer samples than features: solve the dual (kernel) problem.
            K = safe_sparse_dot(X, X.T, dense_output=True)
            try:
                dual_coef = _solve_cholesky_kernel(K, y, alpha)

                coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
            except linalg.LinAlgError:
                # use SVD solver if matrix is singular
                solver = 'svd'

        else:
            try:
                coef = _solve_cholesky(X, y, alpha)
            except linalg.LinAlgError:
                # use SVD solver if matrix is singular
                solver = 'svd'

    elif solver == 'sag':
        # precompute max_squared_sum for all targets
        max_squared_sum = get_max_squared_sum(X)

        coef = np.empty((y.shape[1], n_features))
        n_iter = np.empty(y.shape[1], dtype=np.int32)
        for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
            coef_, n_iter_, _ = sag_solver(
                X, target.ravel(), sample_weight, 'squared', alpha_i,
                max_iter, tol, verbose, random_state, False, max_squared_sum,
                dict())
            coef[i] = coef_
            n_iter[i] = n_iter_

        coef = np.asarray(coef)

    # Reached either when requested explicitly or as the fallback for a
    # singular system under the 'cholesky' solver.
    if solver == 'svd':
        if sparse.issparse(X):
            raise TypeError('SVD solver does not support sparse'
                            ' inputs currently')
        coef = _solve_svd(X, y, alpha)

    if ravel:
        # When y was passed as a 1d-array, we flatten the coefficients.
        coef = coef.ravel()

    if return_n_iter:
        return coef, n_iter
    else:
        return coef
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
    """Common machinery shared by the ridge estimators.

    Stores the hyper-parameters verbatim and provides the single ``fit``
    implementation; concrete subclasses only customise construction.
    """

    @abstractmethod
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=None, tol=1e-3, solver="auto",
                 random_state=None):
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        self.max_iter = max_iter
        self.tol = tol
        self.solver = solver
        self.random_state = random_state

    def fit(self, X, y, sample_weight=None):
        """Validate inputs, solve the ridge problem and set ``coef_``."""
        X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
                         multi_output=True, y_numeric=True)

        if sample_weight is not None:
            # Only scalars or one-dimensional weight vectors are accepted.
            if np.atleast_1d(sample_weight).ndim > 1:
                raise ValueError("Sample weights must be 1D array or scalar")

        X, y, X_offset, y_offset, X_scale = self._center_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X,
            sample_weight=sample_weight)

        self.coef_, self.n_iter_ = ridge_regression(
            X, y, alpha=self.alpha, sample_weight=sample_weight,
            max_iter=self.max_iter, tol=self.tol, solver=self.solver,
            random_state=self.random_state, return_n_iter=True)

        # Recover the intercept that centring removed.
        self._set_intercept(X_offset, y_offset, X_scale)
        return self
class Ridge(_BaseRidge, RegressorMixin):
    """Linear least squares with l2 regularization.
    This model solves a regression model where the loss function is
    the linear least squares function and regularization is given by
    the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
    This estimator has built-in support for multi-variate regression
    (i.e., when y is a 2d-array of shape [n_samples, n_targets]).
    Read more in the :ref:`User Guide <ridge_regression>`.
    Parameters
    ----------
    alpha : {float, array-like}, shape (n_targets)
        Small positive values of alpha improve the conditioning of the problem
        and reduce the variance of the estimates. Alpha corresponds to
        ``C^-1`` in other linear models such as LogisticRegression or
        LinearSVC. If an array is passed, penalties are assumed to be specific
        to the targets. Hence they must correspond in number.
    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.
    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    max_iter : int, optional
        Maximum number of iterations for conjugate gradient solver.
        For 'sparse_cg' and 'lsqr' solvers, the default value is determined
        by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
    solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
        Solver to use in the computational routines:
        - 'auto' chooses the solver automatically based on the type of data.
        - 'svd' uses a Singular Value Decomposition of X to compute the Ridge
          coefficients. More stable for singular matrices than
          'cholesky'.
        - 'cholesky' uses the standard scipy.linalg.solve function to
          obtain a closed-form solution.
        - 'sparse_cg' uses the conjugate gradient solver as found in
          scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
          more appropriate than 'cholesky' for large-scale data
          (possibility to set `tol` and `max_iter`).
        - 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
          in old scipy versions. It also uses an iterative procedure.
        - 'sag' uses a Stochastic Average Gradient descent. It also uses an
          iterative procedure, and is often faster than other solvers when
          both n_samples and n_features are large. Note that 'sag' fast
          convergence is only guaranteed on features with approximately the
          same scale. You can preprocess the data with a scaler from
          sklearn.preprocessing.
        All last four solvers support both dense and sparse data.
    tol : float
        Precision of the solution.
    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data. Used in 'sag' solver.
    Attributes
    ----------
    coef_ : array, shape (n_features,) or (n_targets, n_features)
        Weight vector(s).
    intercept_ : float | array, shape = (n_targets,)
        Independent term in decision function. Set to 0.0 if
        ``fit_intercept = False``.
    n_iter_ : array or None, shape (n_targets,)
        Actual number of iterations for each target. Available only for
        sag and lsqr solvers. Other solvers will return None.
    See also
    --------
    RidgeClassifier, RidgeCV, KernelRidge
    Examples
    --------
    >>> from sklearn.linear_model import Ridge
    >>> import numpy as np
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = Ridge(alpha=1.0)
    >>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
    Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
          normalize=False, random_state=None, solver='auto', tol=0.001)
    """
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=None, tol=1e-3, solver="auto",
                 random_state=None):
        # All parameters are stored verbatim by the shared _BaseRidge.
        super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
                                    normalize=normalize, copy_X=copy_X,
                                    max_iter=max_iter, tol=tol, solver=solver,
                                    random_state=random_state)
    def fit(self, X, y, sample_weight=None):
        """Fit Ridge regression model
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training data
        y : array-like, shape = [n_samples] or [n_samples, n_targets]
            Target values
        sample_weight : float or numpy array of shape [n_samples]
            Individual weights for each sample
        Returns
        -------
        self : returns an instance of self.
        """
        return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
    """Classifier using Ridge regression.
    Read more in the :ref:`User Guide <ridge_regression>`.
    Parameters
    ----------
    alpha : float
        Small positive values of alpha improve the conditioning of the problem
        and reduce the variance of the estimates. Alpha corresponds to
        ``C^-1`` in other linear models such as LogisticRegression or
        LinearSVC.
    class_weight : dict or 'balanced', optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``
    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.
    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set to false, no
        intercept will be used in calculations (e.g. data is expected to be
        already centered).
    max_iter : int, optional
        Maximum number of iterations for conjugate gradient solver.
        The default value is determined by scipy.sparse.linalg.
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
    solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
        Solver to use in the computational routines:
        - 'auto' chooses the solver automatically based on the type of data.
        - 'svd' uses a Singular Value Decomposition of X to compute the Ridge
          coefficients. More stable for singular matrices than
          'cholesky'.
        - 'cholesky' uses the standard scipy.linalg.solve function to
          obtain a closed-form solution.
        - 'sparse_cg' uses the conjugate gradient solver as found in
          scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
          more appropriate than 'cholesky' for large-scale data
          (possibility to set `tol` and `max_iter`).
        - 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
          in old scipy versions. It also uses an iterative procedure.
        - 'sag' uses a Stochastic Average Gradient descent. It also uses an
          iterative procedure, and is faster than other solvers when both
          n_samples and n_features are large.
    tol : float
        Precision of the solution.
    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data. Used in 'sag' solver.
    Attributes
    ----------
    coef_ : array, shape (n_features,) or (n_classes, n_features)
        Weight vector(s).
    intercept_ : float | array, shape = (n_targets,)
        Independent term in decision function. Set to 0.0 if
        ``fit_intercept = False``.
    n_iter_ : array or None, shape (n_targets,)
        Actual number of iterations for each target. Available only for
        sag and lsqr solvers. Other solvers will return None.
    See also
    --------
    Ridge, RidgeClassifierCV
    Notes
    -----
    For multi-class classification, n_class classifiers are trained in
    a one-versus-all approach. Concretely, this is implemented by taking
    advantage of the multi-variate response support in Ridge.
    """
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
                 solver="auto", random_state=None):
        super(RidgeClassifier, self).__init__(
            alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
            copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
            random_state=random_state)
        self.class_weight = class_weight
    def fit(self, X, y, sample_weight=None):
        """Fit Ridge regression model.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples,n_features]
            Training data
        y : array-like, shape = [n_samples]
            Target values
        sample_weight : float or numpy array of shape (n_samples,)
            Sample weight.
        Returns
        -------
        self : returns an instance of self.
        """
        # Encode each class as a +/-1 indicator column; regressing on this
        # indicator matrix is the one-versus-all scheme from the Notes above.
        self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
        Y = self._label_binarizer.fit_transform(y)
        if not self._label_binarizer.y_type_.startswith('multilabel'):
            y = column_or_1d(y, warn=True)
        if self.class_weight:
            if sample_weight is None:
                sample_weight = 1.
            # modify the sample weights with the corresponding class weight
            sample_weight = (sample_weight *
                             compute_sample_weight(self.class_weight, y))
        super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
        return self
    @property
    def classes_(self):
        # Classes seen during fit, as recorded by the LabelBinarizer.
        return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
    """Ridge regression with built-in Generalized Cross-Validation
    It allows efficient Leave-One-Out cross-validation.
    This class is not intended to be used directly. Use RidgeCV instead.
    Notes
    -----
    We want to solve (K + alpha*Id)c = y,
    where K = X X^T is the kernel matrix.
    Let G = (K + alpha*Id)^-1.
    Dual solution: c = Gy
    Primal solution: w = X^T c
    Compute eigendecomposition K = Q V Q^T.
    Then G = Q (V + alpha*Id)^-1 Q^T,
    where (V + alpha*Id) is diagonal.
    It is thus inexpensive to inverse for many alphas.
    Let loov be the vector of prediction values for each example
    when the model was fitted with all examples but this example.
    loov = (KGY - diag(KG)Y) / diag(I-KG)
    Let looe be the vector of prediction errors for each example
    when the model was fitted with all examples but this example.
    looe = y - loov = c / diag(G)
    References
    ----------
    http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
    http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
    """
    def __init__(self, alphas=(0.1, 1.0, 10.0),
                 fit_intercept=True, normalize=False,
                 scoring=None, copy_X=True,
                 gcv_mode=None, store_cv_values=False):
        self.alphas = np.asarray(alphas)
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.scoring = scoring
        self.copy_X = copy_X
        self.gcv_mode = gcv_mode
        self.store_cv_values = store_cv_values
    def _pre_compute(self, X, y):
        """Eigendecompose the linear kernel K = X X^T (gcv_mode='eigen')."""
        # even if X is very sparse, K is usually very dense
        K = safe_sparse_dot(X, X.T, dense_output=True)
        v, Q = linalg.eigh(K)
        QT_y = np.dot(Q.T, y)
        return v, Q, QT_y
    def _decomp_diag(self, v_prime, Q):
        # compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
        return (v_prime * Q ** 2).sum(axis=-1)
    def _diag_dot(self, D, B):
        # compute dot(diag(D), B)
        if len(B.shape) > 1:
            # handle case where B is > 1-d
            D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
        return D * B
    def _errors(self, alpha, y, v, Q, QT_y):
        """Squared LOO errors and dual coefficients for one alpha (eigen)."""
        # don't construct matrix G, instead compute action on y & diagonal
        w = 1.0 / (v + alpha)
        c = np.dot(Q, self._diag_dot(w, QT_y))
        G_diag = self._decomp_diag(w, Q)
        # handle case where y is 2-d
        if len(y.shape) != 1:
            G_diag = G_diag[:, np.newaxis]
        # looe = c / diag(G), see the class Notes
        return (c / G_diag) ** 2, c
    def _values(self, alpha, y, v, Q, QT_y):
        """LOO predictions and dual coefficients for one alpha (eigen)."""
        # don't construct matrix G, instead compute action on y & diagonal
        w = 1.0 / (v + alpha)
        c = np.dot(Q, self._diag_dot(w, QT_y))
        G_diag = self._decomp_diag(w, Q)
        # handle case where y is 2-d
        if len(y.shape) != 1:
            G_diag = G_diag[:, np.newaxis]
        return y - (c / G_diag), c
    def _pre_compute_svd(self, X, y):
        """Thin SVD of X (gcv_mode='svd'); dense input only."""
        if sparse.issparse(X):
            raise TypeError("SVD not supported for sparse matrices")
        U, s, _ = linalg.svd(X, full_matrices=0)
        v = s ** 2
        UT_y = np.dot(U.T, y)
        return v, U, UT_y
    def _errors_svd(self, alpha, y, v, U, UT_y):
        """Squared LOO errors and dual coefficients for one alpha (svd)."""
        # same quantities as _errors, expressed via the thin SVD of X
        w = ((v + alpha) ** -1) - (alpha ** -1)
        c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
        G_diag = self._decomp_diag(w, U) + (alpha ** -1)
        if len(y.shape) != 1:
            # handle case where y is 2-d
            G_diag = G_diag[:, np.newaxis]
        return (c / G_diag) ** 2, c
    def _values_svd(self, alpha, y, v, U, UT_y):
        """LOO predictions and dual coefficients for one alpha (svd)."""
        w = ((v + alpha) ** -1) - (alpha ** -1)
        c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
        G_diag = self._decomp_diag(w, U) + (alpha ** -1)
        if len(y.shape) != 1:
            # handle case when y is 2-d
            G_diag = G_diag[:, np.newaxis]
        return y - (c / G_diag), c
    def fit(self, X, y, sample_weight=None):
        """Fit Ridge regression model
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training data
        y : array-like, shape = [n_samples] or [n_samples, n_targets]
            Target values
        sample_weight : float or array-like of shape [n_samples]
            Sample weight
        Returns
        -------
        self : Returns self.
        """
        # NOTE(review): np.float is deprecated in newer numpy releases;
        # sibling code in this file uses np.float64 — confirm target version.
        X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
                         multi_output=True, y_numeric=True)
        n_samples, n_features = X.shape
        X, y, X_mean, y_mean, X_std = LinearModel._center_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X,
            sample_weight=sample_weight)
        gcv_mode = self.gcv_mode
        # truthy when sample_weight is array-like (len of its shape tuple)
        with_sw = len(np.shape(sample_weight))
        if gcv_mode is None or gcv_mode == 'auto':
            # svd requires dense X and no per-sample weights; eigen is the
            # fallback (and cheaper when n_features > n_samples).
            if sparse.issparse(X) or n_features > n_samples or with_sw:
                gcv_mode = 'eigen'
            else:
                gcv_mode = 'svd'
        elif gcv_mode == "svd" and with_sw:
            # FIXME non-uniform sample weights not yet supported
            warnings.warn("non-uniform sample weights unsupported for svd, "
                          "forcing usage of eigen")
            gcv_mode = 'eigen'
        if gcv_mode == 'eigen':
            _pre_compute = self._pre_compute
            _errors = self._errors
            _values = self._values
        elif gcv_mode == 'svd':
            # assert n_samples >= n_features
            _pre_compute = self._pre_compute_svd
            _errors = self._errors_svd
            _values = self._values_svd
        else:
            raise ValueError('bad gcv_mode "%s"' % gcv_mode)
        v, Q, QT_y = _pre_compute(X, y)
        n_y = 1 if len(y.shape) == 1 else y.shape[1]
        cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
        C = []
        scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
        # without a scorer, minimize the LOO squared errors directly
        error = scorer is None
        for i, alpha in enumerate(self.alphas):
            weighted_alpha = (sample_weight * alpha
                              if sample_weight is not None
                              else alpha)
            if error:
                out, c = _errors(weighted_alpha, y, v, Q, QT_y)
            else:
                out, c = _values(weighted_alpha, y, v, Q, QT_y)
            cv_values[:, i] = out.ravel()
            C.append(c)
        if error:
            best = cv_values.mean(axis=0).argmin()
        else:
            # The scorer want an object that will make the predictions but
            # they are already computed efficiently by _RidgeGCV. This
            # identity_estimator will just return them
            def identity_estimator():
                pass
            identity_estimator.decision_function = lambda y_predict: y_predict
            identity_estimator.predict = lambda y_predict: y_predict
            out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
                   for i in range(len(self.alphas))]
            best = np.argmax(out)
        self.alpha_ = self.alphas[best]
        self.dual_coef_ = C[best]
        # primal solution w = X^T c (see class Notes)
        self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
        self._set_intercept(X_mean, y_mean, X_std)
        if self.store_cv_values:
            if len(y.shape) == 1:
                cv_values_shape = n_samples, len(self.alphas)
            else:
                cv_values_shape = n_samples, n_y, len(self.alphas)
            self.cv_values_ = cv_values.reshape(cv_values_shape)
        return self
class _BaseRidgeCV(LinearModel):
    """Shared implementation for the cross-validated ridge estimators.

    With ``cv=None`` the efficient generalized (leave-one-out)
    cross-validation of ``_RidgeGCV`` is used; otherwise a plain grid
    search over ``alphas`` is run with the requested CV splitter.
    """
    def __init__(self, alphas=(0.1, 1.0, 10.0),
                 fit_intercept=True, normalize=False, scoring=None,
                 cv=None, gcv_mode=None,
                 store_cv_values=False):
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.scoring = scoring
        self.cv = cv
        self.gcv_mode = gcv_mode
        self.store_cv_values = store_cv_values
    def fit(self, X, y, sample_weight=None):
        """Fit Ridge regression model
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training data
        y : array-like, shape = [n_samples] or [n_samples, n_targets]
            Target values
        sample_weight : float or array-like of shape [n_samples]
            Sample weight
        Returns
        -------
        self : Returns self.
        """
        if self.cv is None:
            # Efficient leave-one-out GCV path.
            estimator = _RidgeGCV(self.alphas,
                                  fit_intercept=self.fit_intercept,
                                  normalize=self.normalize,
                                  scoring=self.scoring,
                                  gcv_mode=self.gcv_mode,
                                  store_cv_values=self.store_cv_values)
            estimator.fit(X, y, sample_weight=sample_weight)
            self.alpha_ = estimator.alpha_
            if self.store_cv_values:
                self.cv_values_ = estimator.cv_values_
        else:
            if self.store_cv_values:
                # cv_values_ is only produced by the GCV path.
                # (Fixed stray double space in the message.)
                raise ValueError("cv!=None and store_cv_values=True "
                                 "are incompatible")
            parameters = {'alpha': self.alphas}
            fit_params = {'sample_weight': sample_weight}
            gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
                              parameters, fit_params=fit_params, cv=self.cv)
            gs.fit(X, y)
            estimator = gs.best_estimator_
            self.alpha_ = gs.best_estimator_.alpha
        # Expose the winning estimator's solution on self.
        self.coef_ = estimator.coef_
        self.intercept_ = estimator.intercept_
        return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
    """Ridge regression with built-in cross-validation.
    By default, it performs Generalized Cross-Validation, which is a form of
    efficient Leave-One-Out cross-validation.
    Read more in the :ref:`User Guide <ridge_regression>`.
    Parameters
    ----------
    alphas : numpy array of shape [n_alphas]
        Array of alpha values to try.
        Small positive values of alpha improve the conditioning of the
        problem and reduce the variance of the estimates.
        Alpha corresponds to ``C^-1`` in other linear models such as
        LogisticRegression or LinearSVC.
    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 3-fold cross-validation,
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` used, else, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
        Flag indicating which strategy to use when performing
        Generalized Cross-Validation. Options are::
            'auto' : use 'eigen' if X is sparse or n_features > n_samples,
                     otherwise use 'svd'
            'svd' : force computation via singular value decomposition of X
                    (does not work for sparse matrices)
            'eigen' : force computation via eigendecomposition of X^T X
        The 'auto' mode is the default and is intended to pick the cheaper
        option of the two depending upon the shape and format of the training
        data.
    store_cv_values : boolean, default=False
        Flag indicating if the cross-validation values corresponding to
        each alpha should be stored in the `cv_values_` attribute (see
        below). This flag is only compatible with `cv=None` (i.e. using
        Generalized Cross-Validation).
    Attributes
    ----------
    cv_values_ : array, shape = [n_samples, n_alphas] or \
        shape = [n_samples, n_targets, n_alphas], optional
        Cross-validation values for each alpha (if `store_cv_values=True` and \
        `cv=None`). After `fit()` has been called, this attribute will \
        contain the mean squared errors (by default) or the values of the \
        `{loss,score}_func` function (if provided in the constructor).
    coef_ : array, shape = [n_features] or [n_targets, n_features]
        Weight vector(s).
    intercept_ : float | array, shape = (n_targets,)
        Independent term in decision function. Set to 0.0 if
        ``fit_intercept = False``.
    alpha_ : float
        Estimated regularization parameter.
    See also
    --------
    Ridge: Ridge regression
    RidgeClassifier: Ridge classifier
    RidgeClassifierCV: Ridge classifier with built-in cross validation
    """
    # Entirely inherited from _BaseRidgeCV; this class only adds the
    # RegressorMixin scoring behaviour and the public documentation.
    pass
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
    """Ridge classifier with built-in cross-validation.
    By default, it performs Generalized Cross-Validation, which is a form of
    efficient Leave-One-Out cross-validation. Currently, only the n_features >
    n_samples case is handled efficiently.
    Read more in the :ref:`User Guide <ridge_regression>`.
    Parameters
    ----------
    alphas : numpy array of shape [n_alphas]
        Array of alpha values to try.
        Small positive values of alpha improve the conditioning of the
        problem and reduce the variance of the estimates.
        Alpha corresponds to ``C^-1`` in other linear models such as
        LogisticRegression or LinearSVC.
    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the efficient Leave-One-Out cross-validation
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    class_weight : dict or 'balanced', optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``
    Attributes
    ----------
    cv_values_ : array, shape = [n_samples, n_alphas] or \
        shape = [n_samples, n_responses, n_alphas], optional
        Cross-validation values for each alpha (if `store_cv_values=True` and
        `cv=None`). After `fit()` has been called, this attribute will contain \
        the mean squared errors (by default) or the values of the \
        `{loss,score}_func` function (if provided in the constructor).
    coef_ : array, shape = [n_features] or [n_targets, n_features]
        Weight vector(s).
    intercept_ : float | array, shape = (n_targets,)
        Independent term in decision function. Set to 0.0 if
        ``fit_intercept = False``.
    alpha_ : float
        Estimated regularization parameter
    See also
    --------
    Ridge: Ridge regression
    RidgeClassifier: Ridge classifier
    RidgeCV: Ridge regression with built-in cross validation
    Notes
    -----
    For multi-class classification, n_class classifiers are trained in
    a one-versus-all approach. Concretely, this is implemented by taking
    advantage of the multi-variate response support in Ridge.
    """
    def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
                 normalize=False, scoring=None, cv=None, class_weight=None):
        super(RidgeClassifierCV, self).__init__(
            alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
            scoring=scoring, cv=cv)
        self.class_weight = class_weight
    def fit(self, X, y, sample_weight=None):
        """Fit the ridge classifier.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape (n_samples,)
            Target values.
        sample_weight : float or numpy array of shape (n_samples,)
            Sample weight.
        Returns
        -------
        self : object
            Returns self.
        """
        # Encode each class as a +/-1 indicator column; regressing on this
        # indicator matrix is the one-versus-all scheme from the Notes above.
        self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
        Y = self._label_binarizer.fit_transform(y)
        if not self._label_binarizer.y_type_.startswith('multilabel'):
            y = column_or_1d(y, warn=True)
        if self.class_weight:
            if sample_weight is None:
                sample_weight = 1.
            # modify the sample weights with the corresponding class weight
            sample_weight = (sample_weight *
                             compute_sample_weight(self.class_weight, y))
        _BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
        return self
    @property
    def classes_(self):
        # Classes seen during fit, as recorded by the LabelBinarizer.
        return self._label_binarizer.classes_
| |
import os
import re
import sys
from sipconstants import *
path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(path+'/../'))
from logConf import *
# one sample
# https://github.com/sparkslabs/kamaelia/blob/master/Code/Python/Kamaelia/Kamaelia/Protocol/SDP.py
class SdpMsg(dict):
    """Dictionary holding one parsed SDP session/media description.

    Every field this parser understands is pre-populated with a neutral
    default so later lookups never raise KeyError.
    """

    def __init__(self, *arg, **kw):
        super(SdpMsg, self).__init__(*arg, **kw)
        self['version'] = 0
        # o= line: originator and session identifiers
        self['origin'] = {'usrname': None, 'sessid': 0, 'sessver': 0,
                          'nettype': None, 'addrtype': None, 'uniaddr': None}
        self['sessname'] = None
        self['sessinfo'] = None
        self['uri'] = None
        self['email'] = None
        self['phone'] = None
        # c= line: connection data
        self['conndata'] = {'nettype': None, 'addrtype': None,
                            'connaddr': None}
        # b= line: bandwidth information
        self['bw'] = {'bwtype': None, 'bw': 0}
        # t= line: session start/stop times
        self['time'] = {'start': 0, 'end': 0}
        # repeat and timezone are not used and complex; kept as stubs
        self['repeat'] = {'rinterval': 0, 'activeduation': 0, 'offsets': 0}
        self['tz'] = list()  # adjust time + offset
        # k= line: encryption method and optional key
        self['encrypt'] = {'method': None, 'enkey': None}
        # a= lines: most complex part, collected verbatim
        self['attrlist'] = list()
        # m= line: media description
        self['media'] = {'media': None, 'port': 0, 'numports': 0,
                         'proto': None, 'fmt': None}
class SdpParser(object):
def __init__(self):
#blocks splited by media line
self.blocks = list()
self.sdpmsgs = list()
def parse(self, msg):
# 1. split the sdp audio/media part: m= line as the seperator
# 2. codec, payload type
# 3. precondition/stream direct
#
#scan the whole body and split
self.splitSdp(msg)
#start to parse
sdppattern = SipPattern['sdppattern']
sdpregex = re.compile(sdppattern)
for block in self.blocks:
onesdpmsg = SdpMsg()
for index, line in enumerate(block):
sdp = sdpregex.search(line)
if sdp:
type = sdp.group(1).strip()
value = sdp.group(2).strip()
print type + '=' + value
self.parsesdp(type, value, onesdpmsg)
self.sdpmsgs.append(onesdpmsg)
def splitSdp(self, msg):
start = 0
mediaregex = re.compile(SipPattern['mediapattern'])
for index, line in enumerate(msg):
if mediaregex.search(line):
print 'media line is ' + line
oneblock = msg[start:index]
self.blocks.append(oneblock)
start = index
oneblock = msg[start:len(msg)]
self.blocks.append(oneblock)
"""
for block in self.blocks:
print 'oneblock is ' +repr(block)
"""
def parsesdp(self, type, value, onesdpmsg):
if type == 'v':
onesdpmsg['version'] = value
elif type == 'o':
originpattern="(\S+) (\d+) (\d+) (IN) (IP[46]) (.+)"
origin = dict()
origin['usrname'] = None
origin['sessid'] = 0
origin['sessver'] = 0
origin['nettype'] = None
origin['addrtype'] = None
origin['uniaddr'] = None
origin['usrname'], origin['sessid'], origin['sessver'], origin['nettype'], origin['addrtype'], origin['uniaddr'] \
= re.match(originpattern, value).groups()
onesdpmsg['origin'] = origin
elif type == 's':
onesdpmsg['sessname'] = value
elif type == 'i':
onesdpmsg['sessinfo'] = value
elif type == 'u':
onesdpmsg['uri'] = value
elif type == 'e':
onesdpmsg['email'] = value
elif type == 'p':
onesdpmsg['email'] = value
elif type == 'c':
conndata = dict()
conndata['nettype'] = None
conndata['addrtype'] = None
conndata['connaddr'] = None
cpattern = "(IN) (IP[46]) (.+)"
conndata['nettype'],conndata['addrtype'], conndata['connaddr'] = re.match(cpattern, value).groups()
onesdpmsg['conndata'] = conndata
elif type == 'b':
bw = dict()
bw['bwtype'] = None
bw['bw'] = 0
bwpattern = "(\w+):(\d+)"
bw['bwtype'], bw['bw'] = re.match(bwpattern, value).groups()
onesdpmsg['bw'] = bw
elif type == 't':
time = dict()
time['start'] = 0
time['end'] = 0
tpattern = "(\d+) (\d+)"
time['start'], time['end'] = re.match(tpattern, value).groups()
onesdpmsg['time'] = time
elif type == 'k':
encrypt = dict()
encrypt['method'] = None
encrypt['enkey'] = None
kpattern="(\w+)(?::(.*))"
kgroups = re.match(kpattern, value).groups()
if kgroups:
encrypt['method'] = kgroups[0]
if kgroups[1]:
encrypt['enkey'] = kgroups[1]
onesdpmsg['encrypt'] = encrypt
elif type == 'm':
media = dict()
media['media'] = None
media['port'] = 0
media['numports'] = 1
media['proto'] = None
media['fmt'] = None
mpattern = "(\w+) (\d+)(?:[/](\d+))? ([^ ]+) (.+)"
media['media'], media['port'], media['numports'], media['proto'], media['fmt'] = re.match(mpattern, value).groups()
if not media['numports']:
media['numports'] = 1
onesdpmsg['media'] = media
elif type == 'a':
onesdpmsg['attrlist'].append(value)
def parseAttr(self):
pass
def parseMediaPayload(self):
pass
def getsdp(self):
return self.sdpmsgs
def dump(self):
for sdpmsg in self.sdpmsgs:
print repr(sdpmsg)
if __name__ == "__main__":
    # Ad-hoc smoke test: a typical VoLTE offer with one audio and one
    # video media description, fed through the parser and dumped.
    onesdp = [
        "v=0",
        "o=anritsu 809 951810 IN IP4 192.168.1.12",
        "s=-",
        "i=A VOIP Session",
        "c=IN IP4 192.168.1.12",
        "t=0 0",
        "m=audio 60000 RTP/AVP 96 97 98 99 100 8 0 101 102",
        "b=AS:153",
        "b=RS:800",
        "b=RR:2400",
        "a=ptime:20",
        "a=maxptime:240",
        "a=rtpmap:96 EVS/16000/1",
        "a=fmtp:96 max-red=220",
        "a=rtpmap:97 AMR-WB/16000/1",
        "a=fmtp:97 mode-change-capability=2",
        "a=rtpmap:98 AMR-WB/16000/1",
        "a=fmtp:98 octet-align=1; mode-change-capability=2",
        "a=rtpmap:99 AMR/8000/1",
        "a=fmtp:99 mode-change-capability=2",
        "a=rtpmap:100 AMR/8000/1",
        "a=fmtp:100 octet-align=1; mode-change-capability=2",
        "a=rtpmap:8 PCMA/8000/1",
        "a=rtpmap:0 PCMU/8000/1",
        "a=rtpmap:101 telephone-event/16000",
        "a=fmtp:101 0-15",
        "a=rtpmap:102 telephone-event/8000",
        "a=fmtp:102 0-15",
        "a=mid:0",
        "a=rtcp:60001",
        "a=curr:qos local none",
        "a=curr:qos remote none",
        "a=des:qos mandatory local sendrecv",
        "a=des:qos optional remote sendrecv",
        "a=sendrecv",
        "m=video 60002 RTP/AVP 113 114 34",
        "b=AS:416",
        "b=RR:2000",
        "b=RS:600",
        "a=rtpmap:113 H264/90000",
        "a=fmtp:113 profile-level-id=42000B;packetization-mode=0",
        "a=rtpmap:114 H264/90000",
        "a=fmtp:114 profile-level-id=42000B;packetization-mode=1",
        "a=rtpmap:34 H263/90000",
        "a=fmtp:34 profile=0;level=10",
        "a=sendrecv",
        "a=curr:qos local none",
        "a=curr:qos remote none",
        "a=des:qos mandatory local sendrecv",
        "a=des:qos optional remote sendrecv",
        "a=rtcp:60003",
    ]
    sp = SdpParser()
    sp.parse(msg=onesdp)
    sp.dump()
| |
##
# Copyright (c) 2008-2010 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##
import collections
from importkit.yaml import validator
from importkit.yaml.validator.tests.base import SchemaTest, raises, result
class TestTypes(SchemaTest):
    # NOTE: the triple-quoted strings on the test methods below are *data*,
    # not documentation — the @raises/@result decorators feed each method's
    # docstring to the YAML validator as the document under test.  Do not
    # add or edit those docstrings.
    def setUp(self):
        super().setUp()
        # Schema fixture against which every docstring document is validated.
        self.schema = self.get_schema('types.Schema')

    @raises(validator.SchemaValidationError, 'expected none')
    def test_validator_types_none_fail1(self):
        """
        none: '12'
        """

    @result(key='none', value=None)
    def test_validator_types_none_result(self):
        """
        none:
        """

    @raises(validator.SchemaValidationError, 'expected integer')
    def test_validator_types_int_fail1(self):
        """
        int: '12'
        """

    @raises(validator.SchemaValidationError, 'expected integer')
    def test_validator_types_int_fail2(self):
        """
        int: 123.2
        """

    @result(key='int', value=31415)
    def test_validator_types_int_result(self):
        """
        int: 31415
        """

    @raises(validator.SchemaValidationError, 'expected number (int or float)')
    def test_validator_types_number_fail1(self):
        """
        number: [123, 1]
        """

    @result(key='number', value=31415)
    def test_validator_types_number_int_result(self):
        """
        number: 31415
        """

    @result(key='number', value=31415.2)
    def test_validator_types_number_float_result(self):
        """
        number: 31415.2
        """

    @raises(validator.SchemaValidationError, 'expected text (number or str)')
    def test_validator_types_text_fail1(self):
        """
        text: [123, 1]
        """

    @result(key='text', value='31415')
    def test_validator_types_text_int_result(self):
        """
        text: 31415
        """

    @result(key='text', value='31415.123')
    def test_validator_types_text_float_result(self):
        """
        text: 31415.123
        """

    @result(key='bool', value=True)
    def test_validator_types_bool_yes_result(self):
        """
        bool: yes
        """

    @result(key='bool', value=True)
    def test_validator_types_bool_True_result(self):
        """
        bool: True
        """

    @result(key='bool', value=True)
    def test_validator_types_bool_true_result(self):
        """
        bool: true
        """

    @result(key='bool', value=False)
    def test_validator_types_bool_yes_result2(self):
        """
        bool: no
        """

    @result(key='bool', value=False)
    def test_validator_types_bool_True_result2(self):
        """
        bool: false
        """

    @raises(validator.SchemaValidationError, 'expected bool')
    def test_validator_types_bool_fail1(self):
        """
        bool: 1
        """

    @raises(validator.SchemaValidationError, 'expected bool')
    def test_validator_types_bool_fail2(self):
        """
        bool: 'yes'
        """

    @raises(validator.SchemaValidationError, 'mapping expected')
    def test_validator_types_map_fail1(self):
        """
        dict: 'WRONG'
        """

    @raises(validator.SchemaValidationError, "unexpected key 'wrongkey'")
    def test_validator_types_map_fail2(self):
        """
        dict:
            wrongkey: 1
        """

    @result(key='dict', value={'test1': 3, 'test2': 'a'})
    def test_validator_types_map_defaults(self):
        """
        dict:
        """

    @raises(validator.SchemaValidationError, 'the number of elements in mapping must not be less than 2')
    def test_validator_types_map_constraints1(self):
        """
        fdict:
            a: "1"
        """

    @raises(validator.SchemaValidationError, 'the number of elements in mapping must not exceed 3')
    def test_validator_types_map_constraints2(self):
        """
        fdict:
            a: "1"
            b: "2"
            c: "3"
            d: "4"
        """

    @result(key='fdict', value={'a': "1", 'b': "2"})
    def test_validator_types_map_constraints_ok(self):
        """
        fdict:
            a: "1"
            b: "2"
        """

    @raises(validator.SchemaValidationError, "duplicate mapping key 'A'")
    def test_validator_types_map_duplicate_key_check(self):
        """
        fdict:
            A: "1"
            A: "2"
        """

    @result(key='fdict', value={'a': "1", ('b', 'c'): "2"})
    def test_validator_types_map_nonscalar_key(self):
        """
        fdict:
            a: "1"
            [b, c]: "2"
        """

    @result(key='redict', value={'UPPERCASE': 10, 'lowercase': '10', '12345': True})
    def test_validator_type_map_pattern_key_ok(self):
        """
        redict:
            UPPERCASE: 10
            lowercase: '10'
        """

    @raises(validator.SchemaValidationError, "unexpected key '1'")
    def test_validator_type_map_pattern_key_fail(self):
        """
        redict:
            1: 10
        """

    @result(key='minmax', value=3)
    def test_validator_types_int_minmax(self):
        """
        minmax: 3
        """

    @raises(validator.SchemaValidationError, 'range-min validation failed')
    def test_validator_types_int_minmax_fail(self):
        """
        minmax: 2
        """

    @raises(validator.SchemaValidationError, 'range-max-ex validation failed')
    def test_validator_types_int_minmax_fail2(self):
        """
        minmax: 20
        """

    @result(key='odict', value=collections.OrderedDict([('A', 1), ('B', 2), ('C', 3), ('D', 4)]))
    def test_validator_types_ordered_map(self):
        """
        odict:
            A: 1
            B: 2
            C: 3
            D: 4
        """
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Models to be used when accessing app specific datastore usage statistics.
These entities cannot be created by users, but are populated in the
application's datastore by offline processes run by the Google App Engine team.
"""
from google.appengine.ext import db
class BaseStatistic(db.Model):
  """Base Statistic Model class.

  Attributes:
    bytes: the total number of bytes taken up in the datastore for the
        statistic instance.
    count: the total number of occurrences of the statistic
        in the datastore.
    timestamp: the time the statistic instance was written to the datastore.
  """
  # Subclasses override this with the reserved '__Stat_*__' kind name under
  # which their statistic entities are stored.
  STORED_KIND_NAME = '__BaseStatistic__'

  bytes = db.IntegerProperty()
  count = db.IntegerProperty()
  timestamp = db.DateTimeProperty()

  @classmethod
  def kind(cls):
    """Kind name override."""
    return cls.STORED_KIND_NAME
class BaseKindStatistic(BaseStatistic):
  """Base Statistic Model class for stats associated with kinds.

  Attributes:
    kind_name: the name of the kind associated with the statistic instance.
    entity_bytes: the number of bytes taken up to store the statistic
        in the datastore minus the cost of storing indices.
  """
  STORED_KIND_NAME = '__BaseKindStatistic__'

  kind_name = db.StringProperty()
  entity_bytes = db.IntegerProperty(default=0)
class GlobalStat(BaseStatistic):
  """An aggregate of all entities across the entire application.

  This statistic only has a single instance in the datastore that contains the
  total number of entities stored and the total number of bytes they take up.

  Attributes:
    entity_bytes: the number of bytes taken up to store the statistic
        in the datastore minus the cost of storing indices.
    builtin_index_bytes: the number of bytes taken up to store built-in
        index entries.
    builtin_index_count: the number of built-in index entries.
    composite_index_bytes: the number of bytes taken up to store composite
        index entries.
    composite_index_count: the number of composite index entries.
  """
  STORED_KIND_NAME = '__Stat_Total__'

  entity_bytes = db.IntegerProperty(default=0)
  builtin_index_bytes = db.IntegerProperty(default=0)
  builtin_index_count = db.IntegerProperty(default=0)
  composite_index_bytes = db.IntegerProperty(default=0)
  composite_index_count = db.IntegerProperty(default=0)
class NamespaceStat(BaseStatistic):
  """An aggregate of all entities across an entire namespace.

  This statistic has one instance per namespace.  The key_name is the
  represented namespace. NamespaceStat entities will only be found
  in the namespace "" (empty string). It contains the total
  number of entities stored and the total number of bytes they take up.

  Attributes:
    subject_namespace: the namespace associated with the statistic instance.
    entity_bytes: the number of bytes taken up to store the statistic
        in the datastore minus the cost of storing indices.
    builtin_index_bytes: the number of bytes taken up to store built-in
        index entries.
    builtin_index_count: the number of built-in index entries.
    composite_index_bytes: the number of bytes taken up to store composite
        index entries.
    composite_index_count: the number of composite index entries.
  """
  STORED_KIND_NAME = '__Stat_Namespace__'

  subject_namespace = db.StringProperty()
  entity_bytes = db.IntegerProperty(default=0)
  builtin_index_bytes = db.IntegerProperty(default=0)
  builtin_index_count = db.IntegerProperty(default=0)
  composite_index_bytes = db.IntegerProperty(default=0)
  composite_index_count = db.IntegerProperty(default=0)
class KindStat(BaseKindStatistic):
  """An aggregate of all entities at the granularity of their Kind.

  There is an instance of the KindStat for every Kind that is in the
  application's datastore.  This stat contains per-Kind statistics.

  Attributes:
    builtin_index_bytes: the number of bytes taken up to store built-in
        index entries.
    builtin_index_count: the number of built-in index entries.
    composite_index_bytes: the number of bytes taken up to store composite
        index entries.
    composite_index_count: the number of composite index entries.
  """
  STORED_KIND_NAME = '__Stat_Kind__'

  builtin_index_bytes = db.IntegerProperty(default=0)
  builtin_index_count = db.IntegerProperty(default=0)
  composite_index_bytes = db.IntegerProperty(default=0)
  composite_index_count = db.IntegerProperty(default=0)
class KindRootEntityStat(BaseKindStatistic):
  """Statistics of the number of root entities in the datastore by Kind.

  There is an instance of the KindRootEntityStat for every Kind that is in the
  application's datastore and has an instance that is a root entity.  This stat
  contains statistics regarding these root entity instances.
  """
  STORED_KIND_NAME = '__Stat_Kind_IsRootEntity__'
class KindNonRootEntityStat(BaseKindStatistic):
  """Statistics of the number of non root entities in the datastore by Kind.

  There is an instance of the KindNonRootEntityStat for every Kind that is in
  the application's datastore that is not a root entity.  This stat contains
  statistics regarding these non root entity instances.
  """
  STORED_KIND_NAME = '__Stat_Kind_NotRootEntity__'
class PropertyTypeStat(BaseStatistic):
  """An aggregate of all properties across the entire application by type.

  There is an instance of the PropertyTypeStat for every property type
  (google.appengine.api.datastore_types._PROPERTY_TYPES) in use by the
  application in its datastore.

  Attributes:
    property_type: the property type associated with the statistic instance.
    entity_bytes: the number of bytes taken up to store the statistic
        in the datastore minus the cost of storing indices.
    builtin_index_bytes: the number of bytes taken up to store built-in
        index entries.
    builtin_index_count: the number of built-in index entries.
  """
  STORED_KIND_NAME = '__Stat_PropertyType__'

  property_type = db.StringProperty()
  entity_bytes = db.IntegerProperty(default=0)
  builtin_index_bytes = db.IntegerProperty(default=0)
  builtin_index_count = db.IntegerProperty(default=0)
class KindPropertyTypeStat(BaseKindStatistic):
  """Statistics on (kind, property_type) tuples in the app's datastore.

  There is an instance of the KindPropertyTypeStat for every
  (kind, property_type) tuple in the application's datastore.

  Attributes:
    property_type: the property type associated with the statistic instance.
    builtin_index_bytes: the number of bytes taken up to store built-in
        index entries.
    builtin_index_count: the number of built-in index entries.
  """
  STORED_KIND_NAME = '__Stat_PropertyType_Kind__'

  property_type = db.StringProperty()
  builtin_index_bytes = db.IntegerProperty(default=0)
  builtin_index_count = db.IntegerProperty(default=0)
class KindPropertyNameStat(BaseKindStatistic):
  """Statistics on (kind, property_name) tuples in the app's datastore.

  There is an instance of the KindPropertyNameStat for every
  (kind, property_name) tuple in the application's datastore.

  Attributes:
    property_name: the name of the property associated with the statistic
        instance.
    builtin_index_bytes: the number of bytes taken up to store built-in
        index entries.
    builtin_index_count: the number of built-in index entries.
  """
  STORED_KIND_NAME = '__Stat_PropertyName_Kind__'

  property_name = db.StringProperty()
  builtin_index_bytes = db.IntegerProperty(default=0)
  builtin_index_count = db.IntegerProperty(default=0)
class KindPropertyNamePropertyTypeStat(BaseKindStatistic):
  """Statistic on (kind, property_name, property_type) tuples in the datastore.

  There is an instance of the KindPropertyNamePropertyTypeStat for every
  (kind, property_name, property_type) tuple in the application's datastore.

  Attributes:
    property_type: the property type associated with the statistic instance.
    property_name: the name of the property associated with the statistic
        instance.
    builtin_index_bytes: the number of bytes taken up to store built-in
        index entries.
    builtin_index_count: the number of built-in index entries.
  """
  STORED_KIND_NAME = '__Stat_PropertyType_PropertyName_Kind__'

  property_type = db.StringProperty()
  property_name = db.StringProperty()
  builtin_index_bytes = db.IntegerProperty(default=0)
  builtin_index_count = db.IntegerProperty(default=0)
class KindCompositeIndexStat(BaseStatistic):
  """Statistic on (kind, composite_index_id) tuples in the datastore.

  There is an instance of the KindCompositeIndexStat for every unique
  (kind, composite_index_id) tuple in the application's datastore indexes.

  Attributes:
    index_id: the id of the composite index associated with the statistic
        instance.
    kind_name: the name of the kind associated with the statistic instance.
  """
  STORED_KIND_NAME = '__Stat_Kind_CompositeIndex__'

  index_id = db.IntegerProperty()
  kind_name = db.StringProperty()
class NamespaceGlobalStat(GlobalStat):
  """GlobalStat equivalent for a specific namespace.

  These may be found in each specific namespace and represent stats for
  that particular namespace.
  """
  # Same schema as GlobalStat; only the stored kind name differs.
  STORED_KIND_NAME = '__Stat_Ns_Total__'
class NamespaceKindStat(KindStat):
  """KindStat equivalent for a specific namespace.

  These may be found in each specific namespace and represent stats for
  that particular namespace.
  """
  # Same schema as KindStat; only the stored kind name differs.
  STORED_KIND_NAME = '__Stat_Ns_Kind__'
class NamespaceKindRootEntityStat(KindRootEntityStat):
  """KindRootEntityStat equivalent for a specific namespace.

  These may be found in each specific namespace and represent stats for
  that particular namespace.
  """
  # Same schema as KindRootEntityStat; only the stored kind name differs.
  STORED_KIND_NAME = '__Stat_Ns_Kind_IsRootEntity__'
class NamespaceKindNonRootEntityStat(KindNonRootEntityStat):
  """KindNonRootEntityStat equivalent for a specific namespace.

  These may be found in each specific namespace and represent stats for
  that particular namespace.
  """
  # Same schema as KindNonRootEntityStat; only the stored kind name differs.
  STORED_KIND_NAME = '__Stat_Ns_Kind_NotRootEntity__'
class NamespacePropertyTypeStat(PropertyTypeStat):
  """PropertyTypeStat equivalent for a specific namespace.

  These may be found in each specific namespace and represent stats for
  that particular namespace.
  """
  # Same schema as PropertyTypeStat; only the stored kind name differs.
  STORED_KIND_NAME = '__Stat_Ns_PropertyType__'
class NamespaceKindPropertyTypeStat(KindPropertyTypeStat):
  """KindPropertyTypeStat equivalent for a specific namespace.

  These may be found in each specific namespace and represent stats for
  that particular namespace.
  """
  # Same schema as KindPropertyTypeStat; only the stored kind name differs.
  STORED_KIND_NAME = '__Stat_Ns_PropertyType_Kind__'
class NamespaceKindPropertyNameStat(KindPropertyNameStat):
  """KindPropertyNameStat equivalent for a specific namespace.

  These may be found in each specific namespace and represent stats for
  that particular namespace.
  """
  # Same schema as KindPropertyNameStat; only the stored kind name differs.
  STORED_KIND_NAME = '__Stat_Ns_PropertyName_Kind__'
class NamespaceKindPropertyNamePropertyTypeStat(
    KindPropertyNamePropertyTypeStat):
  """KindPropertyNamePropertyTypeStat equivalent for a specific namespace.

  These may be found in each specific namespace and represent stats for
  that particular namespace.
  """
  # Same schema as the base class; only the stored kind name differs.
  STORED_KIND_NAME = '__Stat_Ns_PropertyType_PropertyName_Kind__'
class NamespaceKindCompositeIndexStat(KindCompositeIndexStat):
  """KindCompositeIndexStat equivalent for a specific namespace.

  These may be found in each specific namespace and represent stats for
  that particular namespace.
  """
  # Same schema as KindCompositeIndexStat; only the stored kind name differs.
  STORED_KIND_NAME = '__Stat_Ns_Kind_CompositeIndex__'
# Maps each reserved stored kind name ('__Stat_*__') to the stat model class
# that represents entities of that kind, for lookup when decoding stats.
_DATASTORE_STATS_CLASSES_BY_KIND = {
    GlobalStat.STORED_KIND_NAME: GlobalStat,
    NamespaceStat.STORED_KIND_NAME: NamespaceStat,
    KindStat.STORED_KIND_NAME: KindStat,
    KindRootEntityStat.STORED_KIND_NAME: KindRootEntityStat,
    KindNonRootEntityStat.STORED_KIND_NAME: KindNonRootEntityStat,
    PropertyTypeStat.STORED_KIND_NAME: PropertyTypeStat,
    KindPropertyTypeStat.STORED_KIND_NAME: KindPropertyTypeStat,
    KindPropertyNameStat.STORED_KIND_NAME: KindPropertyNameStat,
    KindPropertyNamePropertyTypeStat.STORED_KIND_NAME:
        KindPropertyNamePropertyTypeStat,
    KindCompositeIndexStat.STORED_KIND_NAME: KindCompositeIndexStat,
    NamespaceGlobalStat.STORED_KIND_NAME: NamespaceGlobalStat,
    NamespaceKindStat.STORED_KIND_NAME: NamespaceKindStat,
    NamespaceKindRootEntityStat.STORED_KIND_NAME: NamespaceKindRootEntityStat,
    NamespaceKindNonRootEntityStat.STORED_KIND_NAME:
        NamespaceKindNonRootEntityStat,
    NamespacePropertyTypeStat.STORED_KIND_NAME: NamespacePropertyTypeStat,
    NamespaceKindPropertyTypeStat.STORED_KIND_NAME:
        NamespaceKindPropertyTypeStat,
    NamespaceKindPropertyNameStat.STORED_KIND_NAME:
        NamespaceKindPropertyNameStat,
    NamespaceKindPropertyNamePropertyTypeStat.STORED_KIND_NAME:
        NamespaceKindPropertyNamePropertyTypeStat,
    NamespaceKindCompositeIndexStat.STORED_KIND_NAME:
        NamespaceKindCompositeIndexStat,
}
| |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import datetime
import logging
import random
import sys
import traceback
from catapult_base import cloud_storage
from telemetry.internal.results import json_output_formatter
from telemetry.internal.results import progress_reporter as reporter_module
from telemetry.internal.results import story_run
from telemetry import value as value_module
from telemetry.value import failure
from telemetry.value import skip
from telemetry.value import trace
class PageTestResults(object):
  """Accumulates values, failures and trace artifacts for a set of story runs.

  Fix over the original: CleanUp() removed items from run.values while
  iterating the same list, which silently skipped the element immediately
  following every removed TraceValue; it now iterates over a snapshot.
  """

  def __init__(self, output_formatters=None,
               progress_reporter=None, trace_tag='', output_dir=None,
               value_can_be_added_predicate=lambda v, is_first: True):
    """
    Args:
      output_formatters: A list of output formatters. The output
          formatters are typically used to format the test results, such
          as CsvPivotTableOutputFormatter, which output the test results as CSV.
      progress_reporter: An instance of progress_reporter.ProgressReporter,
          to be used to output test status/results progressively.
      trace_tag: A string to append to the buildbot trace name. Currently only
          used for buildbot.
      output_dir: A string specified the directory where to store the test
          artifacts, e.g: trace, videos,...
      value_can_be_added_predicate: A function that takes two arguments:
          a value.Value instance (except failure.FailureValue, skip.SkipValue
          or trace.TraceValue) and a boolean (True when the value is part of
          the first result for the story). It returns True if the value
          can be added to the test results and False otherwise.
    """
    # TODO(chrishenry): Figure out if trace_tag is still necessary.
    super(PageTestResults, self).__init__()
    self._progress_reporter = (
        progress_reporter if progress_reporter is not None
        else reporter_module.ProgressReporter())
    self._output_formatters = (
        output_formatters if output_formatters is not None else [])
    self._trace_tag = trace_tag
    self._output_dir = output_dir
    self._value_can_be_added_predicate = value_can_be_added_predicate

    self._current_page_run = None
    self._all_page_runs = []
    self._all_stories = set()
    self._representative_value_for_each_value_name = {}
    self._all_summary_values = []
    self._serialized_trace_file_ids_to_paths = {}
    self._pages_to_profiling_files = collections.defaultdict(list)
    self._pages_to_profiling_files_cloud_url = collections.defaultdict(list)

  def __copy__(self):
    cls = self.__class__
    result = cls.__new__(cls)
    for k, v in self.__dict__.items():
      # Shallow-copy container attributes so the copy does not share
      # mutable state (lists, dicts, sets) with the original.
      if isinstance(v, collections.Container):
        v = copy.copy(v)
      setattr(result, k, v)
    return result

  @property
  def pages_to_profiling_files(self):
    return self._pages_to_profiling_files

  @property
  def serialized_trace_file_ids_to_paths(self):
    return self._serialized_trace_file_ids_to_paths

  @property
  def pages_to_profiling_files_cloud_url(self):
    return self._pages_to_profiling_files_cloud_url

  @property
  def all_page_specific_values(self):
    """Values from all completed runs plus the in-progress run, if any."""
    values = []
    for run in self._all_page_runs:
      values += run.values
    if self._current_page_run:
      values += self._current_page_run.values
    return values

  @property
  def all_summary_values(self):
    return self._all_summary_values

  @property
  def current_page(self):
    assert self._current_page_run, 'Not currently running test.'
    return self._current_page_run.story

  @property
  def current_page_run(self):
    assert self._current_page_run, 'Not currently running test.'
    return self._current_page_run

  @property
  def all_page_runs(self):
    return self._all_page_runs

  @property
  def pages_that_succeeded(self):
    """Returns the set of pages that succeeded."""
    pages = set(run.story for run in self.all_page_runs)
    pages.difference_update(self.pages_that_failed)
    return pages

  @property
  def pages_that_failed(self):
    """Returns the set of failed pages."""
    failed_pages = set()
    for run in self.all_page_runs:
      if run.failed:
        failed_pages.add(run.story)
    return failed_pages

  @property
  def failures(self):
    values = self.all_page_specific_values
    return [v for v in values if isinstance(v, failure.FailureValue)]

  @property
  def skipped_values(self):
    values = self.all_page_specific_values
    return [v for v in values if isinstance(v, skip.SkipValue)]

  def _GetStringFromExcInfo(self, err):
    return ''.join(traceback.format_exception(*err))

  def CleanUp(self):
    """Clean up any TraceValues contained within this results object."""
    for run in self._all_page_runs:
      # BUG FIX: iterate over a snapshot of run.values.  The original
      # removed elements from the list while iterating it directly, which
      # skips the element following each removed TraceValue and could leave
      # some traces uncleaned.
      for v in list(run.values):
        if isinstance(v, trace.TraceValue):
          v.CleanUp()
          run.values.remove(v)

  def __enter__(self):
    return self

  def __exit__(self, _, __, ___):
    self.CleanUp()

  def WillRunPage(self, page):
    assert not self._current_page_run, 'Did not call DidRunPage.'
    self._current_page_run = story_run.StoryRun(page)
    self._progress_reporter.WillRunPage(self)

  def DidRunPage(self, page):  # pylint: disable=unused-argument
    """
    Args:
      page: The current page under test.
    """
    assert self._current_page_run, 'Did not call WillRunPage.'
    self._progress_reporter.DidRunPage(self)
    self._all_page_runs.append(self._current_page_run)
    self._all_stories.add(self._current_page_run.story)
    self._current_page_run = None

  def AddValue(self, value):
    assert self._current_page_run, 'Not currently running test.'
    self._ValidateValue(value)
    is_first_result = (
        self._current_page_run.story not in self._all_stories)
    # Skip/failure/trace values are always recorded; other values are subject
    # to the caller-provided predicate.
    if not (isinstance(value, skip.SkipValue) or
            isinstance(value, failure.FailureValue) or
            isinstance(value, trace.TraceValue) or
            self._value_can_be_added_predicate(value, is_first_result)):
      return
    # TODO(eakuefner/chrishenry): Add only one skip per pagerun assert here
    self._current_page_run.AddValue(value)
    self._progress_reporter.DidAddValue(value)

  def AddProfilingFile(self, page, file_handle):
    self._pages_to_profiling_files[page].append(file_handle)

  def AddSummaryValue(self, value):
    assert value.page is None
    self._ValidateValue(value)
    self._all_summary_values.append(value)

  def _ValidateValue(self, value):
    assert isinstance(value, value_module.Value)
    if value.name not in self._representative_value_for_each_value_name:
      self._representative_value_for_each_value_name[value.name] = value
    representative_value = self._representative_value_for_each_value_name[
        value.name]
    assert value.IsMergableWith(representative_value)

  def PrintSummary(self):
    self._progress_reporter.DidFinishAllTests(self)

    # Only serialize the trace if output_format is json.
    if (self._output_dir and
        any(isinstance(o, json_output_formatter.JsonOutputFormatter)
            for o in self._output_formatters)):
      self._SerializeTracesToDirPath(self._output_dir)
    for output_formatter in self._output_formatters:
      output_formatter.Format(self)

  def FindValues(self, predicate):
    """Finds all values matching the specified predicate.

    Args:
      predicate: A function that takes a Value and returns a bool.
    Returns:
      A list of values matching |predicate|.
    """
    values = []
    for value in self.all_page_specific_values:
      if predicate(value):
        values.append(value)
    return values

  def FindPageSpecificValuesForPage(self, page, value_name):
    return self.FindValues(lambda v: v.page == page and v.name == value_name)

  def FindAllPageSpecificValuesNamed(self, value_name):
    return self.FindValues(lambda v: v.name == value_name)

  def FindAllPageSpecificValuesFromIRNamed(self, tir_label, value_name):
    return self.FindValues(lambda v: v.name == value_name
                           and v.tir_label == tir_label)

  def FindAllTraceValues(self):
    return self.FindValues(lambda v: isinstance(v, trace.TraceValue))

  def _SerializeTracesToDirPath(self, dir_path):
    """ Serialize all trace values to files in dir_path and return a list of
    file handles to those files. """
    for value in self.FindAllTraceValues():
      fh = value.Serialize(dir_path)
      self._serialized_trace_file_ids_to_paths[fh.id] = fh.GetAbsPath()

  def UploadTraceFilesToCloud(self, bucket):
    for value in self.FindAllTraceValues():
      value.UploadToCloud(bucket)

  def UploadProfilingFilesToCloud(self, bucket):
    for page, file_handle_list in self._pages_to_profiling_files.iteritems():
      for file_handle in file_handle_list:
        remote_path = ('profiler-file-id_%s-%s%-d%s' % (
            file_handle.id,
            datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'),
            random.randint(1, 100000),
            file_handle.extension))
        try:
          cloud_url = cloud_storage.Insert(
              bucket, remote_path, file_handle.GetAbsPath())
          sys.stderr.write(
              'View generated profiler files online at %s for page %s\n' %
              (cloud_url, page.display_name))
          self._pages_to_profiling_files_cloud_url[page].append(cloud_url)
        except cloud_storage.PermissionError as e:
          logging.error('Cannot upload profiling files to cloud storage due to '
                        ' permission error: %s' % e.message)
| |
try:
set
except NameError:
from sets import Set as set
from django import template
from django.http import Http404
from django.core.paginator import Paginator, InvalidPage
from django.conf import settings
register = template.Library()
DEFAULT_PAGINATION = getattr(settings, 'PAGINATION_DEFAULT_PAGINATION', 20)
DEFAULT_WINDOW = getattr(settings, 'PAGINATION_DEFAULT_WINDOW', 4)
DEFAULT_ORPHANS = getattr(settings, 'PAGINATION_DEFAULT_ORPHANS', 0)
INVALID_PAGE_RAISES_404 = getattr(settings,
'PAGINATION_INVALID_PAGE_RAISES_404', False)
def do_autopaginate(parser, token):
    """
    Splits the arguments to the autopaginate tag and formats them correctly.

    Accepted forms:
        {% autopaginate queryset [paginate_by [orphans]] [as context_var] %}

    Raises template.TemplateSyntaxError on malformed usage.
    """
    split = token.split_contents()
    as_index = None
    context_var = None
    for i, bit in enumerate(split):
        if bit == 'as':
            as_index = i
            break
    if as_index is not None:
        try:
            context_var = split[as_index + 1]
        except IndexError:
            # BUG FIX: the message must be a single literal so that the
            # '% split[0]' interpolation applies to the whole string.  The
            # original applied '%' to only the last fragment of a string
            # concatenation (which had no conversion specifier), raising
            # TypeError instead of the intended TemplateSyntaxError.
            raise template.TemplateSyntaxError(
                "Context variable assignment must take the form of "
                "{%% %r object.example_set.all ... as context_var_name %%}"
                % split[0])
        del split[as_index:as_index + 2]
    if len(split) == 2:
        return AutoPaginateNode(split[1])
    elif len(split) == 3:
        return AutoPaginateNode(split[1], paginate_by=split[2],
            context_var=context_var)
    elif len(split) == 4:
        try:
            orphans = int(split[3])
        except ValueError:
            raise template.TemplateSyntaxError('Got %s, but expected integer.'
                % split[3])
        return AutoPaginateNode(split[1], paginate_by=split[2], orphans=orphans,
            context_var=context_var)
    else:
        # BUG FIX: same broken '%'-on-concatenation pattern as above.
        raise template.TemplateSyntaxError(
            '%r tag takes one required argument and one optional argument'
            % split[0])
class AutoPaginateNode(template.Node):
    """
    Emits the required objects to allow for Digg-style pagination.

    First, it looks in the current context for the variable specified, and using
    that object, it emits a simple ``Paginator`` and the current page object
    into the context names ``paginator`` and ``page_obj``, respectively.

    It will then replace the variable specified with only the objects for the
    current page.

    .. note::

        It is recommended to use *{% paginate %}* after using the autopaginate
        tag. If you choose not to use *{% paginate %}*, make sure to display the
        list of available pages, or else the application may seem to be buggy.
    """
    def __init__(self, queryset_var, paginate_by=DEFAULT_PAGINATION,
        orphans=DEFAULT_ORPHANS, context_var=None):
        self.queryset_var = template.Variable(queryset_var)
        # paginate_by may be a literal int (the module default) or the name
        # of a context variable to resolve at render time.
        if isinstance(paginate_by, int):
            self.paginate_by = paginate_by
        else:
            self.paginate_by = template.Variable(paginate_by)
        self.orphans = orphans
        self.context_var = context_var

    def render(self, context):
        key = self.queryset_var.var
        value = self.queryset_var.resolve(context)
        if isinstance(self.paginate_by, int):
            paginate_by = self.paginate_by
        else:
            paginate_by = self.paginate_by.resolve(context)
        paginator = Paginator(value, paginate_by, self.orphans)
        try:
            # NOTE(review): assumes `request.page` is set by pagination
            # middleware — confirm the middleware is installed.
            page_obj = paginator.page(context['request'].page)
        except InvalidPage:
            if INVALID_PAGE_RAISES_404:
                raise Http404('Invalid page requested. If DEBUG were set to ' +
                    'False, an HTTP 404 page would have been shown instead.')
            # Graceful degradation: empty result set + a flag the template
            # can inspect.
            context[key] = []
            context['invalid_page'] = True
            return ''
        if self.context_var is not None:
            context[self.context_var] = page_obj.object_list
        else:
            context[key] = page_obj.object_list
        context['paginator'] = paginator
        context['page_obj'] = page_obj
        return ''
def paginate(context, window=DEFAULT_WINDOW, hashtag=''):
    """
    Renders the ``pagination/pagination.html`` template, resulting in a
    Digg-like display of the available pages, given the current page. If there
    are too many pages to be displayed before and after the current page, then
    elipses will be used to indicate the undisplayed gap between page numbers.

    Requires one argument, ``context``, which should be a dictionary-like data
    structure and must contain the following keys:

    ``paginator``
        A ``Paginator`` or ``QuerySetPaginator`` object.

    ``page_obj``
        This should be the result of calling the page method on the
        aforementioned ``Paginator`` or ``QuerySetPaginator`` object, given
        the current page.

    This same ``context`` dictionary-like data structure may also include:

    ``getvars``
        A dictionary of all of the **GET** parameters in the current request.
        This is useful to maintain certain types of state, even when requesting
        a different page.
    """
    try:
        paginator = context['paginator']
        page_obj = context['page_obj']
        page_range = paginator.page_range
        # Calculate the record range in the current page for display.
        records = {'first': 1 + (page_obj.number - 1) * paginator.per_page}
        records['last'] = records['first'] + paginator.per_page - 1
        if records['last'] + paginator.orphans >= paginator.count:
            records['last'] = paginator.count
        # First and last are simply the first *n* pages and the last *n* pages,
        # where *n* is the current window size.
        first = set(page_range[:window])
        last = set(page_range[-window:])
        # Now we look around our current page, making sure that we don't wrap
        # around.
        current_start = page_obj.number - 1 - window
        if current_start < 0:
            current_start = 0
        current_end = page_obj.number - 1 + window
        if current_end < 0:
            current_end = 0
        current = set(page_range[current_start:current_end])
        pages = []
        # If there's no overlap between the first set of pages and the current
        # set of pages, then there's a possible need for elusion.
        if len(first.intersection(current)) == 0:
            first_list = list(first)
            first_list.sort()
            second_list = list(current)
            second_list.sort()
            pages.extend(first_list)
            diff = second_list[0] - first_list[-1]
            # If there is a gap of two, between the last page of the first
            # set and the first page of the current set, then we're missing a
            # page.
            if diff == 2:
                pages.append(second_list[0] - 1)
            # If the difference is just one, then there's nothing to be done,
            # as the pages need no elusion and are correct.
            elif diff == 1:
                pass
            # Otherwise, there's a bigger gap which needs to be signaled for
            # elusion, by pushing a None value to the page list.
            else:
                pages.append(None)
            pages.extend(second_list)
        else:
            unioned = list(first.union(current))
            unioned.sort()
            pages.extend(unioned)
        # If there's no overlap between the current set of pages and the last
        # set of pages, then there's a possible need for elusion.
        if len(current.intersection(last)) == 0:
            second_list = list(last)
            second_list.sort()
            diff = second_list[0] - pages[-1]
            # If there is a gap of two, between the last page of the current
            # set and the first page of the last set, then we're missing a
            # page.
            if diff == 2:
                pages.append(second_list[0] - 1)
            # If the difference is just one, then there's nothing to be done,
            # as the pages need no elusion and are correct.
            elif diff == 1:
                pass
            # Otherwise, there's a bigger gap which needs to be signaled for
            # elusion, by pushing a None value to the page list.
            else:
                pages.append(None)
            pages.extend(second_list)
        else:
            differenced = list(last.difference(current))
            differenced.sort()
            pages.extend(differenced)
        to_return = {
            'MEDIA_URL': settings.MEDIA_URL,
            'pages': pages,
            'records': records,
            'page_obj': page_obj,
            'paginator': paginator,
            'hashtag': hashtag,
            'is_paginated': paginator.count > paginator.per_page,
        }
        if 'request' in context:
            getvars = context['request'].GET.copy()
            if 'page' in getvars:
                del getvars['page']
            if len(list(getvars.keys())) > 0:
                to_return['getvars'] = "&%s" % getvars.urlencode()
            else:
                to_return['getvars'] = ''
        return to_return
    # BUG FIX: the original read `except KeyError as AttributeError:`, which
    # only caught KeyError while rebinding the builtin name AttributeError to
    # the exception instance.  The intent (render nothing on malformed
    # context) is to catch both exception types.
    except (KeyError, AttributeError):
        return {}
# Expose `paginate` as an inclusion tag (renders pagination/pagination.html
# with access to the parent template context) and register the
# `{% autopaginate %}` tag.
register.inclusion_tag('pagination/pagination.html', takes_context=True)(
    paginate)
register.tag('autopaginate', do_autopaginate)
| |
from datetime import datetime
from flask import render_template
import json
import requests
import pandas as pd
from settings import API_URL
# Show full cell contents in rendered tables: None disables column-width
# truncation (the old -1 sentinel was deprecated and removed in pandas >= 1.0).
pd.set_option('display.max_colwidth', None)
def convert_times(date_from, date_to):
    """Parse two ISO ``YYYY-MM-DD`` date strings into datetime objects.

    Returns a ``(start, end)`` tuple of ``datetime`` instances.
    """
    fmt = '%Y-%m-%d'
    start = datetime.strptime(str(date_from), fmt)
    end = datetime.strptime(str(date_to), fmt)
    return start, end
# with open('mapping.json') as data_file:
# mapping = json.load(data_file)
def get_first_n(label, first_n, sep=None, last_n=None):
    """Shorten a separated label to a prefix or suffix of its parts.

    Splits ``label`` on ``sep`` (default ``"."``). When ``last_n`` is
    truthy, keeps the parts from index ``last_n`` onwards; otherwise keeps
    the first ``first_n`` parts. The kept parts are re-joined with ``sep``.
    """
    delimiter = sep or "."
    parts = label.split(delimiter)
    kept = parts[last_n:] if last_n else parts[:first_n]
    return delimiter.join(kept)
def _get_data_as_json(uri):
    """GET ``uri`` and decode the body as JSON; None on a non-2xx response."""
    print("Request to TR-API", uri)
    response = requests.get(uri)
    if not response.ok:
        return None
    return json.loads(response.text)
def _get_data_as_dataframe(uri):
    """GET ``uri`` and wrap the decoded JSON in a DataFrame.

    Falsy payloads (failed request, empty response) are returned unchanged.
    """
    payload = _get_data_as_json(uri)
    return pd.DataFrame(payload) if payload else payload
def get_jobs():
    """Fetch all jenkins jobs from the report API as a DataFrame."""
    return _get_data_as_dataframe("{}/jobs/".format(API_URL))
def get_builds(job_label):
    """Fetch completed builds for ``job_label`` and normalise the frame.

    Splits the ``name`` column ("job:...:build") into ``job``/``build``
    columns, spreads the requested extra ``data`` fields into columns, and
    derives a ``date`` column from the millisecond ``timestamp``.
    """
    # get only completed builds
    data_fields = "timestamp,result"
    uri = "{}/builds/?building=0&job_label={}&data_fields={}".format(
        API_URL, job_label, data_fields)
    df = _get_data_as_dataframe(uri)
    # remove records without labels; they cannot be grouped by version later
    df = df.dropna(subset=['label'])
    df['temp'] = df['name'].apply(lambda x: str({"build": x.split(':')[-1], "job": ":".join(x.split(':')[:-1])}))
    df = pd_spread_to_columns(df, 'temp')
    # data_fields is a non-empty literal above, so the extra fields are
    # always spread (the original `if data_fields:` guard was dead code).
    df = pd_spread_to_columns(df, 'data')
    # timestamps arrive in milliseconds; convert to datetimes
    df['date'] = df['timestamp'].apply(
        lambda x: pd.to_datetime(int(x)/1000, unit='s'))
    return df
def create_product_list(df):
    """Aggregate per-label job rows into a list of product summary dicts."""
    summary_cols = ["short_name", "url_y", "failCount", "passCount", "total"]
    products = []
    for label in df['label'].unique():
        per_label = df.loc[df["label"] == label, summary_cols]
        totals = per_label.sum()
        failed = int(totals['failCount'])
        tot = failed + int(totals['passCount'])
        products.append({
            "name": label,
            "jobs": per_label.to_dict(orient='list'),
            "failed": failed,
            "tot": tot,
        })
    return products
def pd_spread_to_columns(df, col_name):
    """Expand a column of (stringified) dicts into one column per key.

    Each value in ``df[col_name]`` is either a dict or the ``str()`` of
    one. Every key becomes its own column; keys that collide with an
    existing column are prefixed with ``<col_name>_``. The source column
    is dropped and ``df`` is returned (mutated in place).
    """
    import ast
    existing_cols = df.columns
    # ast.literal_eval only accepts Python literals, unlike the original
    # eval() which would execute arbitrary expressions from the API.
    parsed = df[col_name].apply(
        lambda v: v if isinstance(v, dict) else ast.literal_eval(v))
    for key in parsed.iloc[0].keys():
        new_key = "{}_{}".format(col_name, key) if key in existing_cols else key
        df[new_key] = parsed.apply(lambda row: row[key])
    del df[col_name]
    return df
def pd_embed_url(df, col_name, url_col_name, new_tab=True):
    """Replace ``col_name`` values with HTML anchors to ``url_col_name``.

    The URL column is consumed (deleted) in the process. When ``new_tab``
    is true the link opens in a new browser tab.
    """
    closing = "\" target=\"_blank\">" if new_tab else "\">"
    anchors = ("<a href=\"" + df[url_col_name].astype(str) + closing
               + df[col_name].astype(str) + "</a>")
    df[col_name] = anchors
    del df[url_col_name]
    return df
def pd_to_html(df, **kwargs):
    """Render ``df`` as a Bootstrap-styled HTML table.

    An optional ``cols`` kwarg restricts (and orders) the columns; any
    remaining kwargs are forwarded to ``DataFrame.to_html``.
    """
    selected = kwargs.get('cols')
    if selected:
        df = df[selected]
        del kwargs['cols']
    return df.to_html(
        justify="left",
        index=False,
        classes="table table-striped table-hover table-condensed",
        border=0,
        **kwargs)
def get_test_reports(names=None):
    """Fetch the latest test reports, optionally restricted to ``names``.

    :param names: build names on the report server
    :return: DataFrame with the ``data`` fields spread into columns plus a
        derived ``total`` (= failCount + passCount) column.
    """
    fields = "suites,failCount,skipCount,duration,passCount"
    uri = "{}/test_reports/?data_only=1&last=1&data_fields={}".format(API_URL, fields)
    if names:
        uri = "{}&names={}".format(uri, ",".join(names))
    reports = pd_spread_to_columns(_get_data_as_dataframe(uri), 'data')
    reports["total"] = reports["failCount"] + reports["passCount"]
    return reports
def get_test_suites(name):
    """Fetch all test cases of report ``name`` as one flat DataFrame.

    Example:
    /api/jenkins/test_reports/<test_report_name>/cases?cases_fields=status%2Cname%2Cduration%2CclassName

    Each case row is tagged with its ``suite_name`` and gets an integer
    ``T, s`` duration column. Returns None when the report cannot be
    fetched.
    """
    options = "status,duration,name,className,age,errorDetails"
    uri = "{}/test_reports/{}/cases?cases_fields={}".format(API_URL, name, options)
    suites = _get_data_as_json(uri)
    if not suites:
        return None
    cases = [dict(case, suite_name=suite['name'])
             for suite in suites
             for case in suite['cases']]
    df = pd.DataFrame(cases)
    df['T, s'] = df['duration'].astype(int)
    return df
def home_view():
    """Render the index page listing all jenkins jobs with their results."""
    jobs = get_jobs()
    if not isinstance(jobs, pd.DataFrame):
        # API unreachable or empty: show a placeholder product instead.
        placeholder = {"name": "Looks like no jenkins jobs are defined",
                       "jobs": []}
        return render_template('index.html', products=[placeholder])
    jobs["short_name"] = jobs["name"].apply(lambda x: x.split(":")[-1])
    reports = get_test_reports()
    merged = pd.merge(jobs,
                      reports,
                      how="left",
                      left_on="name",
                      right_on="job")
    return render_template('index.html', products=create_product_list(merged))
def platform_view(name):
    """Render per-branch build/test-report summaries for platform ``name``.

    TODO:
    1. get all jobs for label Maglev
    2. get all builds for given job list
    3. group results by version (label at build level)

    :param name: platform / job label to report on
    :return: rendered platform.html
    """
    df_builds_all = get_builds(job_label=name)
    # Branch = first three dot-separated components of the version label.
    df_builds_all['branch'] = df_builds_all['label'].apply(get_first_n, args=(3,), sep=".")
    # TODO retain only latest build that has test results
    # TODO the latest build might not have test results
    df_builds = df_builds_all.groupby(['branch', 'job']).max().reset_index()
    # now populate with test results
    build_names = df_builds['name'].tolist()
    df_test_reports = get_test_reports(names=build_names)
    # NOTE(review): args=(-1,) keeps parts[:-1], i.e. drops the last
    # colon-separated component of the report name — confirm report names
    # carry a trailing component that must be stripped to match build names.
    df_test_reports['build_name'] = df_test_reports['name'].apply(get_first_n, args=(-1,), sep=":")
    # Left merge: overlapping columns get _x (builds) / _y (reports) suffixes,
    # which is why name_x / name_y are referenced below.
    df_builds = pd.merge(df_builds,
                         df_test_reports,
                         how="left",
                         left_on="name",
                         right_on="build_name")
    # df_builds = pd_embed_url(df_builds, 'name_x', 'url_x')
    df_builds['tr_url'] = df_builds.apply(lambda x: "/platform/{}/label/{}/test_report/{}".format(name, x['label'], x['name_y']), axis=1)
    df_builds = pd_embed_url(df_builds, 'name_x', 'tr_url', new_tab=False)
    df_builds = df_builds.sort_values(['label', 'failCount'], ascending=False)
    # print(df_builds.iloc[0, :])
    cols = ["label", "name_x", "failCount", "total", "duration", "date", "result"]
    branches = df_builds['branch'].unique()
    builds_htmls = list()
    for branch in branches:
        df = df_builds[df_builds['branch'] == branch]
        failed = df['failCount'].sum()
        total = df['total'].sum()
        # escape=False keeps the anchor tags embedded above clickable.
        builds_html = pd_to_html(df, escape=False, cols=cols)
        builds_htmls.append(
            {
                "branch": branch,
                "failed": failed,
                "total": total,
                "html": builds_html
            }
        )
    return render_template('platform.html', name=name, data=builds_htmls)
def branch_view(platform, name):
    """Render the branch detail page for ``name`` under ``platform``."""
    context = {'platform': platform, 'name': name}
    return render_template('branch.html', **context)
def test_report_view(platform, label, name):
    """Render the failed/regressed cases of test report ``name``."""
    suites = get_test_suites(name)
    if suites is None:
        table_html = ""
    else:
        failing = suites[suites["status"].isin(["FAILED", "REGRESSION"])]
        failing = failing.sort_values(['className', 'name'], ascending=True)
        #TODO map className to Components
        show = ["age", "status", "name", "T, s", "errorDetails"]
        table_html = pd_to_html(failing, cols=show, escape=True)
    return render_template('test_report.html', platform=platform, label=label,
                           name=name, suites_html=table_html)
| |
#!/usr/bin/python
# Copyright 2013 the Neutrino authors (see AUTHORS).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
# Utilities for parsing command-line options into plankton.
from . import codec
import collections
import re
import sys
class OptionSyntaxError(Exception):
    """Raised when command-line option parsing fails at ``token``."""

    def __init__(self, token):
        super(OptionSyntaxError, self).__init__(token)
        # Keep the offending token around for callers to inspect.
        self.token = token
class Token(object):
    """An individual command-line argument token."""

    _VALUE = 'value'
    _PUNCTUATION = 'punctuation'
    _END = 'end'

    def __init__(self, type, value):
        self.type = type
        self.value = value

    def is_value(self):
        """Is this an atomic value token?"""
        return self.type == Token._VALUE

    def is_punctuation(self, value):
        """Is this punctuation equal to ``value``?"""
        return self.is_any_punctuation() and (self.value == value)

    def is_any_punctuation(self):
        """Is this any punctuation token?"""
        return self.type == Token._PUNCTUATION

    def is_end(self):
        """Is this the end-of-input token?"""
        return self.type == Token._END
class AbstractScanner(object):
    """Cursor-based scanning over a sequence of elements.

    Works both for characters in a string and tokens in a list.
    """

    def __init__(self, elements):
        self.elements = elements
        self.cursor = 0

    def has_more(self):
        """Is there unconsumed input left?"""
        return self.cursor < len(self.elements)

    def current(self):
        """The element under the cursor."""
        return self.elements[self.cursor]

    def advance(self):
        """Step the cursor one element forward."""
        assert self.has_more()
        self.cursor += 1

    def slice(self, start, end):
        """The elements between ``start`` (inclusive) and ``end`` (exclusive)."""
        return self.elements[start:end]
class Tokenizer(AbstractScanner):
    """Chops raw command-line source (options and arguments) into tokens."""

    _PUNCTUATORS = '[],():'
    _DOUBLE_PUNCTUATORS = '-{}'
    _WHITESPACE = re.compile(r'\s')
    _SYMBOL_EXTRA_CHARS = '_-/.'
    _RESERVED_SYMBOLS = {
        'null': None,
        'true': True,
        'false': False
    }

    def __init__(self, source):
        super(Tokenizer, self).__init__(source)
        self.skip_whitespace()

    def skip_whitespace(self):
        """Advance past any whitespace at the current position."""
        while self.has_more() and self.is_whitespace(self.current()):
            self.advance()

    def is_whitespace(self, c):
        """Is the given character whitespace?"""
        return Tokenizer._WHITESPACE.match(c)

    def is_symbol(self, c):
        """May the given character appear inside a symbol?"""
        return c.isalpha() or c.isdigit() or (c in Tokenizer._SYMBOL_EXTRA_CHARS)

    def scan_next_token(self):
        """Scan and return the next token, advancing past it."""
        assert self.has_more()
        c = self.current()
        if c.isdigit():
            token = self.scan_next_number()
        elif c == '"':
            token = self.scan_next_string()
        elif c in Tokenizer._PUNCTUATORS:
            token = Token(Token._PUNCTUATION, c)
            self.advance()
        elif c in Tokenizer._DOUBLE_PUNCTUATORS:
            token = self.scan_next_double_punctuator()
        elif self.is_symbol(c):
            token = self.scan_next_symbol()
        else:
            raise self.new_syntax_error()
        self.skip_whitespace()
        return token

    def scan_next_number(self):
        """Scan a run of digits into an integer value token."""
        begin = self.cursor
        while self.has_more() and self.current().isdigit():
            self.advance()
        return Token(Token._VALUE, int(self.slice(begin, self.cursor)))

    def scan_next_symbol(self):
        """Scan a symbol; 'null'/'true'/'false' become their Python values."""
        begin = self.cursor
        while self.has_more() and self.is_symbol(self.current()):
            self.advance()
        text = self.slice(begin, self.cursor)
        if text in Tokenizer._RESERVED_SYMBOLS:
            text = Tokenizer._RESERVED_SYMBOLS[text]
        return Token(Token._VALUE, text)

    def scan_next_string(self):
        """Scan a double-quoted string; unterminated strings are errors."""
        assert self.current() == '"'
        self.advance()
        begin = self.cursor
        while self.has_more() and self.current() != '"':
            self.advance()
        if not self.has_more():
            raise self.new_syntax_error()
        end = self.cursor
        assert self.current() == '"'
        self.advance()
        return Token(Token._VALUE, self.slice(begin, end))

    def scan_next_double_punctuator(self):
        """Scan '-'/'{'/'}' singly or doubled ('--', '{{', '}}')."""
        c = self.current()
        self.advance()
        if self.has_more() and self.current() == c:
            self.advance()
            return Token(Token._PUNCTUATION, c + c)
        return Token(Token._PUNCTUATION, c)

    def new_syntax_error(self):
        """An OptionSyntaxError pointing at the current character."""
        return OptionSyntaxError(self.current())

    # Returns a list of the tokens from the given source string.
    @staticmethod
    def tokenize(source):
        scanner = Tokenizer(source)
        tokens = []
        while scanner.has_more():
            tokens.append(scanner.scan_next_token())
        return tokens
def value_to_string(value):
    """Render a parsed option value back into command-line syntax.

    Ordered dicts become ``{--k v ...}`` groups, lists ``[a b ...]``;
    anything else falls back to ``str``.
    """
    if isinstance(value, collections.OrderedDict):
        entries = ' '.join('--%s %s' % (value_to_string(k), value_to_string(v))
                           for (k, v) in value.items())
        return '{%s}' % entries
    if isinstance(value, list):
        return '[%s]' % ' '.join(value_to_string(e) for e in value)
    return str(value)
# An option element representing a positional argument (as opposed to a flag).
@codec.serializable(codec.EnvironmentReference.path('options', 'ArgumentElement'))
class ArgumentElement(object):

    @codec.field('value')
    def __init__(self, value):
        self.value = value

    def apply(self, options):
        # Positional arguments accumulate in order on the options object.
        options.arguments.append(self.value)

    def __eq__(self, that):
        return isinstance(that, ArgumentElement) and (self.value == that.value)

    def __hash__(self):
        return hash(self.value)

    def __str__(self):
        return '%s' % value_to_string(self.value)
# An option element representing a --key value flag.
@codec.serializable(codec.EnvironmentReference.path('options', 'FlagElement'))
class FlagElement(object):

    @codec.field('key')
    @codec.field('value')
    def __init__(self, key, value):
        self.key = key
        self.value = value

    def apply(self, options):
        # Flags are recorded by key on the options object.
        options.flags[self.key] = self.value

    def __eq__(self, that):
        if not isinstance(that, FlagElement):
            return False
        return (self.key == that.key) and (self.value == that.value)

    def __hash__(self):
        # Hash the printed form of the value rather than id(): the original
        # id()-based hash broke the hash/eq contract (equal elements hashed
        # differently), and values may be unhashable lists/dicts.
        return hash(self.key) ^ hash(value_to_string(self.value))

    def __str__(self):
        if self.value is None:
            return '--%s' % value_to_string(self.key)
        else:
            return '--%s %s' % (value_to_string(self.key), value_to_string(self.value))
# A collection of command-line options. An options object provides access to
# the arguments as a list, the flags by name, and a more naked view of the
# options as a list of elements, each either an argument or a flag.
@codec.serializable(codec.EnvironmentReference.path('options', 'Options'))
class Options(object):

    @codec.field('elements')
    def __init__(self, elements=None):
        self.elements = [] if elements is None else elements

    def add_argument(self, value):
        """Append a positional argument element."""
        self.elements.append(ArgumentElement(value))

    def add_flag(self, key, value):
        """Append a --key value flag element."""
        self.elements.append(FlagElement(key, value))

    def get_flags(self):
        """Attribute-style accessor over the flags."""
        self.ensure_processed()
        return self.flags_proxy

    def get_flag_map(self):
        """The flags as an ordered dict."""
        self.ensure_processed()
        return self.flags

    def get_arguments(self):
        """The positional arguments as a list."""
        self.ensure_processed()
        return self.arguments

    def ensure_processed(self):
        """Lazily split the elements into flags and arguments (runs once)."""
        if hasattr(self, 'has_been_processed') and self.has_been_processed:
            return
        self.has_been_processed = True
        outer = self

        class FlagsProxy(object):
            # Exposes flags as attributes, defaulting to None when absent.
            def __getattr__(self, name):
                return self.get(name, None)

            def get(self, name, default=None):
                return outer.get_flag_value(name, default)

            def __contains__(self, name):
                return name in outer.flags

        self.flags = collections.OrderedDict()
        self.flags_proxy = FlagsProxy()
        self.arguments = []
        for element in self.elements:
            element.apply(self)

    def get_flag_value(self, name, default):
        """Flag lookup that also retries snake_case names as kebab-case."""
        if name in self.flags:
            return self.flags[name]
        if '_' in name:
            return self.get_flag_value(name.replace('_', '-'), default)
        return default

    def base64_encode(self):
        """Encode this option set as 'p64/'-prefixed base64 plankton."""
        encoder = codec.Encoder()
        return "p64/%s" % encoder.base64encode(self)

    @staticmethod
    def base64_decode(string):
        """Decode a 'p64/' string into Options; pass anything else through."""
        if string.startswith('p64/'):
            decoder = codec.Decoder()
            return decoder.base64decode(string[4:])
        return string

    def __str__(self):
        return '{{%s}}' % " ".join(map(str, self.elements))

    def __eq__(self, that):
        if not isinstance(that, Options):
            return False
        if len(self.elements) != len(that.elements):
            return False
        return all(mine == theirs
                   for (mine, theirs) in zip(self.elements, that.elements))

    def __hash__(self):
        return hash(tuple(self.elements))
# Command-line option parser.
class Parser(AbstractScanner):
    """Recursive-descent parser over the token list built by Tokenizer."""

    def __init__(self, tokens):
        super(Parser, self).__init__(tokens)

    def has_more(self):
        """More input means the end token has not been reached yet."""
        return not self.current().is_end()

    def expect_punctuation(self, value):
        """Consume the given punctuation or raise a syntax error."""
        if not self.current().is_punctuation(value):
            raise self.new_syntax_error()
        self.advance()

    # <options> -> <option>*
    def parse_options(self):
        options = Options()
        while self.has_more() and not self.current().is_punctuation('}}'):
            self.parse_option(options)
        return options

    # <nested options> -> "{{" <options> "}}"
    def parse_nested_options(self):
        self.expect_punctuation('{{')
        nested = self.parse_options()
        self.expect_punctuation('}}')
        return nested

    # <option>
    #   -> <expression>
    #   -> <map entry>
    def parse_option(self, options):
        if self.current().is_punctuation('--'):
            (key, value) = self.parse_map_entry()
            options.add_flag(key, value)
        else:
            options.add_argument(self.parse_expression())

    # <expression> -> <atomic expression>
    def parse_expression(self):
        return self.parse_atomic_expression()

    def at_atomic_expression(self):
        """Does the current token start an atomic expression?"""
        token = self.current()
        if token.is_value():
            return True
        if not token.is_any_punctuation():
            return False
        return token.value in ['[', '{', '(', '{{']

    # <atomic expression>
    #   -> <value> | <list> | <map> | <nested options> | "(" <expression> ")"
    def parse_atomic_expression(self):
        token = self.current()
        if token.is_value():
            return self.parse_value()
        if token.is_punctuation('['):
            return self.parse_list()
        if token.is_punctuation('{'):
            return self.parse_map()
        if token.is_punctuation('{{'):
            return self.parse_nested_options()
        if token.is_punctuation('('):
            self.expect_punctuation('(')
            inner = self.parse_expression()
            self.expect_punctuation(')')
            return inner
        raise self.new_syntax_error()

    # <value>
    def parse_value(self):
        assert self.current().is_value()
        value = self.current().value
        self.advance()
        return value

    # <list> -> "[" <atomic expression>* "]"
    def parse_list(self):
        self.expect_punctuation('[')
        items = []
        while not self.current().is_punctuation(']'):
            items.append(self.parse_atomic_expression())
        self.expect_punctuation(']')
        return items

    # <map> -> "{" <map entry>* "}"
    def parse_map(self):
        self.expect_punctuation('{')
        entries = collections.OrderedDict()
        while not self.current().is_punctuation('}'):
            (key, value) = self.parse_map_entry()
            entries[key] = value
        self.expect_punctuation('}')
        return entries

    # <map entry> -> <key> <atomic expression>?
    def parse_map_entry(self):
        key = self.parse_key()
        if self.at_atomic_expression():
            value = self.parse_atomic_expression()
        else:
            # A bare flag with no value (e.g. "--verbose").
            value = None
        return (key, value)

    # <key> -> "--" <atomic expression>
    def parse_key(self):
        self.expect_punctuation('--')
        return self.parse_atomic_expression()

    def new_syntax_error(self):
        """An OptionSyntaxError pointing at the current token's value."""
        return OptionSyntaxError(self.current().value)
# Tokenizes the given list of arguments.
def tokenize_arguments(args):
    """Tokenize each argument string and terminate with an end token."""
    tokens = []
    for arg in args:
        tokens.extend(Tokenizer.tokenize(arg))
    tokens.append(Token(Token._END, None))
    return tokens
# Parses the given arguments, returning an Options object describing the
# contents.
def parse(args):
    return Parser(tokenize_arguments(args)).parse_options()
| |
"""Script to execute CPU and get a best move using Minimax algorithm"""
import copy
from common import board_full, win
OPP = [1, 0]
def eval_rc(board, player, glength, roc):
    """Score all rows (roc == "r") or columns (roc == "c") for ``player``.

    A line's score multiplies tenfold per additional mark by the same
    side and is zeroed (with an early exit) once both sides occupy the
    line; empty cells leave the running score untouched.

    Fixes two defects in the original: column scoring built a transposed
    ``clone_board`` but then inspected the untransposed ``board``, and the
    loops used the Python-2-only ``xrange``.
    """
    opp = 1 - player  # the opposing player's id (players are 0 and 1)
    lines = board
    if roc == "c":
        # Transpose so that columns can be scanned exactly like rows.
        lines = [[board[j][i] for j in range(glength)] for i in range(glength)]
    score_sum = 0
    for line in lines:
        score = 1 if line[0] == player else -1
        for cell in line[1:]:
            if cell == player and score > 0:
                score = score * 10
            elif cell == player and score < 0:
                # Contested line: worthless to either side.
                score = 0
                break
            elif cell == player:
                score = 1
            elif cell == opp and score < 0:
                score = score * 10
            elif cell == opp and score > 0:
                score = 0
                break
            elif cell == opp:
                score = 1
        score_sum = score_sum + score
    return score_sum
def eval_diags(board, player, glength):
    """Returns the combined score of both diagonals for ``player``."""
    opp = 1 - player  # the opposing player's id (players are 0 and 1)
    # Main diagonal: an empty corner starts at 0, an opponent corner at -1.
    corner = board[0][0]
    score = 1 if corner == player else (-1 if corner == opp else 0)
    for i in range(1, glength):
        cell = board[i][i]
        if cell == player and score > 0:
            score *= 10
        elif cell == player and score < 0:
            score = 0
            break
        elif cell == player:
            score = 1
        elif cell == opp and score < 0:
            score *= 10
        elif cell == opp and score > 0:
            score = 0
            break
        elif cell == opp:
            score = 1
    total = score
    # Anti-diagonal: any non-player corner (even empty) starts at -1.
    score = 1 if board[glength - 1][0] == player else -1
    for i in range(1, glength):
        cell = board[glength - i - 1][i]
        if cell == player and score > 0:
            score *= 10
        elif cell == player and score < 0:
            score = 0
            break
        elif cell == player:
            score = 1
        elif cell == opp and score < 0:
            score *= 10
        elif cell == opp and score > 0:
            score = 0
            break
        elif cell == opp:
            score = 1
    return total + score
def evaluate(board, player, glength):
    """Total positional score: rows + columns + diagonals for ``player``."""
    return (eval_rc(board, player, glength, "r")
            + eval_rc(board, player, glength, "c")
            + eval_diags(board, player, glength))
def get_moves(board, glength):
    """List every empty cell (-1) as a [row, col] move, row-major order."""
    return [[r, c]
            for r in range(glength)
            for c in range(glength)
            if board[r][c] == -1]
def gen_board(board, player, pos):
    """Return a deep copy of ``board`` with ``player``'s mark at ``pos``."""
    row, col = pos
    new_board = copy.deepcopy(board)
    new_board[row][col] = player
    return new_board
def if_second_move(board, glength):
    """True while at most one mark has been placed on the board.

    Uses range() rather than the Python-2-only xrange() so the module
    also runs under Python 3; returns False as soon as a second mark
    (cell value 0 or 1) is seen.
    """
    marks = 0
    for i in range(glength):
        for j in range(glength):
            if board[i][j] == 0 or board[i][j] == 1:
                marks += 1
                if marks > 1:
                    return False
    return True
def minimax(board, player, depth, glength):
    """Returns the best move for the CPU.

    Order of play: trivial cases (single move / opening reply), an
    immediate winning move, a block of the opponent's immediate win, and
    finally the move maximising the minimax score at ``depth``.
    """
    moves = get_moves(board, glength)
    if not moves:
        return None
    if len(moves) == 1 or if_second_move(board, glength):
        return moves[0]
    # Take an immediate win when one exists.
    for move in moves:
        if win(gen_board(board, player, move), player, glength):
            return move
    # Otherwise block the opponent's immediate win.
    for move in moves:
        if win(gen_board(board, OPP[player], move), OPP[player], glength):
            return move
    # No forced line: search. (The original duplicated the win scan and
    # re-checked wins inside this loop; those checks could never fire
    # because the scans above already returned, so they were dropped.)
    best_move = moves[0]
    best_score = 0.0
    for move in moves:
        clone_board = gen_board(board, player, move)
        score = min_play(clone_board, OPP[player], depth, glength)
        if score > best_score:
            best_score = score
            best_move = move
    return best_move
def min_play(board, player, depth, glength):
    """Worst (minimum) achievable score for the minimising ``player``."""
    moves = get_moves(board, glength)
    if not moves or depth == 0:
        # Leaf: fall back to the static evaluation.
        return evaluate(board, player, glength)
    best_score = float('inf')
    for move in moves:
        child = gen_board(board, player, move)
        if win(child, player, glength):
            return evaluate(child, player, glength)
        best_score = min(best_score,
                         max_play(child, OPP[player], depth - 1, glength))
    return best_score
def max_play(board, player, depth, glength):
    """Best (maximum) achievable score for the maximising ``player``."""
    moves = get_moves(board, glength)
    if not moves or depth == 0:
        # Leaf: fall back to the static evaluation.
        return evaluate(board, player, glength)
    best_score = float('-inf')
    for move in moves:
        clone_board = gen_board(board, player, move)
        if win(clone_board, player, glength):
            return evaluate(clone_board, player, glength)
        # Alternate plies: the opponent minimises next. (The original
        # recursed into max_play again, so turns never alternated.)
        score = min_play(clone_board, OPP[player], depth - 1, glength)
        if score > best_score:
            best_score = score
    return best_score
| |
import itertools
import logging
import os
import sys
from contextlib import closing
import flask
from flask import render_template
from flask import request
from packaging.version import parse
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
class PluginResult(Base):
    """Results of testing a pytest plugin against a pytest and python version."""
    __tablename__ = "results"
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Plugin name and version under test (both indexed for lookups).
    name = Column(String, index=True)
    version = Column(String, index=True)
    # Python environment the test ran under, e.g. "py38".
    env = Column(String)
    # pytest version tested against, e.g. "6.0.1".
    pytest = Column(String)
    # "ok" or "fail".
    status = Column(String)
    # Raw output from running the tox commands.
    output = Column(String)
    # Package description (may be empty).
    description = Column(String, default="")
    def as_dict(self):
        # Plain-dict view of the user-facing columns (the id is omitted).
        return {
            "name": self.name,
            "version": self.version,
            "env": self.env,
            "pytest": self.pytest,
            "status": self.status,
            "output": self.output,
            "description": self.description,
        }
    def __repr__(self):
        attrs = [f"{k}={v!r}" for k, v in self.as_dict().items()]
        return f"PluginResult({', '.join(attrs)})"
    def __eq__(self, other):
        # Equality is by column values, not database identity.
        if not isinstance(other, PluginResult):
            return NotImplemented
        return self.as_dict() == other.as_dict()
app = flask.Flask("plugincompat")
def get_python_versions():
    """Python versions we are willing to display on the page.

    Restricting the set hides old and incomplete results.
    """
    return {"py36", "py37", "py38"}
def get_pytest_versions():
    """Pytest versions we are willing to display (see get_python_versions)."""
    return {"6.0.1"}
class PlugsStorage:
    """
    API around the SQL database (via SQLAlchemy) used to add and obtain
    test results for pytest plugins.
    """
    def __init__(self, url=None):
        # Falls back to the DATABASE_URL environment variable.
        url = url or os.environ["DATABASE_URL"]
        self._engine = create_engine(url)
        # Create the schema on first use if it does not exist yet.
        Base.metadata.create_all(self._engine)
        self._session_maker = sessionmaker(autocommit=False, autoflush=False, bind=self._engine)
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Release pooled connections when used as a context manager.
        self._engine.dispose()
    def add_test_result(self, payload):
        """
        :param payload: adds results from a compatibility test for a pytest plugin.
        The results is given as a dict containing the following keys:
        * "name": name of the library;
        * "version": version of the library;
        * "env": python environment of the test. Examples: "py27", "py32", "py33".
        * "pytest": pytest version of the test. Examples: "2.3.5"
        * "status": "ok" or "fail".
        * "output": string with output from running tox commands.
        * "description": description of this package (optional).
        """
        expected = {"name", "version", "env", "pytest", "status"}
        if not expected.issubset(payload):
            raise TypeError("Invalid keys given: %s" % payload.keys())
        with closing(self._session_maker()) as session:
            # Upsert: update the existing row for (name, version, env,
            # pytest) or insert a new one.
            result = (
                session.query(PluginResult)
                .filter(PluginResult.name == payload["name"])
                .filter(PluginResult.version == payload["version"])
                .filter(PluginResult.env == payload["env"])
                .filter(PluginResult.pytest == payload["pytest"])
                .first()
            )
            if result is None:
                result = PluginResult(**payload)
            result.status = payload["status"]
            result.output = payload.get("output", "")
            result.description = payload.get("description", "")
            session.add(result)
            session.commit()
    def drop_all(self):
        # Drop every table and recreate an empty schema.
        Base.metadata.drop_all(self._engine)
        Base.metadata.create_all(self._engine)
    def get_all_results(self):
        # All rows as plain dicts.
        with closing(self._session_maker()) as session:
            return [x.as_dict() for x in session.query(PluginResult).all()]
    def get_test_results(self, name, version):
        """
        searches the database for all test results given library name and
        version. If version is LATEST_VERSION, only results for highest
        version number are returned.
        """
        with closing(self._session_maker()) as session:
            q = session.query(PluginResult).filter(PluginResult.name == name)
            if version != LATEST_VERSION:
                q = q.filter(PluginResult.version == version)
            results = [p.as_dict() for p in q.all()]
            if version != LATEST_VERSION:
                return results
            else:
                return filter_latest_results(results)
    def _filter_entry_ids(self, entries):
        """
        removes special "_id" from entries returned from MongoDB
        """
        # NOTE(review): leftover from the MongoDB backend — nothing in this
        # file calls it anymore; candidate for removal.
        result = []
        for entry in entries:
            del entry["_id"]
            result.append(entry)
        return result
# Process-wide storage singleton, created lazily.
_storage = None
def get_storage_for_view():
    """
    Returns a storage instance to be used by the view functions. This exists
    solely so we can mock this function during testing.
    """
    global _storage
    if _storage is None:
        _storage = PlugsStorage()
    return _storage
def authenticate(json_data):
    """Abort with 401 unless the posted payload carries the correct secret."""
    expected = os.environ["POST_KEY"]
    if json_data.get("secret") != expected:
        flask.abort(401)
@app.route("/", methods=["GET", "POST"])
def index():
storage = get_storage_for_view()
if request.method == "POST":
data = request.get_json()
authenticate(data)
results = data["results"]
if not isinstance(results, list):
results = [results]
for result in results:
storage.add_test_result(result)
return "OK, posted {} entries".format(len(results))
else:
all_results = storage.get_all_results()
if request.args.get("json", False):
response = flask.jsonify(data=all_results)
return response
else:
if all_results:
namespace = get_namespace_for_rendering(all_results)
return render_template("index.html", **namespace)
else:
return "Database is empty"
def filter_latest_results(all_results):
    """
    given a list of test results read from the db, yield only the ones
    for the highest library version available in the database.
    """
    newest = set(get_latest_versions((r["name"], r["version"]) for r in all_results))
    for r in all_results:
        if (r["name"], r["version"]) in newest:
            yield r
def get_namespace_for_rendering(all_results):
    """Build the index.html template context from raw result dicts."""
    python_versions = get_python_versions()
    pytest_versions = get_pytest_versions()
    lib_names = set()
    statuses = {}
    outputs = {}
    descriptions = {}
    for result in filter_latest_results(all_results):
        # Skip environments / pytest versions we no longer display.
        if result["env"] not in python_versions:
            continue
        if result["pytest"] not in pytest_versions:
            continue
        lib_name = "{}-{}".format(result["name"], result["version"])
        lib_names.add(lib_name)
        key = (lib_name, result["env"], result["pytest"])
        statuses[key] = result["status"]
        outputs[key] = result.get("output", NO_OUTPUT_AVAILABLE)
        # Keep the first non-empty description seen for each library.
        if not descriptions.get(lib_name):
            descriptions[lib_name] = result.get("description", "")
    return dict(
        python_versions=sorted(python_versions),
        lib_names=sorted(lib_names),
        pytest_versions=sorted(pytest_versions),
        statuses=statuses,
        outputs=outputs,
        descriptions=descriptions,
        latest_pytest_ver=max(pytest_versions, key=parse),
    )
def get_latest_versions(names_and_versions):
    """
    Yields (name, version) pairs from the given list of (name, version),
    keeping only the latest version of each package.
    """
    parsed = sorted((name, parse(version)) for (name, version) in names_and_versions)
    for name, grouped in itertools.groupby(parsed, key=lambda pair: pair[0]):
        # sorted() above means the group's last entry is the newest.
        newest = list(grouped)[-1][1]
        yield name, str(newest)
@app.route("/status")
@app.route("/status/<name>")
def get_status_image(name=None):
py = request.args.get("py")
pytest = request.args.get("pytest")
if name and py and pytest:
status = get_field_for(name, py, pytest, "status")
if not status:
status = "unknown"
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, "static", "%s.png" % status)
response = flask.make_response(open(filename, "rb").read())
response.content_type = "image/png"
return response
else:
if name is None:
name = "pytest-pep8-1.0.5"
name = name.rsplit("-", 1)[0]
return render_template("status_help.html", name=name)
@app.route("/output/<name>")
def get_output(name):
py = request.args.get("py")
pytest = request.args.get("pytest")
if name and py and pytest:
output = get_field_for(name, py, pytest, "output")
status_code = 200
if not output:
output = NO_OUTPUT_AVAILABLE
status_code = 404
response = flask.make_response(output)
response.content_type = "text/plain"
response.content_type = "text/plain"
response.status_code = status_code
return response
else:
return 'Specify "py" and "pytest" parameters'
def get_field_for(fullname, env, pytest, field_name):
    """Look up one field of the result matching env+pytest for ``fullname``.

    ``fullname`` is "<name>-<version>"; returns None when nothing matches.
    """
    name, version = fullname.rsplit("-", 1)
    storage = get_storage_for_view()
    for test_result in storage.get_test_results(name, version):
        if test_result["env"] == env and test_result["pytest"] == pytest:
            return test_result.get(field_name, None)
    return None
# text returned when an entry in the database lacks an "output" field
NO_OUTPUT_AVAILABLE = "<no output available>"
# Sentinel version meaning "only the highest version present in the db".
LATEST_VERSION = "latest"
def main():
    """Run the Flask development server on 127.0.0.1 (PORT env var or 5000)."""
    # NOTE(review): debug=True enables the interactive debugger — confirm
    # this entry point is only used for local development.
    app.debug = True
    app.logger.addHandler(logging.StreamHandler(sys.stdout))
    app.logger.setLevel(logging.ERROR)
    app.run(host="127.0.0.1", port=int(os.environ.get("PORT", "5000")))
if __name__ == "__main__":
    main()
| |
#!/usr/bin/env python
"""
Copyright 2016 Brian Quach
Licensed under MIT (https://github.com/brianquach/udacity-nano-fullstack-conference/blob/master/LICENSE) # noqa
"""
import endpoints
from protorpc import message_types
from protorpc import remote
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from enum import HandState
from form import CancelGameForm
from form import CardForm
from form import GameForm
from form import GameForms
from form import GameHistoryForm
from form import GameHistoryForms
from form import NewGameForm
from form import PlayerHandForm
from form import PlayerHandRequest
from form import PlayerMoveForm
from form import PlayerName
from form import PlayerRankForm
from form import PlayerRankForms
from form import StringMessage
from form import UserForm
from game import Poker
from model import Game
from model import Hand
from model import User
from utility import get_by_urlsafe
@endpoints.api(name='poker', version='v1')
class FiveCardPokerAPI(remote.Service):
    """An API for a two-player five card poker game.

    Endpoints cover user creation, game lifecycle (new / move / cancel),
    rankings, per-player game history, and hand inspection. All entities
    are stored via ndb; email notifications go through the task queue.
    """
    @endpoints.method(
        request_message=UserForm,
        response_message=StringMessage,
        path='user/create',
        name='createUser',
        http_method='POST'
    )
    def create_user(self, request):
        """Create a player. Username must be unique."""
        # Code Citation:
        # https://github.com/udacity/FSND-P4-Design-A-Game/blob/master/Skeleton%20Project%20Guess-a-Number/api.py # noqa
        if not request.name:
            raise endpoints.BadRequestException('A name is required.')
        if not request.email:
            raise endpoints.BadRequestException('An email is required.')
        # The name acts as the player's identity throughout the API, so
        # duplicates are rejected outright.
        if User.query(User.name == request.name).get():
            raise endpoints.ConflictException(
                'A User with that name already exists!'
            )
        user = User(name=request.name, email=request.email)
        user.put()
        return StringMessage(
            message='User {0} created!'.format(request.name)
        )
    @endpoints.method(
        request_message=NewGameForm,
        response_message=GameForm,
        path='game/new',
        name='newGame',
        http_method='POST'
    )
    def new_game(self, request):
        """Start a new five card poker game"""
        player_one = User.query(User.name == request.player_one).get()
        player_two = User.query(User.name == request.player_two).get()
        err_msg = '{0} does not exist!'
        if not player_one:
            raise endpoints.NotFoundException(
                err_msg.format(request.player_one)
            )
        if not player_two:
            raise endpoints.NotFoundException(
                err_msg.format(request.player_two)
            )
        # Reserve a datastore id up front so Poker.new_game can build the
        # game key before persisting.
        game_id = Game.allocate_ids(size=1)[0]
        game = Poker.new_game(player_one.key, player_two.key, game_id)
        return game.to_form()
    @endpoints.method(
        request_message=PlayerMoveForm,
        response_message=StringMessage,
        path='game/action',
        name='makeMove',
        http_method='PUT'
    )
    def make_move(self, request):
        """Make a move."""
        # NOTE(review): get_by_urlsafe may return None/raise for a bad key;
        # confirm it raises before game attributes are dereferenced below.
        game = get_by_urlsafe(request.game_urlsafe_key, Game)
        player = User.query(User.name == request.player).get()
        if not player:
            raise endpoints.NotFoundException(
                '{0} does not exist!'.format(request.player)
            )
        if game.player_one != player.key and game.player_two != player.key:
            raise endpoints.ForbiddenException(
                '{0} is not part of this game!'.format(request.player)
            )
        # Turn enforcement: only the active player may exchange cards.
        if game.active_player != player.key:
            raise endpoints.ForbiddenException(
                'It is not your turn {0}'.format(request.player)
            )
        hand = Poker.make_move(game, player, request.card_ids_to_exchange)
        return StringMessage(
            message='Your move has been made here is your final hand: {0}. '
                    'Good luck!'.format(str(hand))
        )
    @endpoints.method(
        request_message=PlayerName,
        response_message=GameForms,
        path='user/games',
        name='getUserGames',
        http_method='GET'
    )
    def get_user_games(self, request):
        """Get all active user games."""
        player = User.query(User.name == request.player).get()
        if not player:
            raise endpoints.NotFoundException(
                '{0} does not exist!'.format(request.player)
            )
        # Active == not finished, with the player on either side.
        games = Game.query(
            ndb.AND(
                Game.game_over == False, # noqa
                ndb.OR(
                    Game.player_one == player.key,
                    Game.player_two == player.key
                )
            )
        )
        return GameForms(
            games=[game.to_form() for game in games]
        )
    @endpoints.method(
        request_message=CancelGameForm,
        response_message=StringMessage,
        path='user/cancel-game',
        name='cancelGame',
        http_method='PUT'
    )
    def cancel_game(self, request):
        """Player forfeits game."""
        game = get_by_urlsafe(request.game_urlsafe_key, Game)
        player = User.query(User.name == request.player).get()
        if not player:
            raise endpoints.NotFoundException(
                '{0} does not exist!'.format(request.player)
            )
        if game.player_one != player.key and game.player_two != player.key:
            raise endpoints.ForbiddenException(
                '{0} is not part of this game!'.format(request.player)
            )
        # The opponent of whoever forfeits is declared the winner.
        if game.player_one == player.key:
            game.winner = game.player_two
        else:
            game.winner = game.player_one
        game.game_over = True
        game.is_forfeit = True
        game.active_player = None
        game.put()
        Poker.update_player_stats(game)
        # Notify the opponent that they have won
        taskqueue.add(
            url='/tasks/send_player_forfeit_email',
            params={
                'game_key': game.key.urlsafe(),
                'winner_key': game.winner.urlsafe(),
                'loser_name': player.name
            }
        )
        return StringMessage(message='You have forfeited the game!')
    @endpoints.method(
        request_message=message_types.VoidMessage,
        response_message=PlayerRankForms,
        path='user/ranking',
        name='getUserRankings',
        http_method='GET'
    )
    def get_user_rankings(self, request):
        """Get player stats and ranking based on total points earned."""
        # Ranking is positional in descending points order.
        # NOTE(review): players with equal points get distinct ranks in
        # iteration order — confirm that is the intended tie behavior.
        player_rankings = User.query().order(-User.points)
        player_rank = 1
        player_rank_forms = []
        for player in player_rankings:
            player_stats = '{0}-{1}-{2} (Wins-Ties-Losses)'.format(
                player.wins, player.ties, player.losses
            )
            player_rank_forms.append(
                PlayerRankForm(
                    name=player.name,
                    stats=player_stats,
                    points=player.points,
                    rank=player_rank
                )
            )
            player_rank += 1
        return PlayerRankForms(
            player_ranks=player_rank_forms
        )
    @endpoints.method(
        request_message=PlayerName,
        response_message=GameHistoryForms,
        path='user/history',
        name='getGameHistory',
        http_method='GET'
    )
    def get_game_history(self, request):
        """Get player game history."""
        player = User.query(User.name == request.player).get()
        if not player:
            raise endpoints.NotFoundException(
                '{0} does not exist!'.format(request.player)
            )
        # Only completed games appear in the history.
        games = Game.query(
            ndb.AND(
                Game.game_over == True, # noqa
                ndb.OR(
                    Game.player_one == player.key,
                    Game.player_two == player.key
                )
            )
        )
        game_histories = []
        for game in games:
            player_one = game.player_one.get()
            player_two = game.player_two.get()
            if game.is_forfeit:
                # Forfeited games have no hand data to report.
                game_histories.append(
                    GameHistoryForm(
                        game_urlsafe_key=game.key.urlsafe(),
                        player_one=player_one.name,
                        player_two=player_two.name,
                        is_forfeit=game.is_forfeit,
                        winner=game.winner.get().name
                    )
                )
            else:
                p1_hands = Hand.query(
                    Hand.game == game.key,
                    Hand.player == player_one.key
                )
                p1_hands = Poker.get_player_start_end_hands(p1_hands)
                p2_hands = Hand.query(
                    Hand.game == game.key,
                    Hand.player == player_two.key
                )
                p2_hands = Poker.get_player_start_end_hands(p2_hands)
                # p*_hands is a (start_hand, end_hand) pair per player.
                game_histories.append(
                    GameHistoryForm(
                        game_urlsafe_key=game.key.urlsafe(),
                        player_one=player_one.name,
                        player_one_start_hand=repr(p1_hands[0]),
                        player_one_end_hand=repr(p1_hands[1]),
                        player_two=player_two.name,
                        player_two_start_hand=repr(p2_hands[0]),
                        player_two_end_hand=repr(p2_hands[1]),
                        is_forfeit=game.is_forfeit,
                        winner=game.winner.get().name
                    )
                )
        return GameHistoryForms(
            games=game_histories
        )
    @endpoints.method(
        request_message=PlayerHandRequest,
        response_message=PlayerHandForm,
        path='game/user/hand',
        name='getUserHand',
        http_method='GET'
    )
    def get_user_hand(self, request):
        """Get player's most recent hand state for a given game."""
        game = get_by_urlsafe(request.game_urlsafe_key, Game)
        player = User.query(User.name == request.player).get()
        if not player:
            raise endpoints.NotFoundException(
                '{0} does not exist!'.format(request.player)
            )
        if game.player_one != player.key and game.player_two != player.key:
            raise endpoints.ForbiddenException(
                '{0} is not part of this game!'.format(request.player)
            )
        # Helper: deserialize a stored hand into CardForm messages.
        def get_card_form(hand):
            card_forms = []
            hand = Poker.load_player_hand(hand.hand)
            for card in hand:
                card_forms.append(
                    CardForm(name=card.name, suit=card.suit, card_id=card.id)
                )
            return card_forms
        cards = []
        state = 'STARTING'
        hands = Hand.query(
            Hand.game == game.key, Hand.player == player.key
        ).fetch()
        # A single stored hand means the player has not exchanged cards yet;
        # otherwise report the hand flagged with the ENDING state.
        if len(hands) == 1:
            cards = get_card_form(hands[0])
        else:
            state = 'ENDING'
            for hand in hands:
                if hand.state == HandState.ENDING.name:
                    cards = get_card_form(hand)
        return PlayerHandForm(
            name=player.name,
            cards=cards,
            state=state
        )
# WSGI-compatible server object exposing the poker endpoints service.
api = endpoints.api_server([FiveCardPokerAPI])
| |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import uuid
from google.api_core.exceptions import NotFound
from google.cloud import bigquery
from google.cloud import dataproc_v1 as dataproc
from google.cloud import storage
import pandas as pd
import pytest
# GCP project
PROJECT_ID = os.environ["GOOGLE_CLOUD_PROJECT"]
# Random suffix so concurrent test runs don't collide on resource names.
TEST_ID = uuid.uuid4()
# Google Cloud Storage constants
BUCKET_NAME = f"process-test-code-{TEST_ID}"
BUCKET_BLOB = "process.py"
# Big Query constants
# Dataset ids may not contain '-', hence the underscore substitution.
BQ_DATASET = f"{PROJECT_ID}.process_test_{str(TEST_ID).replace('-', '_')}"
BQ_TABLE = f"{BQ_DATASET}.dirty_data"
CSV_FILE = "testing_data/raw_data.csv"
# Dataproc constants
DATAPROC_CLUSTER = f"process-test-{TEST_ID}"
CLUSTER_REGION = "us-central1"
CLUSTER_IMAGE = "2.0-debian10"
CLUSTER_CONFIG = { # Dataproc cluster configuration
    "project_id": PROJECT_ID,
    "cluster_name": DATAPROC_CLUSTER,
    "config": {
        "gce_cluster_config": {
            "zone_uri": "",
            "metadata": {"PIP_PACKAGES": "google-cloud-storage"},
        },
        # We recommend these settings for running our code
        # We use a less robust machine type for testing purposes
        # "master_config": {"num_instances": 1, "machine_type_uri": "n1-standard-8"},
        # "worker_config": {"num_instances": 6, "machine_type_uri": "n1-standard-8"},
        "master_config": {"num_instances": 1, "machine_type_uri": "n1-standard-4"},
        "worker_config": {"num_instances": 2, "machine_type_uri": "n1-standard-4"},
        "software_config": {
            "image_version": CLUSTER_IMAGE,
        },
    },
}
DATAPROC_JOB = { # Dataproc job configuration
    "placement": {"cluster_name": DATAPROC_CLUSTER},
    "pyspark_job": {
        "main_python_file_uri": f"gs://{BUCKET_NAME}/{BUCKET_BLOB}",
        # Bucket and table names are forwarded to process.py as argv.
        "args": [BUCKET_NAME, BQ_TABLE, "--dry-run"],
        "jar_file_uris": ["gs://spark-lib/bigquery/spark-bigquery-latest_2.12.jar"],
    },
}
@pytest.fixture(autouse=True)
def setup_and_teardown_table():
    """Create the BigQuery dataset, load the raw CSV into it, and drop the
    dataset after the test."""
    bq_client = bigquery.Client()
    # Create dataset and load table
    bq_client.create_dataset(bigquery.Dataset(BQ_DATASET))
    # Load table from dataframe
    frame = pd.read_csv(CSV_FILE)
    load_config = bigquery.LoadJobConfig(
        autodetect=True, write_disposition="WRITE_TRUNCATE"
    )
    # Logging for debugging the flake:
    # https://github.com/GoogleCloudPlatform/python-docs-samples/issues/5312
    print(f"df: {frame}")
    print(f"job_config: {load_config}")
    load_job = bq_client.load_table_from_dataframe(
        frame, BQ_TABLE, job_config=load_config
    )
    # Block until the load finishes before yielding to the test.
    load_job.result()
    yield
    # Delete dataset
    try:
        bq_client.delete_dataset(BQ_DATASET, delete_contents=True)
    except NotFound as e:
        print(f"Ignoring NotFound upon cleanup, details: {e}")
@pytest.fixture(autouse=True)
def setup_and_teardown_cluster():
    """Provision a Dataproc cluster for the test and always delete it.

    The client is built outside the try block so that the cleanup path never
    references an unbound name if client construction itself fails (the
    original code would raise NameError in ``finally``, masking the real
    error).
    """
    cluster_client = dataproc.ClusterControllerClient(
        client_options={
            "api_endpoint": f"{CLUSTER_REGION}-dataproc.googleapis.com:443"
        }
    )
    try:
        # Create Dataproc cluster using cluster client
        operation = cluster_client.create_cluster(
            project_id=PROJECT_ID, region=CLUSTER_REGION, cluster=CLUSTER_CONFIG
        )
        # Wait for cluster to provision
        operation.result()
        yield
    finally:
        try:
            # Delete cluster
            operation = cluster_client.delete_cluster(
                project_id=PROJECT_ID,
                region=CLUSTER_REGION,
                cluster_name=DATAPROC_CLUSTER,
            )
            operation.result()
        except NotFound:
            print("Cluster already deleted")
@pytest.fixture(autouse=True)
def setup_and_teardown_bucket():
    """Upload process.py to a fresh GCS bucket; delete the bucket after."""
    client = storage.Client()
    # Create GCS bucket
    bucket = client.create_bucket(BUCKET_NAME)
    # Upload file
    bucket.blob(BUCKET_BLOB).upload_from_filename("process.py")
    yield
    # Delete GCS bucket
    client.get_bucket(BUCKET_NAME).delete(force=True)
def is_in_table(value, out):
    """Return a regex match if *value* appears as a table cell in *out*.

    A cell is the value framed by pipe characters with optional padding
    spaces, as rendered by Spark's DataFrame.show().
    """
    cell_pattern = f"\\| *{value} *\\|"
    return re.search(cell_pattern, out)
def get_blob_from_path(path):
    """Resolve a gs:// driver-output URI into a google-cloud-storage Blob."""
    # Bucket name: "dataproc...<...>/" with the trailing slash removed.
    bucket_name = re.search("dataproc.+?/", path).group(0)[:-1]
    # Object path: everything from "google-cloud-dataproc" onward.
    output_location = re.search("google-cloud-dataproc.+", path).group(0)
    return storage.Client().get_bucket(bucket_name).blob(output_location)
def test_process():
    """Tests process.py by submitting it to a Dataproc cluster"""
    # Submit job to Dataproc cluster and wait for it to finish.
    job_client = dataproc.JobControllerClient(
        client_options={"api_endpoint": f"{CLUSTER_REGION}-dataproc.googleapis.com:443"}
    )
    result = job_client.submit_job_as_operation(
        project_id=PROJECT_ID, region=CLUSTER_REGION, job=DATAPROC_JOB
    ).result()
    # Driver output is written next to the URI in numbered chunks.
    blob = get_blob_from_path(result.driver_output_resource_uri + ".000000000")
    out = blob.download_as_string().decode("utf-8")
    # Raw values the job should have cleaned out of the table.
    cleaned_patterns = [
        r"\d*.\d* s",                           # trip duration, seconds
        r"\d*.\d* min",                         # trip duration, minutes
        r"\d*.\d* h",                           # trip duration, hours
        r"\d+" + "\u00B0" + r"\d+\'\d+\"",      # lat/long in deg-min-sec
        "M", "F",                               # raw gender codes
        "subscriber", "SUBSCRIBER", "sub",      # raw customer-plan values
        "customer", "CUSTOMER", "cust",
    ]
    # Normalized values that should be present.
    kept_patterns = [
        r"\d*.\d*",                             # decimal lat/long
        "Subscriber",
        "Customer",
    ]
    for pattern in cleaned_patterns:
        assert not is_in_table(pattern, out)
    for pattern in kept_patterns:
        assert is_in_table(pattern, out)
| |
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import integrate
class BaseDiscretizer(metaclass=ABCMeta):
    """
    Base class for the discretizer classes in pgmpy. The discretizer
    classes are used to discretize a continuous random variable
    distribution into discrete probability masses.

    BUG FIX: this class previously *inherited from* ABCMeta, which made it
    (and every subclass) a metaclass — instantiating a subclass then routed
    the constructor arguments into ``ABCMeta.__new__`` and raised TypeError.
    Using ABCMeta as the metaclass gives the intended abstract-base-class
    behavior.

    Parameters
    ----------
    factor: A ContinuousNode or a ContinuousFactor object
        the continuous node or factor representing the distribution
        to be discretized.
    low, high: float
        the range over which the function will be discretized.
    cardinality: int
        the number of states required in the discretized output.

    Examples
    --------
    >>> from scipy.stats import norm
    >>> from pgmpy.factors.continuous import ContinuousNode
    >>> normal = ContinuousNode(norm(0, 1).pdf)
    >>> from pgmpy.discretize import BaseDiscretizer
    >>> class ChildDiscretizer(BaseDiscretizer):
    ...     def get_discrete_values(self):
    ...         pass
    >>> discretizer = ChildDiscretizer(normal, -3, 3, 10)
    >>> discretizer.factor
    <pgmpy.factors.continuous.ContinuousNode.ContinuousNode object at 0x04C98190>
    >>> discretizer.cardinality
    10
    >>> discretizer.get_labels()
    ['x=-3.0', 'x=-2.4', 'x=-1.8', 'x=-1.2', 'x=-0.6', 'x=0.0', 'x=0.6', 'x=1.2', 'x=1.8', 'x=2.4']
    """

    def __init__(self, factor, low, high, cardinality):
        self.factor = factor
        self.low = low
        self.high = high
        self.cardinality = cardinality

    @abstractmethod
    def get_discrete_values(self):
        """
        This method implements the algorithm to discretize the given
        continuous distribution.

        It must be implemented by all the subclasses of BaseDiscretizer.

        Returns
        -------
        A list of discrete values or a DiscreteFactor object.
        """
        pass

    def get_labels(self):
        """
        Returns a list of strings representing the values about
        which the discretization method calculates the probability
        masses.

        Default value is the points -
        [low, low+step, low+2*step, ......... , high-step]
        unless the method is overridden by a subclass.

        Examples
        --------
        >>> from pgmpy.factors import ContinuousNode
        >>> from pgmpy.discretize import BaseDiscretizer
        >>> class ChildDiscretizer(BaseDiscretizer):
        ...     def get_discrete_values(self):
        ...         pass
        >>> from scipy.stats import norm
        >>> node = ContinuousNode(norm(0).pdf)
        >>> child = ChildDiscretizer(node, -5, 5, 20)
        >>> child.get_labels()
        ['x=-5.0', 'x=-4.5', 'x=-4.0', 'x=-3.5', 'x=-3.0', 'x=-2.5',
        'x=-2.0', 'x=-1.5', 'x=-1.0', 'x=-0.5', 'x=0.0', 'x=0.5', 'x=1.0',
        'x=1.5', 'x=2.0', 'x=2.5', 'x=3.0', 'x=3.5', 'x=4.0', 'x=4.5']
        """
        # Evenly spaced grid of cardinality points starting at low,
        # excluding high; rounded to 3 decimals for readable labels.
        step = (self.high - self.low) / self.cardinality
        labels = [
            f"x={str(i)}" for i in np.round(np.arange(self.low, self.high, step), 3)
        ]
        return labels
class RoundingDiscretizer(BaseDiscretizer):
    """
    Discretizer using the rounding method.

    The probability mass assigned to each point is,
    cdf(x+step/2) - cdf(x),        for x = low
    cdf(x+step/2) - cdf(x-step/2), for low < x <= high
    where cdf is the cumulative density function of the distribution
    and step = (high-low)/cardinality.

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors.continuous import ContinuousNode
    >>> from pgmpy.factors.continuous import RoundingDiscretizer
    >>> std_normal_pdf = lambda x : np.exp(-x*x/2) / (np.sqrt(2*np.pi))
    >>> std_normal = ContinuousNode(std_normal_pdf)
    >>> std_normal.discretize(RoundingDiscretizer, low=-3, high=3,
    ...                       cardinality=12)
    [0.001629865203424451, 0.009244709419989363, 0.027834684208773178,
    0.065590616803038182, 0.120977578710013, 0.17466632194020804,
    0.19741265136584729, 0.17466632194020937, 0.12097757871001302,
    0.065590616803036905, 0.027834684208772664, 0.0092447094199902269]
    """

    def get_discrete_values(self):
        """Return the list of probability masses for the grid points."""
        step = (self.high - self.low) / self.cardinality
        cdf = self.factor.cdf
        # The first state only integrates the half-interval to its right.
        masses = [cdf(self.low + step / 2) - cdf(self.low)]
        # Remaining states integrate a symmetric step-wide interval.
        interior = np.linspace(
            self.low + step, self.high - step, self.cardinality - 1
        )
        masses.extend(cdf(x + step / 2) - cdf(x - step / 2) for x in interior)
        return masses
class UnbiasedDiscretizer(BaseDiscretizer):
    """
    Discretizer using the unbiased (first-moment matching) method.

    The method matches the first order limited moment of the
    distribution, computed by the _lim_moment helper.

    The probability mass assigned to each point is,
    (E(x) - E(x + step))/step + 1 - cdf(x),      for x = low
    (2 * E(x) - E(x - step) - E(x + step))/step, for low < x < high
    (E(x) - E(x - step))/step - 1 + cdf(x),      for x = high
    where E(x) is the first limiting moment of the distribution about the
    point x, cdf is the cumulative density function and
    step = (high-low)/cardinality.

    References
    ----------
    Klugman, S. A., Panjer, H. H. and Willmot, G. E.,
    Loss Models, From Data to Decisions, Fourth Edition,
    Wiley, section 9.6.5.2 (Method of local moment matching) and
    exercise 9.41.

    Examples
    --------
    >>> import numpy as np
    >>> from pgmpy.factors import ContinuousNode
    >>> from pgmpy.factors.continuous import UnbiasedDiscretizer
    # exponential distribution with rate = 2
    >>> exp_pdf = lambda x: 2*np.exp(-2*x) if x>=0 else 0
    >>> exp_node = ContinuousNode(exp_pdf)
    >>> exp_node.discretize(UnbiasedDiscretizer, low=0, high=5, cardinality=10)
    [0.39627368905806137, 0.4049838434034298, 0.13331784003148325,
     0.043887287876647259, 0.014447413395300212, 0.0047559685431339703,
     0.0015656350182896128, 0.00051540201980112557, 0.00016965346326140994,
     3.7867260839208328e-05]
    """

    def get_discrete_values(self):
        """Return the list of probability masses for the grid points."""
        lev = self._lim_moment
        # Note: this method places points at both endpoints, hence the
        # cardinality-1 divisor.
        step = (self.high - self.low) / (self.cardinality - 1)
        # x = low
        masses = [
            (lev(self.low) - lev(self.low + step)) / step
            + 1
            - self.factor.cdf(self.low)
        ]
        # x in (low, high): low+step, low+2*step, ..., high-step
        interior = np.linspace(
            self.low + step, self.high - step, self.cardinality - 2
        )
        for x in interior:
            masses.append((2 * lev(x) - lev(x - step) - lev(x + step)) / step)
        # x = high
        masses.append(
            (lev(self.high) - lev(self.high - step)) / step
            - 1
            + self.factor.cdf(self.high)
        )
        return masses

    def _lim_moment(self, u, order=1):
        """
        Calculate the kth order limiting moment of the distribution:

        E(u) = Integral (-inf to u) [ (x^k)*pdf(x) dx ] + (u^k)(1-cdf(u))

        where pdf / cdf are the probability density and cumulative density
        functions of the distribution.

        Reference
        ---------
        Klugman, S. A., Panjer, H. H. and Willmot, G. E.,
        Loss Models, From Data to Decisions, Fourth Edition,
        Wiley, definition 3.5 and equation 3.8.

        Parameters
        ----------
        u: float
            The point at which the moment is to be calculated.

        order: int
            The order of the moment, default is first order.
        """
        def integrand(x):
            return np.power(x, order) * self.factor.pdf(x)

        tail = np.power(u, order) * (1 - self.factor.cdf(u))
        return integrate.quad(integrand, -np.inf, u)[0] + tail

    def get_labels(self):
        """Labels for the evenly spaced grid including both endpoints."""
        points = np.round(np.linspace(self.low, self.high, self.cardinality), 3)
        return [f"x={str(p)}" for p in points]
| |
import numpy as np
import pylab as pl
from . import utils
from sklearn.utils import check_X_y,check_array
from sklearn.neural_network import MLPClassifier as MultilayerPerceptronClassifier
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression as LogReg
from sklearn.linear_model import SGDClassifier
from .supervised_numpynet import *
class GenericClassifier(object):
    """Mixin adding convenience helpers shared by the classifier wrappers.

    Assumes the concrete class provides sklearn's ``score`` and ``predict``.
    """

    def percent_correct(self, vectors, targets):
        """Classification accuracy on (vectors, targets) as a percentage."""
        return self.score(vectors, targets) * 100.0

    def predict_names(self, vectors, names):
        """Predict class indices for *vectors* and map each to *names*."""
        predictions = self.predict(vectors)
        return [names[index] for index in predictions]
class SVM(SVC,GenericClassifier):
    """Support-vector classifier: sklearn's SVC plus GenericClassifier helpers."""
    pass
class LogisticRegression(LogReg,GenericClassifier):
    """Logistic regression: sklearn's LogisticRegression plus GenericClassifier helpers."""
    pass
class BackProp(MultilayerPerceptronClassifier,GenericClassifier):
    """MLP classifier exposing fitted coefficients under the friendly name
    ``weights`` and per-layer activations via :meth:`output`."""
    def __init__(self,**kwargs):
        # Default to a tighter tolerance than sklearn's unless the caller
        # overrides it.
        if 'tol' not in kwargs:
            kwargs['tol']=1e-7
        MultilayerPerceptronClassifier.__init__(self,**kwargs)
        # friendly-name -> sklearn fitted-attribute name
        self.equivalent={'weights':'coefs_',
                    }
        self.__dict__.update(self.equivalent)
    def fit(self,*args,**kwargs):
        # After fitting, mirror each fitted sklearn attribute under its
        # friendly alias (e.g. self.weights = self.coefs_).
        MultilayerPerceptronClassifier.fit(self,*args,**kwargs)
        for name in self.equivalent:
            super(MultilayerPerceptronClassifier,self).__setattr__(name,self.__getattribute__(self.equivalent[name]))
    def output(self, X):
        """Run a forward pass and return the activations of every layer
        after the input layer.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)

        Returns
        -------
        list of arrays, one per hidden/output layer, each of shape
        (n_samples, units_in_that_layer).
        """
        X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
        # Make sure self.hidden_layer_sizes is a list
        hidden_layer_sizes = self.hidden_layer_sizes
        if not hasattr(hidden_layer_sizes, "__iter__"):
            hidden_layer_sizes = [hidden_layer_sizes]
        hidden_layer_sizes = list(hidden_layer_sizes)
        layer_units = [X.shape[1]] + hidden_layer_sizes + \
            [self.n_outputs_]
        # Initialize layers
        activations = []
        activations.append(X)
        # Pre-allocate one buffer per non-input layer for _forward_pass to
        # fill in place.
        for i in range(self.n_layers_ - 1):
            activations.append(np.empty((X.shape[0],
                                         layer_units[i + 1])))
        # forward propagate
        self._forward_pass(activations)
        y_pred = activations[-1]  # NOTE(review): unused; output layer is the last element of the returned list
        return activations[1:]
from sklearn.neighbors import KNeighborsClassifier
class kNearestNeighbor(KNeighborsClassifier,GenericClassifier):
    """k-NN classifier exposing the neighbor count as plain attribute ``k``."""
    def __init__(self,k=5):
        # Keep the short alias used elsewhere in this package; sklearn
        # stores the same value as n_neighbors.
        self.k=k
        KNeighborsClassifier.__init__(self,n_neighbors=k)
from sklearn.naive_bayes import GaussianNB
class NaiveBayes(GaussianNB,GenericClassifier):
    """Gaussian naive Bayes with friendly attribute aliases and a helper to
    plot per-class means and sigma rings."""

    def __init__(self):
        GaussianNB.__init__(self)
        self.var_smoothing = 1e-2  # make it much more stable
        # friendly-name -> sklearn fitted-attribute name
        self.equivalent = {'means': 'theta_',
                           'stddevs': 'sigma_',
                           'fraction': 'class_prior_'}

    def fit(self, *args, **kwargs):
        """Fit, then mirror the fitted sklearn attributes under their
        friendly aliases (means/stddevs/fraction)."""
        GaussianNB.fit(self, *args, **kwargs)
        for name in self.equivalent:
            super(GaussianNB, self).__setattr__(
                name, self.__getattribute__(self.equivalent[name]))

    def anotherfit(self, X, y):
        """Like fit, but validates X/y explicitly first."""
        X, y = check_X_y(X, y)
        GaussianNB.fit(self, X, y)
        for name in self.equivalent:
            super(GaussianNB, self).__setattr__(
                name, self.__getattribute__(self.equivalent[name]))

    def predict_probability(self, X):
        """Alias for sklearn's predict_proba.

        BUG FIX: the original definition was missing ``self`` and called
        ``predict_proba`` unqualified, so any call raised at runtime.
        """
        return self.predict_proba(X)

    def plot_centers(self):
        """Plot each class mean ('*') plus 1-3 sigma ellipses on the current
        axes, preserving the existing axis limits."""
        ax = pl.gca().axis()
        colors = utils.bold_colors
        angle = np.linspace(0, 2*np.pi, 100)
        i = 0
        for c, r in zip(self.means, self.stddevs):
            pl.plot(c[0], c[1], '*', color=colors[i], markersize=15)
            i += 1
        i = 0
        for c, r in zip(self.means, self.stddevs):
            for k in range(3):
                xd = np.cos(angle)*r[0]*(k+1) + c[0]
                yd = np.sin(angle)*r[1]*(k+1) + c[1]
                pl.plot(xd, yd, '-', linewidth=3, color='k', alpha=0.5)
            i += 1
        # pl.axis('equal')
        pl.gca().axis(ax)
from sklearn.linear_model import Perceptron as skPerceptron
class Perceptron(skPerceptron,GenericClassifier):
    """Thin wrapper over sklearn's Perceptron with friendly attribute
    aliases for the fitted weights and biases."""

    def __init__(self, number_of_iterations=50, tol=1e-3):
        skPerceptron.__init__(
            self, shuffle=True, max_iter=number_of_iterations, tol=tol
        )
        # friendly-name -> sklearn fitted-attribute name
        self.equivalent = {
            'weights': 'coef_',
            'biases': 'intercept_',
        }

    def fit(self, *args, **kwargs):
        """Fit, then mirror fitted attributes under their friendly names."""
        skPerceptron.fit(self, *args, **kwargs)
        for alias in self.equivalent:
            super(skPerceptron, self).__setattr__(
                alias, self.__getattribute__(self.equivalent[alias]))

    def output(self, vectors):
        """Raw (pre-threshold) decision-function scores for *vectors*."""
        return self.decision_function(vectors)
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.base import BaseEstimator, ClassifierMixin
class RCEsk(BaseEstimator, ClassifierMixin):
    """Restricted Coulomb Energy (RCE) style classifier.

    Training covers the data with class-labeled spheres: a sphere of radius
    r_max is added around any sample not already inside a sphere of its own
    class, and spheres containing wrong-class samples are shrunk (bounded
    below by r_min) until the radii stop changing.

    Parameters
    ----------
    metric : str
        Distance metric name passed to pairwise_distances.
    r_min, r_max : float
        Lower / upper bounds for sphere radii.
    r_step : float
        Slack subtracted when shrinking a wrong-class sphere.
    verbose : bool
        If True, print the sphere count after each pass.
    """

    def __init__(self, metric='euclidean', r_min=0.1, r_max=1.0,
                 r_step=1e-30, verbose=False):
        self.r_min = r_min
        self.r_max = r_max
        self.r_step = r_step
        self.metric = metric
        # BUG FIX: np.float / np.int were deprecated aliases removed in
        # NumPy 1.24; the builtins give the same dtypes (float64 / intp).
        self.centers_ = np.array([], dtype=float)
        self.radii_ = np.array([], dtype=float)
        self.targets_ = np.array([], dtype=int)
        self.verbose = verbose

    def _add_center(self, center, radius, target):
        """Append one (center, radius, target) sphere to the model arrays."""
        try:
            center = center.toarray()  # deal with sparse
        except AttributeError:
            pass
        center = np.array(center, dtype=float)
        radius = np.array([radius], dtype=float)
        target = np.array([target], dtype=int)
        if len(self.centers_) == 0:
            self.centers_ = center
            self.targets_ = target
            self.radii_ = radius
        else:
            self.centers_ = np.vstack((self.centers_, center))
            self.targets_ = np.concatenate((self.targets_, target))
            self.radii_ = np.concatenate((self.radii_, radius))

    def fit(self, X, y):
        """Fit the sphere cover to (X, y). Requires at least two classes."""
        X, y = check_X_y(X, y)
        n_samples, n_features = X.shape
        classes = np.unique(y)
        self.classes_ = classes
        n_classes = classes.size
        if n_classes < 2:
            raise ValueError('y has fewer than 2 classes')
        if len(self.centers_) > 0:
            assert len(self.centers_[0]) == n_features
        # first pass
        pass_number = 0
        for v, t in zip(X, y):  # Go through all of the data points
            v = v.reshape(1, -1)
            if len(self.centers_) == 0:
                self._add_center(v, self.r_max, t)
                continue
            match = self.targets_[
                (pairwise_distances(v, self.centers_,
                                    metric=self.metric) < self.radii_).ravel()]
            # if a point is not already in a sphere of the correct category,
            # add a sphere centered at that point, of the correct category
            if t not in match:
                self._add_center(v, self.r_max, t)
                continue
        pass_number += 1
        if self.verbose:
            print("%d clusters." % (len(self.centers_)))
        # second pass
        stop = False
        while not stop:
            old_radii_ = self.radii_.copy()
            for v, t in zip(X, y):  # Go through all of the data points (again)
                v = v.reshape(1, -1)
                D = pairwise_distances(v, self.centers_, metric=self.metric).ravel()
                within_centers = (D < self.radii_)
                matched = (t == self.targets_) & (within_centers)
                # not already in a sphere of correct category --> add a
                # sphere, centered at that point, of the correct category
                if not any(matched):
                    self._add_center(v, self.r_max, t)
                    continue
                not_matched = (t != self.targets_) & (within_centers)
                # in a sphere of wrong category --> shrink the wrong sphere
                # as much as possible (never below r_min)
                self.radii_[not_matched] -= D[not_matched] - self.r_step
                self.radii_[self.radii_ < self.r_min] = self.r_min
            pass_number += 1
            if self.verbose:
                print("%d clusters." % (len(self.centers_)))
            if len(old_radii_) != len(self.radii_):
                continue
            # Repeat until no changes
            if sum(abs(self.radii_ - old_radii_)) < 1e-10:
                stop = True

    def predict(self, X):
        """Assign each row of X the target of the nearest sphere, where
        distances are scaled by sphere radius."""
        X = check_array(X)
        if len(self.centers_) == 0:
            raise AttributeError("Model has not been trained yet.")
        result = []
        for vector in X:
            vector = vector.reshape(1, -1)
            D = pairwise_distances(
                vector, self.centers_, metric=self.metric) / self.radii_
            result.append(self.targets_[D.argmin()])
        return np.array(result)
class RCE(RCEsk,GenericClassifier):
    """RCEsk with friendly attribute aliases and a center-plotting helper."""

    def __init__(self, **kwargs):
        RCEsk.__init__(self, **kwargs)
        # friendly-name -> fitted-attribute name
        self.equivalent = {'centers': 'centers_',
                           'radii': 'radii_',
                           'targets': 'targets_'}
        self.__dict__.update(self.equivalent)

    def fit(self, *args, **kwargs):
        """Fit, then mirror fitted attributes under their friendly names."""
        RCEsk.fit(self, *args, **kwargs)
        for alias in self.equivalent:
            super(RCE, self).__setattr__(
                alias, self.__getattribute__(self.equivalent[alias]))

    def plot_centers(self):
        """Plot each sphere center ('*') and its boundary circle, colored
        by target class."""
        colors = utils.bold_colors
        for center, radius, target in zip(self.centers_, self.radii_, self.targets_):
            pl.plot(center[0], center[1], '*', color=colors[target])
        angle = np.linspace(0, 2 * np.pi, 100)
        for center, radius, target in zip(self.centers_, self.radii_, self.targets_):
            xd = np.cos(angle) * radius + center[0]
            yd = np.sin(angle) * radius + center[1]
            pl.plot(xd, yd, '-', color=colors[target])
        pl.axis('equal')
class CSCsk(BaseEstimator, ClassifierMixin):
    """Class-cover / sphere-cover classifier.

    For every training sample, computes the largest sphere radius that
    excludes all samples of other classes, then greedily keeps, for each
    sample, the containing same-class sphere that covers the most points.

    Parameters
    ----------
    metric : str
        Distance metric name passed to pairwise_distances (used at
        prediction time).
    r_step : float
        Reserved shrink slack (kept for interface compatibility).
    verbose : bool
        If True, extra progress output (currently unused in fit).
    """

    def __init__(self, metric='euclidean', r_step=1e-30, verbose=False):
        self.r_step = r_step
        self.metric = metric
        # BUG FIX: np.float / np.int were deprecated aliases removed in
        # NumPy 1.24; the builtins give the same dtypes (float64 / intp).
        self.centers_ = np.array([], dtype=float)
        self.radii_ = np.array([], dtype=float)
        self.targets_ = np.array([], dtype=int)
        self.verbose = verbose

    def _add_center(self, center, radius, target):
        """Append one (center, radius, target) sphere to the model arrays."""
        try:
            center = center.toarray()  # deal with sparse
        except AttributeError:
            pass
        center = np.array(center, dtype=float)
        radius = np.array([radius], dtype=float)
        target = np.array([target], dtype=int)
        if len(self.centers_) == 0:
            self.centers_ = center
            self.targets_ = target
            self.radii_ = radius
        else:
            self.centers_ = np.vstack((self.centers_, center))
            self.targets_ = np.concatenate((self.targets_, target))
            self.radii_ = np.concatenate((self.radii_, radius))

    def fit(self, X, y):
        """Fit the sphere cover to (X, y). Requires at least two classes."""
        X, y = check_X_y(X, y)
        n_samples, n_features = X.shape
        classes = np.unique(y)
        self.classes_ = classes
        n_classes = classes.size
        if n_classes < 2:
            raise ValueError('y has fewer than 2 classes')
        if len(self.centers_) > 0:
            assert len(self.centers_[0]) == n_features
        radii = []
        count = []
        # first pass - per sample: largest radius excluding every
        # other-class sample, and how many same-class samples that covers
        # (vectors and targets are X and y themselves).
        pass_number = 0
        i = 0
        for v, t in zip(X, y):
            v = v.reshape(1, -1)
            D = pairwise_distances(v, X).ravel()
            r = max(D[y != t].min() - 1e-10, 1e-10)
            radii.append(r)
            within = D[y == t] <= r
            count.append(within.sum())
            i += 1
        radii = np.array(radii)
        count = np.array(count)
        # second pass
        for v, t in zip(X, y):  # Go through all of the data points
            # Select the same-class sphere that contains this point and
            # covers the largest number of points; add it to the model.
            v = v.reshape(1, -1)
            D = pairwise_distances(v, X).ravel()
            within_centers = (D <= radii)
            matched = (t == y) & (within_centers)
            idx = np.arange(len(y))
            idx_matched = idx[matched]
            best = idx_matched[np.argmax(count[matched])]
            self._add_center(X[best], radii[best], y[best])
            pass_number += 1

    def predict(self, X):
        """Assign each row of X the target of the nearest sphere, where
        distances are scaled by sphere radius."""
        X = check_array(X)
        if len(self.centers_) == 0:
            raise AttributeError("Model has not been trained yet.")
        result = []
        for vector in X:
            vector = vector.reshape(1, -1)
            D = pairwise_distances(
                vector, self.centers_, metric=self.metric) / self.radii_
            result.append(self.targets_[D.argmin()])
        return np.array(result)
class CSC(CSCsk,GenericClassifier):
    """CSCsk with friendly attribute aliases and a center-plotting helper."""

    def __init__(self, **kwargs):
        CSCsk.__init__(self, **kwargs)
        # friendly-name -> fitted-attribute name
        self.equivalent = {'centers': 'centers_',
                           'radii': 'radii_',
                           'targets': 'targets_'}
        self.__dict__.update(self.equivalent)

    def fit(self, *args, **kwargs):
        """Fit, then mirror fitted attributes under their friendly names."""
        CSCsk.fit(self, *args, **kwargs)
        for alias in self.equivalent:
            super(CSC, self).__setattr__(
                alias, self.__getattribute__(self.equivalent[alias]))

    def plot_centers(self):
        """Plot each sphere center ('*') and its boundary circle, colored
        by target class."""
        colors = utils.bold_colors
        for center, radius, target in zip(self.centers_, self.radii_, self.targets_):
            pl.plot(center[0], center[1], '*', color=colors[target])
        angle = np.linspace(0, 2 * np.pi, 100)
        for center, radius, target in zip(self.centers_, self.radii_, self.targets_):
            xd = np.cos(angle) * radius + center[0]
            yd = np.sin(angle) * radius + center[1]
            pl.plot(xd, yd, '-', color=colors[target])
        pl.axis('equal')
# from http://danielfrg.com/blog/2013/07/03/basic-neural-network-python/
from scipy import optimize
class NN_1HLsk(BaseEstimator, ClassifierMixin):
    """Single-hidden-layer neural network classifier.

    Trained by minimizing the (optionally regularized) cross-entropy cost
    with ``scipy.optimize.minimize`` (TNC by default), using hand-coded
    backpropagation gradients.
    """
    def __init__(self, hidden_layer_size=25, reg_lambda=0, epsilon_init=0.12, opti_method='TNC', maxiter=500):
        self.reg_lambda = reg_lambda
        self.epsilon_init = epsilon_init
        self.hidden_layer_size = hidden_layer_size
        self.activation_func = self.sigmoid
        self.activation_func_prime = self.sigmoid_prime
        self.method = opti_method
        self.maxiter = maxiter
    def sigmoid(self, z):
        """Logistic sigmoid activation."""
        return 1 / (1 + np.exp(-z))
    def sigmoid_prime(self, z):
        """Derivative of the sigmoid; computes sigmoid(z) only once."""
        sig = self.sigmoid(z)
        return sig * (1 - sig)
    def sumsqr(self, a):
        """Sum of squared elements of ``a``."""
        return np.sum(a ** 2)
    def rand_init(self, l_in, l_out):
        """Random (l_out, l_in+1) weight matrix in [-epsilon_init, epsilon_init]."""
        return np.random.rand(l_out, l_in + 1) * 2 * self.epsilon_init - self.epsilon_init
    def pack_thetas(self, t1, t2):
        """Flatten both weight matrices into one 1-D parameter vector."""
        return np.concatenate((t1.reshape(-1), t2.reshape(-1)))
    def unpack_thetas(self, thetas, input_layer_size, hidden_layer_size, num_labels):
        """Split a flat parameter vector back into the two weight matrices."""
        t1_start = 0
        t1_end = hidden_layer_size * (input_layer_size + 1)
        t1 = thetas[t1_start:t1_end].reshape((hidden_layer_size, input_layer_size + 1))
        t2 = thetas[t1_end:].reshape((num_labels, hidden_layer_size + 1))
        return t1, t2
    def _forward(self, X, t1, t2):
        """Forward pass; X may be a single 1-D sample or a 2-D matrix."""
        m = X.shape[0]
        ones = None
        if len(X.shape) == 1:
            ones = np.array(1).reshape(1,)
        else:
            ones = np.ones(m).reshape(m, 1)
        # Input layer (prepend bias)
        a1 = np.hstack((ones, X))
        # Hidden layer
        z2 = np.dot(t1, a1.T)
        a2 = self.activation_func(z2)
        a2 = np.hstack((ones, a2.T))
        # Output layer
        z3 = np.dot(t2, a2.T)
        a3 = self.activation_func(z3)
        return a1, z2, a2, z3, a3
    def function(self, thetas, input_layer_size, hidden_layer_size, num_labels, X, y, reg_lambda):
        """Regularized cross-entropy cost for the flat parameter vector."""
        t1, t2 = self.unpack_thetas(thetas, input_layer_size, hidden_layer_size, num_labels)
        m = X.shape[0]
        Y = np.eye(num_labels)[y]
        _, _, _, _, h = self._forward(X, t1, t2)
        costPositive = -Y * np.log(h).T
        costNegative = (1 - Y) * np.log(1 - h).T
        cost = costPositive - costNegative
        J = np.sum(cost) / m
        if reg_lambda != 0:
            # bias columns are excluded from regularization; use the
            # reg_lambda argument consistently (was mixed with self.reg_lambda)
            t1f = t1[:, 1:]
            t2f = t2[:, 1:]
            reg = (reg_lambda / (2 * m)) * (self.sumsqr(t1f) + self.sumsqr(t2f))
            J = J + reg
        return J
    def function_prime(self, thetas, input_layer_size, hidden_layer_size, num_labels, X, y, reg_lambda):
        """Gradient of ``function`` via backpropagation, as a flat vector."""
        t1, t2 = self.unpack_thetas(thetas, input_layer_size, hidden_layer_size, num_labels)
        m = X.shape[0]
        t1f = t1[:, 1:]
        t2f = t2[:, 1:]
        Y = np.eye(num_labels)[y]
        Delta1, Delta2 = 0, 0
        for i, row in enumerate(X):
            a1, z2, a2, z3, a3 = self._forward(row, t1, t2)
            # Backprop
            d3 = a3 - Y[i, :].T
            d2 = np.dot(t2f.T, d3) * self.activation_func_prime(z2)
            Delta2 += np.dot(d3[np.newaxis].T, a2[np.newaxis])
            Delta1 += np.dot(d2[np.newaxis].T, a1[np.newaxis])
        Theta1_grad = (1 / m) * Delta1
        Theta2_grad = (1 / m) * Delta2
        if reg_lambda != 0:
            Theta1_grad[:, 1:] = Theta1_grad[:, 1:] + (reg_lambda / m) * t1f
            Theta2_grad[:, 1:] = Theta2_grad[:, 1:] + (reg_lambda / m) * t2f
        return self.pack_thetas(Theta1_grad, Theta2_grad)
    def fit(self, X, y):
        """Train the network; ``y`` must contain integer labels 0..k-1."""
        input_layer_size = X.shape[1]
        num_labels = len(set(y))
        theta1_0 = self.rand_init(input_layer_size, self.hidden_layer_size)
        theta2_0 = self.rand_init(self.hidden_layer_size, num_labels)
        thetas0 = self.pack_thetas(theta1_0, theta2_0)
        options = {'maxiter': self.maxiter}
        # was: reg_lambda hard-coded to 0 in args, which silently disabled
        # the ``reg_lambda`` constructor argument during training
        _res = optimize.minimize(self.function, thetas0, jac=self.function_prime, method=self.method,
                                 args=(input_layer_size, self.hidden_layer_size, num_labels, X, y, self.reg_lambda), options=options)
        self.t1, self.t2 = self.unpack_thetas(_res.x, input_layer_size, self.hidden_layer_size, num_labels)
    def predict(self, X):
        """Return the predicted label for each sample."""
        return self.predict_proba(X).argmax(0)
    def predict_proba(self, X):
        """Return the (num_labels, n_samples) output-activation matrix."""
        _, _, _, _, h = self._forward(X, self.t1, self.t2)
        return h
class NN_1HL(NN_1HLsk, GenericClassifier):
    """One-hidden-layer neural network with the GenericClassifier interface."""

    def __init__(self, N, **kwargs):
        NN_1HLsk.__init__(self, hidden_layer_size=N, **kwargs)
        # no fitted attributes need aliasing for this model
        self.equivalent = {}
        self.__dict__.update(self.equivalent)

    def fit(self, *args, **kwargs):
        NN_1HLsk.fit(self, *args, **kwargs)
        # mirror any aliased fitted attributes (none by default)
        for alias in self.equivalent:
            source = self.__getattribute__(self.equivalent[alias])
            super(NN_1HL, self).__setattr__(alias, source)
| |
# -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2018, 2019, 2020, 2021, 2022 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""CWL v1.0 interface CLI implementation."""
import io
import logging
import os
import re
import sys
import traceback
from time import sleep
import click
import yaml
from bravado.exception import HTTPServerError
from cwltool.load_tool import fetch_document
from cwltool.main import printdeps
from reana_commons.specification import load_workflow_spec
from reana_client.cli.utils import add_access_token_options
from reana_client.version import __version__
def findfiles(wo, fn=None):
    """Return a list CWL workflow files.

    Walks an arbitrarily nested dict/list structure, collecting every
    mapping whose ``class`` is ``File`` or ``Directory`` (including their
    ``secondaryFiles``) into ``fn``.
    """
    if fn is None:
        fn = []
    if isinstance(wo, dict):
        cwl_class = wo.get("class")
        if cwl_class in ("File", "Directory"):
            # record the entry itself, then any attached secondary files
            fn.append(wo)
            findfiles(wo.get("secondaryFiles"), fn)
        else:
            for value in wo.values():
                findfiles(value, fn)
    elif isinstance(wo, list):
        for item in wo:
            findfiles(item, fn)
    return fn
def get_file_dependencies_obj(cwl_obj, basedir):
    """Return a dictionary which contains the CWL workflow file dependencies.
    :param cwl_obj: A CWL tool or job which might contain file dependencies.
    :param basedir: Workflow base dir.
    :returns: A dictionary composed of valid CWL file dependencies.
    """
    # Load the document
    # remove filename additions (e.g. 'v1.0/conflict-wf.cwl#collision')
    document = cwl_obj.split("#")[0]
    document_loader, workflow_obj, uri = fetch_document(document)
    # cwltool's printdeps writes YAML to a stream; capture it in memory
    # instead of stdout so it can be parsed below
    in_memory_buffer = io.StringIO()
    # Get dependencies
    printdeps(
        workflow_obj,
        document_loader.loader,
        in_memory_buffer,
        "primary",
        uri,
        basedir=basedir,
    )
    # parse the captured YAML into the dependency dictionary
    file_dependencies_obj = yaml.load(
        in_memory_buffer.getvalue(), Loader=yaml.FullLoader
    )
    in_memory_buffer.close()
    return file_dependencies_obj
def upload_files(files, basedir, workflow_id, access_token):
    """Upload file or directory to REANA server.

    :param files: List of CWL file objects (dicts with a ``location``
        relative to ``basedir``).
    :param basedir: Workflow base directory.
    :param workflow_id: Identifier of the target workflow.
    :param access_token: REANA API access token.
    """
    from reana_client.api.client import upload_file
    for cwl_file_object in files:
        file_path = cwl_file_object.get("location")
        abs_file_path = os.path.join(basedir, file_path)
        if os.path.isdir(abs_file_path):
            # Recurse into directories, uploading each contained entry with
            # a path relative to ``basedir``.  Renamed walk variables so
            # they no longer shadow the ``files`` parameter.
            for root, walk_dirs, walk_files in os.walk(abs_file_path, topdown=False):
                for next_path in walk_files + walk_dirs:
                    location = os.path.join(root, next_path).replace(basedir + "/", "")
                    upload_files(
                        [{"location": location}], basedir, workflow_id, access_token,
                    )
        else:
            with open(abs_file_path, "r") as f:
                upload_file(workflow_id, f, file_path, access_token)
            # was logging.error: a successful upload is informational
            logging.info("File {} uploaded.".format(file_path))
@click.command()
@click.version_option(version=__version__)
@click.option("--quiet", is_flag=True, help="No diagnostic output")
@click.option(
    "--outdir",
    type=click.Path(),
    help="Output directory, defaults to the current directory",
)
@click.option("--basedir", type=click.Path(), help="Base directory.")
@add_access_token_options
@click.argument("processfile")
@click.argument("jobfile", required=False)
@click.pass_context
def cwl_runner(ctx, quiet, outdir, basedir, processfile, jobfile, access_token):
    """Run CWL files in a standard format <workflow.cwl> <job.json>.

    Builds a REANA specification from the CWL process file and optional
    job file, creates and starts the workflow on the REANA server, polls
    its logs until completion, and writes the final JSON output (found
    between ``FinalOutput`` markers in the logs) to stdout.
    """
    import json
    from reana_client.utils import get_api_url
    from reana_client.api.client import (
        create_workflow,
        get_workflow_logs,
        start_workflow,
        upload_file,
    )
    # NOTE(review): levels look inverted -- "--quiet" selects the *more*
    # verbose INFO threshold here; confirm intended behavior.
    logging.basicConfig(
        format="[%(levelname)s] %(message)s",
        stream=sys.stderr,
        level=logging.INFO if quiet else logging.DEBUG,
    )
    try:
        # default the base dir to the directory containing the process file
        basedir = basedir or os.path.abspath(os.path.dirname(processfile))
        reana_spec = {"workflow": {"type": "cwl"}}
        job = {}
        if jobfile:
            with open(jobfile) as f:
                job = yaml.load(f, Loader=yaml.FullLoader)
        if processfile:
            reana_spec["inputs"] = {"parameters": job}
            # load the CWL spec, then rewrite absolute file paths to
            # relative ones so they resolve on the server side
            reana_spec["workflow"]["specification"] = load_workflow_spec(
                reana_spec["workflow"]["type"], processfile
            )
            reana_spec["workflow"]["specification"] = replace_location_in_cwl_spec(
                reana_spec["workflow"]["specification"]
            )
        logging.info("Connecting to {0}".format(get_api_url()))
        # round-trip through JSON to normalize the spec (sorted keys)
        reana_specification = json.loads(json.dumps(reana_spec, sort_keys=True))
        response = create_workflow(reana_specification, "cwl-test", access_token)
        # NOTE(review): response logged at ERROR level; looks informational
        logging.error(response)
        workflow_name = response["workflow_name"]
        workflow_id = response["workflow_id"]
        logging.info(
            "Workflow {0}/{1} has been created.".format(workflow_name, workflow_id)
        )
        # collect and upload every file either document depends on
        file_dependencies_list = []
        for cwlobj in [processfile, jobfile]:
            if not cwlobj:
                continue
            file_dependencies_obj = get_file_dependencies_obj(cwlobj, basedir)
            file_dependencies_list.append(file_dependencies_obj)
        files_to_upload = findfiles(file_dependencies_list)
        upload_files(files_to_upload, basedir, workflow_id, access_token)
        response = start_workflow(
            workflow_id, access_token, reana_spec["inputs"]["parameters"]
        )
        logging.error(response)
        # poll the workflow logs, echoing only the newly appended portion,
        # until a terminal marker appears
        first_logs = ""
        while True:
            sleep(1)
            logging.error("Polling workflow logs")
            response = get_workflow_logs(workflow_id, access_token)
            logs = response["logs"]
            if logs != first_logs:
                logging.error(logs[len(first_logs) :])
                first_logs = logs
            if (
                "Final process status" in logs
                or "Traceback (most recent call last)" in logs
            ):
                # click.echo(response['status'])
                break
        try:
            # NOTE(review): ``ast`` import appears unused
            import ast
            # extract the JSON output embedded between FinalOutput markers
            out = (
                re.search(r"FinalOutput[\s\S]*?FinalOutput", logs)
                .group()
                .replace("FinalOutput", "")
            )
            json_output = out.encode("utf8").decode("unicode_escape")
        except AttributeError:
            # re.search returned None: no FinalOutput marker in the logs
            logging.error("Workflow execution failed")
            sys.exit(1)
        except Exception:
            logging.error(traceback.format_exc())
            sys.exit(1)
        sys.stdout.write(json_output)
        sys.stdout.write("\n")
        sys.stdout.flush()
    except HTTPServerError as e:
        logging.error(traceback.print_exc())
        logging.error(e)
    except Exception:
        logging.error(traceback.print_exc())
def replace_location_in_cwl_spec(spec):
    """Replace absolute paths with relative in a workflow.

    Recursively replace absolute paths with relative in a normalized (packed)
    workflow.
    """
    if spec.get("$graph"):
        # packed workflow: rewrite every tool in the $graph
        rewritten = spec.copy()
        rewritten["$graph"] = [
            replace_location_in_cwl_tool(tool) for tool in spec["$graph"]
        ]
        return rewritten
    if spec.get("inputs"):
        # single tool document
        return replace_location_in_cwl_tool(spec)
    return spec
def replace_location_in_cwl_tool(spec):
    """Recursively replace absolute paths with relative."""

    def _strip_to_basename(default):
        # rewrite the File default's "location" (or "path") to its basename
        key = "location" if default.get("location") else "path"
        default[key] = default[key].split("/")[-1]

    # tool-level inputs
    rewritten_inputs = []
    for param in spec["inputs"]:
        if param["type"] == "File" and param.get("default", ""):
            _strip_to_basename(param["default"])
        rewritten_inputs.append(param)
    spec["inputs"] = rewritten_inputs
    # workflow steps
    if spec.get("steps"):
        rewritten_steps = []
        for tool in spec["steps"]:
            step_inputs = []
            for param in tool["in"]:
                if param.get("default") and type(param["default"]) is dict:
                    default = param["default"]
                    if default.get("class", default.get("type")) == "File":
                        _strip_to_basename(default)
                step_inputs.append(param)
            tool["in"] = step_inputs
            rewritten_steps.append(tool)
        spec["steps"] = rewritten_steps
    return spec
if __name__ == "__main__":
cwl_runner()
| |
#! /usr/bin/env python3
"""The Tab Nanny despises ambiguous indentation. She knows no mercy.
tabnanny -- Detection of ambiguous indentation
For the time being this module is intended to be called as a script.
However it is possible to import it into an IDE and use the function
check() described below.
Warning: The API provided by this module is likely to change in future
releases; such changes may not be backward compatible.
"""
# Released to the public domain, by Tim Peters, 15 April 1998.
# XXX Note: this is now a standard library module.
# XXX The API needs to undergo changes however; the current code is too
# XXX script-like. This will be addressed later.
__version__ = "6"
import os
import sys
import getopt
import tokenize
if not hasattr(tokenize, 'NL'):
raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")
__all__ = ["check", "NannyNag", "process_tokens"]
verbose = 0
filename_only = 0
def errprint(*args):
    """Write args to stderr, space-separated, followed by a newline."""
    sys.stderr.write(" ".join(str(arg) for arg in args))
    sys.stderr.write("\n")
def main():
    """Parse -q/-v options from sys.argv and run check() on each argument."""
    global verbose, filename_only
    try:
        opts, args = getopt.getopt(sys.argv[1:], "qv")
    except getopt.error as msg:
        errprint(msg)
        return
    for o, a in opts:
        # -q: print only file names; -v: increase verbosity (both stack)
        if o == '-q':
            filename_only = filename_only + 1
        if o == '-v':
            verbose = verbose + 1
    if not args:
        errprint("Usage:", sys.argv[0], "[-v] file_or_directory ...")
        return
    for arg in args:
        check(arg)
class NannyNag(Exception):
    """
    Raised by tokeneater() if detecting an ambiguous indent.
    Captured and handled in check().
    """

    def __init__(self, lineno, msg, line):
        self.lineno = lineno
        self.msg = msg
        self.line = line

    def get_lineno(self):
        """Return the line number of the offending line."""
        return self.lineno

    def get_msg(self):
        """Return the diagnostic message."""
        return self.msg

    def get_line(self):
        """Return the text of the offending line."""
        return self.line
def check(file):
    """check(file_or_dir)

    If file_or_dir is a directory and not a symbolic link, then recursively
    descend the directory tree named by file_or_dir, checking all .py files
    along the way. If file_or_dir is an ordinary Python source file, it is
    checked for whitespace related problems. The diagnostic messages are
    written to standard output using the print statement.
    """
    if os.path.isdir(file) and not os.path.islink(file):
        if verbose:
            print("%r: listing directory" % (file,))
        names = os.listdir(file)
        for name in names:
            fullname = os.path.join(file, name)
            # recurse into (non-symlink) subdirectories and .py files
            if (os.path.isdir(fullname) and
                not os.path.islink(fullname) or
                os.path.normcase(name[-3:]) == ".py"):
                check(fullname)
        return
    try:
        f = tokenize.open(file)
    except IOError as msg:
        errprint("%r: I/O Error: %s" % (file, msg))
        return
    if verbose > 1:
        print("checking %r ..." % file)
    try:
        # NannyNag is raised by process_tokens() on an ambiguous indent
        process_tokens(tokenize.generate_tokens(f.readline))
    except tokenize.TokenError as msg:
        errprint("%r: Token Error: %s" % (file, msg))
        return
    except IndentationError as msg:
        errprint("%r: Indentation Error: %s" % (file, msg))
        return
    except NannyNag as nag:
        badline = nag.get_lineno()
        line = nag.get_line()
        if verbose:
            print("%r: *** Line %d: trouble in tab city! ***" % (file, badline))
            print("offending line: %r" % (line,))
            print(nag.get_msg())
        else:
            # terse report; quote file names containing spaces
            if ' ' in file: file = '"' + file + '"'
            if filename_only: print(file)
            else: print(file, badline, repr(line))
        return
    finally:
        f.close()
    if verbose:
        print("%r: Clean bill of health." % (file,))
class Whitespace:
    """Normalized model of a line's leading whitespace.

    Parses the run of spaces/tabs at the start of a string into a
    canonical form so indentation can be compared across all tab sizes.
    """
    # the characters used for space and tab
    S, T = ' \t'
    # members:
    # raw
    # the original string
    # n
    # the number of leading whitespace characters in raw
    # nt
    # the number of tabs in raw[:n]
    # norm
    # the normal form as a pair (count, trailing), where:
    # count
    # a tuple such that raw[:n] contains count[i]
    # instances of S * i + T
    # trailing
    # the number of trailing spaces in raw[:n]
    # It's A Theorem that m.indent_level(t) ==
    # n.indent_level(t) for all t >= 1 iff m.norm == n.norm.
    # is_simple
    # true iff raw[:n] is of the form (T*)(S*)
    def __init__(self, ws):
        """Parse ws into the normalized representation described above."""
        self.raw = ws
        S, T = Whitespace.S, Whitespace.T
        count = []
        # b counts spaces since the last tab; n total whitespace; nt tabs
        b = n = nt = 0
        for ch in self.raw:
            if ch == S:
                n = n + 1
                b = b + 1
            elif ch == T:
                n = n + 1
                nt = nt + 1
                if b >= len(count):
                    count = count + [0] * (b - len(count) + 1)
                count[b] = count[b] + 1
                b = 0
            else:
                break
        self.n = n
        self.nt = nt
        self.norm = tuple(count), b
        self.is_simple = len(count) <= 1
    # return length of longest contiguous run of spaces (whether or not
    # preceding a tab)
    def longest_run_of_spaces(self):
        count, trailing = self.norm
        return max(len(count)-1, trailing)
    def indent_level(self, tabsize):
        """Return the effective indent width for the given tab size."""
        # count, il = self.norm
        # for i in range(len(count)):
        # if count[i]:
        # il = il + (i//tabsize + 1)*tabsize * count[i]
        # return il
        # quicker:
        # il = trailing + sum (i//ts + 1)*ts*count[i] =
        # trailing + ts * sum (i//ts + 1)*count[i] =
        # trailing + ts * sum i//ts*count[i] + count[i] =
        # trailing + ts * [(sum i//ts*count[i]) + (sum count[i])] =
        # trailing + ts * [(sum i//ts*count[i]) + num_tabs]
        # and note that i//ts*count[i] is 0 when i < ts
        count, trailing = self.norm
        il = 0
        for i in range(tabsize, len(count)):
            il = il + i//tabsize * count[i]
        return trailing + tabsize * (il + self.nt)
    # return true iff self.indent_level(t) == other.indent_level(t)
    # for all t >= 1
    def equal(self, other):
        return self.norm == other.norm
    # return a list of tuples (ts, i1, i2) such that
    # i1 == self.indent_level(ts) != other.indent_level(ts) == i2.
    # Intended to be used after not self.equal(other) is known, in which
    # case it will return at least one witnessing tab size.
    def not_equal_witness(self, other):
        n = max(self.longest_run_of_spaces(),
                other.longest_run_of_spaces()) + 1
        a = []
        for ts in range(1, n+1):
            if self.indent_level(ts) != other.indent_level(ts):
                a.append( (ts,
                           self.indent_level(ts),
                           other.indent_level(ts)) )
        return a
    # Return True iff self.indent_level(t) < other.indent_level(t)
    # for all t >= 1.
    # The algorithm is due to Vincent Broman.
    # Easy to prove it's correct.
    # XXXpost that.
    # Trivial to prove n is sharp (consider T vs ST).
    # Unknown whether there's a faster general way. I suspected so at
    # first, but no longer.
    # For the special (but common!) case where M and N are both of the
    # form (T*)(S*), M.less(N) iff M.len() < N.len() and
    # M.num_tabs() <= N.num_tabs(). Proof is easy but kinda long-winded.
    # XXXwrite that up.
    # Note that M is of the form (T*)(S*) iff len(M.norm[0]) <= 1.
    def less(self, other):
        if self.n >= other.n:
            return False
        if self.is_simple and other.is_simple:
            return self.nt <= other.nt
        n = max(self.longest_run_of_spaces(),
                other.longest_run_of_spaces()) + 1
        # the self.n >= other.n test already did it for ts=1
        for ts in range(2, n+1):
            if self.indent_level(ts) >= other.indent_level(ts):
                return False
        return True
    # return a list of tuples (ts, i1, i2) such that
    # i1 == self.indent_level(ts) >= other.indent_level(ts) == i2.
    # Intended to be used after not self.less(other) is known, in which
    # case it will return at least one witnessing tab size.
    def not_less_witness(self, other):
        n = max(self.longest_run_of_spaces(),
                other.longest_run_of_spaces()) + 1
        a = []
        for ts in range(1, n+1):
            if self.indent_level(ts) >= other.indent_level(ts):
                a.append( (ts,
                           self.indent_level(ts),
                           other.indent_level(ts)) )
        return a
def format_witnesses(w):
    """Format witness tuples as e.g. 'at tab sizes 1, 2'."""
    sizes = ', '.join(str(tup[0]) for tup in w)
    plural = "s" if len(w) > 1 else ""
    return "at tab size" + plural + " " + sizes
def process_tokens(tokens):
    """Scan a token stream and raise NannyNag on ambiguous indentation.

    Maintains a stack of Whitespace objects mirroring the INDENT/DEDENT
    structure and compares each statement's leading whitespace against it.
    """
    INDENT = tokenize.INDENT
    DEDENT = tokenize.DEDENT
    NEWLINE = tokenize.NEWLINE
    JUNK = tokenize.COMMENT, tokenize.NL
    # seed with the empty indent so top-level statements compare equal
    indents = [Whitespace("")]
    check_equal = 0
    for (type, token, start, end, line) in tokens:
        if type == NEWLINE:
            # a program statement, or ENDMARKER, will eventually follow,
            # after some (possibly empty) run of tokens of the form
            # (NL | COMMENT)* (INDENT | DEDENT+)?
            # If an INDENT appears, setting check_equal is wrong, and will
            # be undone when we see the INDENT.
            check_equal = 1
        elif type == INDENT:
            check_equal = 0
            thisguy = Whitespace(token)
            # a new indent must be strictly greater at every tab size
            if not indents[-1].less(thisguy):
                witness = indents[-1].not_less_witness(thisguy)
                msg = "indent not greater e.g. " + format_witnesses(witness)
                raise NannyNag(start[0], msg, line)
            indents.append(thisguy)
        elif type == DEDENT:
            # there's nothing we need to check here! what's important is
            # that when the run of DEDENTs ends, the indentation of the
            # program statement (or ENDMARKER) that triggered the run is
            # equal to what's left at the top of the indents stack
            # Ouch! This assert triggers if the last line of the source
            # is indented *and* lacks a newline -- then DEDENTs pop out
            # of thin air.
            # assert check_equal # else no earlier NEWLINE, or an earlier INDENT
            check_equal = 1
            del indents[-1]
        elif check_equal and type not in JUNK:
            # this is the first "real token" following a NEWLINE, so it
            # must be the first token of the next program statement, or an
            # ENDMARKER; the "line" argument exposes the leading whitespace
            # for this statement; in the case of ENDMARKER, line is an empty
            # string, so will properly match the empty string with which the
            # "indents" stack was seeded
            check_equal = 0
            thisguy = Whitespace(line)
            if not indents[-1].equal(thisguy):
                witness = indents[-1].not_equal_witness(thisguy)
                msg = "indent not equal e.g. " + format_witnesses(witness)
                raise NannyNag(start[0], msg, line)
if __name__ == '__main__':
main()
| |
"""
Support for monitoring an SABnzbd NZB client.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sabnzbd/
"""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.discovery import SERVICE_SABNZBD
from homeassistant.const import (
CONF_HOST, CONF_API_KEY, CONF_NAME, CONF_PORT, CONF_SENSORS, CONF_SSL)
from homeassistant.core import callback
from homeassistant.helpers import discovery
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.util.json import load_json, save_json
REQUIREMENTS = ['pysabnzbd==1.0.1']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'sabnzbd'
DATA_SABNZBD = 'sabznbd'
_CONFIGURING = {}
ATTR_SPEED = 'speed'
BASE_URL_FORMAT = '{}://{}:{}/'
CONFIG_FILE = 'sabnzbd.conf'
DEFAULT_HOST = 'localhost'
DEFAULT_NAME = 'SABnzbd'
DEFAULT_PORT = 8080
DEFAULT_SPEED_LIMIT = '100'
DEFAULT_SSL = False
UPDATE_INTERVAL = timedelta(seconds=30)
SERVICE_PAUSE = 'pause'
SERVICE_RESUME = 'resume'
SERVICE_SET_SPEED = 'set_speed'
SIGNAL_SABNZBD_UPDATED = 'sabnzbd_updated'
SENSOR_TYPES = {
'current_status': ['Status', None, 'status'],
'speed': ['Speed', 'MB/s', 'kbpersec'],
'queue_size': ['Queue', 'MB', 'mb'],
'queue_remaining': ['Left', 'MB', 'mbleft'],
'disk_size': ['Disk', 'GB', 'diskspacetotal1'],
'disk_free': ['Disk Free', 'GB', 'diskspace1'],
'queue_count': ['Queue Count', None, 'noofslots_total'],
'day_size': ['Daily Total', 'GB', 'day_size'],
'week_size': ['Weekly Total', 'GB', 'week_size'],
'month_size': ['Monthly Total', 'GB', 'month_size'],
'total_size': ['Total', 'GB', 'total_size'],
}
SPEED_LIMIT_SCHEMA = vol.Schema({
vol.Optional(ATTR_SPEED, default=DEFAULT_SPEED_LIMIT): cv.string,
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_SENSORS):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
}),
}, extra=vol.ALLOW_EXTRA)
async def async_check_sabnzbd(sab_api):
    """Check if we can reach SABnzbd."""
    from pysabnzbd import SabnzbdApiException

    try:
        await sab_api.check_available()
    except SabnzbdApiException:
        _LOGGER.error("Connection to SABnzbd API failed")
        return False
    return True
async def async_configure_sabnzbd(hass, config, use_ssl, name=DEFAULT_NAME,
                                  api_key=None):
    """Try to configure Sabnzbd and request api key if configuration fails.

    Builds the base URL from the host/port in ``config``; if no API key is
    given, falls back to the one cached on disk from a previous
    configurator run for that URL.
    """
    from pysabnzbd import SabnzbdApi
    host = config[CONF_HOST]
    port = config[CONF_PORT]
    uri_scheme = 'https' if use_ssl else 'http'
    base_url = BASE_URL_FORMAT.format(uri_scheme, host, port)
    if api_key is None:
        # load the API key cached by a previous configurator session
        conf = await hass.async_add_job(load_json,
                                        hass.config.path(CONFIG_FILE))
        api_key = conf.get(base_url, {}).get(CONF_API_KEY, '')
    sab_api = SabnzbdApi(base_url, api_key)
    if await async_check_sabnzbd(sab_api):
        async_setup_sabnzbd(hass, sab_api, config, name)
    else:
        # unreachable or bad key: ask the user for the API key
        async_request_configuration(hass, config, base_url)
async def async_setup(hass, config):
    """Set up the SABnzbd component.

    Registers a discovery listener and, when YAML configuration is
    present, configures the instance immediately.
    """
    async def sabnzbd_discovered(service, info):
        """Handle service discovery."""
        # discovery advertises HTTPS via the 'https' property flag
        ssl = info.get('properties', {}).get('https', '0') == '1'
        await async_configure_sabnzbd(hass, info, ssl)
    discovery.async_listen(hass, SERVICE_SABNZBD, sabnzbd_discovered)
    conf = config.get(DOMAIN)
    if conf is not None:
        use_ssl = conf.get(CONF_SSL)
        name = conf.get(CONF_NAME)
        api_key = conf.get(CONF_API_KEY)
        await async_configure_sabnzbd(hass, conf, use_ssl, name, api_key)
    return True
@callback
def async_setup_sabnzbd(hass, sab_api, config, name):
    """Set up SABnzbd sensors and services.

    Registers the pause/resume/set_speed services and starts a periodic
    refresh that broadcasts SIGNAL_SABNZBD_UPDATED to sensors.
    """
    sab_api_data = SabnzbdApiData(sab_api, name, config.get(CONF_SENSORS, {}))
    if config.get(CONF_SENSORS):
        # only load the sensor platform if sensors were configured
        hass.data[DATA_SABNZBD] = sab_api_data
        hass.async_create_task(
            discovery.async_load_platform(hass, 'sensor', DOMAIN, {}, config))
    async def async_service_handler(service):
        """Handle service calls."""
        if service.service == SERVICE_PAUSE:
            await sab_api_data.async_pause_queue()
        elif service.service == SERVICE_RESUME:
            await sab_api_data.async_resume_queue()
        elif service.service == SERVICE_SET_SPEED:
            speed = service.data.get(ATTR_SPEED)
            await sab_api_data.async_set_queue_speed(speed)
    hass.services.async_register(DOMAIN, SERVICE_PAUSE,
                                 async_service_handler,
                                 schema=vol.Schema({}))
    hass.services.async_register(DOMAIN, SERVICE_RESUME,
                                 async_service_handler,
                                 schema=vol.Schema({}))
    hass.services.async_register(DOMAIN, SERVICE_SET_SPEED,
                                 async_service_handler,
                                 schema=SPEED_LIMIT_SCHEMA)
    async def async_update_sabnzbd(now):
        """Refresh SABnzbd queue data."""
        from pysabnzbd import SabnzbdApiException
        try:
            await sab_api.refresh_data()
            # notify all sensors that fresh queue data is available
            async_dispatcher_send(hass, SIGNAL_SABNZBD_UPDATED, None)
        except SabnzbdApiException as err:
            _LOGGER.error(err)
    async_track_time_interval(hass, async_update_sabnzbd, UPDATE_INTERVAL)
@callback
def async_request_configuration(hass, config, host):
    """Request configuration steps from the user.

    Shows a configurator card asking for the API key; on success the key
    is persisted to CONFIG_FILE keyed by the base URL.
    """
    from pysabnzbd import SabnzbdApi
    configurator = hass.components.configurator
    # Show an error if this method is called while we are already
    # configuring this host
    if host in _CONFIGURING:
        configurator.async_notify_errors(
            _CONFIGURING[host],
            'Failed to register, please try again.')
        return
    async def async_configuration_callback(data):
        """Handle configuration changes."""
        api_key = data.get(CONF_API_KEY)
        sab_api = SabnzbdApi(host, api_key)
        if not await async_check_sabnzbd(sab_api):
            # key rejected; leave the configurator card open for retry
            return
        def success():
            """Signal successful setup."""
            # persist the working API key for future startups
            conf = load_json(hass.config.path(CONFIG_FILE))
            conf[host] = {CONF_API_KEY: api_key}
            save_json(hass.config.path(CONFIG_FILE), conf)
            req_config = _CONFIGURING.pop(host)
            configurator.request_done(req_config)
        hass.async_add_job(success)
        async_setup_sabnzbd(hass, sab_api, config,
                            config.get(CONF_NAME, DEFAULT_NAME))
    _CONFIGURING[host] = configurator.async_request_config(
        DEFAULT_NAME,
        async_configuration_callback,
        description='Enter the API Key',
        submit_caption='Confirm',
        fields=[{'id': CONF_API_KEY, 'name': 'API Key', 'type': ''}]
    )
class SabnzbdApiData:
    """Class for storing/refreshing sabnzbd api queue data."""

    def __init__(self, sab_api, name, sensors):
        """Initialize component."""
        self.sab_api = sab_api
        self.name = name
        self.sensors = sensors

    async def _async_api_call(self, method_name, *args):
        """Await a Sabnzbd API method, returning False on API errors."""
        from pysabnzbd import SabnzbdApiException
        try:
            return await getattr(self.sab_api, method_name)(*args)
        except SabnzbdApiException as err:
            _LOGGER.error(err)
            return False

    async def async_pause_queue(self):
        """Pause Sabnzbd queue."""
        return await self._async_api_call('pause_queue')

    async def async_resume_queue(self):
        """Resume Sabnzbd queue."""
        return await self._async_api_call('resume_queue')

    async def async_set_queue_speed(self, limit):
        """Set speed limit for the Sabnzbd queue."""
        return await self._async_api_call('set_speed_limit', limit)

    def get_queue_field(self, field):
        """Return the value for the given field from the Sabnzbd queue."""
        return self.sab_api.queue.get(field)
| |
import unittest
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
from ConfigParser import SafeConfigParser
except ImportError:
from configparser import ConfigParser as SafeConfigParser
import yaml
from confirm import generator, utils
def config_from_config_string(config_string):
    """Parse an INI configuration string into a confirm config dict."""
    config_parser = SafeConfigParser()
    # read_file replaces the deprecated/removed readfp in Python 3's
    # configparser; fall back to readfp for the Python 2 parser.
    if hasattr(config_parser, 'read_file'):
        config_parser.read_file(StringIO(config_string))
    else:
        config_parser.readfp(StringIO(config_string))
    return utils.config_parser_to_dict(config_parser)
class AppendValuesTestCase(unittest.TestCase):
    """Tests for generator.append_existing_values."""

    def test_append(self):
        """Existing config values are merged into the schema dict."""
        config_string = "[section]\noption1=value1\noption2=value2"
        schema_string = """
"section":
    "option1":
        "required": true
    "option2":
        "required": true
    "option3":
        "required": true
""".strip()
        # yaml.load without an explicit Loader is deprecated (and an error
        # in PyYAML >= 6); SafeLoader suffices for plain mappings.
        schema = yaml.load(StringIO(schema_string), Loader=yaml.SafeLoader)
        config = config_from_config_string(config_string)
        migrated_config = generator.append_existing_values(schema, config)
        self.assertIn('section', migrated_config)
        # option3 is not in the config, so it keeps only its schema keys
        self.assertIn('option3', migrated_config['section'])
        self.assertIn('required', migrated_config['section']['option3'])
        self.assertNotIn('value', migrated_config['section']['option3'])
        # options present in the config gain a 'value' entry
        self.assertIn('value', migrated_config['section']['option1'])
        self.assertIn('value', migrated_config['section']['option2'])
class GenerateSchemaTestCase(unittest.TestCase):
    """Tests for generator.generate_schema_file."""

    def test_init(self):
        """A schema generated from a config has placeholder descriptions."""
        config_string = "[section]\noption1=value1\noption2=value2"
        schema_string = generator.generate_schema_file(config_string)
        # explicit Loader: yaml.load without one is deprecated/removed
        schema = yaml.load(StringIO(schema_string), Loader=yaml.SafeLoader)
        self.assertIn('section', schema)
        self.assertIn('option1', schema['section'])
        # was asserting 'description' inside the description *string*,
        # which only passed by accident; check the key on the option dict
        self.assertIn('description', schema['section']['option1'])
        self.assertEqual('No description provided.', schema['section']['option1']['description'])
class GenerateConfigParserTestCase(unittest.TestCase):
    """Tests for generator.generate_config_parser."""

    def test_empty_config(self):
        """An empty schema yields a parser without sections."""
        config_parser = generator.generate_config_parser({})
        self.assertFalse(len(config_parser.sections()))

    def test_required(self):
        """Required options without values are emitted as 'TO FILL'."""
        config = {"section":
                  {"option":
                   {"required": True}
                   }
                  }
        config_parser = generator.generate_config_parser(config)
        options = config_parser.options('section')
        self.assertIn('option', options)
        self.assertIn('# required', options)
        value = config_parser.get('section', 'option')
        self.assertEqual(value, 'TO FILL')

    def test_required_default(self):
        """A required option with only a default uses the default."""
        config = {"section":
                  {"option":
                   {"required": True, "default": 12}
                   }
                  }
        config_parser = generator.generate_config_parser(config)
        value = config_parser.get('section', 'option')
        self.assertEqual(value, '12')

    # was also named test_required_default, which shadowed the method
    # above so that test never ran
    def test_required_default_and_value(self):
        """An explicit value wins over the default."""
        config = {"section":
                  {"option":
                   {"required": True, "default": 12, "value": 25}
                   }
                  }
        config_parser = generator.generate_config_parser(config)
        options = config_parser.options('section')
        self.assertIn('option', options)
        self.assertIn('# required', options)
        value = config_parser.get('section', 'option')
        self.assertEqual(value, '25', "We should use the existing value instead of the default!")

    def test_options(self):
        """Values and defaults are resolved per option."""
        config = {"section":
                  {"optiona":
                   {"required": True, "default": 'DA', "value": 'VA'},
                   "optionb":
                   {"required": True, "default": 'DB'}
                   }
                  }
        config_parser = generator.generate_config_parser(config)
        options = config_parser.options('section')
        self.assertIn('optiona', options)
        value = config_parser.get('section', 'optiona')
        self.assertEqual(value, 'VA')
        self.assertIn('optionb', options)
        value = config_parser.get('section', 'optionb')
        self.assertEqual(value, 'DB')

    def test_generate_include_all(self):
        """include_all=True also emits non-required options."""
        config = {"section":
                  {"optiona":
                   {"required": True, "default": 'DA', "value": 'VA'},
                   "optionb":
                   {"default": 'DB'}
                   }
                  }
        config_parser = generator.generate_config_parser(config)
        options = config_parser.options('section')
        self.assertNotIn('optionb', options)
        config_parser = generator.generate_config_parser(config, include_all=True)
        options = config_parser.options('section')
        self.assertIn('optionb', options)
        value = config_parser.get('section', 'optionb')
        self.assertEqual(value, 'DB')
class GenerateDocumentationTestCase(unittest.TestCase):
    """Tests for generator.generate_documentation."""

    def _call_generate_documentation(self, schema_string):
        # explicit Loader: yaml.load without one is deprecated (an error
        # in PyYAML >= 6); SafeLoader suffices for plain mappings
        schema = yaml.load(StringIO(schema_string), Loader=yaml.SafeLoader)
        return generator.generate_documentation(schema)

    def test_basic_case(self):
        schema = """
"section":
    "option":
        "required": true
        "description": "This is a description."
""".strip()
        documentation = self._call_generate_documentation(schema).split('\n')
        self.assertIn("Configuration documentation", documentation)
        self.assertIn("section", documentation)
        self.assertIn("option", documentation)
        self.assertIn("This is a description.", documentation)

    def test_option_with_type(self):
        schema = """
"section":
    "option":
        "required": true
        "type": "bool"
""".strip()
        documentation = self._call_generate_documentation(schema).split('\n')
        self.assertIn("*Type : bool.*", documentation)

    def test_deprecated(self):
        schema = """
"section":
    "option":
        "required": true
        "deprecated": true
        "type": "bool"
""".strip()
        documentation = self._call_generate_documentation(schema).split('\n')
        self.assertIn('** This option is deprecated! **', documentation)
        self.assertIn('** This option is required! **', documentation)

    def test_default(self):
        schema = """
"section":
    "option":
        "default": "1"
""".strip()
        documentation = self._call_generate_documentation(schema).split('\n')
        self.assertIn("The default value is 1.", documentation)
| |
#!/usr/bin/env python
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import contextlib
import datetime
import errno
import json
import logging
import os
import re
import sys
import traceback
# Credential settings harvested from the openrc file (or the process
# environment) by get_auth_details(). A value of None means "required but not
# yet discovered"; get_auth_details() calls status_err() for any entry that is
# still None after loading.
AUTH_DETAILS = {'OS_USERNAME': None,
                'OS_PASSWORD': None,
                'OS_TENANT_NAME': None,
                'OS_AUTH_URL': None,
                'OS_USER_DOMAIN_NAME': None,
                'OS_PROJECT_DOMAIN_NAME': None,
                'OS_IDENTITY_API_VERSION': None,
                'OS_AUTH_VERSION': None,
                'OS_ENDPOINT_TYPE': None,
                'OS_API_INSECURE': False}
# OS_API_INSECURE is currently hard coded to false until OSA fix
# LP #1537117 is implemented
# IMPORTANT:
# v2 keystone auth is still necessary until RPCR switches over to v3 auth
# Path to the openrc file holding MaaS credentials.
OPENRC = '/root/openrc-maas'
# Cache of the last keystone auth_ref, written by keystone_auth().
TOKEN_FILE = '/root/.auth_ref.json'
# Import guard: when python-cinderclient is unavailable, expose a stub that
# aborts the check via status_err() instead of failing at import time.
try:
    from cinderclient import client as c_client
    from cinderclient import exceptions as c_exc
except ImportError:
    def get_cinder_client(*args, **kwargs):
        # status_err() exits the process, so this never returns.
        status_err('Cannot import cinderclient')
else:
    def get_cinder_client(previous_tries=0):
        """Return an authenticated cinder v2 client, or None.

        Re-authenticates and retries on auth failures; gives up and returns
        None once more than 3 attempts have been made.
        """
        if previous_tries > 3:
            return None
        # right now, cinderclient does not accept a previously derived token
        # or endpoint url. So we have to pass it creds and let it do it's own
        # auth each time it's called.
        # NOTE: (mancdaz) update when https://review.openstack.org/#/c/74602/
        # lands
        auth_details = get_auth_details()
        cinder = c_client.Client('2',
                                 auth_details['OS_USERNAME'],
                                 auth_details['OS_PASSWORD'],
                                 auth_details['OS_TENANT_NAME'],
                                 auth_details['OS_AUTH_URL'],
                                 insecure=auth_details['OS_API_INSECURE'],
                                 endpoint_type=auth_details[
                                     'OS_ENDPOINT_TYPE'])
        try:
            # Do something just to ensure we actually have auth'd ok
            volumes = cinder.volumes.list()
            # Exceptions are only thrown when we iterate over volumes
            [i.id for i in volumes]
        except (c_exc.Unauthorized, c_exc.AuthorizationFailure) as e:
            # Stale/invalid credentials: retry recursively with fresh auth.
            cinder = get_cinder_client(previous_tries + 1)
        except Exception as e:
            status_err(str(e))
        return cinder
# Import guard: stub out the factory when python-glanceclient is missing.
try:
    import glanceclient as g_client
    from glanceclient import exc as g_exc
except ImportError:
    def get_glance_client(*args, **kwargs):
        # status_err() exits the process, so this never returns.
        status_err('Cannot import glanceclient')
else:
    def get_glance_client(token=None, endpoint=None, previous_tries=0):
        """Return an authenticated glance v1 client, or None.

        Reuses a cached token/endpoint where possible; on 401 it forces a
        keystone re-auth and retries, giving up after more than 3 tries.
        """
        if previous_tries > 3:
            return None
        # first try to use auth details from auth_ref so we
        # don't need to auth with keystone every time
        auth_ref = get_auth_ref()
        auth_details = get_auth_details()
        keystone = get_keystone_client(auth_ref)
        if not token:
            token = keystone.auth_token
        if not endpoint:
            endpoint = get_endpoint_url_for_service('image',
                                                    auth_ref,
                                                    get_endpoint_type(
                                                        auth_details))
        glance = g_client.Client('1', endpoint=endpoint, token=token)
        try:
            # We don't want to be pulling massive lists of images every time we
            # run
            image = glance.images.list(limit=1)
            # Exceptions are only thrown when we iterate over image
            [i.id for i in image]
        except g_exc.HTTPUnauthorized:
            # Token was rejected: re-auth and retry with the fresh token.
            auth_ref = force_reauth()
            keystone = get_keystone_client(auth_ref)
            token = keystone.auth_token
            glance = get_glance_client(token, endpoint, previous_tries + 1)
        # we only want to pass HTTPException back to the calling poller
        # since this encapsulates all of our actual API failures. Other
        # exceptions will be treated as script/environmental issues and
        # sent to status_err
        except g_exc.HTTPException:
            raise
        except Exception as e:
            status_err(str(e))
        return glance
# Import guard: stub out the factory when python-novaclient is missing.
# NOTE(review): novaclient normally exposes exceptions at
# novaclient.exceptions, not novaclient.client.exceptions -- confirm this
# import path resolves with the client version pinned for this deployment.
try:
    from novaclient import client as nova_client
    from novaclient.client import exceptions as nova_exc
except ImportError:
    def get_nova_client(*args, **kwargs):
        # status_err() exits the process, so this never returns.
        status_err('Cannot import novaclient')
else:
    def get_nova_client(auth_token=None, bypass_url=None, previous_tries=0):
        """Return an authenticated nova v2 client, or None.

        Reuses a cached token/endpoint where possible; on auth failure it
        forces a keystone re-auth and retries, giving up after 3 extra tries.
        """
        if previous_tries > 3:
            return None
        # first try to use auth details from auth_ref so we
        # don't need to auth with keystone every time
        auth_ref = get_auth_ref()
        auth_details = get_auth_details()
        keystone = get_keystone_client(auth_ref)
        if not auth_token:
            auth_token = keystone.auth_token
        if not bypass_url:
            bypass_url = get_endpoint_url_for_service('compute',
                                                      auth_ref,
                                                      get_endpoint_type(
                                                          auth_details))
        nova = nova_client.Client('2', auth_token=auth_token,
                                  bypass_url=bypass_url,
                                  insecure=auth_details['OS_API_INSECURE'])
        try:
            # Light-weight call that proves the token/endpoint work.
            flavors = nova.flavors.list()
            # Exceptions are only thrown when we try and do something
            [flavor.id for flavor in flavors]
        except (nova_exc.Unauthorized, nova_exc.AuthorizationFailure,
                AttributeError) as e:
            # NOTE(mancdaz)nova doesn't properly pass back unauth errors, but
            # in fact tries to re-auth, all by itself. But we didn't pass it
            # an auth_url, so it bombs out horribly with an error.
            auth_ref = force_reauth()
            keystone = get_keystone_client(auth_ref)
            auth_token = keystone.auth_token
            nova = get_nova_client(auth_token, bypass_url, previous_tries + 1)
        # we only want to pass ClientException back to the calling poller
        # since this encapsulates all of our actual API failures. Other
        # exceptions will be treated as script/environmental issues and
        # sent to status_err
        except nova_exc.ClientException:
            raise
        except Exception as e:
            status_err(str(e))
        return nova
# Import guard: stub out both keystone helpers when the client is missing.
try:
    from keystoneclient.openstack.common.apiclient import exceptions as k_exc
    from keystoneclient.v2_0 import client as k2_client
    from keystoneclient.v3 import client as k3_client
except ImportError:
    def keystone_auth(*args, **kwargs):
        # status_err() exits the process, so this never returns.
        status_err('Cannot import keystoneclient')

    def get_keystone_client(*args, **kwargs):
        status_err('Cannot import keystoneclient')
else:
    def keystone_auth(auth_details):
        """Authenticate against keystone and return the resulting auth_ref.

        Picks the v3 client when OS_AUTH_URL ends in 'v3', else v2.0, and
        best-effort caches the auth_ref to TOKEN_FILE.
        """
        try:
            if auth_details['OS_AUTH_URL'].endswith('v3'):
                k_client = k3_client
            else:
                k_client = k2_client
            tenant_name = auth_details['OS_TENANT_NAME']
            keystone = k_client.Client(username=auth_details['OS_USERNAME'],
                                       password=auth_details['OS_PASSWORD'],
                                       tenant_name=tenant_name,
                                       auth_url=auth_details['OS_AUTH_URL'])
        except Exception as e:
            status_err(str(e))
        try:
            with open(TOKEN_FILE, 'w') as token_file:
                json.dump(keystone.auth_ref, token_file)
        except IOError:
            # if we can't write the file we go on
            pass
        return keystone.auth_ref

    def get_keystone_client(auth_ref=None, endpoint=None, previous_tries=0):
        """Return a keystone client built from a cached auth_ref, or None.

        Forces a re-auth and retries on auth failure, giving up after more
        than 3 attempts.
        """
        if previous_tries > 3:
            return None
        # first try to use auth details from auth_ref so we
        # don't need to auth with keystone every time
        if not auth_ref:
            auth_ref = get_auth_ref()
        auth_version = auth_ref['version']
        if not endpoint:
            endpoint = get_endpoint_url_for_service('identity', auth_ref,
                                                    'admin',
                                                    version=auth_version)
        if auth_version == 'v3':
            k_client = k3_client
        else:
            k_client = k2_client
        keystone = k_client.Client(auth_ref=auth_ref, endpoint=endpoint)
        try:
            # This should be a rather light-weight call that validates we're
            # actually connected/authenticated.
            keystone.services.list()
        except (k_exc.AuthorizationFailure, k_exc.Unauthorized):
            # Force an update of auth_ref
            auth_ref = force_reauth()
            keystone = get_keystone_client(auth_ref,
                                           endpoint,
                                           previous_tries + 1)
        except (k_exc.HttpServerError, k_exc.ClientException):
            raise
        except Exception as e:
            status_err(str(e))
        return keystone
# Import guard: stub out the factory when python-neutronclient is missing.
try:
    from neutronclient.common import exceptions as n_exc
    from neutronclient.neutron import client as n_client
except ImportError:
    def get_neutron_client(*args, **kwargs):
        # status_err() exits the process, so this never returns.
        status_err('Cannot import neutronclient')
else:
    def get_neutron_client(token=None, endpoint_url=None, previous_tries=0):
        """Return an authenticated neutron 2.0 client, or None.

        Reuses a cached token/endpoint where possible; re-auths and retries
        on a rejected token, giving up after more than 3 attempts.
        """
        if previous_tries > 3:
            return None
        # first try to use auth details from auth_ref so we
        # don't need to auth with keystone every time
        auth_ref = get_auth_ref()
        auth_details = get_auth_details()
        keystone = get_keystone_client(auth_ref)
        if not token:
            token = keystone.auth_token
        if not endpoint_url:
            endpoint_url = get_endpoint_url_for_service('network',
                                                        auth_ref,
                                                        get_endpoint_type(
                                                            auth_details))
        neutron = n_client.Client('2.0',
                                  token=token,
                                  endpoint_url=endpoint_url,
                                  insecure=auth_details['OS_API_INSECURE'])
        try:
            # some arbitrary command that should always have at least 1 result
            agents = neutron.list_agents()
            # iterate the list to ensure we actually have something
            [i['id'] for i in agents['agents']]
        # if we have provided a bum token, neutron wants to try and reauth
        # itself but it can't as we didn't provide it an auth_url and all that
        # jazz. Since we want to auth again ourselves (so we can update our
        # local token) we'll just catch the exception it throws and move on
        except n_exc.NoAuthURLProvided:
            auth_ref = force_reauth()
            keystone = get_keystone_client(auth_ref)
            token = keystone.auth_token
            neutron = get_neutron_client(token, endpoint_url,
                                         previous_tries + 1)
        # we only want to pass NeutronClientException back to the caller
        # since this encapsulates all of our actual API failures. Other
        # exceptions will be treated as script/environmental issues and
        # sent to status_err
        except n_exc.NeutronClientException as e:
            raise
        except Exception as e:
            status_err(str(e))
        return neutron
# Import guard: stub out the factory when python-heatclient is missing.
try:
    from heatclient import client as heat_client
    from heatclient import exc as h_exc
except ImportError:
    def get_heat_client(*args, **kwargs):
        # status_err() exits the process, so this never returns.
        status_err('Cannot import heatclient')
else:
    def get_heat_client(token=None, endpoint=None, previous_tries=0):
        """Return an authenticated heat v1 client, or None.

        Reuses a cached token/endpoint where possible; on 401 it forces a
        keystone re-auth and retries, giving up after more than 3 attempts.
        """
        if previous_tries > 3:
            return None
        # first try to use auth details from auth_ref so we
        # don't need to auth with keystone every time
        auth_ref = get_auth_ref()
        auth_details = get_auth_details()
        keystone = get_keystone_client(auth_ref)
        if not token:
            token = keystone.auth_token
        if not endpoint:
            endpoint = get_endpoint_url_for_service('orchestration',
                                                    auth_ref,
                                                    get_endpoint_type(
                                                        auth_details))
        heat = heat_client.Client('1',
                                  endpoint=endpoint,
                                  token=token,
                                  insecure=auth_details['OS_API_INSECURE'])
        try:
            # Light-weight call that proves the token/endpoint work.
            heat.build_info.build_info()
        except h_exc.HTTPUnauthorized:
            auth_ref = force_reauth()
            keystone = get_keystone_client(auth_ref)
            token = keystone.auth_token
            heat = get_heat_client(token, endpoint, previous_tries + 1)
        except h_exc.HTTPException:
            # API-level failures are passed back to the calling poller.
            raise
        except Exception as e:
            status_err(str(e))
        return heat
class MaaSException(Exception):
    """Base MaaS plugin exception.

    NOTE(review): nothing in this module raises it; presumably individual
    plugin scripts subclass or raise it -- verify before removing.
    """
def is_token_expired(token, auth_details):
    """Return True if the cached keystone token has expired.

    token: auth_ref dict -- v3 refs carry 'expires_at' at the top level,
        v2 refs carry the timestamp under token['token']['expires'].
    auth_details: dict with at least 'OS_AUTH_URL' (used to pick v2 vs v3).

    Raises the last ValueError from strptime if the timestamp matches
    neither supported format.
    """
    # The expiry field location is loop-invariant, so extract it once.
    if auth_details['OS_AUTH_URL'].endswith('v3'):
        expires_at = token.get('expires_at')
    else:
        expires_at = token['token'].get('expires')
    # Keystone emits timestamps with or without fractional seconds.
    last_exc = None
    for fmt in ('%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S.%fZ'):
        try:
            expires = datetime.datetime.strptime(expires_at, fmt)
            break
        except ValueError as exc:
            # Python 3 unbinds the 'as' target after the handler, so the
            # original `raise e` in the for/else raised NameError; keep a
            # reference explicitly instead.
            last_exc = exc
    else:
        raise last_exc
    # NOTE(review): compares against naive local time -- keystone expiry is
    # UTC; confirm hosts run UTC (original behavior preserved).
    return datetime.datetime.now() >= expires
def get_service_catalog(auth_ref):
    """Return the service catalog from a keystone auth_ref.

    Keystone v3 stores it under 'catalog'; v2.0 under 'serviceCatalog'.
    """
    if 'catalog' in auth_ref:
        return auth_ref['catalog']
    return auth_ref.get('serviceCatalog')
def get_endpoint_type(auth_details):
    """Map an OS_ENDPOINT_TYPE value to a keystone v3 interface name.

    Anything other than 'publicURL' / 'adminURL' falls back to 'internal'.
    """
    interface_by_type = {
        'publicURL': 'public',
        'adminURL': 'admin',
    }
    return interface_by_type.get(auth_details['OS_ENDPOINT_TYPE'], 'internal')
def get_auth_ref():
    """Return a usable keystone auth_ref, re-authenticating when needed.

    Prefers the on-disk cache; authenticates when the cache is missing, and
    again when the (possibly fresh) ref has already expired.
    """
    details = get_auth_details()
    ref = get_auth_from_file()
    if ref is None:
        ref = keystone_auth(details)
    # Even a just-loaded (or just-created) ref is checked for expiry.
    if is_token_expired(ref, details):
        ref = keystone_auth(details)
    return ref
def get_auth_from_file():
    """Load the cached auth_ref from TOKEN_FILE.

    Returns None when the cache file does not exist; any other IOError is
    fatal (status_err exits the process).
    """
    try:
        with open(TOKEN_FILE) as token_file:
            return json.load(token_file)
    except IOError as e:
        if e.errno != errno.ENOENT:
            status_err(e)
        return None
def get_auth_details(openrc_file=OPENRC):
    """Collect OpenStack auth settings from an openrc file or the environment.

    Lines of the form ``[export ]KEY = VALUE`` fill in any still-None entries
    of AUTH_DETAILS. If the openrc file is absent, every key is taken from
    the process environment instead. Exits via status_err() if any setting
    remains unset.
    """
    # NOTE(review): this binds (and mutates) the module-level AUTH_DETAILS
    # dict, so values are effectively cached across calls -- confirm that
    # sharing is intentional before copying here.
    auth_details = AUTH_DETAILS
    # Raw string: '\s'/'\w' in a plain literal are invalid escape sequences
    # on modern Python (SyntaxWarning); the compiled pattern is unchanged.
    pattern = re.compile(
        r'^(?:export\s)?(?P<key>\w+)(?:\s+)?=(?:\s+)?(?P<value>.*)$'
    )
    try:
        with open(openrc_file) as openrc:
            for line in openrc:
                match = pattern.match(line)
                if match is None:
                    continue
                k = match.group('key')
                v = match.group('value')
                # Only fill keys we care about that are still unset.
                if k in auth_details and auth_details[k] is None:
                    auth_details[k] = v
    except IOError as e:
        if e.errno != errno.ENOENT:
            status_err(e)
        # no openrc file, so we try the environment
        # NOTE(review): this also overwrites OS_API_INSECURE (default False)
        # with the environment value -- verify that is intended.
        for key in auth_details.keys():
            auth_details[key] = os.environ.get(key)
    for key in auth_details.keys():
        if auth_details[key] is None:
            status_err('%s not set' % key)
    return auth_details
def get_url_for_type(endpoint, url_type, auth_version):
    """Extract the URL of the requested interface from a catalog endpoint.

    v3 endpoints carry one interface each ('interface'/'url' keys); v2
    endpoints carry all interfaces as '<type>URL' keys. Returns None for a
    v3 endpoint whose interface does not match.
    """
    if auth_version != 'v3':
        return endpoint[url_type + 'URL']
    if endpoint['interface'] == url_type:
        return endpoint['url']
    return None
def get_endpoint_url_for_service(service_type, auth_ref,
                                 url_type='public', version=None):
    """Find a service endpoint URL in the auth_ref's catalog.

    version is the identifier on the end of the url, e.g. for keystone admin
    api v3 (http://172.29.236.3:35357/v3) pass version='v3'. Returns None
    when no matching endpoint exists.
    """
    auth_version = auth_ref['version']
    for service in get_service_catalog(auth_ref):
        if service['type'] != service_type:
            continue
        for endpoint in service['endpoints']:
            url = get_url_for_type(endpoint, url_type, auth_version)
            if url is None:
                continue
            # Accept the url when no version filter was given, or when the
            # url ends with the requested version suffix.
            if not version or url.endswith(version):
                return url
def force_reauth():
    """Authenticate against keystone afresh and return the new auth_ref."""
    return keystone_auth(get_auth_details())
# Single status line reported by the plugin, set via status()/status_err().
STATUS = ''


def status(status, message, force_print=False):
    """Record the plugin's status line (and optionally print it now).

    Embedded newlines are escaped so the line stays a single record.
    Raises ValueError for the reserved statuses 'ok', 'warn' and 'err'.
    """
    global STATUS
    if status in ('ok', 'warn', 'err'):
        raise ValueError('The status "%s" is not allowed because it creates a '
                         'metric called legacy_state' % status)
    parts = ['status %s' % status]
    if message is not None:
        parts.append(str(message))
    STATUS = ' '.join(parts).replace('\n', '\\n')
    if force_print:
        print(STATUS)
def status_err(message=None, force_print=False, exception=None):
    """Record an error status, then exit (or re-raise `exception`).

    When `exception` is given it is re-raised after the status is recorded;
    otherwise the process exits with code 1.
    """
    if exception:
        # A status message cannot exceed 256 characters: keep 'error ' plus
        # up to 250 characters from the end of the message. Guard against
        # message being None or a non-str (e.g. an exception object) --
        # slicing those raised TypeError in the original and masked the
        # real error.
        if message is not None:
            message = str(message)[-250:]
    status('error', message, force_print=force_print)
    if exception:
        raise exception
    sys.exit(1)
def status_ok(message=None, force_print=False):
    """Record a successful ('okay') status line."""
    status('okay', message, force_print=force_print)
# Metric lines accumulated by metric(); printed by print_output().
METRICS = []


def metric(name, metric_type, value, unit=None):
    """Append one metric record, escaping newlines; max 50 per check."""
    # status_err() exits, so the 51st metric aborts the plugin.
    if len(METRICS) > 49:
        status_err('Maximum of 50 metrics per check')
    fields = ['metric %s %s %s' % (name, metric_type, value)]
    if unit is not None:
        fields.append(unit)
    METRICS.append(' '.join(fields).replace('\n', '\\n'))


def metric_bool(name, success):
    """Record a boolean outcome as a uint32 metric (1 = success)."""
    metric(name, 'uint32', 1 if success else 0)
# Log to the shared plugin log file; if it cannot be opened (e.g. running as
# an unprivileged user), fall back to default stream logging and record why.
try:
    logging.basicConfig(filename='/var/log/maas_plugins.log',
                        format='%(asctime)s %(levelname)s: %(message)s')
except IOError as e:
    logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s')
    logging.error('An error occurred accessing /var/log/maas_plugins.log. %s' %
                  e)
@contextlib.contextmanager
def print_output():
    """Context manager that emits STATUS/METRICS once the body finishes.

    On SystemExit the recorded status line is still printed before the exit
    propagates. Any other unhandled exception is logged and converted into
    an error status via status_err(), which re-raises it. On success the
    status line and all accumulated metric lines are printed.
    """
    try:
        yield
    except SystemExit:
        if STATUS:
            print(STATUS)
        raise
    except Exception as e:
        logging.exception('The plugin %s has failed with an unhandled '
                          'exception', sys.argv[0])
        status_err(traceback.format_exc(), force_print=True, exception=e)
    else:
        if STATUS:
            print(STATUS)
        # 'metric_line', not 'metric': the original loop variable shadowed
        # the module-level metric() function.
        for metric_line in METRICS:
            print(metric_line)
| |
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module contains constants used by cbuildbot and related code."""
import os
def _FindSourceRoot():
"""Try and find the root check out of the chromiumos tree"""
source_root = path = os.path.realpath(os.path.join(
os.path.abspath(__file__), '..', '..', '..'))
while True:
if os.path.isdir(os.path.join(path, '.repo')):
return path
elif path == '/':
break
path = os.path.dirname(path)
return source_root
# Well-known paths in and around the checkout.
SOURCE_ROOT = _FindSourceRoot()
# Where the source tree is mounted inside the chroot.
CHROOT_SOURCE_ROOT = '/mnt/host/source'
CROSUTILS_DIR = os.path.join(SOURCE_ROOT, 'src/scripts')
CHROMITE_DIR = os.path.join(SOURCE_ROOT, 'chromite')
CHROMITE_BIN_SUBDIR = 'chromite/bin'
CHROMITE_BIN_DIR = os.path.join(SOURCE_ROOT, CHROMITE_BIN_SUBDIR)
PATH_TO_CBUILDBOT = os.path.join(CHROMITE_BIN_SUBDIR, 'cbuildbot')
DEFAULT_CHROOT_DIR = 'chroot'
SDK_TOOLCHAINS_OUTPUT = 'tmp/toolchain-pkgs'
AUTOTEST_BUILD_PATH = 'usr/local/build/autotest'
# TODO: Eliminate these or merge with manifest_version.py:STATUS_PASSED
# crbug.com/318930
FINAL_STATUS_PASSED = 'passed'
FINAL_STATUS_FAILED = 'failed'
# Re-execution API constants.
# Used by --resume and --bootstrap to decipher which options they
# can pass to the target cbuildbot (since it may not have that
# option).
# Format is Major:Minor. Minor is used for tracking new options added
# that aren't critical to the older version if it's not ran.
# Major is used for tracking heavy API breakage- for example, no longer
# supporting the --resume option.
REEXEC_API_MAJOR = 0
REEXEC_API_MINOR = 2
REEXEC_API_VERSION = '%i.%i' % (REEXEC_API_MAJOR, REEXEC_API_MINOR)
ISOLATESERVER = 'https://isolateserver.appspot.com'
# Email/domain suffixes used to classify users and hosts.
GOOGLE_EMAIL = '@google.com'
CHROMIUM_EMAIL = '@chromium.org'
CORP_DOMAIN = 'corp.google.com'
GOLO_DOMAIN = 'golo.chromium.org'
# Gerrit-on-Borg (GoB) host template and the hosts/URLs derived from it.
GOB_HOST = '%s.googlesource.com'
EXTERNAL_GOB_INSTANCE = 'chromium'
EXTERNAL_GERRIT_INSTANCE = 'chromium-review'
EXTERNAL_GOB_HOST = GOB_HOST % EXTERNAL_GOB_INSTANCE
EXTERNAL_GERRIT_HOST = GOB_HOST % EXTERNAL_GERRIT_INSTANCE
EXTERNAL_GOB_URL = 'https://%s' % EXTERNAL_GOB_HOST
EXTERNAL_GERRIT_URL = 'https://%s' % EXTERNAL_GERRIT_HOST
INTERNAL_GOB_INSTANCE = 'chrome-internal'
INTERNAL_GERRIT_INSTANCE = 'chrome-internal-review'
INTERNAL_GOB_HOST = GOB_HOST % INTERNAL_GOB_INSTANCE
INTERNAL_GERRIT_HOST = GOB_HOST % INTERNAL_GERRIT_INSTANCE
INTERNAL_GOB_URL = 'https://%s' % INTERNAL_GOB_HOST
INTERNAL_GERRIT_URL = 'https://%s' % INTERNAL_GERRIT_HOST
# Well-known project names and their clone URLs.
REPO_PROJECT = 'external/repo'
REPO_URL = '%s/%s' % (EXTERNAL_GOB_URL, REPO_PROJECT)
CHROMITE_PROJECT = 'chromiumos/chromite'
CHROMITE_URL = '%s/%s' % (EXTERNAL_GOB_URL, CHROMITE_PROJECT)
CHROMIUM_SRC_PROJECT = 'chromium/src'
CHROMIUM_GOB_URL = '%s/%s.git' % (EXTERNAL_GOB_URL, CHROMIUM_SRC_PROJECT)
MANIFEST_PROJECT = 'chromiumos/manifest'
MANIFEST_INT_PROJECT = 'chromeos/manifest-internal'
MANIFEST_PROJECTS = (MANIFEST_PROJECT, MANIFEST_INT_PROJECT)
MANIFEST_URL = '%s/%s' % (EXTERNAL_GOB_URL, MANIFEST_PROJECT)
MANIFEST_INT_URL = '%s/%s' % (INTERNAL_GERRIT_URL, MANIFEST_INT_PROJECT)
DEFAULT_MANIFEST = 'default.xml'
OFFICIAL_MANIFEST = 'official.xml'
SHARED_CACHE_ENVVAR = 'CROS_CACHEDIR'
# CrOS remotes specified in the manifests.
EXTERNAL_REMOTE = 'cros'
INTERNAL_REMOTE = 'cros-internal'
CHROMIUM_REMOTE = 'chromium'
CHROME_REMOTE = 'chrome'
GERRIT_HOSTS = {
    EXTERNAL_REMOTE: EXTERNAL_GERRIT_HOST,
    INTERNAL_REMOTE: INTERNAL_GERRIT_HOST,
}
CROS_REMOTES = {
    EXTERNAL_REMOTE: EXTERNAL_GOB_URL,
    INTERNAL_REMOTE: INTERNAL_GOB_URL,
}
GIT_REMOTES = {
    CHROMIUM_REMOTE: EXTERNAL_GOB_URL,
    CHROME_REMOTE: INTERNAL_GOB_URL,
}
GIT_REMOTES.update(CROS_REMOTES)
# Prefix to distinguish internal and external changes. This is used
# when user specifies a patch with "-g", when generating a key for
# a patch to used in our PatchCache, and when display a custom string
# for the patch.
INTERNAL_CHANGE_PREFIX = '*'
EXTERNAL_CHANGE_PREFIX = ''
CHANGE_PREFIX = {
    INTERNAL_REMOTE: INTERNAL_CHANGE_PREFIX,
    EXTERNAL_REMOTE: EXTERNAL_CHANGE_PREFIX,
}
# List of remotes that are ok to include in the external manifest.
EXTERNAL_REMOTES = (EXTERNAL_REMOTE, CHROMIUM_REMOTE)
# Mapping 'remote name' -> regexp that matches names of repositories on that
# remote that can be branched when creating CrOS branch. Branching script will
# actually create a new git ref when branching these projects. It won't attempt
# to create a git ref for other projects that may be mentioned in a manifest.
BRANCHABLE_PROJECTS = {
    EXTERNAL_REMOTE: r'chromiumos/(.+)',
    INTERNAL_REMOTE: r'chromeos/(.+)',
}
# TODO(sosa): Move to manifest-versions-external once its created
MANIFEST_VERSIONS_SUFFIX = '/chromiumos/manifest-versions'
MANIFEST_VERSIONS_INT_SUFFIX = '/chromeos/manifest-versions'
MANIFEST_VERSIONS_GS_URL = 'gs://chromeos-manifest-versions'
TRASH_BUCKET = 'gs://chromeos-throw-away-bucket'
STREAK_COUNTERS = 'streak_counters'
# Names of the temporary git branches the builders operate on.
PATCH_BRANCH = 'patch_branch'
STABLE_EBUILD_BRANCH = 'stabilizing_branch'
MERGE_BRANCH = 'merge_branch'
# These branches are deleted at the beginning of every buildbot run.
CREATED_BRANCHES = [
    PATCH_BRANCH,
    STABLE_EBUILD_BRANCH,
    MERGE_BRANCH
]
# Constants for uprevving Chrome
# Portage category and package name for Chrome.
CHROME_PN = 'chromeos-chrome'
CHROME_CP = 'chromeos-base/%s' % CHROME_PN
# Chrome use flags
USE_CHROME_INTERNAL = 'chrome_internal'
USE_AFDO_USE = 'afdo_use'
# Builds and validates _alpha ebuilds. These builds sync to the latest
# revision of the Chromium src tree and build with that checkout.
CHROME_REV_TOT = 'tot'
# Builds and validates chrome at a given revision through cbuildbot
# --chrome_version
CHROME_REV_SPEC = 'spec'
# Builds and validates the latest Chromium release as defined by
# ~/trunk/releases in the Chrome src tree. These ebuilds are suffixed with rc.
CHROME_REV_LATEST = 'latest_release'
# Builds and validates the latest Chromium release for a specific Chromium
# branch that we want to watch. These ebuilds are suffixed with rc.
CHROME_REV_STICKY = 'stable_release'
# Builds and validates Chromium for a pre-populated directory.
# Also uses _alpha, since portage doesn't have anything lower.
CHROME_REV_LOCAL = 'local'
VALID_CHROME_REVISIONS = [CHROME_REV_TOT, CHROME_REV_LATEST,
                          CHROME_REV_STICKY, CHROME_REV_LOCAL, CHROME_REV_SPEC]
# Build types supported.
# TODO(sosa): Deprecate PFQ type.
# Incremental builds that are built using binary packages when available.
# These builds have less validation than other build types.
INCREMENTAL_TYPE = 'binary'
# These builds serve as PFQ builders. This is being deprecated.
PFQ_TYPE = 'pfq'
# Hybrid Commit and PFQ type. Ultimate protection. Commonly referred to
# as simply "commit queue" now.
PALADIN_TYPE = 'paladin'
# A builder that kicks off Pre-CQ builders that bless the purest CLs.
PRE_CQ_LAUNCHER_TYPE = 'priest'
# A builder that cuts and prunes branches.
CREATE_BRANCH_TYPE = 'gardener'
# Chrome PFQ type. Incremental build type that builds and validates new
# versions of Chrome. Only valid if set with CHROME_REV. See
# VALID_CHROME_REVISIONS for more information.
CHROME_PFQ_TYPE = 'chrome'
# Builds from source and non-incremental. This builds fully wipe their
# chroot before the start of every build and do not use a BINHOST.
BUILD_FROM_SOURCE_TYPE = 'full'
# Full but with versioned logic.
CANARY_TYPE = 'canary'
# Generate payloads for an already built build/version.
PAYLOADS_TYPE = 'payloads'
BRANCH_UTIL_CONFIG = 'branch-util'
# Special build type for Chroot builders. These builds focus on building
# toolchains and validate that they work.
CHROOT_BUILDER_TYPE = 'chroot'
CHROOT_BUILDER_BOARD = 'amd64-host'
# Build that refreshes the online Portage package status spreadsheet.
REFRESH_PACKAGES_TYPE = 'refresh_packages'
# NOTE(review): CHROOT_BUILDER_BOARD is a board name, not a build type --
# its presence in VALID_BUILD_TYPES looks accidental; verify before removing.
VALID_BUILD_TYPES = (
    PALADIN_TYPE,
    INCREMENTAL_TYPE,
    BUILD_FROM_SOURCE_TYPE,
    CANARY_TYPE,
    CHROOT_BUILDER_TYPE,
    CHROOT_BUILDER_BOARD,
    CHROME_PFQ_TYPE,
    PFQ_TYPE,
    PRE_CQ_LAUNCHER_TYPE,
    REFRESH_PACKAGES_TYPE,
    CREATE_BRANCH_TYPE,
    PAYLOADS_TYPE,
)
# The name of the builder used to launch the pre-CQ.
PRE_CQ_BUILDER_NAME = 'pre-cq-group'
# The name of the Pre-CQ launcher on the waterfall.
PRE_CQ_LAUNCHER_NAME = 'Pre-CQ Launcher'
# Define pool of machines for Hardware tests.
HWTEST_DEFAULT_NUM = 6
HWTEST_TRYBOT_NUM = 3
HWTEST_MACH_POOL = 'bvt'
HWTEST_PALADIN_POOL = 'cq'
HWTEST_TOT_PALADIN_POOL = 'tot-cq'
HWTEST_PFQ_POOL = 'pfq'
HWTEST_SUITES_POOL = 'suites'
HWTEST_CHROME_PERF_POOL = 'chromeperf'
HWTEST_TRYBOT_POOL = 'try-bot'
# Defines for special purpose Hardware tests suites.
HWTEST_AU_SUITE = 'au'
HWTEST_QAV_SUITE = 'qav'
HWTEST_AFDO_SUITE = 'AFDO_record'
# Additional timeout to wait for autotest to abort a suite if the test takes
# too long to run. This is meant to be overly conservative as a timeout may
# indicate that autotest is at capacity.
HWTEST_TIMEOUT_EXTENSION = 10 * 60
HWTEST_DEFAULT_PRIORITY = 'DEFAULT'
HWTEST_CQ_PRIORITY = 'CQ'
HWTEST_BUILD_PRIORITY = 'Build'
HWTEST_PFQ_PRIORITY = 'PFQ'
# Ordered by priority (first item being lowest).
HWTEST_VALID_PRIORITIES = ['Weekly',
                           'Daily',
                           'PostBuild',
                           HWTEST_DEFAULT_PRIORITY,
                           HWTEST_BUILD_PRIORITY,
                           HWTEST_PFQ_PRIORITY,
                           HWTEST_CQ_PRIORITY]
# Creates a mapping of priorities to make easy comparisons.
HWTEST_PRIORITIES_MAP = dict(zip(HWTEST_VALID_PRIORITIES,
                                 range(len(HWTEST_VALID_PRIORITIES))))
# Defines VM Test types.
FULL_AU_TEST_TYPE = 'full_suite'
SIMPLE_AU_TEST_TYPE = 'pfq_suite'
SMOKE_SUITE_TEST_TYPE = 'smoke_suite'
TELEMETRY_SUITE_TEST_TYPE = 'telemetry_suite'
CROS_VM_TEST_TYPE = 'cros_vm_test'
DEV_MODE_TEST_TYPE = 'dev_mode_test'
VALID_VM_TEST_TYPES = [FULL_AU_TEST_TYPE, SIMPLE_AU_TEST_TYPE,
                       SMOKE_SUITE_TEST_TYPE, TELEMETRY_SUITE_TEST_TYPE,
                       CROS_VM_TEST_TYPE, DEV_MODE_TEST_TYPE]
# Version/config files living inside the chromiumos overlay.
CHROMIUMOS_OVERLAY_DIR = 'src/third_party/chromiumos-overlay'
VERSION_FILE = os.path.join(CHROMIUMOS_OVERLAY_DIR,
                            'chromeos/config/chromeos_version.sh')
SDK_VERSION_FILE = os.path.join(CHROMIUMOS_OVERLAY_DIR,
                                'chromeos/binhost/host/sdk_version.conf')
SDK_GS_BUCKET = 'chromiumos-sdk'
# Overlay visibility selectors.
PUBLIC = 'public'
PRIVATE = 'private'
BOTH_OVERLAYS = 'both'
PUBLIC_OVERLAYS = PUBLIC
PRIVATE_OVERLAYS = PRIVATE
VALID_OVERLAYS = [BOTH_OVERLAYS, PUBLIC_OVERLAYS, PRIVATE_OVERLAYS, None]
# Common default logging settings for use with the logging module.
LOGGER_FMT = '%(asctime)s: %(levelname)s: %(message)s'
LOGGER_DATE_FMT = '%H:%M:%S'
# Used by remote patch serialization/deserialization.
INTERNAL_PATCH_TAG = 'i'
EXTERNAL_PATCH_TAG = 'e'
PATCH_TAGS = (INTERNAL_PATCH_TAG, EXTERNAL_PATCH_TAG)
# Tree status strings
TREE_OPEN = 'open'
TREE_THROTTLED = 'throttled'
TREE_CLOSED = 'closed'
VALID_TREE_STATUSES = (TREE_OPEN, TREE_THROTTLED, TREE_CLOSED)
# NOTE(review): the rejection clause spells the label 'CodeReview' while the
# acceptance clause uses 'Code-Review' -- confirm which spelling the Gerrit
# instance actually uses; a mismatch would make the NOT-clause a no-op.
_GERRIT_QUERY_TEMPLATE = ('status:open AND '
                          'label:Code-Review=+2 AND '
                          'label:Verified=+1 AND '
                          'label:Commit-Queue>=%+i AND '
                          'NOT ( label:CodeReview=-2 OR label:Verified=-1 OR '
                          'is:draft )')
# Default gerrit query used to find changes for CQ.
# Permits CQ+1 or CQ+2 changes.
DEFAULT_CQ_READY_QUERY = _GERRIT_QUERY_TEMPLATE % 1
# Gerrit query used to find changes for CQ when tree is throttled.
# Permits only CQ+2 changes.
THROTTLED_CQ_READY_QUERY = _GERRIT_QUERY_TEMPLATE % 2
# Default filter rules for verifying that Gerrit returned results that matched
# our query. This used for working around Gerrit bugs.
DEFAULT_CQ_READY_FIELDS = {
    'CRVW': '2',
    'VRIF': '1',
    'COMR': ('1', '2'),
}
DEFAULT_CQ_SHOULD_REJECT_FIELDS = {
    'CRVW': '-2',
    'VRIF': '-1',
}
GERRIT_ON_BORG_LABELS = {
    'Code-Review': 'CRVW',
    'Commit-Queue': 'COMR',
    'Verified': 'VRIF',
    'Trybot-Verified': 'TBVF',
}
# Actions that a CQ run can take on a CL
CL_ACTION_PICKED_UP = 'picked_up'  # CL picked up in CommitQueueSync
CL_ACTION_SUBMITTED = 'submitted'  # CL submitted successfully
CL_ACTION_KICKED_OUT = 'kicked_out'  # CL CQ-Ready value set to zero
CL_ACTION_SUBMIT_FAILED = 'submit_failed'  # CL submitted but submit failed
CL_ACTIONS = [CL_ACTION_PICKED_UP,
              CL_ACTION_SUBMITTED,
              CL_ACTION_KICKED_OUT,
              CL_ACTION_SUBMIT_FAILED]
# CQ types.
CQ = 'cq'
PRE_CQ = 'pre-cq'
# Environment variables that should be exposed to all children processes
# invoked via cros_build_lib.RunCommand.
ENV_PASSTHRU = ('CROS_SUDO_KEEP_ALIVE', SHARED_CACHE_ENVVAR)
# List of variables to proxy into the chroot from the host, and to
# have sudo export if existent. Anytime this list is modified, a new
# chroot_version_hooks.d upgrade script that symlinks to 45_rewrite_sudoers.d
# should be created.
CHROOT_ENVIRONMENT_WHITELIST = (
    'CHROMEOS_OFFICIAL',
    'CHROMEOS_VERSION_AUSERVER',
    'CHROMEOS_VERSION_DEVSERVER',
    'CHROMEOS_VERSION_TRACK',
    'GCC_GITHASH',
    'GIT_AUTHOR_EMAIL',
    'GIT_AUTHOR_NAME',
    'GIT_COMMITTER_EMAIL',
    'GIT_COMMITTER_NAME',
    'GIT_PROXY_COMMAND',
    'GIT_SSH',
    'RSYNC_PROXY',
    'SSH_AGENT_PID',
    'SSH_AUTH_SOCK',
    'USE',
    'all_proxy',
    'ftp_proxy',
    'http_proxy',
    'https_proxy',
    'no_proxy',
)
# Paths for Chrome LKGM which are relative to the Chromium base url.
CHROME_LKGM_FILE = 'CHROMEOS_LKGM'
PATH_TO_CHROME_LKGM = 'chromeos/%s' % CHROME_LKGM_FILE
SVN_CHROME_LKGM = 'trunk/src/%s' % PATH_TO_CHROME_LKGM
# Cache constants.
COMMON_CACHE = 'common'
# Artifact constants.
def _SlashToUnderscore(string):
return string.replace('/', '_')
# Google Storage buckets used for build artifacts.
DEFAULT_ARCHIVE_BUCKET = 'gs://chromeos-image-archive'
RELEASE_BUCKET = 'gs://chromeos-releases'
# NOTE(review): duplicate of the TRASH_BUCKET definition earlier in this
# file (same value, harmless) -- consider removing one of them.
TRASH_BUCKET = 'gs://chromeos-throw-away-bucket'
CHROME_SYSROOT_TAR = 'sysroot_%s.tar.xz' % _SlashToUnderscore(CHROME_CP)
CHROME_ENV_TAR = 'environment_%s.tar.xz' % _SlashToUnderscore(CHROME_CP)
CHROME_ENV_FILE = 'environment'
BASE_IMAGE_NAME = 'chromiumos_base_image'
BASE_IMAGE_TAR = '%s.tar.xz' % BASE_IMAGE_NAME
BASE_IMAGE_BIN = '%s.bin' % BASE_IMAGE_NAME
IMAGE_SCRIPTS_NAME = 'image_scripts'
IMAGE_SCRIPTS_TAR = '%s.tar.xz' % IMAGE_SCRIPTS_NAME
VM_IMAGE_NAME = 'chromiumos_qemu_image'
VM_IMAGE_BIN = '%s.bin' % VM_IMAGE_NAME
VM_DISK_PREFIX = 'chromiumos_qemu_disk.bin'
VM_MEM_PREFIX = 'chromiumos_qemu_mem.bin'
VM_TEST_RESULTS = 'vm_test_results_%(attempt)s'
METADATA_JSON = 'metadata.json'
PARTIAL_METADATA_JSON = 'partial-metadata.json'
DELTA_SYSROOT_TAR = 'delta_sysroot.tar.xz'
DELTA_SYSROOT_BATCH = 'batch'
# Global configuration constants.
CHROMITE_CONFIG_DIR = os.path.expanduser('~/.chromite')
CHROME_SDK_BASHRC = os.path.join(CHROMITE_CONFIG_DIR, 'chrome_sdk.bashrc')
SYNC_RETRIES = 2
SLEEP_TIMEOUT = 30
# Lab status url.
LAB_STATUS_URL = 'http://chromiumos-lab.appspot.com/current?format=json'
GOLO_SMTP_SERVER = 'mail.golo.chromium.org'
# URLs to the various waterfalls.
BUILD_DASHBOARD = 'http://build.chromium.org/p/chromiumos'
BUILD_INT_DASHBOARD = 'https://uberchromegw.corp.google.com/i/chromeos'
TRYBOT_DASHBOARD = 'https://uberchromegw.corp.google.com/i/chromiumos.tryserver'
# Useful config targets.
CQ_MASTER = 'master-paladin'
PRE_CQ_GROUP = 'trybot-pre-cq-group'
# Email validation regex. Not quite fully compliant with RFC 2822, but good
# approximation.
EMAIL_REGEX = r'[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4}'
# Blacklist of files not allowed to be uploaded into the Partner Project Google
# Storage Buckets:
# debug.tgz contains debug symbols.
# manifest.xml exposes all of our repo names.
# vm_test_results can contain symbolicated crash dumps.
EXTRA_BUCKETS_FILES_BLACKLIST = [
    'debug.tgz',
    'manifest.xml',
    'vm_test_results_*'
]
# AFDO common constants.
# How long does the AFDO_record autotest have to generate the AFDO perf data.
AFDO_GENERATE_TIMEOUT = 90 * 60
| |
import logging
from typing import Optional
from . import JNISimProcedure
from ...engines.soot.expressions import SimSootExpr_NewArray
from ...engines.soot.values import SimSootValue_ArrayRef
l = logging.getLogger('angr.procedures.java_jni.array_operations')
# pylint: disable=arguments-differ,unused-argument
#
# GetArrayLength
#
class GetArrayLength(JNISimProcedure):
    """Implements the JNI GetArrayLength function."""

    return_ty = 'int'

    def run(self, ptr_env, array_):
        # Resolve the JNI reference and hand back the array's size.
        return self.state.jni_references.lookup(array_).size
#
# New<Type>Array
#
class NewArray(JNISimProcedure):
    """Base class for the JNI New<Type>Array functions.

    Subclasses set `element_type` to the Java primitive they allocate.
    """

    element_type: Optional[str] = None
    return_ty = 'reference'

    def run(self, ptr_env, length_):
        no_of_elements = self._normalize_array_idx(length_)
        new_array = SimSootExpr_NewArray.new_array(
            self.state, self.element_type, no_of_elements)
        return self.state.jni_references.create_new_reference(obj=new_array)
# Concrete New<Type>Array procedures, one per Java primitive element type.
# NOTE(review): float and double variants are absent here -- confirm they are
# defined elsewhere or intentionally unsupported.
class NewBooleanArray(NewArray):
    element_type = "boolean"


class NewByteArray(NewArray):
    element_type = "byte"


class NewCharArray(NewArray):
    element_type = "char"


class NewShortArray(NewArray):
    element_type = "short"


class NewIntArray(NewArray):
    element_type = "int"


class NewLongArray(NewArray):
    element_type = "long"
#
# NewObjectArray
#
class NewObjectArray(JNISimProcedure):
    """Implements the JNI NewObjectArray function."""

    return_ty = 'reference'

    def run(self, ptr_env, length_, element_type_, initial_element_):
        size = self._normalize_array_idx(length_)
        element_type = self.state.jni_references.lookup(element_type_)
        # create new array
        array = SimSootExpr_NewArray.new_array(self.state,
                                               element_type.name, size)
        # if available, set the initial_element as the arrays default value
        if self.state.solver.eval(initial_element_ != 0):
            initial_element = self.state.jni_references.lookup(initial_element_)

            def default_generator(state):
                return initial_element

            array.add_default_value_generator(default_generator)
        # return reference to array
        return self.state.jni_references.create_new_reference(array)
#
# GetObjectArrayElement / SetObjectArrayElement
#
class GetObjectArrayElement(JNISimProcedure):
    """JNI GetObjectArrayElement: load one element of an object array.

    Returns a new JNI reference to the loaded element.
    """

    return_ty = 'reference'

    def run(self, ptr_env, array_, idx_):
        idx = self._normalize_array_idx(idx_)
        array = self.state.jni_references.lookup(array_)
        # check array bounds
        SimSootValue_ArrayRef.check_array_bounds(idx, array, self.state)
        # concretize idx (TODO: handle symbolic idxes)
        if self.state.solver.symbolic(idx):
            # BUG FIX: concretization goes through the solver plugin;
            # `self.state.eval` is not a SimState method.
            idx = self.state.solver.eval(idx)
            l.warning("Symbolic indices are not supported for object arrays %s. "
                      "Index gets concretized to %s", array, idx)
        # load element and return reference to it
        element = self.state.javavm_memory.load_array_element(array, idx)
        return self.state.jni_references.create_new_reference(element)
class SetObjectArrayElement(JNISimProcedure):
    """JNI SetObjectArrayElement: store a value into an object array.

    NOTE(review): run() returns nothing and the JNI spec declares
    SetObjectArrayElement as void, yet return_ty is 'reference' — confirm
    against how JNISimProcedure consumes return_ty before changing it.
    """

    return_ty = 'reference'

    def run(self, ptr_env, array_, idx_, value_):
        idx = self._normalize_array_idx(idx_)
        array = self.state.jni_references.lookup(array_)
        value = self.state.jni_references.lookup(value_)
        # check array bounds
        SimSootValue_ArrayRef.check_array_bounds(idx, array, self.state)
        # concretize idx (TODO: handle symbolic idxes)
        if self.state.solver.symbolic(idx):
            # BUG FIX: concretization goes through the solver plugin;
            # `self.state.eval` is not a SimState method.
            idx = self.state.solver.eval(idx)
            l.warning("Symbolic indices are not supported for object arrays %s. "
                      "Index gets concretized to %s", array, idx)
        self.state.javavm_memory.store_array_element(array, idx, value)
#
# Get<Type>ArrayElements / Release<Type>ArrayElements
#
class GetArrayElements(JNISimProcedure):
    """JNI Get<Type>ArrayElements: copy array contents into native memory."""

    return_ty = 'reference'

    def run(self, ptr_env, array_, ptr_isCopy):
        array = self.state.jni_references.lookup(array_)
        # With a symbolic size, conservatively copy the maximum number of
        # elements the solver allows.
        upper_bound = self.state.solver.max(array.size)
        elements = self.state.javavm_memory.load_array_elements(
            array, start_idx=0, no_of_elements=upper_bound)
        # Mirror the elements into native memory and remember the address.
        native_addr = self._store_in_native_memory(elements, array.element_type)
        # Per the JNI contract, report that the caller received a copy
        # (only if a non-null isCopy pointer was passed in).
        if self.state.solver.eval(ptr_isCopy != 0):
            self._store_in_native_memory(data=self.JNI_TRUE,
                                         data_type='boolean',
                                         addr=ptr_isCopy)
        return native_addr
class ReleaseArrayElements(JNISimProcedure):
    """JNI Release<Type>ArrayElements: write a native buffer back to an array.

    With mode JNI_ABORT the native buffer is discarded and the Java array is
    left untouched; otherwise the elements are copied back into Java memory.
    """

    return_ty = 'void'

    JNI_COMMIT = 1
    JNI_ABORT = 2

    def run(self, ptr_env, array_, ptr_elems, mode_):
        if self.state.solver.symbolic(mode_):
            # BUG FIX: the two message fragments were joined without a space,
            # producing "...ReleaseArrayElementsis not supported...".
            l.warning("Symbolic mode %s in JNI function ReleaseArrayElements "
                      "is not supported and gets concretized.", mode_)
        mode = self.state.solver.min(mode_)  # avoid JNI_ABORT by taking the minimum
        if mode == self.JNI_ABORT:
            return
        array = self.state.jni_references.lookup(array_)
        # load array elements from native memory
        # => if size is symbolic, we load the maximum number of elements
        max_array_size = self.state.solver.max(array.size)
        elements = self._load_from_native_memory(addr=ptr_elems,
                                                 data_type=array.element_type,
                                                 no_of_elements=max_array_size)
        # store elements in java memory
        self.state.javavm_memory.store_array_elements(array, start_idx=0, data=elements)
#
# Get<Type>ArrayRegion / Set<Type>ArrayRegion
#
class GetArrayRegion(JNISimProcedure):
    """JNI Get<Type>ArrayRegion: copy a region of a Java array to native memory."""

    return_ty = 'void'

    def run(self, ptr_env, array_, start_idx_, length_, ptr_buf):
        array = self.state.jni_references.lookup(array_)
        start_idx = self._normalize_array_idx(start_idx_)
        length = self._normalize_array_idx(length_)
        # check if the range (induced by start_idx and length) is valid
        if not self._check_region_bounds(array, start_idx, length, self.state):
            return
        # concretize length (TODO handle symbolic length)
        no_of_elements = self._concretize_region_length(length, self.state)
        # load elements from java memory
        elements = self.state.javavm_memory.load_array_elements(
            array, start_idx, no_of_elements)
        # and store them in the native memory
        self._store_in_native_memory(data=elements, data_type=array.element_type,
                                     addr=ptr_buf)

    @staticmethod
    def _concretize_region_length(length, state):
        """Concretize a (possibly symbolic) region length to a single value.

        TODO handle symbolic length properly; for now a midpoint between the
        minimum and maximum satisfiable values is chosen and constrained.
        """
        if state.solver.symbolic(length):
            midpoint_length = (state.solver.min(length) + state.solver.max(length)) // 2
            state.solver.add(length == midpoint_length)
            l.warning("Symbolic lengths are currently not supported. "
                      "Length is concretized to a midpoint value.")
        return state.solver.eval_one(length)

    @staticmethod
    def _check_region_bounds(array, start_idx, length, state):
        """Return True iff some assignment keeps the region inside the array.

        A valid range fulfills the following constraints:
        - 0 <= start_idx < array_size
        - start_idx <= last_idx < array_size, with last_idx := start_idx+length-1
        - 0 <= length <= array_size
        When the region is only conditionally in bounds, the constraints are
        added to the state.
        """
        range_constraints = state.solver.And(
            start_idx.SGE(0), start_idx.SLT(array.size),
            array.size.SGT(start_idx+length-1),
            length.SGE(0), length.SLE(array.size)
        )
        # Evaluate range constraints
        # => Note: if start_idx and/or length are symbolic, the result can be
        #    True and False at the same time
        range_stays_within_bounds = state.solver.eval_upto(range_constraints, 2)
        # IDIOM FIX: `True not in ...` instead of `not True in ...`.
        if True not in range_stays_within_bounds:
            # There is no valid combination of start_idx and length, s.t. the
            # range stays within the array bounds.
            # Correct simulation must continue with a raised Exception
            # TODO raise java.lang.ArrayIndexOutOfBoundsException
            # For now, we just skip this SimProcedure.
            l.error("Skipping SimProcedure: "
                    "Every combination of start_idx %s and length %s is invalid (array length %s).",
                    start_idx, length, array.size)
            return False
        if False in range_stays_within_bounds and \
           True in range_stays_within_bounds:
            # There are some combination of start_idx and length, s.t. the range
            # exceeds array bounds.
            # For now, just constraint values to stay within bounds.
            # TODO split current SimState into two successors:
            # --> one w/ all valid indexes
            # --> one w/ all invalid indexes and a raised exception
            l.warning("Possible out-of-bounds access! "
                      "Constraint start_idx %s and length %s to valid values (array length %s).",
                      start_idx, length, array.size)
            state.solver.add(range_constraints)
        return True
class SetArrayRegion(JNISimProcedure):
    """JNI Set<Type>ArrayRegion: copy native-memory data into a Java array."""

    return_ty = 'void'

    def run(self, ptr_env, array_, start_idx_, length_, ptr_buf):
        array = self.state.jni_references.lookup(array_)
        start_idx = self._normalize_array_idx(start_idx_)
        length = self._normalize_array_idx(length_)
        # Bail out if no assignment of start_idx/length stays within bounds.
        if not GetArrayRegion._check_region_bounds(array, start_idx, length, self.state):
            return
        # Symbolic lengths are concretized (see GetArrayRegion helper).
        count = GetArrayRegion._concretize_region_length(length, self.state)
        # Read the native buffer ...
        data = self._load_from_native_memory(addr=ptr_buf,
                                             data_type=array.element_type,
                                             no_of_elements=count)
        # ... and commit it to the Java array.
        self.state.javavm_memory.store_array_elements(array, start_idx, data)
| |
#!/usr/bin/env python
# Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
"""
Classify an image using individual model files
Use this script as an example to build your own tool
"""
import argparse
import os
import time
import PIL.Image
import numpy as np
import scipy.misc
from google.protobuf import text_format
os.environ['GLOG_minloglevel'] = '2' # Suppress most caffe output
import caffe
from caffe.proto import caffe_pb2
CONFIDENCE_THRESHOLD = 80
def get_net(caffemodel, deploy_file, use_gpu=True):
    """
    Returns an instance of caffe.Net

    Arguments:
    caffemodel -- path to a .caffemodel file
    deploy_file -- path to a .prototxt file

    Keyword arguments:
    use_gpu -- if True, use the GPU for inference
    """
    if use_gpu:
        caffe.set_mode_gpu()

    # Build the network from the deploy description and trained weights,
    # in TEST phase.
    net = caffe.Net(deploy_file, caffemodel, caffe.TEST)
    return net
def get_transformer(deploy_file, mean_file=None):
    """
    Returns an instance of caffe.io.Transformer

    Arguments:
    deploy_file -- path to a .prototxt file

    Keyword arguments:
    mean_file -- path to a .binaryproto file (optional)
    """
    network = caffe_pb2.NetParameter()
    with open(deploy_file) as infile:
        text_format.Merge(infile.read(), network)
    dims = network.input_dim

    t = caffe.io.Transformer(
            inputs = {'data': dims}
            )
    t.set_transpose('data', (2,0,1)) # transpose to (channels, height, width)

    # color images
    if dims[1] == 3:
        # channel swap
        t.set_channel_swap('data', (2,1,0))

    if mean_file:
        # set mean pixel
        # BUG FIX: .binaryproto is a binary file -- open in 'rb' so the read
        # is not corrupted by newline translation on platforms that do it.
        with open(mean_file, 'rb') as infile:
            blob = caffe_pb2.BlobProto()
            blob.MergeFromString(infile.read())
            if blob.HasField('shape'):
                blob_dims = blob.shape
                assert len(blob_dims) == 4, 'Shape should have 4 dimensions - shape is "%s"' % blob.shape
            elif blob.HasField('num') and blob.HasField('channels') and \
                    blob.HasField('height') and blob.HasField('width'):
                blob_dims = (blob.num, blob.channels, blob.height, blob.width)
            else:
                raise ValueError('blob does not provide shape or 4d dimensions')
            # Per-channel mean pixel: average the mean image over H and W.
            pixel = np.reshape(blob.data, blob_dims[1:]).mean(1).mean(1)
            t.set_mean('data', pixel)

    return t
def load_image(path, height, width, mode='RGB'):
    """
    Load an image from disk and resize it.

    Returns an np.ndarray of the resized image

    Arguments:
    path -- path to an image on disk
    width -- resize dimension
    height -- resize dimension

    Keyword arguments:
    mode -- the PIL mode that the image should be converted to
        (RGB for color or L for grayscale)
    """
    pil_image = PIL.Image.open(path).convert(mode)
    pixels = np.array(pil_image)
    # squash to the target size with bilinear interpolation
    return scipy.misc.imresize(pixels, (height, width), 'bilinear')
def load_and_splice_image(file_path, height, width, mode='L'):
    # Load the image, convert it to the requested PIL mode and cut it into
    # overlapping (width x height) crops with a 1-pixel sliding window.
    # Returns a list of np.ndarray crops, capped at 160 (see below).
    # Side effect: every crop is also saved to dump/crop_<y>_<x>.bmp.
    image = PIL.Image.open(file_path)
    image = image.convert(mode)
    # NOTE(review): the following triple-quoted string is disabled and
    # incomplete thresholding code, kept verbatim.
    ''' # black_threshold = 230
    for y in range(0, image.size[1]):
        for x in range(0, image.size[0]):
            image.putpixel((x,y), imager
    '''
    crops = []
    print "image.size = " + str(image.size)
    # Window start rows begin at y=10 -- presumably to skip a header strip;
    # TODO confirm against the input images.
    for y in range(10, image.size[1] - height+1):
        for x in range(0, image.size[0] - width+1):
            # limit to 10 for test
            if len(crops) < 160:
                crop = image.crop((x, y, x+ width, y + height))
                # crop.load()
                # not being cropped correctly? why?
                crop.save("dump/crop_" + str(y) + "_" + str(x) + ".bmp", "BMP")
                crop = np.array(crop)
                crops.append(crop)
    return crops
def forward_pass(images, net, transformer, batch_size=1):
    """
    Returns scores for each image as an np.ndarray (nImages x nClasses)

    Arguments:
    images -- a list of np.ndarrays
    net -- a caffe.Net
    transformer -- a caffe.io.Transformer

    Keyword arguments:
    batch_size -- how many images can be processed at once
        (a high value may result in out-of-memory errors)
    """
    # Grayscale images need an explicit trailing channel axis.
    caffe_images = []
    for image in images:
        if image.ndim == 2:
            caffe_images.append(image[:,:,np.newaxis])
        else:
            caffe_images.append(image)
    caffe_images = np.array(caffe_images)

    dims = transformer.inputs['data'][1:]

    scores = None
    # Process the images batch by batch (Python 2: xrange).
    for chunk in [caffe_images[x:x+batch_size] for x in xrange(0, len(caffe_images), batch_size)]:
        new_shape = (len(chunk),) + tuple(dims)
        # Reshape the input blob only when the batch size changes.
        if net.blobs['data'].data.shape != new_shape:
            net.blobs['data'].reshape(*new_shape)
        for index, image in enumerate(chunk):
            image_data = transformer.preprocess('data', image)
            net.blobs['data'].data[index] = image_data
        # Scores come from the network's last output blob.
        output = net.forward()[net.outputs[-1]]
        if scores is None:
            scores = output
        else:
            scores = np.vstack((scores, output))
        # print 'Processed %s/%s images ...' % (len(scores), len(caffe_images))
    print "Processed ", len(caffe_images), " images"

    return scores
def read_labels(labels_file):
    """
    Returns a list of strings

    Arguments:
    labels_file -- path to a .txt file
    """
    if not labels_file:
        # A missing labels file is tolerated; scores will be reported by index.
        print 'WARNING: No labels file provided. Results will be difficult to interpret.'
        return None

    labels = []
    with open(labels_file) as infile:
        for line in infile:
            label = line.strip()
            if label:
                labels.append(label)
    assert len(labels), 'No labels found'
    return labels
def classify(caffemodel, deploy_file, image_files,
        mean_file=None, labels_file=None, use_gpu=True):
    """
    Classify some images against a Caffe model and print the results

    Arguments:
    caffemodel -- path to a .caffemodel
    deploy_file -- path to a .prototxt
    image_files -- list of paths to images

    Keyword arguments:
    mean_file -- path to a .binaryproto
    labels_file -- path to a .txt file
    use_gpu -- if True, run inference on the GPU
    """
    # Load the model and images
    net = get_net(caffemodel, deploy_file, use_gpu)
    transformer = get_transformer(deploy_file, mean_file)
    _, channels, height, width = transformer.inputs['data']
    print "target height, width: ", height, " ", width
    if channels == 3:
        mode = 'RGB'
    elif channels == 1:
        mode = 'L'
    else:
        raise ValueError('Invalid number for channels: %s' % channels)
    # Only the first image is used; it is cut into sliding-window crops.
    # images = [load_image(image_file, height, width, mode) for image_file in image_files]
    images = load_and_splice_image(image_files[0], height, width, mode)
    labels = read_labels(labels_file)

    # Classify the image
    classify_start_time = time.time()
    scores = forward_pass(images, net, transformer)
    print 'Classification took %s seconds.' % (time.time() - classify_start_time,)

    ### Process the results

    indices = (-scores).argsort()[:, :5] # take top 5 results
    classifications = []
    for image_index, index_list in enumerate(indices):
        result = []
        for i in index_list:
            # 'i' is a category in labels and also an index into scores
            if labels is None:
                label = 'Class #%s' % i
            else:
                label = labels[i]
            result.append((label, round(100.0*scores[image_index, i],4)))
        classifications.append(result)

    # NOTE(review): disabled verbose per-crop report, kept verbatim.
    ''' for index, classification in enumerate(classifications):
        print '{:-^80}'.format(' Prediction for %s ' % str(index))
        for label, confidence in classification:
            print '{:9.4%} - "{}"'.format(confidence/100.0, label)
        print '''

    # One line per crop: index and top-1 prediction with confidence.
    for index, classification in enumerate(classifications):
        print index,
        print '{:9.4%} - "{}"'.format(classification[0][1]/100.0, classification[0][0])

    # Render a map of confident top-1 labels, 32 crops per row;
    # '.' marks crops below CONFIDENCE_THRESHOLD.
    count = 0
    for index, classification in enumerate(classifications):
        if count % 32 == 0:
            print
        count += 1
        if classification[0][1] > CONFIDENCE_THRESHOLD:
            print classification[0][0],
        else:
            print ".",
    print
if __name__ == '__main__':
    script_start_time = time.time()

    parser = argparse.ArgumentParser(description='Classification example - DIGITS')

    ### Positional arguments
    parser.add_argument('caffemodel', help='Path to a .caffemodel')
    parser.add_argument('deploy_file', help='Path to the deploy file')
    parser.add_argument('image', help='Path to an image')

    ### Optional arguments
    parser.add_argument('-m', '--mean',
            help='Path to a mean file (*.npy)')
    parser.add_argument('-l', '--labels',
            help='Path to a labels file')
    parser.add_argument('--nogpu',
            action='store_true',
            help="Don't use the GPU")

    args = vars(parser.parse_args())

    # classify() expects a list of image paths; only one is supported here.
    image_files = [args['image']]
    classify(args['caffemodel'], args['deploy_file'], image_files,
            args['mean'], args['labels'], not args['nogpu'])

    print 'Script took %s seconds.' % (time.time() - script_start_time,)
| |
import random
from core.exception import *
from core.grid import *
# Raised when the computer player cannot place all of its units on the grid.
class NoAvailablePlaceError(CoreError):
    message = "No available place."

# Raised when there is no cell left to target.
class NoAvailableTargetError(CoreError):
    message = "No available target."
class TurnState(object):
    # Lifecycle of a shot: NONE (not yet answered), MARKED (cell had
    # already been shot at), MISS, HIT, SUNK.
    NONE = 0x00
    MARKED = 0x01
    MISS = 0x02
    HIT = 0x03
    SUNK = 0x04
class Turn(Position):
    """A single shot by `owner` at grid position (v, h), with its outcome."""

    owner = None
    state = None

    def __init__(self, owner, v=None, h=None):
        self.owner = owner
        self.state = TurnState.NONE
        super().__init__(v, h)

    # --- state predicates -------------------------------------------------
    def is_none(self):
        return self.state == TurnState.NONE

    def is_marked(self):
        return self.state == TurnState.MARKED

    def is_miss(self):
        return self.state == TurnState.MISS

    def is_hit(self):
        return self.state == TurnState.HIT

    def is_sunk(self):
        return self.state == TurnState.SUNK

    # --- state transitions ------------------------------------------------
    def marked(self):
        self.state = TurnState.MARKED

    def miss(self):
        self.state = TurnState.MISS

    def hit(self):
        self.state = TurnState.HIT

    def sunk(self):
        self.state = TurnState.SUNK
class Player(object):
    # Base player: owns a grid and the fleet placed on it.
    grid = None
    fleet = None

    def __init__(self, grid, fleet):
        self.grid = grid
        self.fleet = fleet
        # Create the fleet's units on this player's grid.
        self.fleet.create(self.grid)

    def name(self):
        raise NotImplementedError()

    def prepare(self):
        raise NotImplementedError()

    def answer(self, turn):
        # Resolve an opponent's turn against our own grid and record the
        # outcome (marked/miss/hit/sunk) on the turn object, then return it.
        cell = self.grid.cell(turn.position)
        if not cell.is_marked():
            cell.mark()
            if cell.unit:
                if cell.unit.is_sunked():
                    turn.sunk()
                else:
                    turn.hit()
            else:
                turn.miss()
        else:
            # The cell was already shot at before.
            turn.marked()
        return turn

    def is_defeated(self):
        return self.fleet.is_defeated()

    def is_ready(self):
        return self.fleet.is_ready()

    def turn(self):
        raise NotImplementedError()

    def say(self):
        # By default, announcing a turn is the same as taking one.
        return self.turn()
class BaseHumanPlayer(Player):
    """Player driven by UI callbacks; units are positioned interactively."""

    def name(self):
        return 'Human'

    def current_unit(self):
        # The unit currently being positioned.
        return self.fleet.current()

    def prepare(self):
        self.current_unit().move_to()

    # --- unit movement delegates -----------------------------------------
    def unit_rotate(self):
        self.current_unit().rotate()

    def unit_move_left(self):
        self.current_unit().move_left()

    def unit_move_right(self):
        self.current_unit().move_right()

    def unit_move_up(self):
        self.current_unit().move_up()

    def unit_move_down(self):
        self.current_unit().move_down()

    def unit_place(self):
        # Try to fix the current unit on the grid; if that succeeded and
        # units remain to be placed, start positioning the next one.
        if not self.current_unit().place():
            return
        if not self.is_ready():
            self.current_unit().move_to()

    def turn(self):
        # Coordinates are filled in later by the UI.
        return Turn(self)
class BaseComputerPlayer(Player):
    # AI-controlled player base class.
    def name(self):
        return 'Computer'

class HumanPlayer(BaseHumanPlayer):
    pass
class ComputerPlayer(BaseComputerPlayer):
    """AI player: shoots randomly, then hunts around known hits."""

    enemy = None
    # BUG FIX: a mutable class-level list would be shared by all instances;
    # declare the attribute here and build the actual list in __init__.
    turns = None

    def __init__(self, grid, fleet):
        super().__init__(grid, fleet)
        # Positions of hits on the opponent's not-yet-sunk ship.
        self.enemy = []
        # All turns taken so far, in order.
        self.turns = []

    def turn(self):
        """Pick the next target and return it as a Turn (and record it)."""
        if self.turns:
            last = self.turns[-1]
            if last.is_sunk():
                # Ship destroyed -- stop hunting around it.
                self.enemy.clear()
            elif last.is_hit():
                self.enemy.append(last.position)
        if self.enemy:
            # Hunt mode: only consider cells adjacent to known hits.
            candidates = self.get_available_target(self.turns, self.enemy)
        else:
            candidates = self.get_available_target(self.turns)
        if not candidates:
            raise NoAvailableTargetError()
        v, h = random.choice(candidates)
        turn = Turn(self, v, h)
        self.turns.append(turn)
        return turn

    def get_available_target(self, turns, enemy=None):
        """Return candidate [v, h] positions not yet shot at.

        With `enemy` given, candidates are restricted to cells near those
        positions; otherwise the whole grid is considered.
        """
        marked = [t.position for t in turns]
        if enemy:
            position = self.grid.near(enemy)
        else:
            size = self.grid.config.size
            position = [[v, h] for v in range(size) for h in range(size)]
        return [p for p in position if p not in marked]

    def prepare(self):
        """Randomly place every unit of the fleet on the grid.

        Raises:
            NoAvailablePlaceError: if a unit cannot be placed anywhere.
        """
        for u in self.fleet.units:
            available = self.get_available_position(u.size)
            # ROBUSTNESS: raise the domain error instead of an IndexError
            # from random.choice on an empty list.
            if not available:
                raise NoAvailablePlaceError()
            spot = random.choice(available)
            (u.move_to(spot) and u.place())
        if not self.is_ready():
            raise NoAvailablePlaceError()

    def get_available_position(self, size):
        """Return all straight runs of `size` free cells (rows and columns).

        Each entry is a list of `size` [v, h] positions.
        """
        available = []
        for d in [1, 2]:  # d=1 scans rows, d=2 scans columns
            for v in range(self.grid.config.size):
                # Runs of consecutive unlocked cells in this row/column;
                # a locked cell terminates the current run.
                runs = [[]]
                for h in range(self.grid.config.size):
                    p = [v, h] if d % 2 else [h, v]
                    if self.grid.cell([p[0], p[1]]).is_locked():
                        runs.append([])
                    else:
                        runs[-1].append(p)
                for run in runs:
                    if len(run) < size:
                        continue
                    for i in range(len(run) - size + 1):
                        available.append(run[i:i + size])
        return available
| |
"""
FuseCry encryption functions.
Cry objects `enc` and `dec` methods are symmetric encrypt/decrypt functions.
Use `get_password_cry` and `get_rsa_cry` to generate proper Cry object.
Examples:
Generate new Cry object with user password:
get_password_cry(password)
Generate existing Cry object with user password:
get_password_cry(password, kdf_salt, kdf_iterations)
Generate new Cry object with RSA key:
get_rsa_cry(rsa_key):
Generate existing Cry object with RSA key and RSA encrypted AES key:
get_rsa_cry(rsa_key, encrypted_aes_key)
"""
from Crypto.Cipher import AES
from Crypto.Hash import HMAC, SHA256
from Crypto.Protocol.KDF import PBKDF2
from Crypto.PublicKey import RSA
from base64 import b32encode, b32decode
from fusecry import IntegrityCheckFail
from fusecry import config
from random import randint
import os
def get_password_cry(password, kdf_salt=None, kdf_iters=None):
    """Generate Cry object from password using KDF.

    Optional arguments are not required if you generate your first Cry object.
    If you already used it to encrypt data before, you will have to provide the
    generated arguments (returned by this function) in order to create the
    exact same Cry object.

    Args:
        password (str): User password.
        kdf_salt (:obj:`bytes`, optional): KDF salt. Defaults to None.
        kdf_iters (:obj:`int`, optional): Number of KDF iterations. Defaults to
            None.

    Returns:
        (
            Cry: Cry object ready for encryption use,
            bytes: KDF salt,
            int: KDF iterations
        )
        KDF salt and number of KDF iterations are returned for future reference
        if they were generated and not provided. They will be returned in any
        case.
    """
    key_size = AES.key_size[2]  # 32 bytes -> AES-256
    # Generate salt / iteration count when the caller did not supply them.
    if not kdf_salt:
        kdf_salt = os.urandom(config.kdf_salt_size)
    if not kdf_iters:
        kdf_iters = randint(*config._kdf_iter_range)
    # Derive the AES key from the password with PBKDF2.
    aes_key = PBKDF2(str(password), kdf_salt, key_size, kdf_iters)
    return Cry(aes_key), kdf_salt, kdf_iters
def get_rsa_cry(rsa_key, enc_aes=None):
    """Generate Cry object using RSA key.

    Optional argument is not required if you generate your first Cry object. If
    you already used it to encrypt data before, you will have to provide the
    generated argument (returned by this function) in order to create the exact
    same Cry object.

    Args:
        rsa_key (bytes): Public or private RSA key. If public key is used, only
            encryption methods will be available in returned Cry object. The
            RSA key is used to encrypt random generated 256 bit AES key.
        enc_aes (:obj:`bytes`, optional): RSA encrypted AES key. It has to be
            encrypted with the same key provided in rsa_key argument. Defaults
            to None.

    Returns:
        (
            Cry: Cry object ready for encryption use,
            int: RSA key size,
            bytes: encrypted AES key
        )
        RSA key size and encrypted AES key are returned to be stored for future
        reference. If encrypted AES key was not provided, it will be generated,
        but it is returned in any case.
    """
    key_size = AES.key_size[2]  # 32 bytes -> AES-256
    rsa = RSA.importKey(rsa_key)
    rsa_size = int((rsa.size()+1)/8)  # RSA modulus size in bytes
    aes_key = None
    if enc_aes:
        aes_key = rsa.decrypt(enc_aes)
        # NOTE(review): pads with AES.block_size (16) although the key is
        # key_size (32) bytes; for a 256-bit key the pad length is negative so
        # no padding is added -- confirm AES.block_size is intended here.
        aes_key = b'\x00' * (AES.block_size - len(aes_key)) + aes_key
    else:
        aes_key = os.urandom(key_size)
        # Left-pad the RSA ciphertext to the full modulus size.
        enc_aes = rsa.encrypt(aes_key, 'K')[0]
        enc_aes = b'\x00' * (rsa_size - len(enc_aes)) + enc_aes
    crypto = Cry(aes_key)
    return crypto, rsa_size, enc_aes
class Cry(object):
    """Contains methods and keys for encryption and decryption of byte chunks.

    Cry uses AES in CBC mode to encrypt and decrypt bytes. Once created, it may
    be reused multiple times.

    Attributes:
        hash_func (function): Function used for HMAC hashing.
        aes_key (bytes): AES key in plain text.
        hashed_aes_key (bytes): Hash value of aes_key used as HMAC key.
        ks (int): AES key size.
        vs (int): AES initialization vector size.
        hs (int): Digest size of hash function defined in attribute hash_func.
        ms (int): Meta size - size of non-data part of encrypted chunk. This
            meta data consists of IV and HMAC.
    """

    def __init__(self, aes_key):
        """Constructor.

        Args:
            aes_key (bytes): Plain text AES key.
        """
        self.hash_func = SHA256
        self.aes_key = aes_key
        # HMAC key is the hash of the AES key so the raw key is never reused.
        aes_hasher = self.hash_func.new()
        aes_hasher.update(aes_key)
        self.hashed_aes_key = aes_hasher.digest()
        self.ks = len(aes_key)
        self.vs = AES.block_size
        self.hs = self.hash_func.digest_size
        self.ms = self.vs + self.hs

    def _aes_enc(self, raw_bytes):
        # Fresh random IV per chunk, prepended to the cipher text.
        iv = os.urandom(self.vs)
        aes = AES.new(self.aes_key, AES.MODE_CBC, iv)
        return iv + aes.encrypt(raw_bytes)

    def _aes_dec(self, enc_bytes):
        # The IV is the first `vs` bytes of the cipher text.
        iv = enc_bytes[:self.vs]
        aes = AES.new(self.aes_key, AES.MODE_CBC, iv)
        return aes.decrypt(enc_bytes[self.vs:])

    def enc(self, chunk):
        """Encrypt a chunk of bytes and returned encrypted chunk.

        Initialization vector is randomly generated for each chunk.

        Args:
            chunk (bytes): Plain text data to be encrypted.

        Returns:
            bytes: Encrypted chunk.
            Encrypted chunk is bytes object consisting of TAG and cipher text.
            Cipher text is IV + encrypted chunk. TAG is HMAC of cipher text
            using hashed_aes_key as key.
        """
        if not chunk:
            return bytes(0)
        checksum = HMAC.new(self.hashed_aes_key, digestmod=self.hash_func)
        # Zero-pad to a multiple of the AES block size.
        chunk += bytes((AES.block_size - len(chunk)) % AES.block_size)
        enc_data = self._aes_enc(chunk)
        checksum.update(enc_data)
        return checksum.digest() + enc_data

    def dec(self, enc_chunk):
        """Decrypt encrypted chunk, perform validation and return plain text.

        Args:
            enc_chunk (bytes): Encrypted chunk returned by enc(chunk) method.

        Returns:
            bytes: Plain text data.

        Raises:
            IntegrityCheckFail: When integrity check fails.
        """
        if not enc_chunk:
            # BUG FIX: used to return the tuple (b'', False) while every other
            # path (and the docstring) yields plain bytes; return bytes for a
            # consistent API that mirrors enc() on empty input.
            return b''
        # Verify the HMAC over the cipher text before decrypting.
        checksum = HMAC.new(self.hashed_aes_key, digestmod=self.hash_func)
        checksum.update(enc_chunk[self.hs:])
        if enc_chunk[:self.hs] != checksum.digest():
            raise IntegrityCheckFail("Integrity check failed.")
        return self._aes_dec(enc_chunk[self.hs:])

    def enc_filename(self, name):
        """Encrypt a filename into a base32 string (padding '=' stripped)."""
        byte_name = name.encode()
        byte_name += bytes((AES.block_size - len(byte_name)) % AES.block_size)
        return b32encode(self._aes_enc(byte_name)).decode().rstrip('=')

    def dec_filename(self, enc_name):
        """Decrypt a filename produced by enc_filename()."""
        # Restore the base32 '=' padding removed by enc_filename.
        enc_name += '=' * ((8 - len(enc_name)) % 8)
        byte_name = self._aes_dec(b32decode(enc_name.encode()))
        # Strip the zero-byte padding added before encryption.
        return byte_name.rstrip(bytes(1)).decode()
| |
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
import webob
from webob import exc
import webtest
from neutron.api import api_common
from neutron.api import extensions
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.v2 import attributes
from neutron.api.v2 import base as v2_base
from neutron.api.v2 import router
from neutron.common import exceptions as n_exc
from neutron import context
from neutron import manager
from neutron.openstack.common import policy as common_policy
from neutron.openstack.common import uuidutils
from neutron import policy
from neutron import quota
from neutron.tests import base
from neutron.tests import fake_notifier
from neutron.tests.unit import testlib_api
from neutron.tests.unit import testlib_plugin
ROOTDIR = os.path.dirname(os.path.dirname(__file__))
EXTDIR = os.path.join(ROOTDIR, 'unit/extensions')
_uuid = uuidutils.generate_uuid
def _get_path(resource, id=None, action=None, fmt=None):
path = '/%s' % resource
if id is not None:
path = path + '/%s' % id
if action is not None:
path = path + '/%s' % action
if fmt is not None:
path = path + '.%s' % fmt
return path
class ResourceIndexTestCase(base.BaseTestCase):
    def test_index_json(self):
        """The index lists one collection with a self link."""
        app = webtest.TestApp(router.Index({'foo': 'bar'}))
        res = app.get('')

        self.assertIn('resources', res.json)
        resources = res.json['resources']
        self.assertEqual(len(resources), 1)

        resource = resources[0]
        self.assertIn('collection', resource)
        self.assertEqual(resource['collection'], 'bar')
        self.assertIn('name', resource)
        self.assertEqual(resource['name'], 'foo')

        self.assertIn('links', resource)
        links = resource['links']
        self.assertEqual(len(links), 1)
        link = links[0]
        self.assertIn('href', link)
        self.assertEqual(link['href'], 'http://localhost/bar')
        self.assertIn('rel', link)
        self.assertEqual(link['rel'], 'self')
class APIv2TestBase(base.BaseTestCase, testlib_plugin.PluginSetupHelper):
    # Common fixture: wires a mocked core plugin into a fresh APIRouter and
    # exposes it through webtest as self.api.
    def setUp(self):
        super(APIv2TestBase, self).setUp()

        plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2'
        # Ensure existing ExtensionManager is not used
        extensions.PluginAwareExtensionManager._instance = None
        # Create the default configurations
        self.config_parse()
        # Update the plugin
        self.setup_coreplugin(plugin)
        cfg.CONF.set_override('allow_pagination', True)
        cfg.CONF.set_override('allow_sorting', True)
        self._plugin_patcher = mock.patch(plugin, autospec=True)
        self.plugin = self._plugin_patcher.start()
        instance = self.plugin.return_value
        # Advertise native pagination/sorting support on the mocked plugin
        # (name-mangled attributes of NeutronPluginBaseV2).
        instance._NeutronPluginBaseV2__native_pagination_support = True
        instance._NeutronPluginBaseV2__native_sorting_support = True

        api = router.APIRouter()
        self.api = webtest.TestApp(api)

        # Reset the quota driver so the ConfDriver override takes effect.
        quota.QUOTAS._driver = None
        cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
                              group='QUOTAS')
class _ArgMatcher(object):
"""An adapter to assist mock assertions, used to custom compare."""
def __init__(self, cmp, obj):
self.cmp = cmp
self.obj = obj
def __eq__(self, other):
return self.cmp(self.obj, other)
def _list_cmp(l1, l2):
return set(l1) == set(l2)
class APIv2TestCase(APIv2TestBase):
def _do_field_list(self, resource, base_fields):
attr_info = attributes.RESOURCE_ATTRIBUTE_MAP[resource]
policy_attrs = [name for (name, info) in attr_info.items()
if info.get('required_by_policy')]
for name, info in attr_info.items():
if info.get('primary_key'):
policy_attrs.append(name)
fields = base_fields
fields.extend(policy_attrs)
return fields
def _get_collection_kwargs(self, skipargs=[], **kwargs):
args_list = ['filters', 'fields', 'sorts', 'limit', 'marker',
'page_reverse']
args_dict = dict(
(arg, mock.ANY) for arg in set(args_list) - set(skipargs))
args_dict.update(kwargs)
return args_dict
def test_fields(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'fields': 'foo'})
fields = self._do_field_list('networks', ['foo'])
kwargs = self._get_collection_kwargs(fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_multiple(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
fields = self._do_field_list('networks', ['foo', 'bar'])
self.api.get(_get_path('networks'), {'fields': ['foo', 'bar']})
kwargs = self._get_collection_kwargs(fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_multiple_with_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
fields = self._do_field_list('networks', ['foo'])
self.api.get(_get_path('networks'), {'fields': ['foo', '']})
kwargs = self._get_collection_kwargs(fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'fields': ''})
kwargs = self._get_collection_kwargs(fields=[])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_multiple_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'fields': ['', '']})
kwargs = self._get_collection_kwargs(fields=[])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': 'bar'})
filters = {'name': ['bar']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ''})
filters = {}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ['', '']})
filters = {}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple_with_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ['bar', '']})
filters = {'name': ['bar']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_filters_multiple_values(self):
        """Repeated filter keys accumulate into a multi-value list."""
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'), {'name': ['bar', 'bar2']})
        filters = {'name': ['bar', 'bar2']}
        kwargs = self._get_collection_kwargs(filters=filters)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_filters_multiple(self):
        """Distinct filter keys each become independent filter entries."""
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'), {'name': 'bar',
                                             'tenant_id': 'bar2'})
        filters = {'name': ['bar'], 'tenant_id': ['bar2']}
        kwargs = self._get_collection_kwargs(filters=filters)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_filters_with_fields(self):
        """Filters and a 'fields' selection can be combined in one request."""
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'), {'name': 'bar', 'fields': 'foo'})
        filters = {'name': ['bar']}
        fields = self._do_field_list('networks', ['foo'])
        kwargs = self._get_collection_kwargs(filters=filters, fields=fields)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_filters_with_convert_to(self):
        """A filter value with a convert_to rule ('true') becomes bool True."""
        instance = self.plugin.return_value
        instance.get_ports.return_value = []
        self.api.get(_get_path('ports'), {'admin_state_up': 'true'})
        filters = {'admin_state_up': [True]}
        kwargs = self._get_collection_kwargs(filters=filters)
        instance.get_ports.assert_called_once_with(mock.ANY, **kwargs)
    def test_filters_with_convert_list_to(self):
        """'key=value' items in a list filter are parsed into a nested dict."""
        instance = self.plugin.return_value
        instance.get_ports.return_value = []
        self.api.get(_get_path('ports'),
                     {'fixed_ips': ['ip_address=foo', 'subnet_id=bar']})
        filters = {'fixed_ips': {'ip_address': ['foo'], 'subnet_id': ['bar']}}
        kwargs = self._get_collection_kwargs(filters=filters)
        instance.get_ports.assert_called_once_with(mock.ANY, **kwargs)
    def test_limit(self):
        """A numeric 'limit' param is converted to int and passed through."""
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'),
                     {'limit': '10'})
        kwargs = self._get_collection_kwargs(limit=10)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_limit_with_great_than_max_limit(self):
        """A requested limit above pagination_max_limit is capped to the max."""
        cfg.CONF.set_default('pagination_max_limit', '1000')
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'),
                     {'limit': '1001'})
        kwargs = self._get_collection_kwargs(limit=1000)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_limit_with_zero(self):
        """limit=0 falls back to the configured pagination_max_limit."""
        cfg.CONF.set_default('pagination_max_limit', '1000')
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'), {'limit': '0'})
        kwargs = self._get_collection_kwargs(limit=1000)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_limit_with_unspecific(self):
        """No limit param means the configured max limit is applied."""
        cfg.CONF.set_default('pagination_max_limit', '1000')
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'))
        kwargs = self._get_collection_kwargs(limit=1000)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_limit_with_negative_value(self):
        """A negative limit is rejected with HTTP 400."""
        cfg.CONF.set_default('pagination_max_limit', '1000')
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        res = self.api.get(_get_path('networks'), {'limit': -1},
                           expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
    def test_limit_with_non_integer(self):
        """A non-numeric limit is rejected with HTTP 400."""
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        res = self.api.get(_get_path('networks'),
                           {'limit': 'abc'}, expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
    def test_limit_with_infinite_pagination_max_limit(self):
        """pagination_max_limit='Infinite' disables the limit (None)."""
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        cfg.CONF.set_override('pagination_max_limit', 'Infinite')
        self.api.get(_get_path('networks'))
        kwargs = self._get_collection_kwargs(limit=None)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_limit_with_negative_pagination_max_limit(self):
        """A negative configured max limit is treated as unlimited (None)."""
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        cfg.CONF.set_default('pagination_max_limit', '-1')
        self.api.get(_get_path('networks'))
        kwargs = self._get_collection_kwargs(limit=None)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_limit_with_non_integer_pagination_max_limit(self):
        """A non-numeric configured max limit is treated as unlimited (None)."""
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        cfg.CONF.set_default('pagination_max_limit', 'abc')
        self.api.get(_get_path('networks'))
        kwargs = self._get_collection_kwargs(limit=None)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_marker(self):
        """A 'marker' param is forwarded to the plugin alongside the limit."""
        cfg.CONF.set_override('pagination_max_limit', '1000')
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        marker = _uuid()
        self.api.get(_get_path('networks'),
                     {'marker': marker})
        kwargs = self._get_collection_kwargs(limit=1000, marker=marker)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_page_reverse(self):
calls = []
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'page_reverse': 'True'})
kwargs = self._get_collection_kwargs(page_reverse=True)
calls.append(mock.call.get_networks(mock.ANY, **kwargs))
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
instance.get_networks.reset_mock()
self.api.get(_get_path('networks'),
{'page_reverse': 'False'})
kwargs = self._get_collection_kwargs(page_reverse=False)
calls.append(mock.call.get_networks(mock.ANY, **kwargs))
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_page_reverse_with_non_bool(self):
        """A non-boolean page_reverse value defaults to False."""
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'),
                     {'page_reverse': 'abc'})
        kwargs = self._get_collection_kwargs(page_reverse=False)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_page_reverse_with_unspecific(self):
        """Omitting page_reverse defaults it to False."""
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'))
        kwargs = self._get_collection_kwargs(page_reverse=False)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_sort(self):
        """sort_key/sort_dir pairs become (key, asc?) tuples; the primary
        key ('id', ascending) is appended automatically."""
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'),
                     {'sort_key': ['name', 'admin_state_up'],
                      'sort_dir': ['desc', 'asc']})
        kwargs = self._get_collection_kwargs(sorts=[('name', False),
                                                    ('admin_state_up', True),
                                                    ('id', True)])
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_sort_with_primary_key(self):
        """An explicit 'id' sort key keeps its requested direction instead of
        getting the implicit ascending default."""
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'),
                     {'sort_key': ['name', 'admin_state_up', 'id'],
                      'sort_dir': ['desc', 'asc', 'desc']})
        kwargs = self._get_collection_kwargs(sorts=[('name', False),
                                                    ('admin_state_up', True),
                                                    ('id', False)])
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_sort_without_direction(self):
        """sort_key without a matching sort_dir is rejected with HTTP 400."""
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        res = self.api.get(_get_path('networks'), {'sort_key': ['name']},
                           expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
    def test_sort_with_invalid_attribute(self):
        """Sorting on an unknown attribute is rejected with HTTP 400."""
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        res = self.api.get(_get_path('networks'),
                           {'sort_key': 'abc',
                            'sort_dir': 'asc'},
                           expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
    def test_sort_with_invalid_dirs(self):
        """A sort_dir other than asc/desc is rejected with HTTP 400."""
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        res = self.api.get(_get_path('networks'),
                           {'sort_key': 'name',
                            'sort_dir': 'abc'},
                           expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
    def test_emulated_sort(self):
        """Without native sorting/pagination support, the API layer emulates
        them and does not forward sorts/limit/marker/page_reverse kwargs."""
        instance = self.plugin.return_value
        # Disable the plugin's native support via its name-mangled flags.
        instance._NeutronPluginBaseV2__native_pagination_support = False
        instance._NeutronPluginBaseV2__native_sorting_support = False
        instance.get_networks.return_value = []
        api = webtest.TestApp(router.APIRouter())
        api.get(_get_path('networks'), {'sort_key': ['name', 'status'],
                                        'sort_dir': ['desc', 'asc']})
        kwargs = self._get_collection_kwargs(
            skipargs=['sorts', 'limit', 'marker', 'page_reverse'])
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_emulated_sort_without_sort_field(self):
        """With emulated sorting, sort keys (and policy fields) are added to
        the requested field list even when the client did not ask for them."""
        instance = self.plugin.return_value
        instance._NeutronPluginBaseV2__native_pagination_support = False
        instance._NeutronPluginBaseV2__native_sorting_support = False
        instance.get_networks.return_value = []
        api = webtest.TestApp(router.APIRouter())
        api.get(_get_path('networks'), {'sort_key': ['name', 'status'],
                                        'sort_dir': ['desc', 'asc'],
                                        'fields': ['subnets']})
        # Order-insensitive comparison of the expanded field list.
        kwargs = self._get_collection_kwargs(
            skipargs=['sorts', 'limit', 'marker', 'page_reverse'],
            fields=_ArgMatcher(_list_cmp, ['name',
                                           'status',
                                           'id',
                                           'subnets',
                                           'shared',
                                           'tenant_id']))
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_emulated_pagination(self):
        """With emulated pagination, limit/marker/page_reverse are not
        forwarded to the plugin."""
        instance = self.plugin.return_value
        instance._NeutronPluginBaseV2__native_pagination_support = False
        instance.get_networks.return_value = []
        api = webtest.TestApp(router.APIRouter())
        api.get(_get_path('networks'), {'limit': 10,
                                        'marker': 'foo',
                                        'page_reverse': False})
        kwargs = self._get_collection_kwargs(skipargs=['limit',
                                                       'marker',
                                                       'page_reverse'])
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_native_pagination_without_native_sorting(self):
        """Native pagination without native sorting is an invalid plugin
        configuration and fails router construction."""
        instance = self.plugin.return_value
        instance._NeutronPluginBaseV2__native_sorting_support = False
        self.assertRaises(n_exc.Invalid, router.APIRouter)
    def test_native_pagination_without_allow_sorting(self):
        """Even with allow_sorting disabled, sort params are still parsed and
        forwarded (native pagination requires consistent ordering)."""
        cfg.CONF.set_override('allow_sorting', False)
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        api = webtest.TestApp(router.APIRouter())
        api.get(_get_path('networks'),
                {'sort_key': ['name', 'admin_state_up'],
                 'sort_dir': ['desc', 'asc']})
        kwargs = self._get_collection_kwargs(sorts=[('name', False),
                                                    ('admin_state_up', True),
                                                    ('id', True)])
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
# Note: since all resources use the same controller and validation
# logic, we actually get really good coverage from testing just networks.
class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
    def setUp(self):
        # NOTE(review): this override only delegates to the parent and could
        # be removed; kept for symmetry with sibling test classes.
        super(JSONV2TestCase, self).setUp()
def _test_list(self, req_tenant_id, real_tenant_id):
env = {}
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
input_dict = {'id': uuidutils.generate_uuid(),
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': real_tenant_id,
'shared': False,
'subnets': []}
return_value = [input_dict]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
res = self.api.get(_get_path('networks',
fmt=self.fmt), extra_environ=env)
res = self.deserialize(res)
self.assertIn('networks', res)
if not req_tenant_id or req_tenant_id == real_tenant_id:
# expect full list returned
self.assertEqual(len(res['networks']), 1)
output_dict = res['networks'][0]
input_dict['shared'] = False
self.assertEqual(len(input_dict), len(output_dict))
for k, v in input_dict.iteritems():
self.assertEqual(v, output_dict[k])
else:
# expect no results
self.assertEqual(len(res['networks']), 0)
    def test_list_noauth(self):
        """No auth context: the full list is returned."""
        self._test_list(None, _uuid())
    def test_list_keystone(self):
        """Requester owns the network: the full list is returned."""
        tenant_id = _uuid()
        self._test_list(tenant_id, tenant_id)
    def test_list_keystone_bad(self):
        """Requester does not own the network: an empty list is returned."""
        tenant_id = _uuid()
        self._test_list(tenant_id + "bad", tenant_id)
    def test_list_pagination(self):
        """A full page produces both 'next' and 'previous' links whose query
        strings carry the right marker and page_reverse values."""
        id1 = str(_uuid())
        id2 = str(_uuid())
        input_dict1 = {'id': id1,
                       'name': 'net1',
                       'admin_state_up': True,
                       'status': "ACTIVE",
                       'tenant_id': '',
                       'shared': False,
                       'subnets': []}
        input_dict2 = {'id': id2,
                       'name': 'net2',
                       'admin_state_up': True,
                       'status': "ACTIVE",
                       'tenant_id': '',
                       'shared': False,
                       'subnets': []}
        return_value = [input_dict1, input_dict2]
        instance = self.plugin.return_value
        instance.get_networks.return_value = return_value
        params = {'limit': ['2'],
                  'marker': [str(_uuid())],
                  'sort_key': ['name'],
                  'sort_dir': ['asc']}
        res = self.api.get(_get_path('networks'),
                           params=params).json
        self.assertEqual(len(res['networks']), 2)
        self.assertEqual(sorted([id1, id2]),
                         sorted([res['networks'][0]['id'],
                                 res['networks'][1]['id']]))
        self.assertIn('networks_links', res)
        next_links = []
        previous_links = []
        for r in res['networks_links']:
            if r['rel'] == 'next':
                next_links.append(r)
            if r['rel'] == 'previous':
                previous_links.append(r)
        self.assertEqual(len(next_links), 1)
        self.assertEqual(len(previous_links), 1)
        # The 'next' link must use the last item's id as marker.
        url = urlparse.urlparse(next_links[0]['href'])
        self.assertEqual(url.path, _get_path('networks'))
        params['marker'] = [id2]
        self.assertEqual(urlparse.parse_qs(url.query), params)
        # The 'previous' link uses the first item's id and page_reverse=True.
        url = urlparse.urlparse(previous_links[0]['href'])
        self.assertEqual(url.path, _get_path('networks'))
        params['marker'] = [id1]
        params['page_reverse'] = ['True']
        self.assertEqual(urlparse.parse_qs(url.query), params)
def test_list_pagination_with_last_page(self):
id = str(_uuid())
input_dict = {'id': id,
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
return_value = [input_dict]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': str(_uuid())}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(len(res['networks']), 1)
self.assertEqual(id, res['networks'][0]['id'])
self.assertIn('networks_links', res)
previous_links = []
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'next')
if r['rel'] == 'previous':
previous_links.append(r)
self.assertEqual(len(previous_links), 1)
url = urlparse.urlparse(previous_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expect_params = params.copy()
expect_params['marker'] = [id]
expect_params['page_reverse'] = ['True']
self.assertEqual(urlparse.parse_qs(url.query), expect_params)
    def test_list_pagination_with_empty_page(self):
        """An empty page keeps only a 'previous' link, with no marker."""
        return_value = []
        instance = self.plugin.return_value
        instance.get_networks.return_value = return_value
        params = {'limit': ['2'],
                  'marker': str(_uuid())}
        res = self.api.get(_get_path('networks'),
                           params=params).json
        self.assertEqual(res['networks'], [])
        previous_links = []
        if 'networks_links' in res:
            for r in res['networks_links']:
                self.assertNotEqual(r['rel'], 'next')
                if r['rel'] == 'previous':
                    previous_links.append(r)
        self.assertEqual(len(previous_links), 1)
        url = urlparse.urlparse(previous_links[0]['href'])
        self.assertEqual(url.path, _get_path('networks'))
        expect_params = params.copy()
        # With no items there is no marker to point back at.
        del expect_params['marker']
        expect_params['page_reverse'] = ['True']
        self.assertEqual(urlparse.parse_qs(url.query), expect_params)
def test_list_pagination_reverse_with_last_page(self):
id = str(_uuid())
input_dict = {'id': id,
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
return_value = [input_dict]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': [str(_uuid())],
'page_reverse': ['True']}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(len(res['networks']), 1)
self.assertEqual(id, res['networks'][0]['id'])
self.assertIn('networks_links', res)
next_links = []
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'previous')
if r['rel'] == 'next':
next_links.append(r)
self.assertEqual(len(next_links), 1)
url = urlparse.urlparse(next_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expected_params = params.copy()
del expected_params['page_reverse']
expected_params['marker'] = [id]
self.assertEqual(urlparse.parse_qs(url.query),
expected_params)
    def test_list_pagination_reverse_with_empty_page(self):
        """Reverse paging an empty page keeps only a marker-less 'next'."""
        return_value = []
        instance = self.plugin.return_value
        instance.get_networks.return_value = return_value
        params = {'limit': ['2'],
                  'marker': [str(_uuid())],
                  'page_reverse': ['True']}
        res = self.api.get(_get_path('networks'),
                           params=params).json
        self.assertEqual(res['networks'], [])
        next_links = []
        if 'networks_links' in res:
            for r in res['networks_links']:
                self.assertNotEqual(r['rel'], 'previous')
                if r['rel'] == 'next':
                    next_links.append(r)
        self.assertEqual(len(next_links), 1)
        url = urlparse.urlparse(next_links[0]['href'])
        self.assertEqual(url.path, _get_path('networks'))
        expect_params = params.copy()
        del expect_params['marker']
        del expect_params['page_reverse']
        self.assertEqual(urlparse.parse_qs(url.query), expect_params)
    def test_create(self):
        """A valid network POST returns 201 and echoes id/status."""
        net_id = _uuid()
        data = {'network': {'name': 'net1', 'admin_state_up': True,
                            'tenant_id': _uuid()}}
        return_value = {'subnets': [], 'status': "ACTIVE",
                        'id': net_id}
        return_value.update(data['network'].copy())
        instance = self.plugin.return_value
        instance.create_network.return_value = return_value
        instance.get_networks_count.return_value = 0
        res = self.api.post(_get_path('networks', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/' + self.fmt)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        res = self.deserialize(res)
        self.assertIn('network', res)
        net = res['network']
        self.assertEqual(net['id'], net_id)
        self.assertEqual(net['status'], "ACTIVE")
    def test_create_use_defaults(self):
        """Omitted attributes (admin_state_up, shared) are filled with their
        schema defaults before the plugin is called."""
        net_id = _uuid()
        initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
        full_input = {'network': {'admin_state_up': True,
                                  'shared': False}}
        full_input['network'].update(initial_input['network'])
        return_value = {'id': net_id, 'status': "ACTIVE"}
        return_value.update(full_input['network'])
        instance = self.plugin.return_value
        instance.create_network.return_value = return_value
        instance.get_networks_count.return_value = 0
        res = self.api.post(_get_path('networks', fmt=self.fmt),
                            self.serialize(initial_input),
                            content_type='application/' + self.fmt)
        instance.create_network.assert_called_with(mock.ANY,
                                                   network=full_input)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        res = self.deserialize(res)
        self.assertIn('network', res)
        net = res['network']
        self.assertEqual(net['id'], net_id)
        self.assertEqual(net['admin_state_up'], True)
        self.assertEqual(net['status'], "ACTIVE")
    def test_create_no_keystone_env(self):
        """Without an auth context, a body lacking tenant_id is a 400."""
        data = {'name': 'net1'}
        self._test_create_failure_bad_request('networks', data)
    def test_create_with_keystone_env(self):
        """A missing tenant_id is filled from the request's auth context."""
        tenant_id = _uuid()
        net_id = _uuid()
        env = {'neutron.context': context.Context('', tenant_id)}
        # tenant_id should be fetched from env
        initial_input = {'network': {'name': 'net1'}}
        full_input = {'network': {'admin_state_up': True,
                      'shared': False, 'tenant_id': tenant_id}}
        full_input['network'].update(initial_input['network'])
        return_value = {'id': net_id, 'status': "ACTIVE"}
        return_value.update(full_input['network'])
        instance = self.plugin.return_value
        instance.create_network.return_value = return_value
        instance.get_networks_count.return_value = 0
        res = self.api.post(_get_path('networks', fmt=self.fmt),
                            self.serialize(initial_input),
                            content_type='application/' + self.fmt,
                            extra_environ=env)
        instance.create_network.assert_called_with(mock.ANY,
                                                   network=full_input)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
    def test_create_bad_keystone_tenant(self):
        """A body tenant_id that mismatches the auth context is a 400."""
        tenant_id = _uuid()
        data = {'network': {'name': 'net1', 'tenant_id': tenant_id}}
        env = {'neutron.context': context.Context('', tenant_id + "bad")}
        self._test_create_failure_bad_request('networks', data,
                                              extra_environ=env)
    def test_create_no_body(self):
        """A body without the expected resource key is a 400."""
        data = {'whoa': None}
        self._test_create_failure_bad_request('networks', data)
    def test_create_no_resource(self):
        """An entirely empty body is a 400."""
        data = {}
        self._test_create_failure_bad_request('networks', data)
    def test_create_missing_attr(self):
        """A port body missing required attributes is a 400."""
        data = {'port': {'what': 'who', 'tenant_id': _uuid()}}
        self._test_create_failure_bad_request('ports', data)
    def test_create_readonly_attr(self):
        """Posting a read-only attribute ('status') is a 400."""
        data = {'network': {'name': 'net1', 'tenant_id': _uuid(),
                            'status': "ACTIVE"}}
        self._test_create_failure_bad_request('networks', data)
    def test_create_bulk(self):
        """A bulk POST of several networks returns 201."""
        data = {'networks': [{'name': 'net1',
                              'admin_state_up': True,
                              'tenant_id': _uuid()},
                             {'name': 'net2',
                              'admin_state_up': True,
                              'tenant_id': _uuid()}]}
        def side_effect(context, network):
            # Echo each posted network back, with an empty subnets list.
            net = network.copy()
            net['network'].update({'subnets': []})
            return net['network']
        instance = self.plugin.return_value
        instance.create_network.side_effect = side_effect
        instance.get_networks_count.return_value = 0
        res = self.api.post(_get_path('networks', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/' + self.fmt)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
def _test_create_failure_bad_request(self, resource, data, **kwargs):
res = self.api.post(_get_path(resource, fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True, **kwargs)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
    def test_create_bulk_networks_none(self):
        """A bulk body of None is a 400."""
        self._test_create_failure_bad_request('networks', {'networks': None})
    def test_create_bulk_networks_empty_list(self):
        """An empty bulk list is a 400."""
        self._test_create_failure_bad_request('networks', {'networks': []})
    def test_create_bulk_missing_attr(self):
        """A bulk port item missing required attributes is a 400."""
        data = {'ports': [{'what': 'who', 'tenant_id': _uuid()}]}
        self._test_create_failure_bad_request('ports', data)
    def test_create_bulk_partial_body(self):
        """A bulk request where one item is incomplete fails as a whole."""
        data = {'ports': [{'device_id': 'device_1',
                           'tenant_id': _uuid()},
                          {'tenant_id': _uuid()}]}
        self._test_create_failure_bad_request('ports', data)
    def test_create_attr_not_specified(self):
        """Attributes the client omits are passed to the plugin as the
        ATTR_NOT_SPECIFIED sentinel (e.g. mac_address, fixed_ips)."""
        net_id = _uuid()
        tenant_id = _uuid()
        device_id = _uuid()
        initial_input = {'port': {'name': '', 'network_id': net_id,
                                  'tenant_id': tenant_id,
                                  'device_id': device_id,
                                  'admin_state_up': True}}
        full_input = {'port': {'admin_state_up': True,
                               'mac_address': attributes.ATTR_NOT_SPECIFIED,
                               'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
                               'device_owner': ''}}
        full_input['port'].update(initial_input['port'])
        return_value = {'id': _uuid(), 'status': 'ACTIVE',
                        'admin_state_up': True,
                        'mac_address': 'ca:fe:de:ad:be:ef',
                        'device_id': device_id,
                        'device_owner': ''}
        return_value.update(initial_input['port'])
        instance = self.plugin.return_value
        # NOTE(review): `unicode` is Python-2-only; a py3 port would need
        # six.text_type or str here.
        instance.get_network.return_value = {'tenant_id': unicode(tenant_id)}
        instance.get_ports_count.return_value = 1
        instance.create_port.return_value = return_value
        res = self.api.post(_get_path('ports', fmt=self.fmt),
                            self.serialize(initial_input),
                            content_type='application/' + self.fmt)
        instance.create_port.assert_called_with(mock.ANY, port=full_input)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        res = self.deserialize(res)
        self.assertIn('port', res)
        port = res['port']
        self.assertEqual(port['network_id'], net_id)
        self.assertEqual(port['mac_address'], 'ca:fe:de:ad:be:ef')
    def test_create_return_extra_attr(self):
        """Plugin-returned attributes unknown to the schema are stripped
        from the API response."""
        net_id = _uuid()
        data = {'network': {'name': 'net1', 'admin_state_up': True,
                            'tenant_id': _uuid()}}
        return_value = {'subnets': [], 'status': "ACTIVE",
                        'id': net_id, 'v2attrs:something': "123"}
        return_value.update(data['network'].copy())
        instance = self.plugin.return_value
        instance.create_network.return_value = return_value
        instance.get_networks_count.return_value = 0
        res = self.api.post(_get_path('networks', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/' + self.fmt)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        res = self.deserialize(res)
        self.assertIn('network', res)
        net = res['network']
        self.assertEqual(net['id'], net_id)
        self.assertEqual(net['status'], "ACTIVE")
        self.assertNotIn('v2attrs:something', net)
    def test_fields(self):
        """A plain GET of a single network succeeds (smoke test)."""
        return_value = {'name': 'net1', 'admin_state_up': True,
                        'subnets': []}
        instance = self.plugin.return_value
        instance.get_network.return_value = return_value
        self.api.get(_get_path('networks',
                               id=uuidutils.generate_uuid(),
                               fmt=self.fmt))
    def _test_delete(self, req_tenant_id, real_tenant_id, expected_code,
                     expect_errors=False):
        """DELETE a network as *req_tenant_id* that belongs to
        *real_tenant_id* and assert the response status is *expected_code*."""
        env = {}
        if req_tenant_id:
            env = {'neutron.context': context.Context('', req_tenant_id)}
        instance = self.plugin.return_value
        instance.get_network.return_value = {'tenant_id': real_tenant_id,
                                             'shared': False}
        instance.delete_network.return_value = None
        res = self.api.delete(_get_path('networks',
                                        id=uuidutils.generate_uuid(),
                                        fmt=self.fmt),
                              extra_environ=env,
                              expect_errors=expect_errors)
        self.assertEqual(res.status_int, expected_code)
    def test_delete_noauth(self):
        """No auth context: delete succeeds with 204."""
        self._test_delete(None, _uuid(), exc.HTTPNoContent.code)
    def test_delete_keystone(self):
        """Owner deletes own network: 204."""
        tenant_id = _uuid()
        self._test_delete(tenant_id, tenant_id, exc.HTTPNoContent.code)
    def test_delete_keystone_bad_tenant(self):
        """Non-owner delete is masked as 404 (not 403) to avoid leaking
        resource existence."""
        tenant_id = _uuid()
        self._test_delete(tenant_id + "bad", tenant_id,
                          exc.HTTPNotFound.code, expect_errors=True)
    def _test_get(self, req_tenant_id, real_tenant_id, expected_code,
                  expect_errors=False):
        """GET a network as *req_tenant_id* owned by *real_tenant_id*;
        assert *expected_code* and return the raw response.

        A requester id ending in 'another' simulates a shared network
        accessed with the 'tenant_admin' role.
        """
        env = {}
        shared = False
        if req_tenant_id:
            env = {'neutron.context': context.Context('', req_tenant_id)}
            if req_tenant_id.endswith('another'):
                shared = True
                env['neutron.context'].roles = ['tenant_admin']
        data = {'tenant_id': real_tenant_id, 'shared': shared}
        instance = self.plugin.return_value
        instance.get_network.return_value = data
        res = self.api.get(_get_path('networks',
                                     id=uuidutils.generate_uuid(),
                                     fmt=self.fmt),
                           extra_environ=env,
                           expect_errors=expect_errors)
        self.assertEqual(res.status_int, expected_code)
        return res
    def test_get_noauth(self):
        """No auth context: GET succeeds with 200."""
        self._test_get(None, _uuid(), 200)
    def test_get_keystone(self):
        """Owner GETs own network: 200."""
        tenant_id = _uuid()
        self._test_get(tenant_id, tenant_id, 200)
    def test_get_keystone_bad_tenant(self):
        """Non-owner GET is masked as 404."""
        tenant_id = _uuid()
        self._test_get(tenant_id + "bad", tenant_id,
                       exc.HTTPNotFound.code, expect_errors=True)
    def test_get_keystone_shared_network(self):
        """A shared network is visible to a different tenant: 200."""
        tenant_id = _uuid()
        self._test_get(tenant_id + "another", tenant_id, 200)
    def test_get_keystone_strip_admin_only_attribute(self):
        """Attributes made admin-only by policy are stripped from the
        response for a non-admin requester."""
        tenant_id = _uuid()
        # Inject rule in policy engine
        policy.init()
        self.addCleanup(policy.reset)
        rules = {'get_network:name': common_policy.parse_rule(
            "rule:admin_only")}
        policy.set_rules(rules, overwrite=False)
        res = self._test_get(tenant_id, tenant_id, 200)
        res = self.deserialize(res)
        self.assertNotIn('name', res['network'])
    def _test_update(self, req_tenant_id, real_tenant_id, expected_code,
                     expect_errors=False):
        """PUT a partial network update as *req_tenant_id* against a network
        owned by *real_tenant_id*; assert *expected_code* and that the
        pre-update GET requested the 'id' field."""
        env = {}
        if req_tenant_id:
            env = {'neutron.context': context.Context('', req_tenant_id)}
        # leave out 'name' field intentionally
        data = {'network': {'admin_state_up': True}}
        return_value = {'subnets': []}
        return_value.update(data['network'].copy())
        instance = self.plugin.return_value
        instance.get_network.return_value = {'tenant_id': real_tenant_id,
                                             'shared': False}
        instance.update_network.return_value = return_value
        res = self.api.put(_get_path('networks',
                                     id=uuidutils.generate_uuid(),
                                     fmt=self.fmt),
                           self.serialize(data),
                           extra_environ=env,
                           expect_errors=expect_errors)
        # Ensure id attribute is included in fields returned by GET call
        # in update procedure.
        self.assertEqual(1, instance.get_network.call_count)
        self.assertIn('id', instance.get_network.call_args[1]['fields'])
        self.assertEqual(res.status_int, expected_code)
    def test_update_noauth(self):
        """No auth context: update succeeds with 200."""
        self._test_update(None, _uuid(), 200)
    def test_update_keystone(self):
        """Owner updates own network: 200."""
        tenant_id = _uuid()
        self._test_update(tenant_id, tenant_id, 200)
    def test_update_keystone_bad_tenant(self):
        """Non-owner update is masked as 404."""
        tenant_id = _uuid()
        self._test_update(tenant_id + "bad", tenant_id,
                          exc.HTTPNotFound.code, expect_errors=True)
def test_update_readonly_field(self):
data = {'network': {'status': "NANANA"}}
res = self.api.put(_get_path('networks', id=_uuid()),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, 400)
def test_invalid_attribute_field(self):
data = {'network': {'invalid_key1': "foo1", 'invalid_key2': "foo2"}}
res = self.api.put(_get_path('networks', id=_uuid()),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(res.status_int, 400)
class SubresourceTest(base.BaseTestCase, testlib_plugin.PluginSetupHelper):
    """Routing/dispatch tests for 'dummy' sub-resources nested under
    networks (CRUD mapped to *_network_dummy plugin methods)."""

    def setUp(self):
        super(SubresourceTest, self).setUp()
        plugin = 'neutron.tests.unit.test_api_v2.TestSubresourcePlugin'
        extensions.PluginAwareExtensionManager._instance = None
        # Save the global RESOURCE_ATTRIBUTE_MAP
        self.saved_attr_map = {}
        # py2/py3-compatible iteration (was the py2-only iteritems()).
        for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.items():
            self.saved_attr_map[resource] = attrs.copy()
        self.config_parse()
        self.setup_coreplugin(plugin)
        self._plugin_patcher = mock.patch(plugin, autospec=True)
        self.plugin = self._plugin_patcher.start()
        # Register the fake sub-resource and its attribute schema.
        router.SUB_RESOURCES['dummy'] = {
            'collection_name': 'dummies',
            'parent': {'collection_name': 'networks',
                       'member_name': 'network'}
        }
        attributes.RESOURCE_ATTRIBUTE_MAP['dummies'] = {
            'foo': {'allow_post': True, 'allow_put': True,
                    'validate': {'type:string': None},
                    'default': '', 'is_visible': True},
            'tenant_id': {'allow_post': True, 'allow_put': False,
                          'validate': {'type:string': None},
                          'required_by_policy': True,
                          'is_visible': True}
        }
        api = router.APIRouter()
        self.api = webtest.TestApp(api)

    def tearDown(self):
        router.SUB_RESOURCES = {}
        # Restore the global RESOURCE_ATTRIBUTE_MAP
        attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
        super(SubresourceTest, self).tearDown()

    def test_index_sub_resource(self):
        """GET on the collection dispatches to get_network_dummies."""
        instance = self.plugin.return_value
        self.api.get('/networks/id1/dummies')
        instance.get_network_dummies.assert_called_once_with(mock.ANY,
                                                             filters=mock.ANY,
                                                             fields=mock.ANY,
                                                             network_id='id1')

    def test_show_sub_resource(self):
        """GET on a member dispatches to get_network_dummy."""
        instance = self.plugin.return_value
        dummy_id = _uuid()
        self.api.get('/networks/id1' + _get_path('dummies', id=dummy_id))
        instance.get_network_dummy.assert_called_once_with(mock.ANY,
                                                           dummy_id,
                                                           network_id='id1',
                                                           fields=mock.ANY)

    def test_create_sub_resource(self):
        """POST dispatches to create_network_dummy with the parent id."""
        instance = self.plugin.return_value
        body = {'dummy': {'foo': 'bar', 'tenant_id': _uuid()}}
        self.api.post_json('/networks/id1/dummies', body)
        instance.create_network_dummy.assert_called_once_with(mock.ANY,
                                                              network_id='id1',
                                                              dummy=body)

    def test_update_sub_resource(self):
        """PUT dispatches to update_network_dummy."""
        instance = self.plugin.return_value
        dummy_id = _uuid()
        body = {'dummy': {'foo': 'bar'}}
        self.api.put_json('/networks/id1' + _get_path('dummies', id=dummy_id),
                          body)
        instance.update_network_dummy.assert_called_once_with(mock.ANY,
                                                              dummy_id,
                                                              network_id='id1',
                                                              dummy=body)

    def test_update_subresource_to_none(self):
        """PUT with an empty member body still reaches the plugin."""
        instance = self.plugin.return_value
        dummy_id = _uuid()
        body = {'dummy': {}}
        self.api.put_json('/networks/id1' + _get_path('dummies', id=dummy_id),
                          body)
        instance.update_network_dummy.assert_called_once_with(mock.ANY,
                                                              dummy_id,
                                                              network_id='id1',
                                                              dummy=body)

    def test_delete_sub_resource(self):
        """DELETE dispatches to delete_network_dummy."""
        instance = self.plugin.return_value
        dummy_id = _uuid()
        self.api.delete('/networks/id1' + _get_path('dummies', id=dummy_id))
        instance.delete_network_dummy.assert_called_once_with(mock.ANY,
                                                              dummy_id,
                                                              network_id='id1')
# Note: since all resources use the same controller and validation
# logic, we actually get really good coverage from testing just networks.
class V2Views(base.BaseTestCase):
    """Checks that the controller view filters out unknown attributes and
    keeps all known ones for each core resource."""

    def _view(self, keys, collection, resource):
        """Render a fake object through the controller view and assert the
        'fake' attribute is dropped while every key in *keys* survives."""
        payload = {key: 'value' for key in keys}
        payload['fake'] = 'value'
        attrs = attributes.RESOURCE_ATTRIBUTE_MAP[collection]
        ctrl = v2_base.Controller(None, collection, resource, attrs)
        rendered = ctrl._view(context.get_admin_context(), payload)
        self.assertNotIn('fake', rendered)
        for key in keys:
            self.assertIn(key, rendered)

    def test_network(self):
        """All core network attributes survive the view."""
        keys = ('id', 'name', 'subnets', 'admin_state_up', 'status',
                'tenant_id')
        self._view(keys, 'networks', 'network')

    def test_port(self):
        """All core port attributes survive the view."""
        keys = ('id', 'network_id', 'mac_address', 'fixed_ips',
                'device_id', 'admin_state_up', 'tenant_id', 'status')
        self._view(keys, 'ports', 'port')

    def test_subnet(self):
        """All core subnet attributes survive the view."""
        keys = ('id', 'network_id', 'tenant_id', 'gateway_ip',
                'ip_version', 'cidr', 'enable_dhcp')
        self._view(keys, 'subnets', 'subnet')
class NotificationTest(APIv2TestBase):
    """Verifies that CRUD operations emit <resource>.<op>.start/end
    notifications at INFO priority."""

    def setUp(self):
        super(NotificationTest, self).setUp()
        fake_notifier.reset()

    def _resource_op_notifier(self, opname, resource, expected_errors=False):
        """Run one create/update/delete and assert exactly a start and an
        end notification were recorded, in order, with INFO priority."""
        initial_input = {resource: {'name': 'myname'}}
        instance = self.plugin.return_value
        instance.get_networks.return_value = initial_input
        instance.get_networks_count.return_value = 0
        expected_code = exc.HTTPCreated.code
        if opname == 'create':
            initial_input[resource]['tenant_id'] = _uuid()
            res = self.api.post_json(
                _get_path('networks'),
                initial_input, expect_errors=expected_errors)
        if opname == 'update':
            res = self.api.put_json(
                _get_path('networks', id=_uuid()),
                initial_input, expect_errors=expected_errors)
            expected_code = exc.HTTPOk.code
        if opname == 'delete':
            initial_input[resource]['tenant_id'] = _uuid()
            res = self.api.delete(
                _get_path('networks', id=_uuid()),
                expect_errors=expected_errors)
            expected_code = exc.HTTPNoContent.code
        expected_events = ('.'.join([resource, opname, "start"]),
                           '.'.join([resource, opname, "end"]))
        self.assertEqual(len(fake_notifier.NOTIFICATIONS),
                         len(expected_events))
        for msg, event in zip(fake_notifier.NOTIFICATIONS, expected_events):
            self.assertEqual('INFO', msg['priority'])
            self.assertEqual(event, msg['event_type'])
        self.assertEqual(res.status_int, expected_code)

    def test_network_create_notifer(self):
        """network.create emits start/end notifications."""
        self._resource_op_notifier('create', 'network')

    def test_network_delete_notifer(self):
        """network.delete emits start/end notifications."""
        self._resource_op_notifier('delete', 'network')

    def test_network_update_notifer(self):
        """network.update emits start/end notifications."""
        self._resource_op_notifier('update', 'network')
class DHCPNotificationTest(APIv2TestBase):
    """Checks that network CRUD operations notify the DHCP agent once per
    affected resource (including bulk creates)."""
    def _test_dhcp_notifier(self, opname, resource, initial_input=None):
        instance = self.plugin.return_value
        instance.get_networks.return_value = initial_input
        instance.get_networks_count.return_value = 0
        # Default matches 'create'; the other branches override it below.
        expected_code = exc.HTTPCreated.code
        with mock.patch.object(dhcp_rpc_agent_api.DhcpAgentNotifyAPI,
                               'notify') as dhcp_notifier:
            if opname == 'create':
                res = self.api.post_json(
                    _get_path('networks'),
                    initial_input)
            if opname == 'update':
                res = self.api.put_json(
                    _get_path('networks', id=_uuid()),
                    initial_input)
                expected_code = exc.HTTPOk.code
            if opname == 'delete':
                res = self.api.delete(_get_path('networks', id=_uuid()))
                expected_code = exc.HTTPNoContent.code
        expected_item = mock.call(mock.ANY, mock.ANY,
                                  resource + "." + opname + ".end")
        # Bulk requests use the plural key ('networks'); fall back to it
        # when the singular key is absent from the input.
        if initial_input and resource not in initial_input:
            resource += 's'
        # One notification per item in a bulk request, else exactly one.
        num = len(initial_input[resource]) if initial_input and isinstance(
            initial_input[resource], list) else 1
        # xrange: this module still targets Python 2 (it also uses iteritems).
        expected = [expected_item for x in xrange(num)]
        self.assertEqual(expected, dhcp_notifier.call_args_list)
        self.assertEqual(num, dhcp_notifier.call_count)
        self.assertEqual(expected_code, res.status_int)
    def test_network_create_dhcp_notifer(self):
        input = {'network': {'name': 'net',
                             'tenant_id': _uuid()}}
        self._test_dhcp_notifier('create', 'network', input)
    def test_network_delete_dhcp_notifer(self):
        self._test_dhcp_notifier('delete', 'network')
    def test_network_update_dhcp_notifer(self):
        input = {'network': {'name': 'net'}}
        self._test_dhcp_notifier('update', 'network', input)
    def test_networks_create_bulk_dhcp_notifer(self):
        input = {'networks': [{'name': 'net1',
                               'tenant_id': _uuid()},
                              {'name': 'net2',
                               'tenant_id': _uuid()}]}
        self._test_dhcp_notifier('create', 'network', input)
class QuotaTest(APIv2TestBase):
    """Exercises quota enforcement on network creation.

    Fix: removed the `full_input` locals that were built but never used in
    the first two tests (dead code left over from a copy of another test).
    """

    def test_create_network_quota(self):
        """Creating past the quota limit must return a quota-exceeded error."""
        cfg.CONF.set_override('quota_network', 1, group='QUOTAS')
        initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
        instance = self.plugin.return_value
        # The quota of 1 is already consumed, so the POST must be rejected.
        instance.get_networks_count.return_value = 1
        res = self.api.post_json(
            _get_path('networks'), initial_input, expect_errors=True)
        instance.get_networks_count.assert_called_with(mock.ANY,
                                                       filters=mock.ANY)
        self.assertIn("Quota exceeded for resources",
                      res.json['NeutronError']['message'])

    def test_create_network_quota_no_counts(self):
        """When the plugin cannot count, usage falls back to listing."""
        cfg.CONF.set_override('quota_network', 1, group='QUOTAS')
        initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
        instance = self.plugin.return_value
        instance.get_networks_count.side_effect = (
            NotImplementedError())
        # One existing network returned by get_networks() consumes the quota.
        instance.get_networks.return_value = ["foo"]
        res = self.api.post_json(
            _get_path('networks'), initial_input, expect_errors=True)
        instance.get_networks_count.assert_called_with(mock.ANY,
                                                       filters=mock.ANY)
        self.assertIn("Quota exceeded for resources",
                      res.json['NeutronError']['message'])

    def test_create_network_quota_without_limit(self):
        """A quota of -1 means unlimited; creation succeeds despite usage."""
        cfg.CONF.set_override('quota_network', -1, group='QUOTAS')
        initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
        instance = self.plugin.return_value
        instance.get_networks_count.return_value = 3
        res = self.api.post_json(
            _get_path('networks'), initial_input)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
class ExtensionTestCase(base.BaseTestCase, testlib_plugin.PluginSetupHelper):
    """Verifies that attributes declared by the 'v2attrs' test extension are
    passed through to the plugin on create, while attributes never declared
    anywhere are stripped from the response."""
    def setUp(self):
        super(ExtensionTestCase, self).setUp()
        plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2'
        # Ensure existing ExtensionManager is not used
        extensions.PluginAwareExtensionManager._instance = None
        # Save the global RESOURCE_ATTRIBUTE_MAP
        # (extensions mutate it; tearDown restores this snapshot).
        self.saved_attr_map = {}
        for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
            self.saved_attr_map[resource] = attrs.copy()
        # Create the default configurations
        self.config_parse()
        # Update the plugin and extensions path
        self.setup_coreplugin(plugin)
        cfg.CONF.set_override('api_extensions_path', EXTDIR)
        self._plugin_patcher = mock.patch(plugin, autospec=True)
        self.plugin = self._plugin_patcher.start()
        # Instantiate mock plugin and enable the V2attributes extension
        manager.NeutronManager.get_plugin().supported_extension_aliases = (
            ["v2attrs"])
        api = router.APIRouter()
        self.api = webtest.TestApp(api)
        # Reset the quota driver so the ConfDriver override below takes effect.
        quota.QUOTAS._driver = None
        cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
                              group='QUOTAS')
    def tearDown(self):
        super(ExtensionTestCase, self).tearDown()
        self.api = None
        self.plugin = None
        # Restore the global RESOURCE_ATTRIBUTE_MAP
        attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
    def test_extended_create(self):
        """Extension attributes survive create; undeclared ones are dropped."""
        net_id = _uuid()
        initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid(),
                                     'v2attrs:something_else': "abc"}}
        # Defaults the API layer is expected to fill in before the plugin call.
        data = {'network': {'admin_state_up': True, 'shared': False}}
        data['network'].update(initial_input['network'])
        # 'v2attrs:something' is declared by the extension and must be
        # returned; 'v2attrs:something_else' is not declared for responses.
        return_value = {'subnets': [], 'status': "ACTIVE",
                        'id': net_id,
                        'v2attrs:something': "123"}
        return_value.update(data['network'].copy())
        instance = self.plugin.return_value
        instance.create_network.return_value = return_value
        instance.get_networks_count.return_value = 0
        res = self.api.post_json(_get_path('networks'), initial_input)
        instance.create_network.assert_called_with(mock.ANY,
                                                   network=data)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        self.assertIn('network', res.json)
        net = res.json['network']
        self.assertEqual(net['id'], net_id)
        self.assertEqual(net['status'], "ACTIVE")
        self.assertEqual(net['v2attrs:something'], "123")
        self.assertNotIn('v2attrs:something_else', net)
class TestSubresourcePlugin(object):
    """Stub plugin exposing the CRUD surface for the 'dummy' sub-resource of
    a network; every operation returns an empty result."""

    def get_network_dummies(self, context, network_id,
                            filters=None, fields=None):
        # List: no dummies exist.
        return []

    def get_network_dummy(self, context, id, network_id,
                          fields=None):
        # Show: empty resource body.
        return {}

    def create_network_dummy(self, context, network_id, dummy):
        return {}

    def update_network_dummy(self, context, id, network_id, dummy):
        return {}

    def delete_network_dummy(self, context, id, network_id):
        return None
class ListArgsTestCase(base.BaseTestCase):
    """Unit tests for api_common.list_args()."""

    def test_list_args(self):
        # Repeated 'fields' query parameters are collected; others ignored.
        request = webob.Request.blank('/?fields=4&foo=3&fields=2&bar=1')
        self.assertEqual(sorted(api_common.list_args(request, 'fields')),
                         ['2', '4'])

    def test_list_args_with_empty(self):
        # No 'fields' parameter at all yields an empty list.
        request = webob.Request.blank('/?foo=4&bar=3&baz=2&qux=1')
        self.assertEqual([], api_common.list_args(request, 'fields'))
class FiltersTestCase(base.BaseTestCase):
    """Unit tests for api_common.get_filters() query-string parsing."""

    def test_all_skip_args(self):
        # Every parameter is in the skip list, so nothing survives.
        request = webob.Request.blank('/?fields=4&fields=3&fields=2&fields=1')
        self.assertEqual({}, api_common.get_filters(request, None,
                                                    ["fields"]))

    def test_blank_values(self):
        # Parameters with empty values are dropped entirely.
        request = webob.Request.blank('/?foo=&bar=&baz=&qux=')
        self.assertEqual({}, api_common.get_filters(request, {}))

    def test_no_attr_info(self):
        # Without attribute info, every value stays a list of strings.
        request = webob.Request.blank('/?foo=4&bar=3&baz=2&qux=1')
        expected = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
        self.assertEqual(api_common.get_filters(request, {}), expected)

    def test_attr_info_without_conversion(self):
        # Attribute info without converters must not change the values.
        request = webob.Request.blank('/?foo=4&bar=3&baz=2&qux=1')
        attr_info = {'foo': {'key': 'val'}}
        expected = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
        self.assertEqual(api_common.get_filters(request, attr_info), expected)

    def test_attr_info_with_convert_list_to(self):
        # convert_list_to folds repeated key=value pairs into a dict.
        request = webob.Request.blank('/?foo=key=4&bar=3&foo=key=2&qux=1')
        attr_info = {
            'foo': {
                'convert_list_to': attributes.convert_kvp_list_to_dict,
            }
        }
        expected = {'foo': {'key': ['2', '4']}, 'bar': ['3'], 'qux': ['1']}
        self.assertOrderedEqual(expected,
                                api_common.get_filters(request, attr_info))

    def test_attr_info_with_convert_to(self):
        # convert_to is applied to each value of the annotated attribute.
        request = webob.Request.blank('/?foo=4&bar=3&baz=2&qux=1')
        attr_info = {'foo': {'convert_to': attributes.convert_to_int}}
        expected = {'foo': [4], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
        self.assertEqual(api_common.get_filters(request, attr_info), expected)
class CreateResourceTestCase(base.BaseTestCase):
    """Smoke test for v2_base.create_resource()."""

    def test_resource_creation(self):
        # The factory must wrap the controller in a WSGI-compatible resource.
        created = v2_base.create_resource('fakes', 'fake', None, {})
        self.assertIsInstance(created, webob.dec.wsgify)
| |
import ntpath
import os
import posixpath
import sys
import pretend
from pip._internal.utils import appdirs
class TestUserCacheDir:
    """Tests for appdirs.user_cache_dir() on each supported platform."""
    def test_user_cache_dir_win(self, monkeypatch):
        # Record calls so we can assert the right CSIDL constant is used.
        @pretend.call_recorder
        def _get_win_folder(base):
            return "C:\\Users\\test\\AppData\\Local"
        monkeypatch.setattr(
            appdirs,
            "_get_win_folder",
            _get_win_folder,
            raising=False,
        )
        monkeypatch.setattr(appdirs, "WINDOWS", True)
        # Use Windows path semantics regardless of the host OS.
        monkeypatch.setattr(os, "path", ntpath)
        assert (appdirs.user_cache_dir("pip") ==
                "C:\\Users\\test\\AppData\\Local\\pip\\Cache")
        assert _get_win_folder.calls == [pretend.call("CSIDL_LOCAL_APPDATA")]
    def test_user_cache_dir_osx(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        monkeypatch.setenv("HOME", "/home/test")
        monkeypatch.setattr(sys, "platform", "darwin")
        assert appdirs.user_cache_dir("pip") == "/home/test/Library/Caches/pip"
    def test_user_cache_dir_linux(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        monkeypatch.delenv("XDG_CACHE_HOME", raising=False)
        monkeypatch.setenv("HOME", "/home/test")
        monkeypatch.setattr(sys, "platform", "linux2")
        assert appdirs.user_cache_dir("pip") == "/home/test/.cache/pip"
    def test_user_cache_dir_linux_override(self, monkeypatch):
        # XDG_CACHE_HOME takes precedence over the ~/.cache default.
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        monkeypatch.setenv("XDG_CACHE_HOME", "/home/test/.other-cache")
        monkeypatch.setenv("HOME", "/home/test")
        monkeypatch.setattr(sys, "platform", "linux2")
        assert appdirs.user_cache_dir("pip") == "/home/test/.other-cache/pip"
    def test_user_cache_dir_linux_home_slash(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        # Verify that we are not affected by https://bugs.python.org/issue14768
        monkeypatch.delenv("XDG_CACHE_HOME", raising=False)
        monkeypatch.setenv("HOME", "/")
        monkeypatch.setattr(sys, "platform", "linux2")
        assert appdirs.user_cache_dir("pip") == "/.cache/pip"
    def test_user_cache_dir_unicode(self, monkeypatch):
        # Only meaningful on Windows; silently skipped elsewhere.
        if sys.platform != 'win32':
            return
        def my_get_win_folder(csidl_name):
            return u"\u00DF\u00E4\u03B1\u20AC"
        monkeypatch.setattr(appdirs, "_get_win_folder", my_get_win_folder)
        # Do not use the isinstance expression directly in the
        # assert statement, as the Unicode characters in the result
        # cause pytest to fail with an internal error on Python 2.7
        result_is_str = isinstance(appdirs.user_cache_dir('test'), str)
        assert result_is_str, "user_cache_dir did not return a str"
        # Test against regression #3463
        from pip._internal.cli.main_parser import create_main_parser
        create_main_parser().print_help()  # This should not crash
class TestSiteConfigDirs:
    """Tests for appdirs.site_config_dirs() on each supported platform."""

    def test_site_config_dirs_win(self, monkeypatch):
        # Record calls so we can assert the right CSIDL constant is used.
        @pretend.call_recorder
        def fake_win_folder(base):
            return "C:\\ProgramData"
        monkeypatch.setattr(appdirs, "_get_win_folder", fake_win_folder,
                            raising=False)
        monkeypatch.setattr(appdirs, "WINDOWS", True)
        monkeypatch.setattr(os, "path", ntpath)
        assert appdirs.site_config_dirs("pip") == ["C:\\ProgramData\\pip"]
        assert fake_win_folder.calls == [pretend.call("CSIDL_COMMON_APPDATA")]

    def test_site_config_dirs_osx(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        monkeypatch.setenv("HOME", "/home/test")
        monkeypatch.setattr(sys, "platform", "darwin")
        expected = ["/Library/Application Support/pip"]
        assert appdirs.site_config_dirs("pip") == expected

    def test_site_config_dirs_linux(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        monkeypatch.delenv("XDG_CONFIG_DIRS", raising=False)
        monkeypatch.setattr(sys, "platform", "linux2")
        # Without XDG_CONFIG_DIRS the XDG default plus '/etc' applies.
        assert appdirs.site_config_dirs("pip") == ['/etc/xdg/pip', '/etc']

    def test_site_config_dirs_linux_override(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        monkeypatch.setattr(os, "pathsep", ':')
        monkeypatch.setenv("XDG_CONFIG_DIRS", "/spam:/etc:/etc/xdg")
        monkeypatch.setattr(sys, "platform", "linux2")
        # Each XDG_CONFIG_DIRS entry gets the app suffix; '/etc' is appended.
        assert appdirs.site_config_dirs("pip") == [
            '/spam/pip', '/etc/pip', '/etc/xdg/pip', '/etc']
class TestUserDataDir:
    """Tests for appdirs.user_data_dir() on each supported platform."""

    def test_user_data_dir_win_no_roaming(self, monkeypatch):
        @pretend.call_recorder
        def fake_win_folder(base):
            return "C:\\Users\\test\\AppData\\Local"
        monkeypatch.setattr(appdirs, "_get_win_folder", fake_win_folder,
                            raising=False)
        monkeypatch.setattr(appdirs, "WINDOWS", True)
        monkeypatch.setattr(os, "path", ntpath)
        expected = "C:\\Users\\test\\AppData\\Local\\pip"
        assert appdirs.user_data_dir("pip") == expected
        assert fake_win_folder.calls == [pretend.call("CSIDL_LOCAL_APPDATA")]

    def test_user_data_dir_win_yes_roaming(self, monkeypatch):
        @pretend.call_recorder
        def fake_win_folder(base):
            return "C:\\Users\\test\\AppData\\Roaming"
        monkeypatch.setattr(appdirs, "_get_win_folder", fake_win_folder,
                            raising=False)
        monkeypatch.setattr(appdirs, "WINDOWS", True)
        monkeypatch.setattr(os, "path", ntpath)
        expected = "C:\\Users\\test\\AppData\\Roaming\\pip"
        assert appdirs.user_data_dir("pip", roaming=True) == expected
        assert fake_win_folder.calls == [pretend.call("CSIDL_APPDATA")]

    def test_user_data_dir_osx(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        monkeypatch.setenv("HOME", "/home/test")
        monkeypatch.setattr(sys, "platform", "darwin")
        # The expected value depends on whether the Application Support
        # directory exists on the machine running the tests.
        if os.path.isdir('/home/test/Library/Application Support/'):
            expected = "/home/test/Library/Application Support/pip"
        else:
            expected = "/home/test/.config/pip"
        assert appdirs.user_data_dir("pip") == expected

    def test_user_data_dir_linux(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        monkeypatch.delenv("XDG_DATA_HOME", raising=False)
        monkeypatch.setenv("HOME", "/home/test")
        monkeypatch.setattr(sys, "platform", "linux2")
        assert appdirs.user_data_dir("pip") == "/home/test/.local/share/pip"

    def test_user_data_dir_linux_override(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        monkeypatch.setenv("XDG_DATA_HOME", "/home/test/.other-share")
        monkeypatch.setenv("HOME", "/home/test")
        monkeypatch.setattr(sys, "platform", "linux2")
        assert appdirs.user_data_dir("pip") == "/home/test/.other-share/pip"

    def test_user_data_dir_linux_home_slash(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        # Verify that we are not affected by https://bugs.python.org/issue14768
        monkeypatch.delenv("XDG_DATA_HOME", raising=False)
        monkeypatch.setenv("HOME", "/")
        monkeypatch.setattr(sys, "platform", "linux2")
        assert appdirs.user_data_dir("pip") == "/.local/share/pip"
class TestUserConfigDir:
    """Tests for appdirs.user_config_dir() on each supported platform.

    Fix: test_user_config_dir_osx previously asserted on user_data_dir()
    (a copy-paste from TestUserDataDir) and never exercised the function it
    is named after; it now calls user_config_dir().
    """

    def test_user_config_dir_win_no_roaming(self, monkeypatch):
        @pretend.call_recorder
        def _get_win_folder(base):
            return "C:\\Users\\test\\AppData\\Local"
        monkeypatch.setattr(
            appdirs,
            "_get_win_folder",
            _get_win_folder,
            raising=False,
        )
        monkeypatch.setattr(appdirs, "WINDOWS", True)
        monkeypatch.setattr(os, "path", ntpath)
        assert (
            appdirs.user_config_dir("pip", roaming=False) ==
            "C:\\Users\\test\\AppData\\Local\\pip"
        )
        assert _get_win_folder.calls == [pretend.call("CSIDL_LOCAL_APPDATA")]

    def test_user_config_dir_win_yes_roaming(self, monkeypatch):
        @pretend.call_recorder
        def _get_win_folder(base):
            return "C:\\Users\\test\\AppData\\Roaming"
        monkeypatch.setattr(
            appdirs,
            "_get_win_folder",
            _get_win_folder,
            raising=False,
        )
        monkeypatch.setattr(appdirs, "WINDOWS", True)
        monkeypatch.setattr(os, "path", ntpath)
        # Roaming is the default for user_config_dir.
        assert (appdirs.user_config_dir("pip") ==
                "C:\\Users\\test\\AppData\\Roaming\\pip")
        assert _get_win_folder.calls == [pretend.call("CSIDL_APPDATA")]

    def test_user_config_dir_osx(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        monkeypatch.setenv("HOME", "/home/test")
        monkeypatch.setattr(sys, "platform", "darwin")
        # NOTE(review): on macOS user_config_dir presumably resolves to the
        # same location as user_data_dir -- confirm against the appdirs
        # implementation if this assertion starts failing.
        if os.path.isdir('/home/test/Library/Application Support/'):
            assert (appdirs.user_config_dir("pip") ==
                    "/home/test/Library/Application Support/pip")
        else:
            assert (appdirs.user_config_dir("pip") ==
                    "/home/test/.config/pip")

    def test_user_config_dir_linux(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        monkeypatch.delenv("XDG_CONFIG_HOME", raising=False)
        monkeypatch.setenv("HOME", "/home/test")
        monkeypatch.setattr(sys, "platform", "linux2")
        assert appdirs.user_config_dir("pip") == "/home/test/.config/pip"

    def test_user_config_dir_linux_override(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        monkeypatch.setenv("XDG_CONFIG_HOME", "/home/test/.other-config")
        monkeypatch.setenv("HOME", "/home/test")
        monkeypatch.setattr(sys, "platform", "linux2")
        assert appdirs.user_config_dir("pip") == "/home/test/.other-config/pip"

    def test_user_config_dir_linux_home_slash(self, monkeypatch):
        monkeypatch.setattr(appdirs, "WINDOWS", False)
        monkeypatch.setattr(os, "path", posixpath)
        # Verify that we are not affected by https://bugs.python.org/issue14768
        monkeypatch.delenv("XDG_CONFIG_HOME", raising=False)
        monkeypatch.setenv("HOME", "/")
        monkeypatch.setattr(sys, "platform", "linux2")
        assert appdirs.user_config_dir("pip") == "/.config/pip"
| |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import os
import re
import runpy
import sys
# debugpy.__main__ should have preloaded pydevd properly before importing this module.
# Otherwise, some stdlib modules above might have had imported threading before pydevd
# could perform the necessary detours in it.
assert "pydevd" in sys.modules
import pydevd
import debugpy
from debugpy.common import compat, fmt, log
from debugpy.server import api
# Human-readable summary of the accepted target forms; interpolated into
# HELP and into parse_argv()'s "missing target" error.
TARGET = "<filename> | -m <module> | -c <code> | --pid <pid>"
# Usage banner printed for --help and on command-line errors.
HELP = """debugpy {0}
See https://aka.ms/debugpy for documentation.
Usage: debugpy --listen | --connect
[<host>:]<port>
[--wait-for-client]
[--configure-<name> <value>]...
[--log-to <path>] [--log-to-stderr]
{1}
[<arg>]...
""".format(
    debugpy.__version__, TARGET
)
class Options(object):
    """Holder for parsed command-line options; populated by parse_argv()."""
    # "listen" or "connect" (set by set_address).
    mode = None
    # (host, port) tuple once --listen/--connect has been parsed.
    address = None
    log_to = None
    log_to_stderr = False
    target = None  # unicode
    # One of "file", "module", "code", "pid" (set by set_target).
    target_kind = None
    wait_for_client = False
    adapter_access_token = None
# Module-level singleton read by all the switch actions and run_*() helpers.
options = Options()
# Mutable debug configuration; entries are overridable via
# --configure-<name> <value> (see set_config).
options.config = {"qt": "none", "subProcess": True}
def in_range(parser, start, stop):
    """Wrap *parser* so the parsed value is validated against [start, stop).

    Either bound may be None to leave that side unchecked; a value outside
    the range raises ValueError.
    """

    def checked(s):
        value = parser(s)
        if start is not None and value < start:
            raise ValueError(fmt("must be >= {0}", start))
        if stop is not None and value >= stop:
            raise ValueError(fmt("must be < {0}", stop))
        return value

    return checked


# Parser for --pid values: any non-negative integer.
pid = in_range(int, 0, None)
def print_help_and_exit(switch, it):
    """Switch action for -h/-?/--help: print usage to stderr and exit 0."""
    print(HELP, file=sys.stderr)
    sys.exit(0)
def print_version_and_exit(switch, it):
    """Switch action for -V/--version: print the version and exit 0."""
    print(debugpy.__version__)
    sys.exit(0)
def set_arg(varname, parser=(lambda x: x)):
    """Return a switch action that parses the next argv token with *parser*
    and stores the result on the module-level ``options`` as *varname*."""

    def action(arg, it):
        setattr(options, varname, parser(next(it)))

    return action
def set_const(varname, value):
    """Return a switch action that stores the fixed *value* on ``options``
    as *varname* (used for boolean flags)."""

    def action(arg, it):
        setattr(options, varname, value)

    return action
def set_address(mode):
    """Return a switch action for --listen/--connect that parses the next
    argv token as ``[<host>:]<port>`` and records it on ``options``."""

    def action(arg, it):
        if options.address is not None:
            raise ValueError("--listen and --connect are mutually exclusive")
        # It's either host:port, or just port.
        value = next(it)
        host, sep, port_text = value.partition(":")
        if not sep:
            # Bare port: default the host to localhost.
            host, port_text = "127.0.0.1", value
        try:
            port = int(port_text)
        except Exception:
            port = -1  # forces the range check below to fail
        if not (0 <= port < 2 ** 16):
            raise ValueError("invalid port number")
        options.mode = mode
        options.address = (host, port)

    return action
def set_config(arg, it):
    """Switch action for --configure-<name> <value>: coerce <value> to the
    type of the existing default in options.config and store it."""
    prefix = "--configure-"
    assert arg.startswith(prefix)
    name = arg[len(prefix):]
    raw = next(it)
    if name not in options.config:
        raise ValueError(fmt("unknown property {0!r}", name))
    expected_type = type(options.config[name])
    try:
        if expected_type is bool:
            # Accept only the literal strings "true"/"false" (any case).
            parsed = {"true": True, "false": False}[raw.lower()]
        else:
            parsed = expected_type(raw)
    except Exception:
        raise ValueError(fmt("{0!r} must be a {1}", name, expected_type.__name__))
    options.config[name] = parsed
def set_target(kind, parser=(lambda x: x), positional=False):
    """Return a switch action that records the debug target and its kind.

    For positional targets the switch token itself is the target; otherwise
    the next argv token is consumed and parsed.
    """

    def action(arg, it):
        options.target_kind = kind
        target = parser(arg if positional else next(it))
        if isinstance(target, bytes):
            # target may be the code, so, try some additional encodings...
            try:
                target = target.decode(sys.getfilesystemencoding())
            except UnicodeDecodeError:
                try:
                    target = target.decode("utf-8")
                except UnicodeDecodeError:
                    import locale

                    # Last resort: the locale's preferred encoding.
                    target = target.decode(locale.getpreferredencoding(False))
        options.target = target

    return action
# fmt: off
# Table driving parse_argv(): (regex pattern, value placeholder for error
# messages, action callable). Patterns are tried in order; first match wins,
# and the "" pattern catches positional (non-switch) arguments.
switches = [
    # Switch                    Placeholder         Action
    # ======                    ===========         ======

    # Switches that are documented for use by end users.
    ("-(\\?|h|-help)",          None,               print_help_and_exit),
    ("-(V|-version)",           None,               print_version_and_exit),
    ("--log-to" ,               "<path>",           set_arg("log_to")),
    ("--log-to-stderr",         None,               set_const("log_to_stderr", True)),
    ("--listen",                "<address>",        set_address("listen")),
    ("--connect",               "<address>",        set_address("connect")),
    ("--wait-for-client",       None,               set_const("wait_for_client", True)),
    ("--configure-.+",          "<value>",          set_config),

    # Switches that are used internally by the client or debugpy itself.
    ("--adapter-access-token",  "<token>",          set_arg("adapter_access_token")),

    # Targets. The "" entry corresponds to positional command line arguments,
    # i.e. the ones not preceded by any switch name.
    ("",                        "<filename>",       set_target("file", positional=True)),
    ("-m",                      "<module>",         set_target("module")),
    ("-c",                      "<code>",           set_target("code")),
    ("--pid",                   "<pid>",            set_target("pid", pid)),
]
# fmt: on
def consume_argv():
    """Yield sys.argv tokens one at a time, removing each from sys.argv as
    it is consumed, so the debuggee eventually sees only its own args."""
    while len(sys.argv) > 1:
        yield sys.argv.pop(1)
def parse_argv():
    """Consume debugpy's own switches from sys.argv (leaving the target's
    arguments in place) and populate the module-level ``options``.

    Raises ValueError on unknown, duplicate, or malformed switches, and when
    a required switch (target, --listen/--connect) is missing.
    """
    seen = set()
    it = consume_argv()
    while True:
        try:
            arg = next(it)
        except StopIteration:
            raise ValueError("missing target: " + TARGET)
        switch = compat.filename(arg)
        if not switch.startswith("-"):
            # Positional argument: matched by the "" entry in switches.
            switch = ""
        for pattern, placeholder, action in switches:
            if re.match("^(" + pattern + ")$", switch):
                break
        else:
            raise ValueError("unrecognized switch " + switch)
        if switch in seen:
            raise ValueError("duplicate switch " + switch)
        else:
            seen.add(switch)
        try:
            action(arg, it)
        except StopIteration:
            # The action consumed past the end of argv: its value is missing.
            assert placeholder is not None
            raise ValueError(fmt("{0}: missing {1}", switch, placeholder))
        except Exception as exc:
            raise ValueError(fmt("invalid {0} {1}: {2}", switch, placeholder, exc))
        if options.target is not None:
            # Everything still left in sys.argv belongs to the debuggee.
            break
    if options.mode is None:
        raise ValueError("either --listen or --connect is required")
    if options.adapter_access_token is not None and options.mode != "connect":
        raise ValueError("--adapter-access-token requires --connect")
    if options.target_kind == "pid" and options.wait_for_client:
        raise ValueError("--pid does not support --wait-for-client")
    assert options.target is not None
    assert options.target_kind is not None
    assert options.address is not None
def start_debugging(argv_0):
    """Configure debugpy and start listening or connecting per ``options``.

    argv_0 is what the debuggee should observe as sys.argv[0].
    """
    # We need to set up sys.argv[0] before invoking either listen() or connect(),
    # because they use it to report the "process" event. Thus, we can't rely on
    # run_path() and run_module() doing that, even though they will eventually.
    sys.argv[0] = compat.filename_str(argv_0)
    log.debug("sys.argv after patching: {0!r}", sys.argv)
    debugpy.configure(options.config)
    if options.mode == "listen":
        debugpy.listen(options.address)
    elif options.mode == "connect":
        debugpy.connect(options.address, access_token=options.adapter_access_token)
    else:
        # parse_argv() guarantees mode is set; anything else is a bug.
        raise AssertionError(repr(options.mode))
    if options.wait_for_client:
        debugpy.wait_for_client()
def run_file():
    """Debug a script file (or a directory/zip containing a __main__) via
    runpy.run_path(), after attaching the debugger.

    Fix: renamed the local `dir` (which shadowed the builtin) to `script_dir`.
    """
    target = options.target
    start_debugging(target)
    target_as_str = compat.filename_str(target)
    # run_path has one difference with invoking Python from command-line:
    # if the target is a file (rather than a directory), it does not add its
    # parent directory to sys.path. Thus, importing other modules from the
    # same directory is broken unless sys.path is patched here.
    if os.path.isfile(target_as_str):
        script_dir = os.path.dirname(target_as_str)
        sys.path.insert(0, script_dir)
    else:
        log.debug("Not a file: {0!r}", target)
    log.describe_environment("Pre-launch environment:")
    log.info("Running file {0!r}", target)
    runpy.run_path(target_as_str, run_name=compat.force_str("__main__"))
def run_module():
    """Debug a module, mimicking ``python -m <module>``."""
    # Add current directory to path, like Python itself does for -m. This must
    # be in place before trying to use find_spec below to resolve submodules.
    sys.path.insert(0, str(""))
    # We want to do the same thing that run_module() would do here, without
    # actually invoking it. On Python 3, it's exposed as a public API, but
    # on Python 2, we have to invoke a private function in runpy for this.
    # Either way, if it fails to resolve for any reason, just leave argv as is.
    argv_0 = sys.argv[0]
    target_as_str = compat.filename_str(options.target)
    try:
        if sys.version_info >= (3,):
            from importlib.util import find_spec
            spec = find_spec(target_as_str)
            if spec is not None:
                argv_0 = spec.origin
        else:
            # Python 2: runpy's private resolver returns the module path last.
            _, _, _, argv_0 = runpy._get_module_details(target_as_str)
    except Exception:
        log.swallow_exception("Error determining module path for sys.argv")
    start_debugging(argv_0)
    # On Python 2, module name must be a non-Unicode string, because it ends up
    # a part of module's __package__, and Python will refuse to run the module
    # if __package__ is Unicode.
    log.describe_environment("Pre-launch environment:")
    log.info("Running module {0!r}", options.target)
    # Docs say that runpy.run_module is equivalent to -m, but it's not actually
    # the case for packages - -m sets __name__ to "__main__", but run_module sets
    # it to "pkg.__main__". This breaks everything that uses the standard pattern
    # __name__ == "__main__" to detect being run as a CLI app. On the other hand,
    # runpy._run_module_as_main is a private function that actually implements -m.
    try:
        run_module_as_main = runpy._run_module_as_main
    except AttributeError:
        log.warning("runpy._run_module_as_main is missing, falling back to run_module.")
        runpy.run_module(target_as_str, alter_sys=True)
    else:
        run_module_as_main(target_as_str, alter_argv=True)
def run_code():
    """Debug a code string supplied via -c."""
    # Add current directory to path, like Python itself does for -c.
    sys.path.insert(0, str(""))
    compiled = compile(options.target, str("<string>"), str("exec"))
    start_debugging(str("-c"))
    log.describe_environment("Pre-launch environment:")
    log.info("Running code:\n\n{0}", options.target)
    # Execute in a fresh, empty globals namespace.
    eval(compiled, {})
def attach_to_pid():
    """Inject the debugpy server into an already-running process by PID,
    using pydevd's code-injection machinery."""
    pid = options.target
    log.info("Attaching to process with PID={0}", pid)
    # The injected snippet cannot safely contain quotes (see the assertion
    # below), so strings are transported as lists of UTF-8 byte values and
    # decoded on the other side.
    encode = lambda s: list(bytearray(s.encode("utf-8"))) if s is not None else None
    script_dir = os.path.dirname(debugpy.server.__file__)
    assert os.path.exists(script_dir)
    script_dir = encode(script_dir)
    # Options the injected attach_pid_injected.attach() needs to replicate.
    setup = {
        "mode": options.mode,
        "address": options.address,
        "wait_for_client": options.wait_for_client,
        "log_to": options.log_to,
        "adapter_access_token": options.adapter_access_token,
    }
    setup = encode(json.dumps(setup))
    python_code = """
import codecs;
import json;
import sys;
decode = lambda s: codecs.utf_8_decode(bytearray(s))[0] if s is not None else None;
script_dir = decode({script_dir});
setup = json.loads(decode({setup}));
sys.path.insert(0, script_dir);
import attach_pid_injected;
del sys.path[0];
attach_pid_injected.attach(setup);
"""
    # Collapse to a single semicolon-separated line before injection.
    python_code = (
        python_code.replace("\r", "")
        .replace("\n", "")
        .format(script_dir=script_dir, setup=setup)
    )
    log.info("Code to be injected: \n{0}", python_code.replace(";", ";\n"))
    # pydevd restriction on characters in injected code.
    assert not (
        {'"', "'", "\r", "\n"} & set(python_code)
    ), "Injected code should not contain any single quotes, double quotes, or newlines."
    pydevd_attach_to_process_path = os.path.join(
        os.path.dirname(pydevd.__file__), "pydevd_attach_to_process"
    )
    assert os.path.exists(pydevd_attach_to_process_path)
    sys.path.append(pydevd_attach_to_process_path)
    try:
        import add_code_to_python_process  # noqa
        log.info("Injecting code into process with PID={0} ...", pid)
        add_code_to_python_process.run_python_code(
            pid,
            python_code,
            connect_debugger_tracing=True,
            show_debug_info=int(os.getenv("DEBUGPY_ATTACH_BY_PID_DEBUG_INFO", "0")),
        )
    except Exception:
        log.reraise_exception("Code injection into PID={0} failed:", pid)
    log.info("Code injection into PID={0} completed.", pid)
def main():
    """CLI entry point: parse argv, configure logging, and run the target."""
    original_argv = list(sys.argv)
    try:
        parse_argv()
    except Exception as exc:
        # Usage errors: print help plus the error and exit with status 2.
        print(str(HELP) + str("\nError: ") + str(exc), file=sys.stderr)
        sys.exit(2)
    if options.log_to is not None:
        debugpy.log_to(options.log_to)
    if options.log_to_stderr:
        debugpy.log_to(sys.stderr)
    api.ensure_logging()
    log.info(
        str("sys.argv before parsing: {0!r}\n" "           after parsing:  {1!r}"),
        original_argv,
        sys.argv,
    )
    try:
        # Dispatch on the target kind established by parse_argv().
        run = {
            "file": run_file,
            "module": run_module,
            "code": run_code,
            "pid": attach_to_pid,
        }[options.target_kind]
        run()
    except SystemExit as exc:
        # Log the debuggee's exit code before letting SystemExit propagate.
        log.reraise_exception(
            "Debuggee exited via SystemExit: {0!r}", exc.code, level="debug"
        )
| |
import pdb
import time
import os
import sys
import pygame
import numpy as np
from pygame.constants import K_w, K_a, K_d, K_s
import copy
class RayCastPlayer():
"""
Loosely based on code from Lode's `Computer Graphics Tutorial`_.
.. _Computer Graphics Tutorial: http://lodev.org/cgtutor/raycasting.html
Takes input from key presses and traverses a map
"""
    def __init__(self, map_, init_pos, init_dir,
                 width, height, resolution, move_speed,
                 turn_speed, plane, actions, block_types):
        """Set up the raycasting camera/player state.

        map_: grid of block-type indices, indexed as map_[x, y] (see
            _handle_player_events) -- assumed 2-D; confirm with callers.
        init_pos / init_dir: starting (x, y) position and facing direction.
        width / height / resolution: render-surface parameters.
        move_speed / turn_speed: movement and rotation rates per second.
        plane: camera plane vector (controls the field of view).
        actions: dict mapping "forward"/"backward"/"left"/"right" to
            pygame key constants.
        block_types: per-block-type attributes; must include "pass_through".
        """
        self.actions = actions
        self.map_ = map_
        self.width = width
        self.height = height
        # Positions and direction vectors are stored as (1, 2) float32 arrays.
        self.pos = np.array([init_pos], dtype=np.float32)
        self.dir = np.array([init_dir], dtype=np.float32)
        self.plane = np.array([plane], dtype=np.float32)
        self.resolution = resolution
        self.move_speed = move_speed
        self.turn_speed = turn_speed
        # Small epsilon, presumably guarding divisions in the raycast math
        # (used outside this method) -- confirm in draw().
        self.eps = 1e-7
        self.block_types = block_types
def _handle_player_events(self, dt):
dt = dt / 1000.0
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
key = event.key
new_location = self.pos
if key == self.actions["forward"]:
new_location = self.pos + self.dir * self.move_speed * dt
if key == self.actions["backward"]:
new_location = self.pos - self.dir * self.move_speed * dt
new_location = new_location.astype(int)
newX, newY = new_location[0, :]
if newX < self.map_.shape[0] and newY < self.map_.shape[1]:
new_map = self.map_[newX, newY]
if self.block_types[new_map]["pass_through"]:
if key == self.actions["forward"]:
self.pos[0, 0] += self.dir[0, 0] * \
self.move_speed * dt
self.pos[0, 1] += self.dir[0, 1] * \
self.move_speed * dt
if key == self.actions["backward"]:
self.pos[0, 0] -= self.dir[0, 0] * \
self.move_speed * dt
self.pos[0, 1] -= self.dir[0, 1] * \
self.move_speed * dt
if key == self.actions["right"]:
X_TURN = np.cos(self.turn_speed * dt)
Y_TURN = np.sin(self.turn_speed * dt)
_dirX = self.dir[0, 0] * X_TURN - self.dir[0, 1] * Y_TURN
_dirY = self.dir[0, 0] * Y_TURN + self.dir[0, 1] * X_TURN
_planeX = self.plane[0, 0] * \
X_TURN - self.plane[0, 1] * Y_TURN
_planeY = self.plane[0, 0] * \
Y_TURN + self.plane[0, 1] * X_TURN
self.dir[0, 0] = _dirX
self.dir[0, 1] = _dirY
self.plane[0, 0] = _planeX
self.plane[0, 1] = _planeY
if key == self.actions["left"]:
X_INV_TURN = np.cos(-self.turn_speed * dt)
Y_INV_TURN = np.sin(-self.turn_speed * dt)
_dirX = self.dir[0, 0] * X_INV_TURN - \
self.dir[0, 1] * Y_INV_TURN
_dirY = self.dir[0, 0] * Y_INV_TURN + \
self.dir[0, 1] * X_INV_TURN
_planeX = self.plane[0, 0] * X_INV_TURN - \
self.plane[0, 1] * Y_INV_TURN
_planeY = self.plane[0, 0] * Y_INV_TURN + \
self.plane[0, 1] * X_INV_TURN
self.dir[0, 0] = _dirX
self.dir[0, 1] = _dirY
self.plane[0, 0] = _planeX
self.plane[0, 1] = _planeY
def draw(self):
#N = width/resolution
# N,2
cameraX = np.arange(
0.0,
self.width,
self.resolution).astype(
np.float32)[
:,
np.newaxis]
cameraX = 2.0 * cameraX / float(self.width) - 1.0
# set the rayPos to the players current position
ray_pos = np.tile(self.pos, [cameraX.shape[0], 1]) # N,2
# ray direction
ray_dir = self.dir + self.plane * cameraX # N,2
# which box of the map we're in
map_ = ray_pos.astype(int)
ray_pow = np.power(ray_dir, 2.0) + self.eps
ray_div = ray_pow[:, 0] / (ray_pow[:, 1])
delta_dist = np.sqrt(
1.0 + np.array([1.0 / (ray_div), ray_div])).T # N,2
# N,2
step = np.ones(ray_dir.shape).astype(int)
step[ray_dir[:, 0] < 0, 0] = -1
step[ray_dir[:, 1] < 0, 1] = -1
# N,2
side_dist = (map_ + 1.0 - ray_pos) * delta_dist
_value = (ray_pos - map_) * delta_dist
side_dist[ray_dir[:, 0] < 0, 0] = _value[ray_dir[:, 0] < 0, 0]
side_dist[ray_dir[:, 1] < 0, 1] = _value[ray_dir[:, 1] < 0, 1]
side_dist, delta_dist, map_, side = self._DDA(
side_dist, delta_dist, map_, step)
perpWallDistX = (map_[:, 0] - ray_pos[:, 0] + (1.0 - step[:, 0]) / 2.0)
perpWallDistX = perpWallDistX / (ray_dir[:, 0] + self.eps)
perpWallDistX = perpWallDistX[:, np.newaxis]
perpWallDistY = (map_[:, 1] - ray_pos[:, 1] + (1.0 - step[:, 1]) / 2.0)
perpWallDistY = perpWallDistY / (ray_dir[:, 1] + self.eps)
perpWallDistY = perpWallDistY[:, np.newaxis]
perpWallDist = perpWallDistY
perpWallDist[side == 0] = perpWallDistX[side == 0]
lineHeights = (self.height / (perpWallDist + self.eps)).astype(int)
tops = -(lineHeights) / 2.0 + self.height / 2.0
tops[tops < 0] = 0.0
tops = tops.astype(int)
bottoms = lineHeights / 2.0 + self.height / 2.0
bottoms[bottoms >= self.height] = self.height - 1
bottoms = bottoms.astype(int)
visible_blocks = self.map_[map_[:, 0], map_[:, 1]]
coloring = np.ones((bottoms.shape[0], 3)) * 255.0
for k in self.block_types.keys():
if self.block_types[k] is not None:
c = self.block_types[k]["color"]
sel = visible_blocks == k
coloring[sel] = np.tile(c, [bottoms.shape[0], 1])[sel]
shading = np.abs(perpWallDist * 15) * 1.5
coloring = coloring - shading
coloring = np.clip(coloring, 0, 255)
coloring[(side == 1.0).flatten(), :] *= 0.65 # lighting apparently
cameraX = np.arange(0, self.width, self.resolution)
returns = [cameraX, tops, bottoms, coloring]
return [r.astype(int) for r in returns]
def _DDA(self, side_dist, delta_dist, map_, step):
# tested against for-loop version using line_profiler
# for-loop take about 0.005968s per call
# this version takes 0.000416s per call
hits = np.zeros((map_.shape[0], 1))
side = np.zeros((map_.shape[0], 1))
while np.sum(hits) < side_dist.shape[0]:
# only update values that havent hit a wall. So are 0 still.
update_mask = np.logical_not(hits).astype(np.bool)
# 1 => 1, 0
# 0 => 0, 1
mask = (side_dist[:, 0] < side_dist[:, 1])[:, np.newaxis]
sel = (update_mask & (mask == True)).flatten()
side_dist[sel, 0] += delta_dist[sel, 0]
map_[sel, 0] += step[sel, 0]
side[sel] = np.zeros(side.shape)[sel]
sel = (update_mask & (mask == False)).flatten()
side_dist[sel, 1] += delta_dist[sel, 1]
map_[sel, 1] += step[sel, 1]
side[sel] = np.ones(side.shape)[sel]
# once it becomes 1 it never goes back to 0.
hits = np.logical_or(
hits, (self.map_[
map_[
:, 0], map_[
:, 1]] > 0)[
:, np.newaxis])
return side_dist, delta_dist, map_, side
def make_map(dim):
    """Return a dim x dim float grid whose border cells are walls (1.0) and
    whose interior cells are empty (0.0)."""
    grid = np.zeros((dim, dim))
    for edge in (0, -1):
        grid[edge, :] = 1.0
        grid[:, edge] = 1.0
    return grid
def make_box(grid, p0, p1, fill=0, isFilled=True):
    """Stamp a box of value *fill* onto *grid*, in place, and return the grid.

    When isFilled, the half-open region [x0:x1, y0:y1] is filled; otherwise
    only the outline is drawn (rows span columns y0..y1 inclusive)."""
    (x0, y0), (x1, y1) = p0, p1
    if isFilled:
        grid[x0:x1, y0:y1] = fill
    else:
        for row in (x0, x1):
            grid[row, y0:y1 + 1] = fill
        for col in (y0, y1):
            grid[x0:x1, col] = fill
    return grid
if __name__ == "__main__":
    # Demo driver: build a walled 15x15 map with rooms and corridors, then
    # run an interactive raycast viewer capped at 60 FPS.
    map_grid = make_map(15)
    # Block id -> collision/rendering properties; id 0 is walkable floor.
    block_types = {
        0: {
            "pass_through": True,
            "color": None
        },
        1: {
            "pass_through": False,
            "color": (255, 255, 255)
        },
        2: {
            "pass_through": False,
            "color": (220, 100, 100)
        },
        3: {
            "pass_through": False,
            "color": (100, 220, 100)
        },
        4: {
            "pass_through": False,
            "color": (100, 100, 220)
        }
    }
    # Carve rooms; boxes with fill=0 punch walkable corridors through walls.
    map_grid = make_box(map_grid, (5, 5), (9, 9), fill=2, isFilled=False)
    map_grid = make_box(map_grid, (8, 8), (14, 14), fill=3, isFilled=True)
    map_grid = make_box(map_grid, (1, 2), (3, 9), fill=4, isFilled=False)
    map_grid = make_box(map_grid, (11, 6), (12, 11), fill=0, isFilled=True)
    map_grid = make_box(map_grid, (6, 11), (12, 12), fill=0, isFilled=True)
    map_grid = make_box(map_grid, (2, 6), (7, 7), fill=0, isFilled=True)
    # Recolor every wall cell with a random block id in [2, 5).
    map_grid[map_grid > 0] = np.random.randint(
        2, high=5, size=map_grid[map_grid > 0].shape)
    init_dir = (1.0, 0.0)  # facing +x
    init_pos = (1, 1)      # interior cell next to the top-left border
    width = 128
    height = 128
    resolution = 1         # pixel columns per ray
    move_speed = 15
    turn_speed = 10.5
    plane = (0.0, 0.66)    # camera plane; |plane| controls the field of view
    actions = {
        "forward": K_w,
        "left": K_a,
        "right": K_d,
        "backward": K_s
    }
    rc = RayCastPlayer(
        map_grid,
        init_pos,
        init_dir,
        width,
        height,
        resolution,
        move_speed,
        turn_speed,
        plane,
        actions,
        block_types
    )
    pygame.init()
    screen = pygame.display.set_mode((width, height), 0, 24)
    clock = pygame.time.Clock()
    # Main loop: dt is the frame time in milliseconds and scales movement.
    while True:
        dt = clock.tick(60)
        screen.fill((0, 0, 0))
        # Grey "floor" rectangle covering the lower half of the window.
        pygame.draw.rect(screen, (92, 92, 92), (0, height / 2, width, height))
        rc._handle_player_events(dt)
        c, t, b, col = rc.draw()
        # One vertical wall slice per ray column.
        for i in range(len(c)):
            pygame.draw.line(screen, (col[i][0], col[i][1], col[i][2]), (c[
                i], t[i]), (c[i], b[i]), rc.resolution)
        pygame.display.update()
| |
import json
import logging
import os
import sys
import traceback
from .Plugin import JigsawPlugin
# Detect Python >= 3.5. Bug fix: the second clause previously compared
# characters of the human-readable string `sys.version` against ints
# (`sys.version[0] == 3` is always False), so every Python 3 interpreter
# incorrectly fell back to the deprecated `imp` loader. Use the
# `sys.version_info` tuple for both components.
if sys.version_info[0] > 3 or (sys.version_info[0] == 3 and sys.version_info[1] >= 5):
    PY3 = True
    import importlib.util
else:
    PY3 = False
    import imp
class PluginLoader(object):
    """
    The main plugin loader class
    """
    def __init__(self, plugin_paths=(), log_level=logging.INFO, plugin_class=JigsawPlugin):
        """
        Initializes the plugin loader
        :param plugin_paths: Paths to load plugins from
        :param log_level: Log level
        :param plugin_class: Parent class of all plugins
        """
        logging.basicConfig(format="{%(asctime)s} (%(name)s) [%(levelname)s]: %(message)s",
                            datefmt="%x, %X",
                            level=log_level)
        self._logger = logging.getLogger("Jigsaw")
        if len(plugin_paths) == 0:
            # Default to ./plugins under the current working directory.
            self.plugin_paths = (os.path.join(os.getcwd(), "plugins"),)
            self._logger.debug("No plugin path specified, using {}.".format(self.plugin_paths))
        else:
            self.plugin_paths = plugin_paths
            self._logger.debug("Using specified plugin paths of {}.".format(", ".join(self.plugin_paths)))
        self._plugin_class = plugin_class
        self._manifests = []   # list of manifest dicts (with injected "path")
        self._plugins = {}     # plugin name -> plugin instance
        self._modules = {}     # plugin name -> imported module

    def load_manifests(self):
        """
        Loads all plugin manifests on the plugin path
        """
        for path in self.plugin_paths:
            for item in os.listdir(path):
                item_path = os.path.join(path, item)
                if os.path.isdir(item_path):
                    self.load_manifest(item_path)

    def load_manifest(self, path):
        """
        Loads a plugin manifest from a given path
        :param path: The folder to load the plugin manifest from
        """
        manifest_path = os.path.join(path, "plugin.json")
        self._logger.debug("Attempting to load plugin manifest from {}.".format(manifest_path))
        try:
            with open(manifest_path) as f:
                manifest = json.load(f)
            # Remember where the manifest came from so the plugin module can
            # be imported from the same folder later.
            manifest["path"] = path
            self._manifests.append(manifest)
            self._logger.debug("Loaded plugin manifest from {}.".format(manifest_path))
        except ValueError:
            self._logger.error("Failed to decode plugin manifest at {}.".format(manifest_path))
        except (OSError, IOError):
            self._logger.error("Failed to load plugin manifest at {}.".format(manifest_path))

    def get_manifest(self, plugin_name):
        """
        Gets the manifest for a specified plugin
        :param plugin_name: The name of the plugin
        :return: The manifest for the specified plugin, or None if unknown
        """
        for manifest in self._manifests:
            if manifest["name"] == plugin_name:
                return manifest

    def get_plugin_loaded(self, plugin_name):
        """
        Returns if a given plugin is loaded
        :param plugin_name: The plugin to check to loaded status for
        :return: Whether the specified plugin is loaded
        """
        return plugin_name in self._plugins

    def load_plugin(self, manifest, *args):
        """
        Loads a plugin from the given manifest
        :param manifest: The manifest to use to load the plugin
        :param args: Arguments to pass to the plugin
        """
        if self.get_plugin_loaded(manifest["name"]):
            self._logger.debug("Plugin {} is already loaded.".format(manifest["name"]))
            return
        try:
            self._logger.debug("Attempting to load plugin {}.".format(manifest["name"]))
            # Recursively load declared dependencies first.
            for dependency in manifest.get("dependencies", []):
                if not self.get_plugin_loaded(dependency):
                    self._logger.debug("Must load dependency {} first.".format(dependency))
                    if self.get_manifest(dependency) is None:
                        self._logger.error("Dependency {} could not be found.".format(dependency))
                    else:
                        self.load_plugin(self.get_manifest(dependency), *args)
            not_loaded = [i for i in manifest.get("dependencies", []) if not self.get_plugin_loaded(i)]
            if len(not_loaded) != 0:
                self._logger.error("Plugin {} failed to load due to missing dependencies. Dependencies: {}".format(
                    manifest["name"], ", ".join(not_loaded)
                ))
                return
            if PY3:
                # importlib-based import (Python >= 3.5).
                spec = importlib.util.spec_from_file_location(
                    manifest.get("module_name", manifest["name"].replace(" ", "_")),
                    os.path.join(manifest["path"], manifest.get("main_path", "__init__.py"))
                )
                module = importlib.util.module_from_spec(spec)
                spec.loader.exec_module(module)
            else:
                # Legacy imp-based import (Python 2).
                module = imp.load_source(
                    manifest.get("module_name", manifest["name"].replace(" ", "_")),
                    os.path.join(manifest["path"], manifest.get("main_path", "__init__.py"))
                )
            module_class = manifest.get("main_class", "Plugin")
            plugin_class = getattr(module, module_class)
            if issubclass(plugin_class, self._plugin_class):
                plugin = plugin_class(manifest, *args)
            else:
                self._logger.error("Failed to load {} due to invalid baseclass.".format(manifest["name"]))
                return
            self._plugins[manifest["name"]] = plugin
            self._modules[manifest["name"]] = module
            self._logger.debug("Plugin {} loaded.".format(manifest["name"]))
        except Exception:
            # Bug fix: this was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt raised while executing plugin code.
            exc_path = os.path.join(manifest["path"], "error.log")
            with open(exc_path, "w") as f:
                f.write(traceback.format_exc(5))
            self._logger.error("Failed to load plugin {}. Error log written to {}.".format(manifest["name"], exc_path))

    def load_plugins(self, *args):
        """
        Loads all plugins
        :param args: Arguments to pass to the plugins
        """
        for manifest in self._manifests:
            self.load_plugin(manifest, *args)

    def get_plugin(self, name):
        """
        Gets a loaded plugin
        :param name: Name of the plugin
        :return: The plugin, or None if not loaded
        """
        try:
            return self._plugins[name]
        except KeyError:
            return None

    def get_module(self, name):
        """
        Gets the module for a plugin
        :param name: Name of the plugin
        :return: The module, or None if not loaded
        """
        try:
            return self._modules[name]
        except KeyError:
            return None

    def get_all_plugins(self):
        """
        Gets all loaded plugins
        :return: List of all plugins
        """
        return [{
            "manifest": i,
            "plugin": self.get_plugin(i["name"]),
            "module": self.get_module(i["name"])
        } for i in self._manifests]

    def disable_all_plugins(self):
        """
        Calls the disable method on all initialized plugins
        """
        for plugin in self._plugins:
            self._plugins[plugin].disable()

    def enable_all_plugins(self):
        """
        Calls the enable method on all initialized plugins
        """
        for plugin in self._plugins:
            self._plugins[plugin].enable()

    def reload_manifest(self, manifest):
        """
        Reloads a manifest from the disk
        :param manifest: The manifest to reload
        """
        self._logger.debug("Reloading manifest for {}.".format(manifest.get("name", "Unnamed Plugin")))
        self._manifests.remove(manifest)
        self.load_manifest(manifest["path"])
        self._logger.debug("Manifest reloaded.")

    def reload_all_manifests(self):
        """
        Reloads all loaded manifests, and loads any new manifests
        """
        self._logger.debug("Reloading all manifests.")
        self._manifests = []
        self.load_manifests()
        self._logger.debug("All manifests reloaded.")

    def reload_plugin(self, name, *args):
        """
        Reloads a given plugin
        :param name: The name of the plugin
        :param args: The args to pass to the plugin
        """
        self._logger.debug("Reloading {}.".format(name))
        self._logger.debug("Disabling {}.".format(name))
        self.get_plugin(name).disable()
        self._logger.debug("Removing plugin instance.")
        del self._plugins[name]
        self._logger.debug("Unloading module.")
        del self._modules[name]
        self._logger.debug("Reloading manifest.")
        old_manifest = self.get_manifest(name)
        self._manifests.remove(old_manifest)
        self.load_manifest(old_manifest["path"])
        self._logger.debug("Loading {}.".format(name))
        self.load_plugin(self.get_manifest(name), *args)
        self._logger.debug("Enabling {}.".format(name))
        self.get_plugin(name).enable()
        self._logger.debug("Plugin {} reloaded.".format(name))

    def reload_all_plugins(self, *args):
        """
        Reloads all initialized plugins
        """
        # Iterate over a copy: reload_plugin mutates self._manifests.
        for manifest in self._manifests[:]:
            if self.get_plugin(manifest["name"]) is not None:
                self.reload_plugin(manifest["name"], *args)

    def unload_plugin(self, name):
        """
        Unloads a specified plugin
        :param name: The name of the plugin
        """
        self._logger.debug("Unloading {}.".format(name))
        self._logger.debug("Removing plugin instance.")
        del self._plugins[name]
        self._logger.debug("Unloading module.")
        del self._modules[name]
        self._logger.debug("Unloading manifest...")
        manifest = self.get_manifest(name)
        self._manifests.remove(manifest)
        self._logger.debug("{} unloaded.".format(name))

    def quickload(self, *args):
        """
        Loads all manifests, loads all plugins, and then enables all plugins
        :param args: The args to pass to the plugin
        """
        self.load_manifests()
        # Bug fix: forward the arguments; previously the args tuple itself was
        # passed as a single positional argument to every plugin constructor.
        self.load_plugins(*args)
        self.enable_all_plugins()
| |
# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Key manager implementation for Barbican
"""
import array
import base64
import binascii
import re
from barbicanclient import client as barbican_client
from keystoneclient.auth import identity
from keystoneclient import session
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from jacket.storage import exception
from jacket.storage.i18n import _, _LE
from jacket.storage.keymgr import key as keymgr_key
from jacket.storage.keymgr import key_mgr
CONF = cfg.CONF
# These options are declared in jacket.storage.keymgr.key_mgr; import them so
# they are readable from the storage_keymgr group in this module.
CONF.import_opt('encryption_auth_url', 'jacket.storage.keymgr.key_mgr', group='storage_keymgr')
CONF.import_opt('encryption_api_url', 'jacket.storage.keymgr.key_mgr', group='storage_keymgr')
LOG = logging.getLogger(__name__)
# Splits an endpoint URL into its base and trailing version component,
# e.g. "http://host:9311/v1" -> url_base="http://host:9311", url_version="v1".
URL_PATTERN = re.compile(
    "(?P<url_base>http[s]?://[^/]*)[/]?(?P<url_version>(v[0-9.]+)?).*")
class BarbicanKeyManager(key_mgr.KeyManager):
    """Key Manager Interface that wraps the Barbican client API."""

    def __init__(self):
        self._base_url = CONF.storage_keymgr.encryption_api_url
        self._parse_barbican_api_url()
        # Client and its context are cached so repeated calls with the same
        # context reuse one authenticated session.
        self._barbican_client = None
        self._current_context = None

    def _parse_barbican_api_url(self):
        """Setup member variables to reference the Barbican URL.

        The key manipulation functions in this module need to use the
        barbican URL with the version appended. But the barbicanclient
        Client() class needs the URL without the version appended.
        So set up a member variables here for each case.

        :raises KeyManagerError: if the configured URL has no version part
        """
        m = URL_PATTERN.search(self._base_url)
        if m is None:
            raise exception.KeyManagerError(_(
                "Invalid url: must be in the form "
                "'http[s]://<ipaddr>|<fqdn>[:port]/<version>', "
                "url specified is: %s"), self._base_url)
        url_info = dict(m.groupdict())
        if 'url_version' not in url_info or url_info['url_version'] == "":
            raise exception.KeyManagerError(_(
                "Invalid barbican api url: version is required, "
                "e.g. 'http[s]://<ipaddr>|<fqdn>[:port]/<version>' "
                "url specified is: %s") % self._base_url)
        # We will also need the barbican API URL without the '/v1'.
        # So save that now.
        self._barbican_endpoint = url_info['url_base']

    def _get_barbican_client(self, ctxt):
        """Creates a client to connect to the Barbican service.

        :param ctxt: the user context for authentication
        :return: a Barbican Client object
        :throws NotAuthorized: if the ctxt is None
        :throws KeyManagerError: if ctxt is missing project_id
                                 or project_id is None
        """
        # Confirm context is provided, if not raise not authorized
        if not ctxt:
            msg = _("User is not authorized to use key manager.")
            LOG.error(msg)
            raise exception.NotAuthorized(msg)
        if not hasattr(ctxt, 'project_id') or ctxt.project_id is None:
            msg = _("Unable to create Barbican Client without project_id.")
            LOG.error(msg)
            raise exception.KeyManagerError(msg)
        # If same context, return cached barbican client
        if self._barbican_client and self._current_context == ctxt:
            return self._barbican_client
        try:
            auth = identity.v3.Token(
                auth_url=CONF.storage_keymgr.encryption_auth_url,
                token=ctxt.auth_token,
                project_id=ctxt.project_id)
            sess = session.Session(auth=auth)
            self._barbican_client = barbican_client.Client(
                session=sess,
                endpoint=self._barbican_endpoint)
            self._current_context = ctxt
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Error creating Barbican client."))
        return self._barbican_client

    def create_key(self, ctxt, expiration=None, name='Cinder Volume Key',
                   payload_content_type='application/octet-stream', mode='CBC',
                   algorithm='AES', length=256):
        """Creates a key.

        :param ctxt: contains information of the user and the environment
                     for the request (storage/context.py)
        :param expiration: the date the key will expire
        :param name: a friendly name for the secret
        :param payload_content_type: the format/type of the secret data
        :param mode: the algorithm mode (e.g. CBC or CTR mode)
        :param algorithm: the algorithm associated with the secret
        :param length: the bit length of the secret
        :return: the UUID of the new key
        :throws Exception: if key creation fails
        """
        barbican_client = self._get_barbican_client(ctxt)
        try:
            key_order = barbican_client.orders.create_key(
                name,
                algorithm,
                length,
                mode,
                payload_content_type,
                expiration)
            order_ref = key_order.submit()
            order = barbican_client.orders.get(order_ref)
            # The secret UUID is the last path component of the secret ref.
            secret_uuid = order.secret_ref.rpartition('/')[2]
            return secret_uuid
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Error creating key."))

    def store_key(self, ctxt, key, expiration=None, name='Cinder Volume Key',
                  payload_content_type='application/octet-stream',
                  payload_content_encoding='base64', algorithm='AES',
                  bit_length=256, mode='CBC', from_copy=False):
        """Stores (i.e., registers) a key with the key manager.

        :param ctxt: contains information of the user and the environment for
                     the request (storage/context.py)
        :param key: the unencrypted secret data. Known as "payload" to the
                    barbicanclient api
        :param expiration: the expiration time of the secret in ISO 8601
                           format
        :param name: a friendly name for the key
        :param payload_content_type: the format/type of the secret data
        :param payload_content_encoding: the encoding of the secret data
        :param algorithm: the algorithm associated with this secret key
        :param bit_length: the bit length of this secret key
        :param mode: the algorithm mode used with this secret key
        :param from_copy: establishes whether the function is being used
                          to copy a key. In case of the latter, it does not
                          try to decode the key
        :returns: the UUID of the stored key
        :throws Exception: if key storage fails
        """
        barbican_client = self._get_barbican_client(ctxt)
        try:
            if key.get_algorithm():
                algorithm = key.get_algorithm()
            if payload_content_type == 'text/plain':
                payload_content_encoding = None
                encoded_key = key.get_encoded()
            elif (payload_content_type == 'application/octet-stream' and
                    not from_copy):
                # The key is a list of byte values; hex-encode then base64 it.
                key_list = key.get_encoded()
                string_key = ''.join(map(lambda byte: "%02x" % byte, key_list))
                encoded_key = base64.b64encode(binascii.unhexlify(string_key))
            else:
                encoded_key = key.get_encoded()
            secret = barbican_client.secrets.create(name,
                                                    encoded_key,
                                                    payload_content_type,
                                                    payload_content_encoding,
                                                    algorithm,
                                                    bit_length,
                                                    None,
                                                    mode,
                                                    expiration)
            secret_ref = secret.store()
            secret_uuid = secret_ref.rpartition('/')[2]
            return secret_uuid
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Error storing key."))

    def copy_key(self, ctxt, key_id):
        """Copies (i.e., clones) a key stored by barbican.

        :param ctxt: contains information of the user and the environment for
                     the request (storage/context.py)
        :param key_id: the UUID of the key to copy
        :return: the UUID of the key copy
        :throws Exception: if key copying fails
        """
        barbican_client = self._get_barbican_client(ctxt)
        try:
            secret_ref = self._create_secret_ref(key_id, barbican_client)
            secret = self._get_secret(ctxt, secret_ref)
            con_type = secret.content_types['default']
            secret_data = self._get_secret_data(secret,
                                                payload_content_type=con_type)
            key = keymgr_key.SymmetricKey(secret.algorithm, secret_data)
            # from_copy=True: the payload is already encoded, skip re-encoding.
            copy_uuid = self.store_key(ctxt, key, secret.expiration,
                                       secret.name, con_type,
                                       'base64',
                                       secret.algorithm, secret.bit_length,
                                       secret.mode, True)
            return copy_uuid
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Error copying key."))

    def _create_secret_ref(self, key_id, barbican_client):
        """Creates the URL required for accessing a secret.

        :param key_id: the UUID of the key to copy
        :param barbican_client: barbican key manager object (unused here, kept
                                for interface compatibility with callers)
        :return: the URL of the requested secret
        :raises KeyManagerError: if key_id is empty or None
        """
        if not key_id:
            msg = "Key ID is None"
            raise exception.KeyManagerError(msg)
        return self._base_url + "/secrets/" + key_id

    def _get_secret_data(self,
                         secret,
                         payload_content_type='application/octet-stream'):
        """Retrieves the secret data given a secret and content_type.

        :param secret: the secret metadata object whose payload to read
        :param payload_content_type: the format/type of the secret data
        :returns: the secret data (base64-encoded for octet-stream payloads)
        :throws Exception: if data cannot be retrieved
        """
        try:
            generated_data = secret.payload
            if payload_content_type == 'application/octet-stream':
                secret_data = base64.b64encode(generated_data)
            else:
                secret_data = generated_data
            return secret_data
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Error getting secret data."))

    def _get_secret(self, ctxt, secret_ref):
        """Creates the URL required for accessing a secret's metadata.

        :param ctxt: contains information of the user and the environment for
                     the request (storage/context.py)
        :param secret_ref: URL to access the secret
        :return: the secret's metadata
        :throws Exception: if there is an error retrieving the data
        """
        barbican_client = self._get_barbican_client(ctxt)
        try:
            return barbican_client.secrets.get(secret_ref)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Error getting secret metadata."))

    def get_key(self, ctxt, key_id,
                payload_content_type='application/octet-stream'):
        """Retrieves the specified key.

        :param ctxt: contains information of the user and the environment for
                     the request (storage/context.py)
        :param key_id: the UUID of the key to retrieve
        :param payload_content_type: The format/type of the secret data
        :return: SymmetricKey representation of the key
        :throws Exception: if key retrieval fails
        """
        # Bug fix: the client was never created here; the name
        # `barbican_client` silently resolved to the imported *module*,
        # bypassing the context validation and client caching that every
        # sibling method performs. Create the client first, as elsewhere.
        barbican_client = self._get_barbican_client(ctxt)
        try:
            secret_ref = self._create_secret_ref(key_id, barbican_client)
            secret = self._get_secret(ctxt, secret_ref)
            secret_data = self._get_secret_data(secret,
                                                payload_content_type)
            if payload_content_type == 'application/octet-stream':
                # convert decoded string to list of unsigned ints for each byte
                key_data = array.array('B',
                                       base64.b64decode(secret_data)).tolist()
            else:
                key_data = secret_data
            key = keymgr_key.SymmetricKey(secret.algorithm, key_data)
            return key
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Error getting key."))

    def delete_key(self, ctxt, key_id):
        """Deletes the specified key.

        :param ctxt: contains information of the user and the environment for
                     the request (storage/context.py)
        :param key_id: the UUID of the key to delete
        :throws Exception: if key deletion fails
        """
        barbican_client = self._get_barbican_client(ctxt)
        try:
            secret_ref = self._create_secret_ref(key_id, barbican_client)
            barbican_client.secrets.delete(secret_ref)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Error deleting key."))
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2017-2021 AVSystem <avsystem@avsystem.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
# Fail fast: the test framework relies on Python 3.5+ features.
assert sys.version_info >= (3, 5), "Python < 3.5 is unsupported"
import unittest
import os
import collections.abc
import argparse
import time
import tempfile
import textwrap
import shutil
import logging
from framework.pretty_test_runner import PrettyTestRunner
from framework.pretty_test_runner import COLOR_DEFAULT, COLOR_YELLOW, COLOR_GREEN, COLOR_RED
from framework.test_suite import Lwm2mTest, ensure_dir, get_full_test_name, get_suite_name, \
test_or_suite_matches_query_regex, LogType
# Reopen fd 2 in text mode with buffering=1 so progress output appears
# promptly even when stderr is redirected.
if sys.version_info[0] >= 3:
    sys.stderr = os.fdopen(2, 'w', 1)  # force line buffering
ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
UNITTEST_PATH = os.path.join(ROOT_DIR, 'suites')  # where test suites live
DEFAULT_SUITE_REGEX = r'^default\.'  # run the "default" suite unless overridden
def traverse(tree, cls=None):
    """Depth-first generator over *tree* and all of its nested iterables.

    Yields every node when *cls* is None, otherwise only nodes that are
    instances of *cls*. The root itself is a candidate node."""
    if cls is None or isinstance(tree, cls):
        yield tree
    if isinstance(tree, collections.abc.Iterable):
        for child in tree:
            yield from traverse(child, cls)
def discover_test_suites(test_config):
    """Discover every test under UNITTEST_PATH, abort on loader errors, and
    push *test_config* into each Lwm2mTest found. Returns the root suite."""
    loader = unittest.TestLoader()
    loader.testMethodPrefix = 'runTest'
    suite = loader.discover(UNITTEST_PATH, pattern='*.py', top_level_dir=UNITTEST_PATH)
    if loader.errors:
        for error in loader.errors:
            print(error)
        sys.exit(-1)
    for test in traverse(suite, cls=Lwm2mTest):
        test.set_config(test_config)
    return suite
def list_tests(suite, header='Available tests:'):
    """Print *header* followed by one bulleted line per Lwm2mTest in *suite*."""
    lines = ('* %s' % get_full_test_name(case)
             for case in traverse(suite, cls=Lwm2mTest))
    print(header)
    for line in lines:
        print(line)
    print('')
def run_tests(suites, config):
    """Run every non-empty suite with PrettyTestRunner, writing one log file
    per suite under <config.logs_path>/test, then print a colored summary.

    :param suites: iterable of unittest.TestSuite objects
    :param config: test configuration object (provides logs_path)
    :return: the list of per-suite result objects collected by the runner
    """
    test_runner = PrettyTestRunner(config)
    start_time = time.time()
    for suite in suites:
        if suite.countTestCases() == 0:
            continue  # skip empty suites so no empty log files are created
        log_dir = os.path.join(config.logs_path, 'test')
        ensure_dir(log_dir)
        log_filename = os.path.join(log_dir, '%s.log' % (get_suite_name(suite),))
        with open(log_filename, 'w') as logfile:
            test_runner.run(suite, logfile)
    seconds_elapsed = time.time() - start_time
    # Aggregate counters across all per-suite results.
    all_tests = sum(r.testsRun for r in test_runner.results)
    successes = sum(r.testsPassed for r in test_runner.results)
    errors = sum(r.testsErrors for r in test_runner.results)
    failures = sum(r.testsFailed for r in test_runner.results)
    # Green when everything passed; yellow/red otherwise.
    print('\nFinished in %f s; %s%d/%d successes%s, %s%d/%d errors%s, %s%d/%d failures%s\n'
          % (seconds_elapsed,
             COLOR_GREEN if successes == all_tests else COLOR_YELLOW, successes, all_tests,
             COLOR_DEFAULT,
             COLOR_RED if errors else COLOR_GREEN, errors, all_tests, COLOR_DEFAULT,
             COLOR_RED if failures else COLOR_GREEN, failures, all_tests, COLOR_DEFAULT))
    return test_runner.results
def filter_tests(suite, query_regex):
    """Recursively filter *suite*, keeping test cases and sub-suites that
    match *query_regex*. Returns a new TestSuite of the matches."""
    kept = []
    for entry in suite:
        if isinstance(entry, unittest.TestCase):
            if test_or_suite_matches_query_regex(entry, query_regex):
                kept.append(entry)
            continue
        if not isinstance(entry, unittest.TestSuite):
            continue
        if entry.countTestCases() == 0:
            continue
        if test_or_suite_matches_query_regex(entry, query_regex):
            # Whole sub-suite matches: keep it intact.
            kept.append(entry)
        else:
            # Otherwise keep only its matching descendants, if any.
            filtered = filter_tests(entry, query_regex)
            if filtered.countTestCases() > 0:
                kept.append(filtered)
    return unittest.TestSuite(kept)
def merge_directory(src, dst):
    """
    Move all contents of SRC into DST, preserving directory structure.
    """
    for name in os.listdir(src):
        src_path = os.path.join(src, name)
        dst_path = os.path.join(dst, name)
        if os.path.isdir(src_path):
            merge_directory(src_path, dst_path)
            continue
        ensure_dir(os.path.dirname(dst_path))
        shutil.move(src_path, dst_path)
def remove_tests_logs(tests):
    """Delete every log file produced by each test; missing files are ignored."""
    for test in tests:
        paths = (test.logs_path(log_type) for log_type in LogType)
        for path in paths:
            try:
                os.remove(path)
            except FileNotFoundError:
                pass  # already gone
if __name__ == "__main__":
    # Configure logging as early as possible; prefer colored output when the
    # optional coloredlogs package is installed.
    LOG_LEVEL = os.getenv('LOGLEVEL', 'info').upper()
    try:
        import coloredlogs
        coloredlogs.install(level=LOG_LEVEL)
    except ImportError:
        logging.basicConfig(level=LOG_LEVEL)
    # NOTE(review): the help text below contains a duplicated word ("is is").
    parser = argparse.ArgumentParser(description=textwrap.dedent('''
Runs Anjay demo client against Python integration tests.
Following environment variables are recognized:
VALGRIND - can be set to Valgrind executable path + optional arguments. If set,
demo client execution command will be prefixed with the value of this
variable. Note that some tests ignore this command.
NO_DUMPCAP - if set and not empty, PCAP traffic recordings between demo client
and mock server are not recorded.
RR - if set and not empty, demo client execution command is prefixed
with `rr record` to allow post-mortem debugging with `rr replay`.
Takes precedence over RRR.
RRR - if set and not empty, its value is is used for regex-matching applicable
tests or test suites. See REGEX MATCH RULES below. For matching
tests/suites, demo client execution command is prefixed with `rr record`
to allow post-mortem debugging with `rr replay`.
REGEX MATCH RULES
=================
{regex_match_rules_help}
'''.format(regex_match_rules_help=textwrap.indent(
        textwrap.dedent(
            test_or_suite_matches_query_regex.__doc__),
        prefix=' ' * 8))),
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--list', '-l',
                        action='store_true',
                        help='only list matching test cases, do not execute them')
    parser.add_argument('--client', '-c',
                        type=str, required=True,
                        help='path to the demo application to use')
    parser.add_argument('--keep-success-logs',
                        action='store_true',
                        help='keep logs from all tests, including ones that passed')
    parser.add_argument('--target-logs-path', type=str,
                        help='path where to leave the logs stored')
    parser.add_argument('query_regex',
                        type=str, default=DEFAULT_SUITE_REGEX, nargs='?',
                        help='regex used to filter test cases. See REGEX MATCH RULES for details.')
    cmdline_args = parser.parse_args(sys.argv[1:])
    # Tests write logs into a temporary directory; on exit they are merged
    # into the persistent target_logs_path.
    with tempfile.TemporaryDirectory() as tmp_log_dir:
        class TestConfig:
            # Paths and commands shared by every test in this run.
            demo_cmd = os.path.basename(cmdline_args.client)
            demo_path = os.path.abspath(os.path.dirname(cmdline_args.client))
            logs_path = tmp_log_dir
            suite_root_path = os.path.abspath(UNITTEST_PATH)
            target_logs_path = os.path.abspath(
                cmdline_args.target_logs_path or os.path.join(demo_path, '../test/integration/log'))

        def config_to_string(cfg):
            # Render the TestConfig attributes as an aligned "key = value" listing.
            config = sorted((k, v) for k, v in cfg.__dict__.items()
                            if not k.startswith('_'))  # skip builtins
            max_key_len = max(len(k) for k, _ in config)
            return '\n '.join(
                ['Test config:'] + ['%%-%ds = %%s' % max_key_len % kv for kv in config])

        test_suites = discover_test_suites(TestConfig)
        header = '%d tests:' % test_suites.countTestCases()
        if cmdline_args.query_regex:
            test_suites = filter_tests(test_suites, cmdline_args.query_regex)
            header = '%d tests match pattern %s:' % (
                test_suites.countTestCases(), cmdline_args.query_regex)
        list_tests(test_suites, header=header)
        result = None  # NOTE(review): unused; results are collected below
        if not cmdline_args.list:
            sys.stderr.write('%s\n\n' % config_to_string(TestConfig))
            try:
                results = run_tests(test_suites, TestConfig)
                for r in results:
                    if r.errors or r.failures:
                        print(r.errorSummary(log_root=TestConfig.target_logs_path))
                    if not cmdline_args.keep_success_logs:
                        remove_tests_logs(r.successes)
                if any(r.errors or r.failures for r in results):
                    raise SystemError("Some tests failed, inspect log for details")
            finally:
                # calculate logs path based on executable path to prevent it
                # from creating files in source directory if building out of source
                ensure_dir(os.path.dirname(TestConfig.target_logs_path))
                merge_directory(TestConfig.logs_path, TestConfig.target_logs_path)
| |
#!/usr/bin/env python
import logging
from multiprocessing.dummy import Pool
import os
import time
import sys
import begin
import coloredlogs
from tqdm import tqdm
from client import QueryClient, ModifyClient
from ltu.engine.result import Result
from ltu.engine.stat import Stat
# Module-level logger for this script.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

# Shared statistics accumulator used by every worker thread below.
# NOTE(review): a ``global`` statement at module level is a no-op; the plain
# assignment is what actually creates the module attribute.
global oStat
oStat = Stat()
def print_result_per_action(nb_threads, actions):
    """Log a one-line benchmark summary for each executed action.

    Reads the per-action counters accumulated in the module-level ``oStat``
    object: elapsed time, number of images actually processed, and errors.

    :param nb_threads: number of worker threads used (reported only).
    :param actions: iterable of action names ("add", "search", "delete").
    """
    logger.info("Result per actions called:")
    for action in actions:
        # Global time to execute all the queries for this action.
        # Renamed local (was ``time``) to avoid shadowing the ``time`` module.
        elapsed = oStat.get_value('queries.' + action + '._time')
        # How many images were actually performed = scheduled - already done.
        nb_images = (oStat.get_total(action, 'queries')
                     - oStat.get_count('queries.' + action + '.already'))
        # Guard against division by zero when the pass finished instantly.
        images_per_sec = nb_images / elapsed if elapsed else float(nb_images)
        nb_errors = oStat.get_count("queries." + action + ".errors")
        if nb_images > 0:
            # BUG FIX: message previously said "failded".
            bench = "{} done: {} images in {} sec on {} threads, {} images per sec, {} failed".format(
                action, nb_images, elapsed, nb_threads, images_per_sec, nb_errors)
        else:
            bench = "{} done: 0 images, {} failed".format(action, nb_errors)
        logger.info(bench)
def print_stat_global():
    """Log the global query statistics accumulated in ``oStat``."""
    logger.info("")
    logger.info("Queries Statistics: ")
    logger.info("{} images to process".format(oStat.get_count('images')))
    # Typo fix: "excute" -> "execute".
    logger.info("{} queries have been correctly performed on the {} to execute".format(
        oStat.get_total('ok', 'queries'), oStat.get_count('queries')))
    logger.info("{} actions failed to be performed: {} add, {} search and {} delete".format(
        oStat.get_total('errors', 'queries'),
        oStat.get_count('queries.add.errors'),
        oStat.get_count('queries.search.errors'),
        oStat.get_count('queries.delete.errors')))
    # BUG FIX: the stat group was misspelled as 'quieries', so the number of
    # skipped ("already processed") actions was always reported from a
    # non-existent counter.
    logger.info("{} actions have been already processed and not forced to be performed again".format(
        oStat.get_total('already', 'queries')))
def print_stat(nb_threads, actions):
    """Print all the statistics: the global summary first, then one
    benchmark line per action.

    :param nb_threads: number of worker threads used (reported only).
    :param actions: iterable of action names performed.
    """
    print_stat_global()
    logger.info("")
    print_result_per_action(nb_threads, actions)
def get_action_name_from_function(function):
    """Return the action name encoded in *function*'s name.

    The convention is ``<action>_image`` (``add_image``, ``search_image``,
    ``delete_image``); the part before the first underscore is the action.
    """
    action_name, _, _ = function.__name__.partition('_')
    return action_name
def run_single_task(item):
    """Run one action on one file and persist the JSON result.

    :param item: tuple ``(files_path, action_function)`` where ``files_path``
        maps ``"in"`` to the input image path and each scheduled action name
        to the output JSON path for that action.

    Exits the whole process (``sys.exit(-1)``) if the action raises or the
    result cannot be saved.
    """
    in_file = item[0]["in"]
    action_function = item[1]
    action = get_action_name_from_function(action_function)
    # The output path is present only when this action was scheduled for
    # this file; otherwise there is nothing to do.
    out_file = item[0].get(action, "")
    if not out_file:
        # BUG FIX: returning early guarantees the save step below can never
        # run with ``result`` unbound.
        return
    # Launch the action.
    try:
        result = action_function(in_file)
        logger.debug("Finish with status %s" % (result.status_code))
        if result.status_code < 0:
            # Negative status codes are engine-side errors; count them per code.
            logger.debug('An issue occurred with the file {}. Consult the json result file.'.format(in_file))
            oStat.incremente("queries." + action + ".errors." + str(result.status_code))
        else:
            oStat.incremente("queries." + action + ".ok")
    except Exception as e:
        logger.critical('An issue has occurred. Could not perform the action {}. The process is stopped: {}'.format(action, e))
        sys.exit(-1)
    # Save the result in a json file.
    try:
        result.save_json(out_file)
    except Exception as e:
        logger.critical('Could not save the result for the action {}: {}'.format(action, e))
        sys.exit(-1)
def run_task_mono_thread(action_function, files, action_label, nb_threads=1, offset=0):
    """Run the given action sequentially on every file, one at a time.

    ``nb_threads`` is accepted only for signature compatibility with
    :func:`run_task_multi_thread` and is ignored here.
    """
    for entry in files[offset:]:
        logger.info("")
        logger.info("%s: %s" % (action_label, entry["in"]))
        run_single_task((entry, action_function))
def run_task_multi_thread(action_function, files, action_label, nb_threads=2, offset=0):
    """Run the given action on every file using a thread pool.

    Progress is reported with a tqdm progress bar instead of the per-file
    logging done by :func:`run_task_mono_thread`.
    """
    work_items = [(entry, action_function) for entry in files[offset:]]
    workers = Pool(processes=nb_threads)
    completed = workers.imap_unordered(run_single_task, work_items)
    progress = tqdm(iterable=completed,
                    total=len(work_items),
                    unit='images',
                    desc='{0: <30}'.format(action_label))
    # Drain the iterator so the bar advances as results come in.
    for _ in progress:
        pass
def generate_actions_list_per_images(actions_list, input_dir, force):
    """Build the per-image work list for every requested action.

    For each image found under ``input_dir`` the returned list contains a
    dict with:
      - ``"in"``: input path of the image
      - one entry per action: output path of the JSON result file

    Images whose JSON result already exists are skipped unless ``force`` is
    true; skips are counted in the ``queries.<action>.already`` statistic.

    :param actions_list: list of action names ("add", "search", "delete").
    :param input_dir: directory tree containing the input images.
    :param force: re-run actions even when a result file already exists.
    :return: list of per-image dicts as described above.
    """

    def _make_dir(path):
        # Create *path* (including parents) if needed; abort the run on failure.
        # BUG FIX: the original logged one mkdir failure with its format
        # arguments swapped ('...{} out path: {}'.format(e, action)).
        try:
            os.makedirs(path, exist_ok=True)
        except Exception as e:
            logger.critical('Could not create the out path {}: {}'.format(path, e))
            sys.exit(-1)

    # Result root directory ./out_result, with one sub-folder per action.
    out_base_path = os.path.join(os.getcwd(), "out_result")
    _make_dir(out_base_path)
    for action in actions_list:
        _make_dir(os.path.join(out_base_path, action))

    files = []
    b_file = False  # becomes True as soon as one input image is found
    image_path = os.path.basename(input_dir)
    for dirpath, _, fnames in os.walk(input_dir):
        # Path of the current directory relative to the input images folder.
        relativ_path = os.path.relpath(dirpath, input_dir)
        if relativ_path == ".":
            relativ_path = ""
        # Mirror the input tree inside every action's result folder.
        for action in actions_list:
            _make_dir(os.path.join(out_base_path, action, image_path))
            _make_dir(os.path.join(out_base_path, action, image_path, relativ_path))
        for file in fnames:
            # files_path["in"]: input image file path
            # files_path[action]: result json file path per action
            if file == ".DS_Store":
                # Skip macOS Finder metadata files.
                continue
            oStat.incremente("images")
            b_file = True
            files_path = {}
            b_action = False
            for action in actions_list:
                json_path = os.path.join(out_base_path, action, image_path, relativ_path, file) + ".json"
                # Process the image when no result exists yet, or when the
                # action is forced to be executed again.
                if not os.path.exists(json_path) or force:
                    b_action = True
                    files_path[action] = json_path
                else:
                    # The image is skipped for this action.
                    oStat.incremente("queries." + action + ".already")
                    logger.debug("%s action already performed for this file. You can consult the result in the Json file. To generate new result, delete the Json File or force the %s action by adding the --force parameter in the command." % (action, action))
            if b_action:
                files_path["in"] = os.path.join(dirpath, file)
                files.append(files_path)
    # No input file at all: fail loudly (AssertionError, as before).
    if not b_file and not files:
        assert files, "No input file found in %s" % input_dir
    elif b_file and not files:
        logger.info("No new file to process. Delete old results folders or force the treatment by adding the --force parameter in the command.")
    return files
@begin.start
def ltuengine_process_dir(actions: "A list(separate each action by a comma) of actions to execute on a folder: add|delete|search or bench(that performs 'delete,add,search,delete') ",
                          application_key: "LTU Engine application key",
                          input_dir: "Folder with all needed inputs",
                          host: "server URL that hosts the application, default is LTU OnDemand"=None,
                          force: " a boolean to indicate what to do if a request has already been executed: force or not"=False,
                          nb_threads: "a list(separate each action by a comma) of number of threads"="1",
                          offset: "starting offset"=0):
    """
    Parse given directory for images and perform action [add|search|delete] on given LTU Engine
    application. Useful to add/delete a batch of images on multiple threads.
    """
    coloredlogs.install(level='info')
    ## process input parameters
    # get all actions; "bench" expands to a full delete/add/search/delete cycle
    if actions == "bench":
        actions = "delete,add,search,delete"
        force = True  # for a bench, actions are forced to be performed
    actions_list = actions.split(',')
    # verify that every requested action is valid
    # NOTE(review): ``actions`` is re-bound here to the list of known action
    # names; the original comma-separated parameter value is discarded and
    # ``actions[0]/[1]/[2]`` below index into this fixed list.
    actions = ["add", "search", "delete"]
    for a in actions_list:
        if a not in actions:
            logger.error("Unknown action {}".format(a))
            sys.exit(-1)
    # parse the list of thread counts to benchmark with
    all_threads = nb_threads.split(',')
    for i in range(0, len(all_threads)):
        all_threads[i] = int(all_threads[i])
    # other parameters
    offset = int(offset)
    # list of images to process
    files = []
    # get input and output file paths for each image
    files = generate_actions_list_per_images(actions_list, input_dir, force)
    if files:
        # number of images to treat
        nb_files = len(files) - offset
        # create client
        logger.info("")
        modifyClient = ModifyClient(application_key, server_url=host)
        for nb_threads in all_threads:
            for action in actions_list:
                logger.info("")
                start_time = time.time()
                # Delta counting: start from minus the current counters so
                # that adding them back after the run yields the number of
                # images processed by this pass only.
                nb_files = - oStat.get_count('queries.'+action+'.ok') - oStat.get_count('queries.'+action+'.errors')
                # get the appropriate function to run the task
                # - run_task_mono_thread will run on 1 thread and show some logs
                # - run_task_multi_thread will run on multiple threads and use a progress bar
                run_task = run_task_mono_thread if nb_threads == 1 else run_task_multi_thread
                # get the action to perform
                if action == actions[0]:
                    logger.info("Adding directory %s images into application %s" % (input_dir, application_key))
                    run_task(modifyClient.add_image, files, "Adding image", nb_threads, offset)
                elif action == actions[2]:
                    logger.info("Deleting directory %s images from application %s" % (input_dir, application_key))
                    run_task(modifyClient.delete_imagefile, files, "Deleting image", nb_threads, offset)
                elif action == actions[1]:
                    queryClient = QueryClient(application_key, server_url=host)
                    logger.info("Searching directory %s images into application %s" % (input_dir, application_key))
                    run_task(queryClient.search_image, files, "Searching image", nb_threads, offset)
                nb_files += oStat.get_count('queries.'+action+'.ok') + oStat.get_count('queries.'+action+'.errors')
                end_time = (time.time() - start_time)
                # save this action's elapsed-time statistic
                stat_path = "queries." + action + "._time"
                oStat.add_and_average_stat(stat_path, end_time)
                # NOTE(review): ``end_time`` can be ~0 for an empty pass,
                # which would make this division raise ZeroDivisionError —
                # confirm whether that case can occur in practice.
                bench = "%s done, %d images, in %f sec on %d threads, %f images per sec" % (action, nb_files, end_time, nb_threads, nb_files/end_time)
                logger.debug(bench)
    print_stat(nb_threads, actions)
| |
from corehq.apps.data_interfaces.models import (
AutomaticUpdateRule,
MatchPropertyDefinition,
CreateScheduleInstanceActionDefinition,
)
from corehq.messaging.scheduling.models import (
Schedule,
AlertSchedule,
TimedSchedule,
TimedEvent,
SMSContent,
)
from corehq.messaging.scheduling.scheduling_partitioned.models import CaseScheduleInstanceMixin
from django.core.management.base import BaseCommand, CommandError
import copy
import json
import jsonobject
# Discriminator values written into the exported JSON (``schedule_type``)
# to identify which schedule shape a record represents.
SIMPLE_SMS_DAILY_SCHEDULE_WITH_TIME = 1
SIMPLE_SMS_ALERT_SCHEDULE = 2
class MatchPropertyCriterion(jsonobject.JsonObject):
    """Serializable form of a MatchPropertyDefinition rule criterion."""
    property_name = jsonobject.StringProperty()
    property_value = jsonobject.StringProperty()
    match_type = jsonobject.StringProperty()
class SimpleSchedulingRule(jsonobject.JsonObject):
    """Serializable form of a scheduling AutomaticUpdateRule for export."""
    name = jsonobject.StringProperty()
    case_type = jsonobject.StringProperty()
    criteria = jsonobject.ListProperty(MatchPropertyCriterion)
    # Each recipient is a [recipient_type, recipient_id] pair; the id part
    # may be absent (required=False).
    recipients = jsonobject.ListProperty(jsonobject.ListProperty(jsonobject.StringProperty(required=False)))
    reset_case_property_name = jsonobject.StringProperty()
    start_date_case_property = jsonobject.StringProperty()
    specific_start_date = jsonobject.DateProperty()
    scheduler_module_info = jsonobject.ObjectProperty(CreateScheduleInstanceActionDefinition.SchedulerModuleInfo)
class ExtraSchedulingOptions(jsonobject.JsonObject):
    """Schedule options shared by both timed and alert schedule exports."""
    active = jsonobject.BooleanProperty()
    include_descendant_locations = jsonobject.BooleanProperty()
    default_language_code = jsonobject.StringProperty()
    custom_metadata = jsonobject.DictProperty(str)
    use_utc_as_default_timezone = jsonobject.BooleanProperty()
    user_data_filter = jsonobject.DictProperty(jsonobject.ListProperty(str))
    stop_date_case_property_name = jsonobject.StringProperty()
class SimpleSMSDailyScheduleWithTime(jsonobject.JsonObject):
    """Serializable form of a simple daily TimedSchedule with one TimedEvent."""
    # Constant discriminator, not a jsonobject property.
    schedule_type = SIMPLE_SMS_DAILY_SCHEDULE_WITH_TIME
    time = jsonobject.TimeProperty()
    # Language code -> message text.
    message = jsonobject.DictProperty(str)
    total_iterations = jsonobject.IntegerProperty()
    start_offset = jsonobject.IntegerProperty()
    start_day_of_week = jsonobject.IntegerProperty()
    extra_options = jsonobject.ObjectProperty(ExtraSchedulingOptions)
    repeat_every = jsonobject.IntegerProperty()
class SimpleSMSAlertSchedule(jsonobject.JsonObject):
    """Serializable form of a simple immediate AlertSchedule."""
    # Constant discriminator, not a jsonobject property.
    schedule_type = SIMPLE_SMS_ALERT_SCHEDULE
    # Language code -> message text.
    message = jsonobject.DictProperty(str)
    extra_options = jsonobject.ObjectProperty(ExtraSchedulingOptions)
class Command(BaseCommand):
    help = "Export conditional alerts to file."

    def add_arguments(self, parser):
        parser.add_argument(
            'domain',
            help="The project space from which to export conditional alerts.",
        )

    def get_json_rule(self, rule):
        """Convert an AutomaticUpdateRule into a SimpleSchedulingRule.

        Raises CommandError for any configuration this export script does
        not support: non-MatchProperty criteria, more or fewer than one
        action, recipient types tied to specific objects, or scheduler
        module integration.
        """
        json_rule = SimpleSchedulingRule(
            name=rule.name,
            case_type=rule.case_type,
        )
        for criterion in rule.memoized_criteria:
            definition = criterion.definition
            if not isinstance(definition, MatchPropertyDefinition):
                raise CommandError(
                    "Rule %s references currently unsupported criterion definition for export. "
                    "Either add support to this script for unsupported criteria or exclude rule "
                    "from export." % rule.pk
                )
            json_rule.criteria.append(MatchPropertyCriterion(
                property_name=definition.property_name,
                property_value=definition.property_value,
                match_type=definition.match_type,
            ))
        # UI-created rules have exactly one CreateScheduleInstanceActionDefinition.
        if len(rule.memoized_actions) != 1:
            raise CommandError(
                "Expected exactly one action for rule %s. This is an unexpected configuration. "
                "Was this rule created with the UI?" % rule.pk
            )
        action = rule.memoized_actions[0].definition
        if not isinstance(action, CreateScheduleInstanceActionDefinition):
            raise CommandError(
                "Expected CreateScheduleInstanceActionDefinition for rule %s. This is an unexpected "
                "configuration. Was this rule created with the UI?" % rule.pk
            )
        # Only recipient types that do not embed ids of concrete objects
        # (users, locations, ...) can be transferred across projects.
        for recipient_type, recipient_id in action.recipients:
            if recipient_type not in (
                CaseScheduleInstanceMixin.RECIPIENT_TYPE_SELF,
                CaseScheduleInstanceMixin.RECIPIENT_TYPE_CASE_OWNER,
                CaseScheduleInstanceMixin.RECIPIENT_TYPE_LAST_SUBMITTING_USER,
                CaseScheduleInstanceMixin.RECIPIENT_TYPE_PARENT_CASE,
                CaseScheduleInstanceMixin.RECIPIENT_TYPE_CUSTOM,
            ):
                raise CommandError(
                    "Unsupported recipient_type %s referenced in rule %s. That's probably because the "
                    "recipient type references a specific object like a user or location whose id cannot "
                    "be guaranteed to be the same in the imported project. This rule must be excluded "
                    "from export" % (recipient_type, rule.pk)
                )
        if action.get_scheduler_module_info().enabled:
            raise CommandError(
                "Scheduler module integration is not supported for export because it references "
                "a form whose unique id is not guaranteed to be the same in the imported project. Please "
                "exclude rule %s from export." % rule.pk
            )
        json_rule.recipients = copy.deepcopy(action.recipients)
        json_rule.reset_case_property_name = action.reset_case_property_name
        json_rule.start_date_case_property = action.start_date_case_property
        json_rule.specific_start_date = action.specific_start_date
        # Always exported disabled; the enabled case raises above.
        json_rule.scheduler_module_info = CreateScheduleInstanceActionDefinition.SchedulerModuleInfo(enabled=False)
        return json_rule

    def get_json_scheduling_options(self, schedule):
        """Copy the schedule options shared by timed and alert schedules."""
        return ExtraSchedulingOptions(
            active=schedule.active,
            include_descendant_locations=schedule.include_descendant_locations,
            default_language_code=schedule.default_language_code,
            custom_metadata=copy.deepcopy(schedule.custom_metadata),
            use_utc_as_default_timezone=schedule.use_utc_as_default_timezone,
            user_data_filter=copy.deepcopy(schedule.user_data_filter),
            stop_date_case_property_name=schedule.stop_date_case_property_name,
        )

    def get_json_timed_schedule(self, schedule):
        """Convert a simple daily TimedSchedule (single TimedEvent sending
        SMSContent) into a SimpleSMSDailyScheduleWithTime; raise
        CommandError for any other TimedSchedule shape."""
        if schedule.ui_type != Schedule.UI_TYPE_DAILY:
            raise CommandError(
                "Only simple daily TimedSchedules are supported by this export script. Either exclude "
                "rules with other types of TimedSchedules from export or add support to this script "
                "for the missing schedule types."
            )
        json_schedule = SimpleSMSDailyScheduleWithTime(
            total_iterations=schedule.total_iterations,
            start_offset=schedule.start_offset,
            start_day_of_week=schedule.start_day_of_week,
            repeat_every=schedule.repeat_every,
            extra_options=self.get_json_scheduling_options(schedule),
        )
        # A simple daily schedule has exactly one event.
        event = schedule.memoized_events[0]
        if not isinstance(event, TimedEvent):
            raise CommandError(
                "Only TimedSchedules which use simple TimedEvents are supported by this export "
                "script. Either exclude rules with other types of TimedEvents from export or add "
                "support to this script for the missing use cases."
            )
        json_schedule.time = event.time
        content = event.content
        if not isinstance(content, SMSContent):
            raise CommandError(
                "Only Schedules which send SMSContent are supported by this export script. "
                "Either exclude rules with other content types from export or add support "
                "to this script for the missing use cases."
            )
        json_schedule.message = copy.deepcopy(content.message)
        return json_schedule

    def get_json_alert_schedule(self, schedule):
        """Convert a simple immediate AlertSchedule sending SMSContent into
        a SimpleSMSAlertSchedule; raise CommandError otherwise."""
        if schedule.ui_type != Schedule.UI_TYPE_IMMEDIATE:
            raise CommandError(
                "Only simple immediate AlertSchedules are supported by this export script. Either exclude "
                "rules with other types of AlertSchedules from export or add support to this script "
                "for the missing schedule types."
            )
        json_schedule = SimpleSMSAlertSchedule(
            extra_options=self.get_json_scheduling_options(schedule),
        )
        event = schedule.memoized_events[0]
        content = event.content
        if not isinstance(content, SMSContent):
            raise CommandError(
                "Only Schedules which send SMSContent are supported by this export script. "
                "Either exclude rules with other content types from export or add support "
                "to this script for the missing use cases."
            )
        json_schedule.message = copy.deepcopy(content.message)
        return json_schedule

    def handle(self, domain, **options):
        """Export every scheduling rule of *domain* as one JSON line per
        rule into ``conditional_alerts_for_<domain>.txt``."""
        result = []
        for rule in AutomaticUpdateRule.by_domain(
            domain,
            AutomaticUpdateRule.WORKFLOW_SCHEDULING,
            active_only=False,
        ):
            json_rule = self.get_json_rule(rule)
            # get_json_rule already validated there is exactly one
            # CreateScheduleInstanceActionDefinition.
            action = rule.memoized_actions[0].definition
            if action.schedule.location_type_filter:
                raise CommandError(
                    "Expected location_type_filter to be empty for rule %s. Location type filtering "
                    "references primary keys of LocationType objects which aren't guaranteed to be "
                    "the same in the imported project. This rule must be excluded from export." % rule.pk
                )
            if isinstance(action.schedule, TimedSchedule):
                json_schedule = self.get_json_timed_schedule(action.schedule)
            elif isinstance(action.schedule, AlertSchedule):
                json_schedule = self.get_json_alert_schedule(action.schedule)
            else:
                raise CommandError(
                    "Unexpected Schedule type for rule %s. Support must be added to this script for "
                    "anything other than TimedSchedules or AlertSchedules." % rule.pk
                )
            result.append(json.dumps({
                'rule': json_rule.to_json(),
                'schedule': json_schedule.to_json(),
            }))
        # One JSON document per line, newline-terminated.
        with open('conditional_alerts_for_%s.txt' % domain, 'w', encoding='utf-8') as f:
            for line in result:
                f.write(line)
                f.write('\n')
        print("Done")
| |
import unittest
import operator
from collections import abc
from g1.bases.collections import (
LoadingDict,
LruCache,
Multiset,
Namespace,
)
class LoadingDictTest(unittest.TestCase):
    """Tests for LoadingDict's lazy, load-on-miss behavior."""

    def test_loading_dict(self):
        loaded_keys = []

        def loader(key):
            # The loader is consulted only on a miss; it records every key
            # it actually loads and refuses 'no-such-key'.
            if key == 'no-such-key':
                raise KeyError(key)
            loaded_keys.append(key)
            return key

        d = LoadingDict(loader, p=1, q=2)
        # Seed entries do not go through the loader.
        self.assertEqual(d, {'p': 1, 'q': 2})
        self.assertEqual(loaded_keys, [])

        # Repeated lookups load each missing key exactly once.
        for _ in range(3):
            self.assertEqual(d['x'], 'x')
            self.assertEqual(d['y'], 'y')
        self.assertEqual(d, {'p': 1, 'q': 2, 'x': 'x', 'y': 'y'})
        self.assertEqual(loaded_keys, ['x', 'y'])

        # A loader failure propagates through [] but not through get().
        with self.assertRaises(KeyError):
            d['no-such-key']  # pylint: disable=pointless-statement
        self.assertIsNone(d.get('no-such-key'))
        self.assertEqual(d, {'p': 1, 'q': 2, 'x': 'x', 'y': 'y'})
        self.assertEqual(loaded_keys, ['x', 'y'])
class LruCacheTest(unittest.TestCase):
    """Tests for LruCache eviction order and MutableMapping operations."""

    def assert_cache(self, actual, expect):
        # Compare the cache's internal ordered storage (oldest entry first)
        # against the expected list of (key, value) pairs.
        self.assertEqual(list(actual._cache.items()), expect)

    def test_lru_cache(self):
        cache = LruCache(2)  # capacity of two entries
        cache['a'] = 1
        cache['b'] = 2
        self.assert_cache(cache, [('a', 1), ('b', 2)])
        # Missing keys raise / return None and do not disturb the order.
        with self.assertRaises(KeyError):
            cache['x']  # pylint: disable=pointless-statement
        self.assert_cache(cache, [('a', 1), ('b', 2)])
        self.assertIsNone(cache.get('x'))
        self.assert_cache(cache, [('a', 1), ('b', 2)])
        cache['c'] = 3  # 'a' should be evicted.
        self.assert_cache(cache, [('b', 2), ('c', 3)])
        self.assertEqual(2, cache['b'])  # 'b' should be moved to last.
        self.assert_cache(cache, [('c', 3), ('b', 2)])
        cache['d'] = 4  # 'c' should be evicted.
        self.assert_cache(cache, [('b', 2), ('d', 4)])
        del cache['b']
        self.assert_cache(cache, [('d', 4)])
        cache['a'] = 1
        cache['b'] = 2
        self.assert_cache(cache, [('a', 1), ('b', 2)])
        # Deleting / popping a missing key raises without side effects.
        with self.assertRaises(KeyError):
            del cache['x']
        self.assert_cache(cache, [('a', 1), ('b', 2)])
        with self.assertRaises(KeyError):
            cache.pop('x')
        self.assert_cache(cache, [('a', 1), ('b', 2)])
        # popitem removes the oldest (least recently used) entry.
        self.assertEqual(('a', 1), cache.popitem())
        self.assert_cache(cache, [('b', 2)])
        cache['c'] = 3
        self.assert_cache(cache, [('b', 2), ('c', 3)])
        self.assertEqual(3, cache.pop('c'))
        self.assert_cache(cache, [('b', 2)])
        self.assertEqual(4, cache.setdefault('d', 4))
        self.assert_cache(cache, [('b', 2), ('d', 4)])
        # setdefault on an existing key keeps the value but refreshes it.
        self.assertEqual(2, cache.setdefault('b', 99))
        self.assert_cache(cache, [('d', 4), ('b', 2)])
        # Test ``__contains__``.
        # NOTE(review): the expected order below implies ``in`` refreshes the
        # entry it finds — confirm this is the intended LruCache contract.
        self.assertTrue('d' in cache)
        self.assert_cache(cache, [('b', 2), ('d', 4)])

    def test_methods_operate_on_entire_cache(self):
        """Test methods that should not alter cache eviction order."""
        cache = LruCache(2)
        cache['a'] = 1
        cache['b'] = 2
        self.assert_cache(cache, [('a', 1), ('b', 2)])
        self.assertEqual(len(cache), 2)
        self.assert_cache(cache, [('a', 1), ('b', 2)])
        self.assertEqual(list(cache), ['a', 'b'])
        self.assert_cache(cache, [('a', 1), ('b', 2)])
        self.assertEqual(list(cache.keys()), ['a', 'b'])
        self.assert_cache(cache, [('a', 1), ('b', 2)])
        self.assertEqual(list(cache.items()), [('a', 1), ('b', 2)])
        self.assert_cache(cache, [('a', 1), ('b', 2)])
        self.assertEqual(list(cache.values()), [1, 2])
        self.assert_cache(cache, [('a', 1), ('b', 2)])
class MultisetTest(unittest.TestCase):
    """Tests for the Multiset container."""

    def test_abc(self):
        # BUG FIX: the original ``self.assertTrue(Multiset, abc.MutableSet)``
        # only checked that the class object is truthy (the second argument
        # is the failure *message*), so it could never fail.  Assert the
        # intended subclass relationship instead.
        self.assertTrue(issubclass(Multiset, abc.MutableSet))

    def test_multiset(self):
        """Exercise discard() through every multiplicity transition."""
        ms = Multiset('abacc')
        # Discarding an absent element is a no-op.
        for _ in range(3):
            ms.discard('d')
        self.assertTrue(ms)
        self.assertEqual(sorted(ms), ['a', 'a', 'b', 'c', 'c'])
        self.assertEqual(len(ms), 5)
        self.assertIn('a', ms)
        self.assertIn('b', ms)
        self.assertIn('c', ms)
        self.assertNotIn('d', ms)
        # Discard removes one occurrence at a time.
        ms.discard('a')
        self.assertTrue(ms)
        self.assertEqual(sorted(ms), ['a', 'b', 'c', 'c'])
        self.assertEqual(len(ms), 4)
        self.assertIn('a', ms)
        self.assertIn('b', ms)
        self.assertIn('c', ms)
        self.assertNotIn('d', ms)
        for _ in range(3):
            ms.discard('a')
        self.assertTrue(ms)
        self.assertEqual(sorted(ms), ['b', 'c', 'c'])
        self.assertEqual(len(ms), 3)
        self.assertNotIn('a', ms)
        self.assertIn('b', ms)
        self.assertIn('c', ms)
        self.assertNotIn('d', ms)
        ms.discard('c')
        self.assertTrue(ms)
        self.assertEqual(sorted(ms), ['b', 'c'])
        self.assertEqual(len(ms), 2)
        self.assertNotIn('a', ms)
        self.assertIn('b', ms)
        self.assertIn('c', ms)
        self.assertNotIn('d', ms)
        for _ in range(3):
            ms.discard('c')
        self.assertTrue(ms)
        self.assertEqual(sorted(ms), ['b'])
        self.assertEqual(len(ms), 1)
        self.assertNotIn('a', ms)
        self.assertIn('b', ms)
        self.assertNotIn('c', ms)
        self.assertNotIn('d', ms)
        # Removing the last element empties the multiset.
        for _ in range(3):
            ms.discard('b')
        self.assertFalse(ms)
        self.assertEqual(sorted(ms), [])
        self.assertEqual(len(ms), 0)
        self.assertNotIn('a', ms)
        self.assertNotIn('b', ms)
        self.assertNotIn('c', ms)
        self.assertNotIn('d', ms)

    def test_copy(self):
        m1 = Multiset('aabccd')
        m2 = m1.copy()
        self.assertEqual(m1, m2)
        self.assertEqual(len(m1), len(m2))

    def test_comparators(self):
        """Subset/superset comparisons respect element multiplicity."""
        self.assertTrue(Multiset('aab').isdisjoint(Multiset('cdd')))
        self.assertLess(Multiset('aa'), Multiset('aab'))
        self.assertLessEqual(Multiset('aab'), Multiset('aab'))
        self.assertEqual(Multiset('aab'), Multiset('aab'))
        self.assertEqual(Multiset('aabc'), Multiset('caba'))
        self.assertGreater(Multiset('aab'), Multiset('aa'))
        self.assertGreaterEqual(Multiset('aab'), Multiset('aab'))
        # Incomparable multisets are neither < nor >.
        self.assertFalse(Multiset('aab') < Multiset('aac'))
        self.assertFalse(Multiset('aab') > Multiset('aac'))
        self.assertNotEqual(Multiset('aab'), Multiset('aac'))
        self.assertNotEqual(Multiset('aab'), Multiset('caa'))

    def test_operators(self):
        """Binary and in-place set operators on multisets."""
        checks = [
            ('and', Multiset('aab'), Multiset('aac'), Multiset('aa')),
            ('or', Multiset('aab'), Multiset('aac'), Multiset('aabc')),
            ('xor', Multiset('aab'), Multiset('aac'), Multiset('bc')),
            ('xor', Multiset('aaab'), Multiset('aac'), Multiset('abc')),
            ('add', Multiset('aab'), Multiset('aac'), Multiset('aaaabc')),
            ('sub', Multiset('aab'), Multiset('aac'), Multiset('b')),
        ]
        for op, p, q, expect in checks:
            iop = 'i' + op
            # operator module names: and_/or_ carry a trailing underscore.
            if op in ('and', 'or'):
                op += '_'
            with self.subTest(check=op):
                self.assertEqual(getattr(operator, op)(p, q), expect)
            with self.subTest(check=iop):
                # In-place variant mutates its left operand.
                pp = p.copy()
                self.assertEqual(getattr(operator, iop)(pp, q), expect)
                self.assertEqual(pp, expect)

    def test_count(self):
        m = Multiset('aabcc')
        self.assertEqual(m.count('a'), 2)
        self.assertEqual(m.count('b'), 1)
        self.assertEqual(m.count('c'), 2)
        self.assertEqual(m.count('d'), 0)

    def test_remove(self):
        m = Multiset('aabcc')
        m.remove('a')
        self.assertEqual(m, Multiset('abcc'))
        # Unlike discard, remove raises on a missing element.
        with self.assertRaises(KeyError):
            m.remove('d')

    def test_pop(self):
        m = Multiset('aabcc')
        # pop order is unspecified, so only compare as (frozen)sets.
        self.assertEqual(
            frozenset(m.pop() for _ in range(len(m))),
            frozenset('aabcc'),
        )
        with self.assertRaises(KeyError):
            m.pop()
class NamespaceTest(unittest.TestCase):
    """Tests for the read-only Namespace container."""

    def test_namespace(self):
        # Positional names map to themselves.
        ns = Namespace('a', 'b', 'c')
        for name in ('a', 'b', 'c'):
            self.assertEqual(getattr(ns, name), name)
            self.assertIn(name, ns)
            self.assertEqual(ns[name], name)
        # Unknown names raise; assignment is forbidden.
        with self.assertRaises(AttributeError):
            getattr(ns, 'd')
        self.assertNotIn('d', ns)
        with self.assertRaises(KeyError):
            ns['d']  # pylint: disable=pointless-statement
        with self.assertRaises(TypeError):
            ns.d = 1
        self.assertEqual(tuple(ns), ('a', 'b', 'c'))
        self.assertEqual(ns._asdict(), {'a': 'a', 'b': 'b', 'c': 'c'})
        # Keyword entries map names to the given values.
        expect = {'a': 1, 'b': 2, 'c': 3}
        ns = Namespace(**expect)
        for name, value in expect.items():
            self.assertEqual(getattr(ns, name), value)
            self.assertEqual(ns[name], value)
        with self.assertRaises(AttributeError):
            getattr(ns, 'd')
        with self.assertRaises(KeyError):
            ns['d']  # pylint: disable=pointless-statement
        with self.assertRaises(TypeError):
            ns.d = 1
        self.assertEqual(tuple(ns), ('a', 'b', 'c'))
        self.assertEqual(ns._asdict(), {'a': 1, 'b': 2, 'c': 3})
        # An empty namespace iterates to nothing.
        ns = Namespace()
        self.assertEqual(tuple(ns), ())
        # Invalid constructions: duplicate name, underscore-prefixed names.
        with self.assertRaisesRegex(ValueError, r'overwrite'):
            Namespace('a', a=1)
        with self.assertRaisesRegex(ValueError, r'starts with \'_\''):
            Namespace('_a')
        with self.assertRaisesRegex(ValueError, r'starts with \'_\''):
            Namespace(_a=1)
# Allow running this test module directly (``python <module>.py``).
if __name__ == '__main__':
    unittest.main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.