text stringlengths 4 1.02M | meta dict |
|---|---|
"""
Boxy Theme Changelog
"""
import sublime
import sublime_plugin
import webbrowser
STYLES = '''
.mdpopups {
{{'.background'|css}}
}
.boxy-changelog a {
text-decoration: none;
}
.boxy-changelog h1,
.boxy-changelog h2,
.boxy-changelog h3,
.boxy-changelog h4,
.boxy-changelog h5,
.boxy-changelog h6 {
margin: 1rem;
{{'.string'|css('color')}}
}
.boxy-changelog ul {
margin: 0.75rem 1rem;
}
'''
class BoxyChangelogCommand(sublime_plugin.WindowCommand):
    """Show the Boxy Theme changelog as rendered HTML in a scratch view."""

    def on_navigate(self, href):
        """Open links clicked inside the changelog in the default browser."""
        webbrowser.open_new_tab(href)

    def run(self):
        # Imported lazily: is_enabled() already guarantees mdpopups is present.
        import mdpopups

        markdown = sublime.load_resource('Packages/Boxy Theme/CHANGELOG.md')
        changelog_view = self.window.new_file()
        changelog_view.set_name('Boxy Theme Changelog')
        changelog_view.settings().set('gutter', False)

        rendered = mdpopups.md2html(changelog_view, markdown)
        html = '<div class="boxy-changelog">%s</div>' % rendered
        mdpopups.add_phantom(changelog_view, 'changelog', sublime.Region(0),
                             html, sublime.LAYOUT_INLINE, css=STYLES,
                             on_navigate=self.on_navigate)

        changelog_view.set_read_only(True)
        changelog_view.set_scratch(True)

    def is_enabled(self):
        """Enabled only with mdpopups >= 1.9.0 on Sublime Text build 3119+."""
        try:
            import mdpopups
        except Exception:
            return False
        if mdpopups.version() < (1, 9, 0):
            return False
        return int(sublime.version()) >= 3119

    # The command is shown in menus exactly when it is enabled.
    is_visible = is_enabled
| {
"content_hash": "ab6639a4d99d38416a151a53a97c3c91",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 78,
"avg_line_length": 24.383333333333333,
"alnum_prop": 0.5686944634313056,
"repo_name": "oivva/boxy",
"id": "64ebd323d6151451bd47654d17e2845163f60acd",
"size": "1488",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "utils/changelog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "2111070"
},
{
"name": "Python",
"bytes": "35299"
}
],
"symlink_target": ""
} |
from typing import Optional

from .aws import Action as BaseAction
from .aws import BaseARN
# Human-readable service name and the IAM action prefix ("states:<action>")
# for AWS Step Functions.
service_name = "AWS Step Functions"
prefix = "states"
class Action(BaseAction):
    """An IAM policy action in the Step Functions ("states") namespace."""

    # `action` may be omitted, so it is Optional[str] rather than the
    # implicit-Optional `action: str = None` form deprecated by PEP 484.
    def __init__(self, action: Optional[str] = None) -> None:
        super().__init__(prefix, action)
class ARN(BaseARN):
    """Amazon Resource Name builder scoped to the "states" service."""

    def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
        super().__init__(
            service=prefix,
            resource=resource,
            region=region,
            account=account,
        )
# One module-level constant per Step Functions IAM action, so policy documents
# can reference them as e.g. states.StartExecution.
CreateActivity = Action("CreateActivity")
CreateStateMachine = Action("CreateStateMachine")
DeleteActivity = Action("DeleteActivity")
DeleteStateMachine = Action("DeleteStateMachine")
DescribeActivity = Action("DescribeActivity")
DescribeExecution = Action("DescribeExecution")
DescribeStateMachine = Action("DescribeStateMachine")
DescribeStateMachineForExecution = Action("DescribeStateMachineForExecution")
GetActivityTask = Action("GetActivityTask")
GetExecutionHistory = Action("GetExecutionHistory")
ListActivities = Action("ListActivities")
ListExecutions = Action("ListExecutions")
ListStateMachines = Action("ListStateMachines")
ListTagsForResource = Action("ListTagsForResource")
SendTaskFailure = Action("SendTaskFailure")
SendTaskHeartbeat = Action("SendTaskHeartbeat")
SendTaskSuccess = Action("SendTaskSuccess")
StartExecution = Action("StartExecution")
StartSyncExecution = Action("StartSyncExecution")
StopExecution = Action("StopExecution")
TagResource = Action("TagResource")
UntagResource = Action("UntagResource")
UpdateStateMachine = Action("UpdateStateMachine")
| {
"content_hash": "377d70af5dd7827723c0ac1c0456e734",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 88,
"avg_line_length": 36.904761904761905,
"alnum_prop": 0.7638709677419355,
"repo_name": "cloudtools/awacs",
"id": "1e6b30f5bfaf37b8ccc2bf31fa87f194a54cad24",
"size": "1666",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "awacs/states.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "343"
},
{
"name": "Python",
"bytes": "963483"
}
],
"symlink_target": ""
} |
"""
fabcloudkit
Functions for managing supervisor.
This module provides functions that check for installation, install, and manage an
installation of, supervisor. If an installation already exists (e.g., as part of an
AMI), the files and directories used by that installation are (for now) assumed to conform
to the same files and directories used by these functions.
The relevant files and directories are:
/etc/init.d/supervisor:
The "init script" for supervisor that allows supervisord to be automatically run
automatically at system startup. Note that on some systems (e.g., Amazon Linux AMI)
that the existence of this file isn't sufficient to guarantee automatic launch
after boot, and use of the program chkconfig is required. The functions in this
module will use chkconfig where necessary, if it exists.
/etc/supervisord.conf:
The root or main supervisor configuration file. This file is read by supervisord
when it launches. This file contains an include directive that tells supervisord
to also load configurations from a different directory.
/etc/supervisor/conf.d/:
The directory indicated by the include directive in the root supervisor configuration
file. Individual program configurations, the "[program:x]" section for that program,
are contained in files in this directory.
/etc/supervisor/conf.d/*.conf:
Individual program configuration files containing a program's "[program:x]" section.
<deploy_root>/<name>/logs/supervisor.log:
The supervisord log file for an individual program. This is the default location,
and it can be overridden in the call to write_program_config().
For more information on supervisor check out: http://supervisord.org/
Idea and code (heavily modified) for this module taken from Brent Tubb's "silk" project.
:copyright: (c) 2013 by Rick Bohrer.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
# standard
import posixpath as path
import time
# pypi
from fabric.operations import run, sudo
# package
from fabcloudkit import cfg, put_string
from ..internal import *
from ..toolbase import *
class SupervisorTool(Tool):
    """Tool that checks for, installs, configures, and manages supervisord
    on a remote machine via fabric."""

    def check(self, **kwargs):
        """
        Detects if supervisor is installed on the remote machine.

        :return: True if "supervisord --version" succeeds, False otherwise.
        """
        start_msg('----- Checking for "supervisord" installation:')
        result = run('supervisord --version')
        if result.return_code != 0:
            failed_msg('"supervisord" is not installed.')
            return False
        succeed_msg('"supervisord" is installed ({0}).'.format(result))
        return True

    def install(self, **kwargs):
        """
        Installs and configures supervisor on the remote machine.

        Supervisor is installed via easy_install, a supervisord.conf file is created with an
        entry to include additional conf files in the directory: cfg().supervisord_include_conf.
        An init.d script is written, and if the program "chkconfig" exists, supervisor is added.

        :return: self (for chaining).
        :raises HaltError: if any installation or configuration step fails.
        """
        start_msg('----- Install "supervisord" via "easy_install".')
        result = sudo('easy_install supervisor')
        if result.return_code != 0:
            # BUG FIX: the original constructed HaltError without raising it,
            # silently ignoring a failed installation.
            raise HaltError('Failed to install "supervisord".')
        result = run('which supervisord')
        if result.return_code != 0:
            raise HaltError('Confusion: just installed "supervisord" but its not there?')
        message('Install successful; setting configuration.')

        # create the root supervisord.conf with an [include] entry that allows loading
        # additional configuration from the folder: /etc/supervisor/conf.d/*.conf
        # we use this location for site-specific config (e.g., a site's [program] section).
        # start with the default conf file; the grep strips comment lines.
        result = run('echo_supervisord_conf | grep "^;.*$" -v')
        if result.failed:
            raise HaltError('Unable to retrieve default supervisord configuration.')

        # build the new configuration by just appending the include definition,
        # then write it to: /etc/supervisord.conf
        files = path.join(cfg().supervisord_include_conf, '*.conf')
        new_conf = '{result}\n\n[include]\nfiles = {files}\n'.format(**locals())
        put_string(new_conf, '/etc/supervisord.conf', use_sudo=True)

        # make sure the include directory exists.
        result = sudo('mkdir -p {0}'.format(cfg().supervisord_include_conf))
        if result.failed:
            raise HaltError('Unable to create include dir: "{0}"'
                            .format(cfg().supervisord_include_conf))

        # finally write an init-script to /etc/init.d so supervisord gets run at startup.
        # 0o755 (rwxr-xr-x) replaces the Python 2-only octal literal 00755.
        # TODO: write system-dependent script using "uname -s": OSX=Darwin, Amazon Linux AMI=Linux, ??
        put_string(_INIT_SCRIPT_LINUX, '/etc/init.d/supervisor', use_sudo=True, mode=0o755)

        # the Amazon Linux AMI uses chkconfig; the init.d script won't do the job by itself.
        # set supervisord so it can be managed by chkconfig; and turn on boot startup.
        # ubuntu (and Debian?) use UpStart or update-rc.d, so check them out.
        result = run('which chkconfig')
        if result.succeeded:
            message('System has chkconfig; configuring.')
            result = sudo('chkconfig --add supervisor')
            if result.failed:
                raise HaltError('"chkconfig --add supervisor" failed.')
            result = sudo('chkconfig supervisor on')
            if result.failed:
                raise HaltError('"chkconfig supervisor on" failed.')
        succeed_msg('"supervisord" is installed ({0}).'.format(result))
        return self

    def write_config(self, name, cmd, dir=None, log_root=None, env=None):
        """
        Writes a supervisor [program] entry to a "conf" file.

        The conf file is named "<name>.conf", and is located in the directory identified
        by cfg().supervisord_include_conf. Calling this method is typically followed soon
        after by a call to reload().

        :param name: specifies the program name.
        :param cmd: specifies the command to start the program.
        :param dir: specifies the directory to chdir to before executing command. default: no chdir.
        :param log_root: specifies the location for supervisor log file.
            default is 'logs' in the deployment root directory.
        :param env: specifies the child process environment. default: None.
        :return: self (for chaining).
        :raises HaltError: if the log directory cannot be created.
        """
        start_msg('----- Writing supervisor conf file for "{0}":'.format(name))
        if dir is None: dir = ''
        if env is None: env = ''
        if not log_root:
            log_root = path.join(cfg().deploy_root, 'logs')

        # first be sure the log directory exists. if not, supervisor will fail to load the config.
        result = sudo('mkdir -p {0}'.format(log_root))
        if result.failed:
            raise HaltError('Unable to create log directory: "{0}"'.format(log_root))

        # now write the entry.
        entry = (
            "[program:{name}]\n"
            "command={cmd}\n"
            "directory={dir}\n"
            "user=nobody\n"
            "autostart=true\n"
            "autorestart=true\n"
            "stdout_logfile={log_root}/{name}.log\n"
            "redirect_stderr=True\n"
            "environment={env}\n".format(**locals()))
        dest = path.join(cfg().supervisord_include_conf, '{name}.conf'.format(**locals()))
        message('Writing to file: "{0}"'.format(dest))
        put_string(entry, dest, use_sudo=True)
        succeed_msg('Wrote conf file for "{0}".'.format(name))
        return self

    def delete_config(self, name):
        """
        Deletes a program entry previously written by write_config().

        :param name: specifies the program name used with write_config().
        :return: self (for chaining).
        :raises HaltError: if the conf file cannot be removed.
        """
        start_msg('----- Removing supervisor program entry for "{0}":'.format(name))
        dest = path.join(cfg().supervisord_include_conf, '{name}.conf'.format(**locals()))
        result = sudo('rm -f {dest}'.format(**locals()))
        if result.failed:
            raise HaltError('Unable to remove entry.')
        succeed_msg('Removed successfully.')
        return self

    def reload(self):
        """
        Tells supervisor to reload its configuration. This method is normally used after
        writing or deleting program entries to update the currently running supervisord.

        :return: self (for chaining).
        :raises HaltError: if "supervisorctl update" fails or reports an error.
        """
        start_msg('----- Telling supervisor to reread configuration:')
        result = sudo('supervisorctl update')
        # supervisorctl can exit 0 yet still print an error, so check the output too.
        if result.failed or 'error' in result.lower():
            raise HaltError('"supervisorctl update" failed ({0}).'.format(result))
        succeed_msg('Successfully reloaded.')
        return self

    def start(self, name):
        """
        Starts monitoring the specified program.

        The write_config() method should have been called previously. This method will
        cause supervisor to reread its configuration, then it will add the specified
        program to the list of active programs.

        :param name: identifies the program name (same as that used with write_config()).
        :return: self (for chaining).
        :raises HaltError: if the program cannot be added.
        """
        start_msg('----- Starting supervisord monitoring of "{0}":'.format(name))
        # reload configuration so supervisord knows about the program, then start monitoring.
        self.reload()
        result = sudo('supervisorctl add {0}'.format(name))
        if result.failed:
            raise HaltError('Failed to add "{0}" to supervisor.'.format(name))
        succeed_msg('Monitoring of "{0}" started.'.format(name))
        return self

    def stop_and_remove(self, name):
        """
        Stops monitoring the specified program.

        Stops monitoring, removes the program from the list of active programs, removes
        the program's config file using delete_config(), then causes supervisord to
        reread configuration.

        :param name: identifies the program name (same as that used with write_config()).
        :return: self (for chaining).
        """
        start_msg('----- Stopping supervisord monitoring and removing program "{0}":'.format(name))
        # tell supervisord to stop and remove the program. failures here are deliberately
        # non-fatal: the program may already be stopped or unknown to supervisord.
        result = sudo('supervisorctl stop {0}'.format(name))
        if result.failed:
            message('Ignoring "supervisorctl stop {0}" failure ({1})'.format(name, result))
        result = sudo('supervisorctl remove {0}'.format(name))
        if result.failed:
            message('Ignoring "supervisorctl remove {0}" failure ({1})'.format(name, result))
        # remove the program from configuration and reload.
        self.delete_config(name).reload()
        succeed_msg('Stopped monitoring and removed program "{0}".'.format(name))
        return self

    def get_status(self, name):
        """
        Returns the supervisor status for the specified program.

        :param name: name of the program (same as that used with write_config()).
        :return: status string (e.g., 'RUNNING', 'FATAL')
        :raises HaltError: if "supervisorctl status" fails.
        """
        result = sudo('supervisorctl status {0}'.format(name))
        if result.failed:
            raise HaltError('Unable to get status for "{0}".'.format(name))
        # output looks like "<name> RUNNING pid 628, uptime 0:34:13"; the status
        # is the second whitespace-delimited token.
        return result.strip().split()[1]

    def wait_until_running(self, name, tries=3, wait=2):
        """
        Waits until the specified supervisor program has a running status or has failed.

        Checks the status up to tries+1 times, doubling the wait between checks
        (simple exponential backoff). Status output from supervisorctl looks like::

            mysite_20110623_162319 RUNNING pid 628, uptime 0:34:13
            mysite_20110623_231206 FATAL Exited too quickly (process log may have details)

        :param name: name of the program (same as that used with write_config()).
        :param tries: number of times to retry the status check.
        :param wait: initial wait time (seconds) between status checks.
        :return: True if the program reached 'RUNNING', False otherwise.
        """
        try:
            status = self.get_status(name)
        except HaltError:
            status = 'EXCEPTION'
        if status == 'RUNNING':
            succeed_msg('Found "RUNNING" status for program "{0}".'.format(name))
            return True
        elif status == 'FATAL':
            failed_msg('Program seems to have failed.')
            return False
        elif status == 'EXCEPTION':
            failed_msg('Unable to get program status; assuming it failed.')
            return False
        # status is transitional (e.g. 'STARTING'); retry with doubled wait.
        if tries > 0:
            message('Status({name})="{status}", waiting: tries={tries}, wait={wait}.'.format(**locals()))
            time.sleep(wait)
            return self.wait_until_running(name, tries-1, wait*2)
        failed_msg('Did not see a "RUNNING" status for program "{0}"; assuming it failed.'.format(name))
        return False
# register this tool under the name used for fabcloudkit tool lookup.
Tool.__tools__['supervisord'] = SupervisorTool
_INIT_SCRIPT_LINUX = """
#!/bin/sh
# Amazon Linux AMI startup script for a supervisor instance
#
# chkconfig: 2345 80 20
# description: Autostarts supervisord.
# Source function library.
. /etc/rc.d/init.d/functions
supervisorctl="/usr/bin/supervisorctl"
supervisord="/usr/bin/supervisord"
name="supervisor-python"
[ -f $supervisord ] || exit 1
[ -f $supervisorctl ] || exit 1
RETVAL=0
start() {
echo -n "Starting $name: "
$supervisord
RETVAL=$?
echo
return $RETVAL
}
stop() {
echo -n "Stopping $name: "
$supervisorctl shutdown
RETVAL=$?
echo
return $RETVAL
}
case "$1" in
start)
start
;;
stop)
stop
;;
restart)
stop
start
;;
esac
exit $RETVAL
""".lstrip()
_INIT_SCRIPT_UBUNTU = """
#! /bin/sh
#
# skeleton example file to build /etc/init.d/ scripts.
# This file should be used to construct scripts for /etc/init.d.
#
# Written by Miquel van Smoorenburg <miquels@cistron.nl>.
# Modified for Debian
# by Ian Murdock <imurdock@gnu.ai.mit.edu>.
# Further changes by Javier Fernandez-Sanguino <jfs@debian.org>
#
# Version: @(#)skeleton 1.9 26-Feb-2001 miquels@cistron.nl
#
### BEGIN INIT INFO
# Provides: supervisor
# Required-Start: $remote_fs $network $named
# Required-Stop: $remote_fs $network $named
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Start/stop supervisor
# Description: Start/stop supervisor daemon and its configured
# subprocesses.
### END INIT INFO
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
DAEMON=/usr/bin/supervisord
NAME=supervisord
DESC=supervisor
test -x $DAEMON || exit 0
LOGDIR=/var/log/supervisor
PIDFILE=/var/run/$NAME.pid
DODTIME=5 # Time to wait for the server to die, in seconds
# If this value is set too low you might not
# let some servers to die gracefully and
# 'restart' will not work
# Include supervisor defaults if available
if [ -f /etc/default/supervisor ] ; then
. /etc/default/supervisor
fi
set -e
running_pid()
{
# Check if a given process pid's cmdline matches a given name
pid=$1
name=$2
[ -z "$pid" ] && return 1
[ ! -d /proc/$pid ] && return 1
(cat /proc/$pid/cmdline | tr "\000" "\n"|grep -q $name) || return 1
return 0
}
running()
{
# Check if the process is running looking at /proc
# (works for all users)
# No pidfile, probably no daemon present
[ ! -f "$PIDFILE" ] && return 1
# Obtain the pid and check it against the binary name
pid=`cat $PIDFILE`
running_pid $pid $DAEMON || return 1
return 0
}
force_stop() {
# Forcefully kill the process
[ ! -f "$PIDFILE" ] && return
if running ; then
kill -15 $pid
# Is it really dead?
[ -n "$DODTIME" ] && sleep "$DODTIME"s
if running ; then
kill -9 $pid
[ -n "$DODTIME" ] && sleep "$DODTIME"s
if running ; then
echo "Cannot kill $LABEL (pid=$pid)!"
exit 1
fi
fi
fi
rm -f $PIDFILE
return 0
}
case "$1" in
start)
echo -n "Starting $DESC: "
start-stop-daemon --start --quiet --pidfile $PIDFILE \
--exec $DAEMON -- $DAEMON_OPTS
test -f $PIDFILE || sleep 1
if running ; then
echo "$NAME."
else
echo " ERROR."
fi
;;
stop)
echo -n "Stopping $DESC: "
start-stop-daemon --stop --quiet --oknodo --pidfile $PIDFILE
echo "$NAME."
;;
force-stop)
echo -n "Forcefully stopping $DESC: "
force_stop
if ! running ; then
echo "$NAME."
else
echo " ERROR."
fi
;;
#reload)
#
# If the daemon can reload its config files on the fly
# for example by sending it SIGHUP, do it here.
#
# If the daemon responds to changes in its config file
# directly anyway, make this a do-nothing entry.
#
# echo "Reloading $DESC configuration files."
# start-stop-daemon --stop --signal 1 --quiet --pidfile \
# /var/run/$NAME.pid --exec $DAEMON
#;;
force-reload)
#
# If the "reload" option is implemented, move the "force-reload"
# option to the "reload" entry above. If not, "force-reload" is
# just the same as "restart" except that it does nothing if the
# daemon isn't already running.
# check wether $DAEMON is running. If so, restart
start-stop-daemon --stop --test --quiet --pidfile \
/var/run/$NAME.pid --exec $DAEMON \
&& $0 restart \
|| exit 0
;;
restart)
echo -n "Restarting $DESC: "
start-stop-daemon --stop --quiet --pidfile \
/var/run/$NAME.pid --exec $DAEMON
[ -n "$DODTIME" ] && sleep $DODTIME
start-stop-daemon --start --quiet --pidfile \
/var/run/$NAME.pid --exec $DAEMON -- $DAEMON_OPTS
echo "$NAME."
;;
status)
echo -n "$LABEL is "
if running ; then
echo "running"
else
echo " not running."
exit 1
fi
;;
*)
N=/etc/init.d/$NAME
# echo "Usage: $N {start|stop|restart|reload|force-reload}" >&2
echo "Usage: $N {start|stop|restart|force-reload|status|force-stop}" >&2
exit 1
;;
esac
exit 0
""".lstrip()
| {
"content_hash": "7fdfab1f4fa6168eead75deaedefa272",
"timestamp": "",
"source": "github",
"line_count": 539,
"max_line_length": 106,
"avg_line_length": 34.534322820037104,
"alnum_prop": 0.624529923713334,
"repo_name": "waxkinetic/fabcloudkit",
"id": "aa422619cdd6b508fbf5bd8a20723be7d0bd0c7c",
"size": "18614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabcloudkit/tool/supervisord.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "103982"
}
],
"symlink_target": ""
} |
import operator
import six
from nova.openstack.common import jsonutils
from paxes_nova.scheduler import filters
class JsonFilter(filters.BaseHostFilter):
    """Host Filter to allow simple JSON-based grammar for
    selecting hosts.
    """

    def _op_compare(self, args, op):
        """Returns True if the specified operator can successfully
        compare the first item in the args with all the rest. Will
        return False if only one item is in the list.
        """
        if len(args) < 2:
            return False
        if op is operator.contains:
            # operator.contains takes (container, item), so test membership of
            # args[0] in the remaining values directly.
            bad = args[0] not in args[1:]
        else:
            # collect every comparison that fails; empty list means all passed.
            bad = [arg for arg in args[1:]
                   if not op(args[0], arg)]
        return not bool(bad)

    def _equals(self, args):
        """First term is == all the other terms."""
        return self._op_compare(args, operator.eq)

    def _less_than(self, args):
        """First term is < all the other terms."""
        return self._op_compare(args, operator.lt)

    def _greater_than(self, args):
        """First term is > all the other terms."""
        return self._op_compare(args, operator.gt)

    def _in(self, args):
        """First term is in set of remaining terms."""
        return self._op_compare(args, operator.contains)

    def _less_than_equal(self, args):
        """First term is <= all the other terms."""
        return self._op_compare(args, operator.le)

    def _greater_than_equal(self, args):
        """First term is >= all the other terms."""
        return self._op_compare(args, operator.ge)

    def _not(self, args):
        """Flip each of the arguments."""
        # NOTE: returns a list of negated values, not a single boolean;
        # host_passes() collapses a list result with any().
        return [not arg for arg in args]

    def _or(self, args):
        """True if any arg is True."""
        return any(args)

    def _and(self, args):
        """True if all args are True."""
        return all(args)

    # Dispatch table mapping a query operator token to its handler. The values
    # are the plain functions defined above (not bound methods), which is why
    # _process_filter calls them as method(self, cooked_args).
    commands = {
        '=': _equals,
        '<': _less_than,
        '>': _greater_than,
        'in': _in,
        '<=': _less_than_equal,
        '>=': _greater_than_equal,
        'not': _not,
        'or': _or,
        'and': _and,
    }

    def _parse_string(self, string, host_state):
        """Strings prefixed with $ are capability lookups in the
        form '$variable' where 'variable' is an attribute in the
        HostState class. If $variable is a dictionary, you may
        use: $variable.dictkey
        """
        if not string:
            return None
        if not string.startswith("$"):
            # plain literal: used as-is.
            return string

        path = string[1:].split(".")
        obj = getattr(host_state, path[0], None)
        if obj is None:
            return None
        # walk any remaining dotted segments through nested dict lookups.
        for item in path[1:]:
            obj = obj.get(item, None)
            if obj is None:
                return None
        return obj

    def _process_filter(self, query, host_state):
        """Recursively parse the query structure."""
        if not query:
            return True
        # a query is a list whose first element names the operator.
        cmd = query[0]
        method = self.commands[cmd]
        cooked_args = []
        for arg in query[1:]:
            if isinstance(arg, list):
                # nested sub-query: evaluate it first.
                arg = self._process_filter(arg, host_state)
            elif isinstance(arg, six.string_types):
                arg = self._parse_string(arg, host_state)
            # unresolvable arguments (None) are silently dropped.
            if arg is not None:
                cooked_args.append(arg)
        # commands holds plain functions, so self is passed explicitly.
        result = method(self, cooked_args)
        return result

    def host_passes(self, host_state, filter_properties):
        """Return a list of hosts that can fulfill the requirements
        specified in the query.
        """
        try:
            query = filter_properties['scheduler_hints']['query']
        except KeyError:
            query = None
        if not query:
            # no query supplied: accept every host.
            return True

        # NOTE(comstud): Not checking capabilities or service for
        # enabled/disabled so that a provided json filter can decide
        result = self._process_filter(jsonutils.loads(query), host_state)
        if isinstance(result, list):
            # If any succeeded, include the host
            result = any(result)
        if result:
            # Filter it out.
            return True
        return False
| {
"content_hash": "0bd3c01400871c119cb1be65652a60da",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 73,
"avg_line_length": 31.32330827067669,
"alnum_prop": 0.5544887181949112,
"repo_name": "windskyer/k_nova",
"id": "1be8eeb17ce76816e0631bc67a4fb8744bda2c9e",
"size": "4807",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "paxes_nova/scheduler/filters/json_filter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "371"
},
{
"name": "HTML",
"bytes": "2364"
},
{
"name": "JavaScript",
"bytes": "116320"
},
{
"name": "Python",
"bytes": "3193811"
},
{
"name": "Shell",
"bytes": "7129"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages

# Distribution metadata for cbtests. The JSON fixtures under
# cbtests/fixtures are bundled via package_data/include_package_data.
setup(name='cbtests',
      version='0.2',
      description='Simple tests for json api testing',
      url='https://github.com/codebolab/cbtests',
      author='code.bo',
      author_email='contact@josezambrana.com',
      license='MIT',
      packages=find_packages(),
      include_package_data=True,
      package_data={'cbtests': ['fixtures/*.json']},
      zip_safe=False)
| {
"content_hash": "a1fc3637aff97d06d50363c18f0a187a",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 54,
"avg_line_length": 32.69230769230769,
"alnum_prop": 0.6423529411764706,
"repo_name": "codebolab/cbtests",
"id": "3bfdf8cb1fa28a880cd548a8cb8792efc6905c1c",
"size": "425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6494"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.core import validators
from django.db import models, migrations
from django.utils import timezone
class Migration(migrations.Migration):
    """Initial migration for django.contrib.auth: creates the Permission,
    Group, and User models."""

    # contenttypes must be migrated first because Permission below has a
    # foreign key to contenttypes.ContentType.
    dependencies = [
        ('contenttypes', '__first__'),
    ]

    operations = [
        migrations.CreateModel(
            name='Permission',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=50, verbose_name='name')),
                ('content_type', models.ForeignKey(to='contenttypes.ContentType', to_field='id')),
                ('codename', models.CharField(max_length=100, verbose_name='codename')),
            ],
            options={
                'ordering': ('content_type__app_label', 'content_type__model', 'codename'),
                # a codename is unique per content type, not globally.
                'unique_together': set([('content_type', 'codename')]),
                'verbose_name': 'permission',
                'verbose_name_plural': 'permissions',
            },
        ),
        migrations.CreateModel(
            name='Group',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=80, verbose_name='name')),
                ('permissions', models.ManyToManyField(to='auth.Permission', verbose_name='permissions', blank=True)),
            ],
            options={
                'verbose_name': 'group',
                'verbose_name_plural': 'groups',
            },
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(default=timezone.now, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, max_length=30, verbose_name='username', validators=[validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username.', 'invalid')])),
                ('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
                ('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
                ('email', models.EmailField(max_length=75, verbose_name='email address', blank=True)),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=timezone.now, verbose_name='date joined')),
                ('groups', models.ManyToManyField(to='auth.Group', verbose_name='groups', blank=True)),
                ('user_permissions', models.ManyToManyField(to='auth.Permission', verbose_name='user permissions', blank=True)),
            ],
            options={
                # 'swappable' lets projects substitute a custom user model via
                # the AUTH_USER_MODEL setting.
                'swappable': 'AUTH_USER_MODEL',
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
            },
        ),
    ]
| {
"content_hash": "d13325ab342d4d59446c37f4b4a516ac",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 277,
"avg_line_length": 57.13846153846154,
"alnum_prop": 0.5893914916532041,
"repo_name": "dhoffman34/django",
"id": "de38c960fbf60a21896b07bdf0ca978d1a717ba8",
"size": "3738",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "django/contrib/auth/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import pytest
from team.person import Person
def test_init_from_args():
    """A Person built from keyword arguments exposes them as attributes."""
    subject = Person(name="Theodore Burton Fox Ruoff", github="nobody")
    assert subject.github == "nobody"
    assert subject.name == "Theodore Burton Fox Ruoff"
def test_init_from_dict():
    """A Person built from a single dict exposes its keys as attributes."""
    attributes = {"name": "Theodore Burton Fox Ruoff", "github": "nobody"}
    subject = Person(attributes)
    assert subject.github == "nobody"
    assert subject.name == "Theodore Burton Fox Ruoff"
| {
"content_hash": "9c8116e60a7aac04faa7b89fd5d67577",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 70,
"avg_line_length": 34.23076923076923,
"alnum_prop": 0.6786516853932584,
"repo_name": "LandRegistry/team-dashboard",
"id": "6ef10af94f2b4cfb67012a61fb7d9779c6f0c84e",
"size": "445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_person.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1580"
},
{
"name": "Python",
"bytes": "11682"
},
{
"name": "Shell",
"bytes": "429"
}
],
"symlink_target": ""
} |
from setuptools import setup
def readme():
    """Return the full text of README.md (used for the long description)."""
    with open('README.md') as handle:
        return handle.read()
# Distribution metadata for zaphodIFU; the installable package directory is
# 'fake', and tests run through nose ("python setup.py test").
setup(name='zaphodIFU',
      version='0.1',
      description='Fake IFU generator for MaNGA spectral fitting diagnosis',
      url='http://github.com/zpace/zaphod',
      author='Zach Pace',
      author_email='zpace@astro.wisc.edu',
      license='MIT',
      packages=['fake'],
      install_requires=[
          'numpy'],
      include_package_data=True,
      zip_safe=False,
      test_suite='nose.collector',
      tests_require=['nose', 'nose-cover3'])
| {
"content_hash": "87bdb41d55f577de61b9f4926f5ebcff",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 76,
"avg_line_length": 26.857142857142858,
"alnum_prop": 0.6081560283687943,
"repo_name": "zpace/zaphod",
"id": "546cb58fff380448b5089323a2ebc42012cb549e",
"size": "564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "134610"
},
{
"name": "Python",
"bytes": "5370"
}
],
"symlink_target": ""
} |
# Set up gettext translation for the 'fusion' domain at package import time.
# NOTE(review): presumably this installs the ``_()`` translation function into
# builtins (the openstack.common convention) -- confirm in gettextutils.
from fusion.openstack.common import gettextutils
gettextutils.install('fusion')
| {
"content_hash": "295c2bf408405cd9c131bbf2d797e9c9",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 48,
"avg_line_length": 27,
"alnum_prop": 0.8395061728395061,
"repo_name": "vineethtw/fusion",
"id": "9bd52036b66bb799b3aef0c4ee122ebdea8c448e",
"size": "751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fusion/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "253536"
}
],
"symlink_target": ""
} |
# Odoo/OpenERP addon manifest: evaluated as a plain dict by the module loader.
{
    'name': 'United Backend Theme for Odoo 9c',
    'version': '0.3',
    'author': 'Openworx',
    'category': 'Website',
    'summary': 'Give Odoo 9 community version a cleaner backend theme',
    'website': 'http://www.openworx.nl',
    'description': """
Give Odoo 9 community version a cleaner theme, based on Bootstrap United template. The theme has also some css fixes for Microsoft Internet Explorer 11.
""",
    'images': [
        'images/sales.png'
    ],
    'depends': ['base'],
    # XML records loaded when the addon is installed.
    'data': [
        'views.xml',
    ],
    'installable': True
}
| {
"content_hash": "34176faa4549ab75e86936116ce06168",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 152,
"avg_line_length": 30.210526315789473,
"alnum_prop": 0.5905923344947736,
"repo_name": "vileopratama/vitech",
"id": "05b488608cf0b9f7cae5388415009c5d66b4d928",
"size": "574",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/addons/united_backend_theme/__openerp__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "2125999"
},
{
"name": "HTML",
"bytes": "252393"
},
{
"name": "Java",
"bytes": "1840167"
},
{
"name": "JavaScript",
"bytes": "6176224"
},
{
"name": "Makefile",
"bytes": "19072"
},
{
"name": "Mako",
"bytes": "7659"
},
{
"name": "NSIS",
"bytes": "16782"
},
{
"name": "Python",
"bytes": "9438805"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "22312"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "11489"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import argparse
import json
import sys
from cgi import escape
from collections import defaultdict
import types
def html_escape(item, escape_quote=False):
    """Return *item* with HTML metacharacters escaped.

    Strings are escaped (optionally including double quotes); any other
    value - e.g. a Raw wrapper or a number - is returned unchanged so it
    can be embedded verbatim.
    """
    # types.StringTypes only exists on Python 2; fall back to (str,) so
    # this module no longer raises AttributeError when run on Python 3.
    string_types = getattr(types, "StringTypes", (str,))
    if isinstance(item, string_types):
        rv = escape(item)
        if escape_quote:
            rv = rv.replace('"', "&quot;")
        return rv
    else:
        return item
class Raw(object):
    """Marker wrapper for pre-escaped markup.

    html_escape returns non-string objects untouched, so wrapping a
    string in Raw lets callers embed literal HTML in the output.
    """

    def __init__(self, value):
        # Stored verbatim; emitted as-is when the document is rendered.
        self.value = value

    def __unicode__(self):
        # NOTE(review): the ``unicode`` builtin makes this Python 2 only.
        return unicode(self.value)
class Node(object):
    """A single HTML element: tag name, attribute dict and child list."""

    def __init__(self, name, attrs, children):
        # TODO(review): void elements (<br>, <meta>, ...) still get a
        # closing tag when serialised.
        self.name = name
        self.attrs = attrs
        self.children = children

    def __unicode__(self):
        if self.attrs:
            rendered = ("%s=\"%s\"" % (html_escape(key),
                                       html_escape(value,
                                                   escape_quote=True))
                        for key, value in self.attrs.items())
            attrs_unicode = " " + " ".join(rendered)
        else:
            attrs_unicode = ""
        body = "".join(unicode(html_escape(item))
                       for item in self.children)
        return "<%s%s>%s</%s>\n" % (self.name, attrs_unicode, body,
                                    self.name)

    def __str__(self):
        return unicode(self).encode("utf8")
class RootNode(object):
    """Document root; prepends the HTML5 doctype to its children."""

    def __init__(self, *children):
        self.children = ["<!DOCTYPE html>"]
        self.children.extend(children)

    def __unicode__(self):
        parts = [unicode(item) for item in self.children]
        return "".join(parts)

    def __str__(self):
        return unicode(self).encode("utf8")
def flatten(iterable):
    """Flatten a list of lists by one level so that
    [1,["abc"], "def",[2, [3]]]
    becomes
    [1, "abc", "def", 2, [3]]

    Strings are treated as atoms, never as iterables of characters.
    """
    # types.StringTypes only exists on Python 2; fall back to (str,) so
    # this function does not raise AttributeError on Python 3, where
    # str *does* have __iter__ and would otherwise be exploded.
    string_types = getattr(types, "StringTypes", (str,))
    rv = []
    for item in iterable:
        if hasattr(item, "__iter__") and not isinstance(item, string_types):
            rv.extend(item)
        else:
            rv.append(item)
    return rv
class HTML(object):
    """Simple HTML templating system. An instance of this class can create
    element nodes by calling methods with the same name as the element,
    passing in children as positional arguments or as a list, and attributes
    as keyword arguments, with _ replacing - and trailing _ for python keywords

    e.g.
    h = HTML()
    print(h.html(
        html.head(),
        html.body([html.h1("Hello World!")], class_="body-class")
    ))
    Would give
    <!DOCTYPE html><html><head></head><body class="body-class"><h1>Hello World!</h1></body></html>"""

    def __getattr__(self, name):
        # Called only on the *first* access of each tag name: the factory
        # built here is cached on the instance via setattr below, so later
        # h.<name> lookups bypass __getattr__ entirely.
        def make_html(self, *content, **attrs):
            # Translate python-safe keyword names back into HTML attribute
            # names: underscores become hyphens, and a trailing underscore
            # (used for keywords like class_) is dropped.
            # NOTE(review): mutating attrs while iterating attrs.keys() is
            # only safe on Python 2, where keys() returns a list.
            for attr_name in attrs.keys():
                if "_" in attr_name:
                    new_name = attr_name.replace("_", "-")
                    if new_name.endswith("-"):
                        new_name = new_name[:-1]
                    attrs[new_name] = attrs.pop(attr_name)
            return Node(name, attrs, flatten(content))
        # NOTE(review): three-argument types.MethodType is Python 2 only.
        method = types.MethodType(make_html, self, HTML)
        setattr(self, name, method)
        return method

    def __call__(self, *children):
        # Calling the instance itself wraps children in the document root.
        return RootNode(*flatten(children))
# Module-level singleton; all templating helpers below render through it.
h = HTML()
class TestResult(object):
    """Per-test holder of results keyed by UA.

    Identity derives from the test id alone, so instances may serve as
    dictionary keys.
    """

    def __init__(self, test):
        self.test = test
        self.results = {}

    def __cmp__(self, other):
        # NOTE(review): Python 2 comparison hook; returns a bool rather
        # than the conventional -1/0/1.
        return self.test == other.test

    def __hash__(self):
        return hash(self.test)
def load_data(args):
    """Load data treating args as a list of UA name, filename pairs.

    Returns a dict mapping each UA name to the parsed JSON content of
    its results file.
    """
    # xrange is Python 2 only; range behaves identically here because
    # the sequence is only iterated, never held as a list.
    pairs = []
    for i in range(0, len(args), 2):
        pairs.append(args[i:i+2])
    rv = {}
    for UA, filename in pairs:
        with open(filename) as f:
            rv[UA] = json.load(f)
    return rv
def test_id(id):
"""Convert a test id in JSON into an immutable object that
can be used as a dictionary key"""
if isinstance(id, list):
return tuple(id)
else:
return id
def all_tests(data):
    """Map every test id to the union of subtest names seen in any UA."""
    tests = defaultdict(set)
    for ua_results in data.values():
        for result in ua_results["results"]:
            key = test_id(result["test"])
            tests[key].update(subtest["name"]
                              for subtest in result["subtests"])
    return tests
def group_results(data):
    """Pivot per-UA result files into a per-test view.

    Returns ``(UAs, results_by_test)`` where ``results_by_test`` maps a
    test id to ``{"harness": {UA: (status, message)},
    "subtests": {name: {UA: (status, message)}}}``.  A ``(None, None)``
    entry means the test/subtest did not run in that UA; ``message`` is
    ``None`` when no message was produced.
    """
    tests = all_tests(data)
    UAs = data.keys()

    def empty_result():
        return {
            "harness": dict((UA, (None, None)) for UA in UAs),
            # Filled in lazily once the full subtest name set is known.
            "subtests": None
        }

    results_by_test = defaultdict(empty_result)
    for UA, results in data.items():
        for test_data in results["results"]:
            key = test_id(test_data["test"])
            entry = results_by_test[key]
            if entry["subtests"] is None:
                entry["subtests"] = dict(
                    (name, dict((UA, (None, None)) for UA in UAs))
                    for name in tests[key]
                )
            entry["harness"][UA] = (test_data["status"],
                                    test_data["message"])
            for subtest in test_data["subtests"]:
                entry["subtests"][subtest["name"]][UA] = (
                    subtest["status"], subtest["message"])
    return UAs, results_by_test
def status_cell(status, message=None):
    """Render a <td> for one test status, message as the title tooltip."""
    if status is None:
        status = "NONE"
    extra = {"title": message} if message else {}
    return h.td(status.title(), class_="status " + status, **extra)
def test_link(test_id, subtest=None):
    """Build link element(s) for a test id, optionally tagged with the
    subtest name in brackets.

    Three-element ids become two links joined by the middle element;
    plain string ids become a single link.
    """
    if isinstance(test_id, types.StringTypes):
        parts = [h.a(test_id, href=test_id)]
    else:
        parts = [h.a(test_id[0], href=test_id[0]),
                 " %s " % test_id[1],
                 h.a(test_id[2], href=test_id[2])]
    if subtest is not None:
        parts.append(" [%s]" % subtest)
    return parts
def summary(UAs, results_by_test):
    """Render the summary: tests passing nowhere, or an all-clear note."""
    failing = []
    for test, results in results_by_test.items():
        harness_statuses = [item[0]
                            for item in results["harness"].values()]
        if not any(s in ("PASS", "OK") for s in harness_statuses):
            failing.append((test, None))
        for name, per_ua in results["subtests"].items():
            if not any(item[0] == "PASS" for item in per_ua.values()):
                failing.append((test, name))
    if not failing:
        return "All tests passed in at least one UA"
    return [
        h.p("The following tests failed to pass in all UAs:"),
        h.ul([h.li(test_link(test, subtest))
              for test, subtest in failing])
    ]
def result_rows(UAs, test, result):
    """Yield one <tr> for the harness status plus one per subtest."""
    harness_cells = [status_cell(status, message)
                     for UA, (status, message)
                     in sorted(result["harness"].items())]
    # The test-name cell spans the harness row and all subtest rows.
    yield h.tr(
        h.td(
            test_link(test),
            rowspan=(1 + len(result["subtests"]))
        ),
        h.td(),
        harness_cells,
        class_="test"
    )
    for name, subtest_result in sorted(result["subtests"].items()):
        subtest_cells = [status_cell(status, message)
                         for UA, (status, message)
                         in sorted(subtest_result.items())]
        yield h.tr(
            h.td(name),
            subtest_cells,
            class_="subtest"
        )
def result_bodies(UAs, results_by_test):
    """Build one <tbody> per test, ordered by test id."""
    bodies = []
    for test, result in sorted(results_by_test.items()):
        bodies.append(h.tbody(result_rows(UAs, test, result)))
    return bodies
def generate_html(UAs, results_by_test):
    """Assemble the complete implementation-report document."""
    head = h.head(
        h.meta(charset="utf8"),
        h.title("Implementation Report"),
        h.link(href="report.css", rel="stylesheet"))
    results_table = h.table(
        h.thead(
            h.tr(
                h.th("Test"),
                h.th("Subtest"),
                [h.th(UA) for UA in sorted(UAs)])),
        result_bodies(UAs, results_by_test))
    body = h.body(
        h.h1("Implementation Report"),
        h.h2("Summary"),
        summary(UAs, results_by_test),
        h.h2("Full Results"),
        results_table)
    return h(h.html(head, body))
def main(filenames):
    """Entry point: parse the result files and return the rendered report."""
    ua_data = load_data(filenames)
    UAs, results_by_test = group_results(ua_data)
    return generate_html(UAs, results_by_test)
if __name__ == "__main__":
    # NOTE(review): with no arguments this only prints usage and then
    # still falls through to render (an empty) report.
    if not sys.argv[1:]:
        print("""Please supply a list of UA name, filename pairs e.g.

python report.py Firefox firefox.json Chrome chrome.json IE internet_explorer.json""")
    print(main(sys.argv[1:]))
| {
"content_hash": "5e43ec0e3424d35b80a52e16f18ad156",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 101,
"avg_line_length": 31.172638436482085,
"alnum_prop": 0.5344827586206896,
"repo_name": "youtube/cobalt",
"id": "6ef9fbd20e3f1c6c10beb7fa11e005e5659c8d7e",
"size": "9570",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "third_party/web_platform_tests/tools/runner/report.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from selenium.webdriver.common import by
from openstack_dashboard.test.integration_tests.pages import basepage
from openstack_dashboard.test.integration_tests.pages.settings import \
changepasswordpage
from openstack_dashboard.test.integration_tests.regions import forms
class UsersettingsPage(basepage.BaseNavigationPage):
    """Page object for Settings > User Settings.

    Wraps the user-settings form (language, timezone, page size, log
    length) plus the link to the change-password tab.
    """

    DEFAULT_LANGUAGE = "en"
    DEFAULT_TIMEZONE = "UTC"
    DEFAULT_PAGESIZE = "20"
    DEFAULT_LOGLINES = "35"

    DEFAULT_SETTINGS = {
        "language": DEFAULT_LANGUAGE,
        "timezone": DEFAULT_TIMEZONE,
        "pagesize": DEFAULT_PAGESIZE,
        "loglines": DEFAULT_LOGLINES
    }

    SETTINGS_FORM_FIELDS = (
        "language", "timezone", "pagesize", "instance_log_length")

    _settings_form_locator = (by.By.ID, 'user_settings_modal')
    _change_password_tab_locator = (by.By.CSS_SELECTOR,
                                    'a[href*="/settings/password/"]')

    def __init__(self, driver, conf):
        super().__init__(driver, conf)
        self._page_title = "User Settings"

    def _submit_setting(self, field_name, value):
        # Shared by the change_* helpers: write one field, submit.
        getattr(self.settings_form, field_name).value = value
        self.settings_form.submit()

    @property
    def settings_form(self):
        # Re-located on every access so the form survives page reloads.
        src_elem = self._get_element(*self._settings_form_locator)
        return forms.FormRegion(
            self.driver, self.conf, src_elem=src_elem,
            field_mappings=self.SETTINGS_FORM_FIELDS)

    @property
    def changepassword(self):
        return changepasswordpage.ChangePasswordPage(self.driver, self.conf)

    @property
    def change_password_tab(self):
        return self._get_element(*self._change_password_tab_locator)

    def change_language(self, lang=DEFAULT_LANGUAGE):
        self._submit_setting("language", lang)

    def change_timezone(self, timezone=DEFAULT_TIMEZONE):
        self._submit_setting("timezone", timezone)

    def change_pagesize(self, size=DEFAULT_PAGESIZE):
        self._submit_setting("pagesize", size)

    def change_loglines(self, lines=DEFAULT_LOGLINES):
        self._submit_setting("instance_log_length", lines)

    def return_to_default_settings(self):
        # Reset every field back to its class-level default.
        self.change_language()
        self.change_timezone()
        self.change_pagesize()
        self.change_loglines()

    def go_to_change_password_page(self):
        self.change_password_tab.click()
        return changepasswordpage.ChangePasswordPage(self.driver, self.conf)
| {
"content_hash": "9806775887ef99ffb26cf54d3de182f4",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 76,
"avg_line_length": 34.32394366197183,
"alnum_prop": 0.6680344686089454,
"repo_name": "ChameleonCloud/horizon",
"id": "e8412fb9a54e7195f6ffb6dacd125361fc6bb643",
"size": "3010",
"binary": false,
"copies": "2",
"ref": "refs/heads/chameleoncloud/xena",
"path": "openstack_dashboard/test/integration_tests/pages/settings/usersettingspage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "601681"
},
{
"name": "JavaScript",
"bytes": "2486133"
},
{
"name": "Python",
"bytes": "5346021"
},
{
"name": "SCSS",
"bytes": "129668"
},
{
"name": "Shell",
"bytes": "7466"
}
],
"symlink_target": ""
} |
from django import http
from django.contrib import messages
from django.core.urlresolvers import reverse
from mox import IsA
from novaclient import exceptions as novaclient_exceptions
from horizon import api
from horizon import test
class AccessAndSecurityTests(test.BaseViewTests):
    """View tests for the Access & Security index page."""

    def setUp(self):
        # Build in-memory API fixture objects; the mocked list calls in
        # test_index return these instead of touching real services.
        super(AccessAndSecurityTests, self).setUp()

        keypair = api.KeyPair(None)
        keypair.name = 'keyName'
        self.keypairs = (keypair,)

        server = api.Server(None, self.request)
        server.id = 1
        server.name = 'serverName'
        self.server = server
        self.servers = (server,)

        floating_ip = api.FloatingIp(None)
        floating_ip.id = 1
        floating_ip.fixed_ip = '10.0.0.4'
        floating_ip.instance_id = 1
        floating_ip.ip = '58.58.58.58'
        self.floating_ip = floating_ip
        self.floating_ips = (floating_ip,)

        security_group = api.SecurityGroup(None)
        security_group.id = '1'
        security_group.name = 'default'
        self.security_groups = (security_group,)

    def test_index(self):
        # Record the expected API calls, replay, then check that each
        # table on the page is populated from the matching fixture.
        self.mox.StubOutWithMock(api, 'tenant_floating_ip_list')
        self.mox.StubOutWithMock(api, 'security_group_list')
        self.mox.StubOutWithMock(api.nova, 'keypair_list')
        api.nova.keypair_list(
            IsA(http.HttpRequest)).AndReturn(self.keypairs)
        api.tenant_floating_ip_list(
            IsA(http.HttpRequest)).AndReturn(self.floating_ips)
        api.security_group_list(
            IsA(http.HttpRequest)).AndReturn(self.security_groups)
        self.mox.ReplayAll()

        res = self.client.get(
            reverse('horizon:nova:access_and_security:index'))

        self.assertTemplateUsed(res, 'nova/access_and_security/index.html')
        self.assertItemsEqual(res.context['keypairs_table'].data,
                              self.keypairs)
        self.assertItemsEqual(res.context['security_groups_table'].data,
                              self.security_groups)
        self.assertItemsEqual(res.context['floating_ips_table'].data,
                              self.floating_ips)
| {
"content_hash": "38b618de7e4278eb8a6399c98ed9eda1",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 79,
"avg_line_length": 36.43333333333333,
"alnum_prop": 0.6171088746569076,
"repo_name": "citrix-openstack/horizon",
"id": "3caf353f1fb00d01ba8e8e0df0b75af5565340a4",
"size": "2995",
"binary": false,
"copies": "2",
"ref": "refs/heads/everett",
"path": "horizon/horizon/dashboards/nova/access_and_security/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "184925"
},
{
"name": "Python",
"bytes": "655627"
},
{
"name": "Shell",
"bytes": "11106"
}
],
"symlink_target": ""
} |
"""Test suite for XenAPI."""
import ast
import base64
import contextlib
import copy
import functools
import os
import re
import uuid
import mock
from mox3 import mox
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import importutils
import six
import testtools
from nova.compute import api as compute_api
from nova.compute import arch
from nova.compute import hv_type
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import context
from nova import crypto
from nova import db
from nova import exception
from nova import objects
from nova.objects import base
from nova import test
from nova.tests.unit.db import fakes as db_fakes
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
from nova.tests.unit import fake_processutils
import nova.tests.unit.image.fake as fake_image
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_aggregate
from nova.tests.unit import utils as test_utils
from nova.tests.unit.virt.xenapi import stubs
from nova.virt import fake
from nova.virt.xenapi import agent
from nova.virt.xenapi.client import session as xenapi_session
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import host
from nova.virt.xenapi.image import glance
from nova.virt.xenapi import pool
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import volume_utils
LOG = logging.getLogger(__name__)

CONF = cfg.CONF
# Options declared in other modules that this test suite reads.
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('network_manager', 'nova.service')
CONF.import_opt('compute_driver', 'nova.virt.driver')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('default_availability_zone', 'nova.availability_zones')
CONF.import_opt('login_timeout', 'nova.virt.xenapi.client.session',
                group="xenserver")

# Fake image ids used throughout the suite; each keys an entry in
# IMAGE_FIXTURES below.
IMAGE_MACHINE = '1'
IMAGE_KERNEL = '2'
IMAGE_RAMDISK = '3'
IMAGE_RAW = '4'
IMAGE_VHD = '5'
IMAGE_ISO = '6'
IMAGE_IPXE_ISO = '7'
IMAGE_FROM_VOLUME = '8'

# Minimal Glance-style metadata registered into the fake image service
# by set_image_fixtures(); one entry per disk/container format exercised.
IMAGE_FIXTURES = {
    IMAGE_MACHINE: {
        'image_meta': {'name': 'fakemachine', 'size': 0,
                       'disk_format': 'ami',
                       'container_format': 'ami',
                       'id': 'fake-image'},
    },
    IMAGE_KERNEL: {
        'image_meta': {'name': 'fakekernel', 'size': 0,
                       'disk_format': 'aki',
                       'container_format': 'aki',
                       'id': 'fake-kernel'},
    },
    IMAGE_RAMDISK: {
        'image_meta': {'name': 'fakeramdisk', 'size': 0,
                       'disk_format': 'ari',
                       'container_format': 'ari',
                       'id': 'fake-ramdisk'},
    },
    IMAGE_RAW: {
        'image_meta': {'name': 'fakeraw', 'size': 0,
                       'disk_format': 'raw',
                       'container_format': 'bare',
                       'id': 'fake-image-raw'},
    },
    IMAGE_VHD: {
        'image_meta': {'name': 'fakevhd', 'size': 0,
                       'disk_format': 'vhd',
                       'container_format': 'ovf',
                       'id': 'fake-image-vhd'},
    },
    IMAGE_ISO: {
        'image_meta': {'name': 'fakeiso', 'size': 0,
                       'disk_format': 'iso',
                       'container_format': 'bare',
                       'id': 'fake-image-iso'},
    },
    IMAGE_IPXE_ISO: {
        'image_meta': {'name': 'fake_ipxe_iso', 'size': 0,
                       'disk_format': 'iso',
                       'container_format': 'bare',
                       'id': 'fake-image-pxe',
                       'properties': {'ipxe_boot': 'true'}},
    },
    IMAGE_FROM_VOLUME: {
        'image_meta': {'name': 'fake_ipxe_iso',
                       'id': 'fake-image-volume',
                       'properties': {'foo': 'bar'}},
    },
}
def get_session():
    """Open a XenAPISession against the suite's canned test endpoint."""
    url, user, password = 'test_url', 'root', 'test_pass'
    return xenapi_session.XenAPISession(url, user, password)
def set_image_fixtures():
    """Replace the fake image service's contents with IMAGE_FIXTURES."""
    service = fake_image.FakeImageService()
    service.images.clear()
    for image_id, fixture in IMAGE_FIXTURES.items():
        meta = fixture['image_meta']
        # Keep the registered id in sync with the fixture key.
        meta['id'] = image_id
        service.create(None, meta)
def get_fake_device_info():
    """Return block-device-mapping info wired to a fresh fake VDI."""
    # FIXME: 'sr_uuid', 'introduce_sr_keys', sr_type and vdi_uuid
    # can be removed from the dict when LP bug #1087308 is fixed
    vdi_ref = xenapi_fake.create_vdi('fake-vdi', None)
    vdi_uuid = xenapi_fake.get_record('VDI', vdi_ref)['uuid']
    iscsi_data = {'sr_uuid': 'falseSR',
                  'introduce_sr_keys': ['sr_type'],
                  'sr_type': 'iscsi',
                  'vdi_uuid': vdi_uuid,
                  'target_discovered': False,
                  'target_iqn': 'foo_iqn:foo_volid',
                  'target_portal': 'localhost:3260',
                  'volume_id': 'foo_volid',
                  'target_lun': 1,
                  'auth_password': 'my-p@55w0rd',
                  'auth_username': 'johndoe',
                  'auth_method': u'CHAP'}
    device_info = {'block_device_mapping':
                   [{'connection_info': {'driver_volume_type': 'iscsi',
                                         'data': iscsi_data},
                     'mount_device': 'vda',
                     'delete_on_termination': False}, ],
                   'root_device_name': '/dev/sda',
                   'ephemerals': [],
                   'swap': None, }
    return device_info
def stub_vm_utils_with_vdi_attached_here(function):
    """vm_utils.with_vdi_attached_here needs to be stubbed out because it
    calls down to the filesystem to attach a vdi. This provides a
    decorator to handle that.
    """
    @functools.wraps(function)
    def decorated_function(self, *args, **kwargs):
        @contextlib.contextmanager
        def fake_vdi_attached_here(*args, **kwargs):
            # Pretend the VDI was attached and hand back a device name.
            fake_dev = 'fakedev'
            yield fake_dev

        def fake_image_download(*args, **kwargs):
            pass

        # Monkey-patch the two module-level callables, run the wrapped
        # test, and restore the originals in finally so one failing test
        # cannot leak the stubs into the rest of the suite.
        orig_vdi_attached_here = vm_utils.vdi_attached_here
        orig_image_download = fake_image._FakeImageService.download
        try:
            vm_utils.vdi_attached_here = fake_vdi_attached_here
            fake_image._FakeImageService.download = fake_image_download
            return function(self, *args, **kwargs)
        finally:
            fake_image._FakeImageService.download = orig_image_download
            vm_utils.vdi_attached_here = orig_vdi_attached_here

    return decorated_function
def create_instance_with_system_metadata(context, instance_values):
    """Build, persist and return an Instance object from instance_values."""
    instance = objects.Instance(context=context,
                                system_metadata={})
    for key, value in instance_values.items():
        setattr(instance, key, value)
    instance.flavor = objects.Flavor.get_by_id(
        context, instance_values['instance_type_id'])
    # No resize in flight for a freshly created instance.
    instance.old_flavor = None
    instance.new_flavor = None
    instance.create()
    instance.pci_devices = objects.PciDeviceList(objects=[])
    return instance
class XenAPIVolumeTestCase(stubs.XenAPITestBaseNoDB):
    """Unit tests for Volume operations."""
    def setUp(self):
        # Point the driver at a fake XenServer endpoint and disable
        # process locking so tests never block on lock files.
        super(XenAPIVolumeTestCase, self).setUp()
        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
        self.fixture.config(disable_process_locking=True,
                            group='oslo_concurrency')
        self.flags(firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        self.instance = fake_instance.fake_db_instance(name='foo')

    @classmethod
    def _make_connection_info(cls):
        # Canned iSCSI connection_info in the shape Cinder would hand over.
        target_iqn = 'iqn.2010-10.org.openstack:volume-00000001'
        return {'driver_volume_type': 'iscsi',
                'data': {'volume_id': 1,
                         'target_iqn': target_iqn,
                         'target_portal': '127.0.0.1:3260,fake',
                         'target_lun': None,
                         'auth_method': 'CHAP',
                         'auth_username': 'username',
                         'auth_password': 'password'}}

    def test_attach_volume(self):
        # This shows how to test Ops classes' methods.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vm = xenapi_fake.create_vm(self.instance['name'], 'Running')
        conn_info = self._make_connection_info()
        self.assertIsNone(
            conn.attach_volume(None, conn_info, self.instance, '/dev/sdc'))

        # check that the VM has a VBD attached to it
        # Get XenAPI record for VBD
        vbds = xenapi_fake.get_all('VBD')
        vbd = xenapi_fake.get_record('VBD', vbds[0])
        vm_ref = vbd['VM']
        self.assertEqual(vm_ref, vm)

    def test_attach_volume_raise_exception(self):
        # This shows how to test when exceptions are raised.
        stubs.stubout_session(self.stubs,
                              stubs.FakeSessionForVolumeFailedTests)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        xenapi_fake.create_vm(self.instance['name'], 'Running')
        # An unknown driver_volume_type must surface as VolumeDriverNotFound.
        self.assertRaises(exception.VolumeDriverNotFound,
                          conn.attach_volume,
                          None, {'driver_volume_type': 'nonexist'},
                          self.instance, '/dev/sdc')
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIVMTestCase(stubs.XenAPITestBase):
"""Unit tests for VM operations."""
    def setUp(self):
        # Wire up every fake/stub layer the VM tests rely on: sample
        # networks, fake XenAPI session, fake DB instance API, fake image
        # service with the IMAGE_FIXTURES, and no-op process execution.
        # NOTE(review): later stubs can depend on earlier ones — keep order.
        super(XenAPIVMTestCase, self).setUp()
        self.useFixture(test.SampleNetworks())
        self.network = importutils.import_object(CONF.network_manager)
        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
        self.fixture.config(disable_process_locking=True,
                            group='oslo_concurrency')
        self.flags(instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        db_fakes.stub_out_db_instance_api(self.stubs)
        xenapi_fake.create_network('fake', 'fake_br1')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        stubs.stubout_get_this_vm_uuid(self.stubs)
        stubs.stub_out_vm_methods(self.stubs)
        fake_processutils.stub_out_processutils_execute(self.stubs)
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.conn._session.is_local_connection = False

        fake_image.stub_out_image_service(self.stubs)
        set_image_fixtures()
        stubs.stubout_image_service_download(self.stubs)
        stubs.stubout_stream_disk(self.stubs)

        def fake_inject_instance_metadata(self, instance, vm):
            pass
        self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
                       fake_inject_instance_metadata)

        def fake_safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref):
            # Create a fresh VDI instead of actually copying one.
            name_label = "fakenamelabel"
            disk_type = "fakedisktype"
            virtual_size = 777
            return vm_utils.create_vdi(
                session, sr_ref, instance, name_label, disk_type,
                virtual_size)
        self.stubs.Set(vm_utils, '_safe_copy_vdi', fake_safe_copy_vdi)
    def tearDown(self):
        # Clear the fake image registry before the base class unwinds.
        fake_image.FakeImageService_reset()
        super(XenAPIVMTestCase, self).tearDown()
def test_init_host(self):
session = get_session()
vm = vm_utils._get_this_vm_ref(session)
# Local root disk
vdi0 = xenapi_fake.create_vdi('compute', None)
vbd0 = xenapi_fake.create_vbd(vm, vdi0)
# Instance VDI
vdi1 = xenapi_fake.create_vdi('instance-aaaa', None,
other_config={'nova_instance_uuid': 'aaaa'})
xenapi_fake.create_vbd(vm, vdi1)
# Only looks like instance VDI
vdi2 = xenapi_fake.create_vdi('instance-bbbb', None)
vbd2 = xenapi_fake.create_vbd(vm, vdi2)
self.conn.init_host(None)
self.assertEqual(set(xenapi_fake.get_all('VBD')), set([vbd0, vbd2]))
    def test_instance_exists(self):
        # A successful vm_utils.lookup means the driver reports existence.
        self.mox.StubOutWithMock(vm_utils, 'lookup')
        vm_utils.lookup(mox.IgnoreArg(), 'foo').AndReturn(True)
        self.mox.ReplayAll()

        self.stubs.Set(objects.Instance, 'name', 'foo')
        instance = objects.Instance(uuid='fake-uuid')
        self.assertTrue(self.conn.instance_exists(instance))
    def test_instance_not_exists(self):
        # A None lookup result means the driver reports non-existence.
        self.mox.StubOutWithMock(vm_utils, 'lookup')
        vm_utils.lookup(mox.IgnoreArg(), 'bar').AndReturn(None)
        self.mox.ReplayAll()

        self.stubs.Set(objects.Instance, 'name', 'bar')
        instance = objects.Instance(uuid='fake-uuid')
        self.assertFalse(self.conn.instance_exists(instance))
def test_list_instances_0(self):
instances = self.conn.list_instances()
self.assertEqual(instances, [])
def test_list_instance_uuids_0(self):
instance_uuids = self.conn.list_instance_uuids()
self.assertEqual(instance_uuids, [])
def test_list_instance_uuids(self):
uuids = []
for x in range(1, 4):
instance = self._create_instance()
uuids.append(instance['uuid'])
instance_uuids = self.conn.list_instance_uuids()
self.assertEqual(len(uuids), len(instance_uuids))
self.assertEqual(set(uuids), set(instance_uuids))
def test_get_rrd_server(self):
self.flags(connection_url='myscheme://myaddress/',
group='xenserver')
server_info = vm_utils._get_rrd_server()
self.assertEqual(server_info[0], 'myscheme')
self.assertEqual(server_info[1], 'myaddress')
expected_raw_diagnostics = {
'vbd_xvdb_write': '0.0',
'memory_target': '4294967296.0000',
'memory_internal_free': '1415564.0000',
'memory': '4294967296.0000',
'vbd_xvda_write': '0.0',
'cpu0': '0.0042',
'vif_0_tx': '287.4134',
'vbd_xvda_read': '0.0',
'vif_0_rx': '1816.0144',
'vif_2_rx': '0.0',
'vif_2_tx': '0.0',
'vbd_xvdb_read': '0.0',
'last_update': '1328795567',
}
def test_get_diagnostics(self):
def fake_get_rrd(host, vm_uuid):
path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(path, 'vm_rrd.xml')) as f:
return re.sub(r'\s', '', f.read())
self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd)
expected = self.expected_raw_diagnostics
instance = self._create_instance()
actual = self.conn.get_diagnostics(instance)
self.assertThat(actual, matchers.DictMatches(expected))
    def test_get_instance_diagnostics(self):
        def fake_get_rrd(host, vm_uuid):
            # Serve the canned RRD XML fixture, whitespace-stripped.
            path = os.path.dirname(os.path.realpath(__file__))
            with open(os.path.join(path, 'vm_rrd.xml')) as f:
                return re.sub(r'\s', '', f.read())
        self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd)

        # Serialized form of the standardized Diagnostics object expected
        # for a freshly spawned fake instance.
        expected = {
            'config_drive': False,
            'state': 'running',
            'driver': 'xenapi',
            'version': '1.0',
            'uptime': 0,
            'hypervisor_os': None,
            'cpu_details': [{'time': 0}, {'time': 0},
                            {'time': 0}, {'time': 0}],
            'nic_details': [{'mac_address': '00:00:00:00:00:00',
                             'rx_drop': 0,
                             'rx_errors': 0,
                             'rx_octets': 0,
                             'rx_packets': 0,
                             'tx_drop': 0,
                             'tx_errors': 0,
                             'tx_octets': 0,
                             'tx_packets': 0}],
            'disk_details': [{'errors_count': 0,
                              'id': '',
                              'read_bytes': 0,
                              'read_requests': 0,
                              'write_bytes': 0,
                              'write_requests': 0}],
            'memory_details': {'maximum': 8192, 'used': 0}}

        instance = self._create_instance(obj=True)
        actual = self.conn.get_instance_diagnostics(instance)
        self.assertEqual(expected, actual.serialize())
    def test_get_vnc_console(self):
        # The console's internal access path must point at the VM ref.
        instance = self._create_instance(obj=True)
        session = get_session()
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vm_ref = vm_utils.lookup(session, instance['name'])

        console = conn.get_vnc_console(self.context, instance)

        # Note(sulo): We don't care about session id in test
        # they will always differ so strip that out
        actual_path = console.internal_access_path.split('&')[0]
        expected_path = "/console?ref=%s" % str(vm_ref)
        self.assertEqual(expected_path, actual_path)
    def test_get_vnc_console_for_rescue(self):
        # For a rescued instance the console must target the -rescue VM.
        instance = self._create_instance(obj=True)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        rescue_vm = xenapi_fake.create_vm(instance['name'] + '-rescue',
                                          'Running')
        # Set instance state to rescued
        instance['vm_state'] = 'rescued'

        console = conn.get_vnc_console(self.context, instance)

        # Note(sulo): We don't care about session id in test
        # they will always differ so strip that out
        actual_path = console.internal_access_path.split('&')[0]
        expected_path = "/console?ref=%s" % str(rescue_vm)
        self.assertEqual(expected_path, actual_path)
def test_get_vnc_console_instance_not_ready(self):
instance = self._create_instance(obj=True, spawn=False)
instance.vm_state = 'building'
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.InstanceNotFound,
conn.get_vnc_console, self.context, instance)
def test_get_vnc_console_rescue_not_ready(self):
instance = self._create_instance(obj=True, spawn=False)
instance.vm_state = 'rescued'
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.InstanceNotReady,
conn.get_vnc_console, self.context, instance)
    def test_instance_snapshot_fails_with_no_primary_vdi(self):
        # Force every created VBD to carry a bogus userdevice so the
        # snapshot code cannot find the root disk and must raise.
        def create_bad_vbd(session, vm_ref, vdi_ref, userdevice,
                           vbd_type='disk', read_only=False, bootable=False,
                           osvol=False):
            vbd_rec = {'VM': vm_ref,
               'VDI': vdi_ref,
               'userdevice': 'fake',
               'currently_attached': False}
            vbd_ref = xenapi_fake._create_object('VBD', vbd_rec)
            xenapi_fake.after_VBD_create(vbd_ref, vbd_rec)
            return vbd_ref

        self.stubs.Set(vm_utils, 'create_vbd', create_bad_vbd)
        stubs.stubout_instance_snapshot(self.stubs)
        # Stubbing out firewall driver as previous stub sets alters
        # xml rpc result parsing
        stubs.stubout_firewall_driver(self.stubs, self.conn)
        instance = self._create_instance()

        image_id = "my_snapshot_id"
        self.assertRaises(exception.NovaException, self.conn.snapshot,
                          self.context, instance, image_id,
                          lambda *args, **kwargs: None)
    def test_instance_snapshot(self):
        # The task_state must move PENDING_UPLOAD -> UPLOADING, in order.
        expected_calls = [
            {'args': (),
             'kwargs':
                 {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
            {'args': (),
             'kwargs':
                 {'task_state': task_states.IMAGE_UPLOADING,
                  'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
        func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
        image_id = "my_snapshot_id"

        stubs.stubout_instance_snapshot(self.stubs)
        stubs.stubout_is_snapshot(self.stubs)
        # Stubbing out firewall driver as previous stub sets alters
        # xml rpc result parsing
        stubs.stubout_firewall_driver(self.stubs, self.conn)

        instance = self._create_instance()

        self.fake_upload_called = False

        def fake_image_upload(_self, ctx, session, inst, img_id, vdi_uuids):
            # Record that the upload happened with the expected arguments.
            self.fake_upload_called = True
            self.assertEqual(ctx, self.context)
            self.assertEqual(inst, instance)
            self.assertIsInstance(vdi_uuids, list)
            self.assertEqual(img_id, image_id)

        self.stubs.Set(glance.GlanceStore, 'upload_image',
                       fake_image_upload)

        self.conn.snapshot(self.context, instance, image_id,
                           func_call_matcher.call)

        # Ensure VM was torn down
        vm_labels = []
        for vm_ref in xenapi_fake.get_all('VM'):
            vm_rec = xenapi_fake.get_record('VM', vm_ref)
            if not vm_rec["is_control_domain"]:
                vm_labels.append(vm_rec["name_label"])

        self.assertEqual(vm_labels, [instance['name']])

        # Ensure VBDs were torn down
        vbd_labels = []
        for vbd_ref in xenapi_fake.get_all('VBD'):
            vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
            vbd_labels.append(vbd_rec["vm_name_label"])

        self.assertEqual(vbd_labels, [instance['name']])

        # Ensure task states changed in correct order
        self.assertIsNone(func_call_matcher.match())

        # Ensure VDIs were torn down
        for vdi_ref in xenapi_fake.get_all('VDI'):
            vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
            name_label = vdi_rec["name_label"]
            self.assertFalse(name_label.endswith('snapshot'))

        self.assertTrue(self.fake_upload_called)
def create_vm_record(self, conn, os_type, name):
instances = conn.list_instances()
self.assertEqual(instances, [name])
# Get Nova record for VM
vm_info = conn.get_info({'name': name})
# Get XenAPI record for VM
vms = [rec for ref, rec
in six.iteritems(xenapi_fake.get_all_records('VM'))
if not rec['is_control_domain']]
vm = vms[0]
self.vm_info = vm_info
self.vm = vm
    def check_vm_record(self, conn, instance_type_id, check_injection):
        """Verify the captured VM records against the given flavor and,
        when *check_injection* is set, the network data written to
        xenstore by file injection.
        """
        flavor = db.flavor_get(conn, instance_type_id)
        # Flavor memory is in MiB; XenAPI records store bytes as strings.
        mem_kib = int(flavor['memory_mb']) << 10
        mem_bytes = str(mem_kib << 10)
        vcpus = flavor['vcpus']
        vcpu_weight = flavor['vcpu_weight']
        self.assertEqual(self.vm_info.max_mem_kb, mem_kib)
        self.assertEqual(self.vm_info.mem_kb, mem_kib)
        self.assertEqual(self.vm['memory_static_max'], mem_bytes)
        self.assertEqual(self.vm['memory_dynamic_max'], mem_bytes)
        self.assertEqual(self.vm['memory_dynamic_min'], mem_bytes)
        self.assertEqual(self.vm['VCPUs_max'], str(vcpus))
        self.assertEqual(self.vm['VCPUs_at_startup'], str(vcpus))
        if vcpu_weight is None:
            self.assertEqual(self.vm['VCPUs_params'], {})
        else:
            self.assertEqual(self.vm['VCPUs_params'],
                             {'weight': str(vcpu_weight), 'cap': '0'})
        # Check that the VM is running according to Nova
        self.assertEqual(self.vm_info.state, power_state.RUNNING)
        # Check that the VM is running according to XenAPI.
        self.assertEqual(self.vm['power_state'], 'Running')
        if check_injection:
            xenstore_data = self.vm['xenstore_data']
            self.assertNotIn('vm-data/hostname', xenstore_data)
            key = 'vm-data/networking/DEADBEEF0001'
            xenstore_value = xenstore_data[key]
            tcpip_data = ast.literal_eval(xenstore_value)
            self.assertJsonEqual({'broadcast': '192.168.1.255',
                                  'dns': ['192.168.1.4', '192.168.1.3'],
                                  'gateway': '192.168.1.1',
                                  'gateway_v6': '2001:db8:0:1::1',
                                  'ip6s': [{'enabled': '1',
                                            'ip': '2001:db8:0:1:dcad:beff:feef:1',
                                            'netmask': 64,
                                            'gateway': '2001:db8:0:1::1'}],
                                  'ips': [{'enabled': '1',
                                           'ip': '192.168.1.100',
                                           'netmask': '255.255.255.0',
                                           'gateway': '192.168.1.1'},
                                          {'enabled': '1',
                                           'ip': '192.168.1.101',
                                           'netmask': '255.255.255.0',
                                           'gateway': '192.168.1.1'}],
                                  'label': 'test1',
                                  'mac': 'DE:AD:BE:EF:00:01'}, tcpip_data)
def check_vm_params_for_windows(self):
self.assertEqual(self.vm['platform']['nx'], 'true')
self.assertEqual(self.vm['HVM_boot_params'], {'order': 'dc'})
self.assertEqual(self.vm['HVM_boot_policy'], 'BIOS order')
# check that these are not set
self.assertEqual(self.vm['PV_args'], '')
self.assertEqual(self.vm['PV_bootloader'], '')
self.assertEqual(self.vm['PV_kernel'], '')
self.assertEqual(self.vm['PV_ramdisk'], '')
def check_vm_params_for_linux(self):
self.assertEqual(self.vm['platform']['nx'], 'false')
self.assertEqual(self.vm['PV_args'], '')
self.assertEqual(self.vm['PV_bootloader'], 'pygrub')
# check that these are not set
self.assertEqual(self.vm['PV_kernel'], '')
self.assertEqual(self.vm['PV_ramdisk'], '')
self.assertEqual(self.vm['HVM_boot_params'], {})
self.assertEqual(self.vm['HVM_boot_policy'], '')
def check_vm_params_for_linux_with_external_kernel(self):
self.assertEqual(self.vm['platform']['nx'], 'false')
self.assertEqual(self.vm['PV_args'], 'root=/dev/xvda1')
self.assertNotEqual(self.vm['PV_kernel'], '')
self.assertNotEqual(self.vm['PV_ramdisk'], '')
# check that these are not set
self.assertEqual(self.vm['HVM_boot_params'], {})
self.assertEqual(self.vm['HVM_boot_policy'], '')
def _list_vdis(self):
session = get_session()
return session.call_xenapi('VDI.get_all')
def _list_vms(self):
session = get_session()
return session.call_xenapi('VM.get_all')
def _check_vdis(self, start_list, end_list):
for vdi_ref in end_list:
if vdi_ref not in start_list:
vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
# If the cache is turned on then the base disk will be
# there even after the cleanup
if 'other_config' in vdi_rec:
if 'image-id' not in vdi_rec['other_config']:
self.fail('Found unexpected VDI:%s' % vdi_ref)
else:
self.fail('Found unexpected VDI:%s' % vdi_ref)
    def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
                    instance_type_id="3", os_type="linux",
                    hostname="test", architecture="x86-64", instance_id=1,
                    injected_files=None, check_injection=False,
                    create_record=True, empty_dns=False,
                    block_device_info=None,
                    key_data=None):
        """Create (or load) an instance, spawn it through the driver and
        record/check the resulting VM via create_vm_record and
        check_vm_record.
        """
        if injected_files is None:
            injected_files = []
        # Fake out inject_instance_metadata
        def fake_inject_instance_metadata(self, instance, vm):
            pass
        self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
                       fake_inject_instance_metadata)
        if create_record:
            instance = objects.Instance(context=self.context)
            instance.project_id = self.project_id
            instance.user_id = self.user_id
            instance.image_ref = image_ref
            instance.kernel_id = kernel_id
            instance.ramdisk_id = ramdisk_id
            instance.root_gb = 20
            instance.ephemeral_gb = 0
            instance.instance_type_id = instance_type_id
            instance.os_type = os_type
            instance.hostname = hostname
            instance.key_data = key_data
            instance.architecture = architecture
            instance.system_metadata = {}
            flavor = objects.Flavor.get_by_id(self.context,
                                              instance_type_id)
            if instance_type_id == 5:
                # NOTE(danms): xenapi test stubs have flavor 5 with no
                # vcpu_weight
                flavor.vcpu_weight = None
            instance.flavor = flavor
            instance.create()
        else:
            instance = objects.Instance.get_by_id(self.context, instance_id,
                                                  expected_attrs=['flavor'])
        network_info = fake_network.fake_get_instance_nw_info(self.stubs)
        if empty_dns:
            # NOTE(tr3buchet): this is a terrible way to do this...
            network_info[0]['network']['subnets'][0]['dns'] = []
        image_meta = IMAGE_FIXTURES[image_ref]["image_meta"]
        self.conn.spawn(self.context, instance, image_meta, injected_files,
                        'herp', network_info, block_device_info)
        self.create_vm_record(self.conn, os_type, instance['name'])
        self.check_vm_record(self.conn, instance_type_id, check_injection)
        self.assertEqual(instance['os_type'], os_type)
        self.assertEqual(instance['architecture'], architecture)
    def test_spawn_ipxe_iso_success(self):
        """With iPXE fully configured, spawn injects the boot ISO via the
        'ipxe' plugin.
        """
        self.mox.StubOutWithMock(vm_utils, 'get_sr_path')
        vm_utils.get_sr_path(mox.IgnoreArg()).AndReturn('/sr/path')
        self.flags(ipxe_network_name='test1',
                   ipxe_boot_menu_url='http://boot.example.com',
                   ipxe_mkisofs_cmd='/root/mkisofs',
                   group='xenserver')
        # Record the expected plugin call; ReplayAll enforces it happens.
        self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
        self.conn._session.call_plugin_serialized(
            'ipxe', 'inject', '/sr/path', mox.IgnoreArg(),
            'http://boot.example.com', '192.168.1.100', '255.255.255.0',
            '192.168.1.1', '192.168.1.3', '/root/mkisofs')
        self.mox.ReplayAll()
        self._test_spawn(IMAGE_IPXE_ISO, None, None)
    def test_spawn_ipxe_iso_no_network_name(self):
        """Without an iPXE network name configured, no ISO injection
        happens.
        """
        self.flags(ipxe_network_name=None,
                   ipxe_boot_menu_url='http://boot.example.com',
                   group='xenserver')
        # call_plugin_serialized shouldn't be called
        self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
        self.mox.ReplayAll()
        self._test_spawn(IMAGE_IPXE_ISO, None, None)
    def test_spawn_ipxe_iso_no_boot_menu_url(self):
        """Without an iPXE boot menu URL configured, no ISO injection
        happens.
        """
        self.flags(ipxe_network_name='test1',
                   ipxe_boot_menu_url=None,
                   group='xenserver')
        # call_plugin_serialized shouldn't be called
        self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
        self.mox.ReplayAll()
        self._test_spawn(IMAGE_IPXE_ISO, None, None)
    def test_spawn_ipxe_iso_unknown_network_name(self):
        """If the configured iPXE network does not match the instance's
        network, no ISO injection happens.
        """
        self.flags(ipxe_network_name='test2',
                   ipxe_boot_menu_url='http://boot.example.com',
                   group='xenserver')
        # call_plugin_serialized shouldn't be called
        self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
        self.mox.ReplayAll()
        self._test_spawn(IMAGE_IPXE_ISO, None, None)
def test_spawn_empty_dns(self):
# Test spawning with an empty dns list.
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64",
empty_dns=True)
self.check_vm_params_for_linux()
def test_spawn_not_enough_memory(self):
self.assertRaises(exception.InsufficientFreeMemory,
self._test_spawn,
'1', 2, 3, "4") # m1.xlarge
    def test_spawn_fail_cleanup_1(self):
        """Simulates an error while downloading an image.
        Verifies that the VM and VDIs created are properly cleaned up.
        """
        # Snapshot the resource lists before the failing spawn.
        vdi_recs_start = self._list_vdis()
        start_vms = self._list_vms()
        stubs.stubout_fetch_disk_image(self.stubs, raise_failure=True)
        self.assertRaises(xenapi_fake.Failure,
                          self._test_spawn, '1', 2, 3)
        # No additional VDI should be found.
        vdi_recs_end = self._list_vdis()
        end_vms = self._list_vms()
        self._check_vdis(vdi_recs_start, vdi_recs_end)
        # No additional VMs should be found.
        self.assertEqual(start_vms, end_vms)
    def test_spawn_fail_cleanup_2(self):
        """Simulates an error while creating VM record.
        Verifies that the VM and VDIs created are properly cleaned up.
        """
        # Snapshot the resource lists before the failing spawn.
        vdi_recs_start = self._list_vdis()
        start_vms = self._list_vms()
        stubs.stubout_create_vm(self.stubs)
        self.assertRaises(xenapi_fake.Failure,
                          self._test_spawn, '1', 2, 3)
        # No additional VDI should be found.
        vdi_recs_end = self._list_vdis()
        end_vms = self._list_vms()
        self._check_vdis(vdi_recs_start, vdi_recs_end)
        # No additional VMs should be found.
        self.assertEqual(start_vms, end_vms)
    def test_spawn_fail_cleanup_3(self):
        """Simulates an error while attaching disks.
        Verifies that the VM and VDIs created are properly cleaned up.
        """
        stubs.stubout_attach_disks(self.stubs)
        # Snapshot the resource lists before the failing spawn.
        vdi_recs_start = self._list_vdis()
        start_vms = self._list_vms()
        self.assertRaises(xenapi_fake.Failure,
                          self._test_spawn, '1', 2, 3)
        # No additional VDI should be found.
        vdi_recs_end = self._list_vdis()
        end_vms = self._list_vms()
        self._check_vdis(vdi_recs_start, vdi_recs_end)
        # No additional VMs should be found.
        self.assertEqual(start_vms, end_vms)
def test_spawn_raw_glance(self):
self._test_spawn(IMAGE_RAW, None, None, os_type=None)
self.check_vm_params_for_windows()
def test_spawn_vhd_glance_linux(self):
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
self.check_vm_params_for_linux()
def test_spawn_vhd_glance_windows(self):
self._test_spawn(IMAGE_VHD, None, None,
os_type="windows", architecture="i386",
instance_type_id=5)
self.check_vm_params_for_windows()
def test_spawn_iso_glance(self):
self._test_spawn(IMAGE_ISO, None, None,
os_type="windows", architecture="i386")
self.check_vm_params_for_windows()
    def test_spawn_glance(self):
        """Spawning with separate kernel/ramdisk images boots the guest
        with an external kernel.
        """
        # Short-circuit the image download: create the VDI directly.
        def fake_fetch_disk_image(context, session, instance, name_label,
                                  image_id, image_type):
            sr_ref = vm_utils.safe_find_sr(session)
            image_type_str = vm_utils.ImageType.to_string(image_type)
            vdi_ref = vm_utils.create_vdi(session, sr_ref, instance,
                                          name_label, image_type_str, "20")
            vdi_role = vm_utils.ImageType.get_role(image_type)
            vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)
            return {vdi_role: dict(uuid=vdi_uuid, file=None)}
        self.stubs.Set(vm_utils, '_fetch_disk_image',
                       fake_fetch_disk_image)
        self._test_spawn(IMAGE_MACHINE,
                         IMAGE_KERNEL,
                         IMAGE_RAMDISK)
        self.check_vm_params_for_linux_with_external_kernel()
def test_spawn_boot_from_volume_no_glance_image_meta(self):
dev_info = get_fake_device_info()
self._test_spawn(IMAGE_FROM_VOLUME, None, None,
block_device_info=dev_info)
def test_spawn_boot_from_volume_with_image_meta(self):
dev_info = get_fake_device_info()
self._test_spawn(IMAGE_VHD, None, None,
block_device_info=dev_info)
    @testtools.skipIf(test_utils.is_osx(),
                      'IPv6 pretty-printing broken on OSX, see bug 1409135')
    def test_spawn_netinject_file(self):
        """Flat network injection writes /etc/network/interfaces via tee
        with the expected Debian-style contents.
        """
        self.flags(flat_injected=True)
        db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
        self._tee_executed = False
        # Intercept the 'tee' call and compare its stdin with the
        # interfaces file we expect the injection code to generate.
        def _tee_handler(cmd, **kwargs):
            actual = kwargs.get('process_input', None)
            expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
hwaddress ether DE:AD:BE:EF:00:01
address 192.168.1.100
netmask 255.255.255.0
broadcast 192.168.1.255
gateway 192.168.1.1
dns-nameservers 192.168.1.3 192.168.1.4
iface eth0 inet6 static
hwaddress ether DE:AD:BE:EF:00:01
address 2001:db8:0:1:dcad:beff:feef:1
netmask 64
gateway 2001:db8:0:1::1
"""
            self.assertEqual(expected, actual)
            self._tee_executed = True
            return '', ''
        def _readlink_handler(cmd_parts, **kwargs):
            return os.path.realpath(cmd_parts[2]), ''
        fake_processutils.fake_execute_set_repliers([
            # Capture the tee .../etc/network/interfaces command
            (r'tee.*interfaces', _tee_handler),
            (r'readlink -nm.*', _readlink_handler),
        ])
        self._test_spawn(IMAGE_MACHINE,
                         IMAGE_KERNEL,
                         IMAGE_RAMDISK,
                         check_injection=True)
        self.assertTrue(self._tee_executed)
    @testtools.skipIf(test_utils.is_osx(),
                      'IPv6 pretty-printing broken on OSX, see bug 1409135')
    def test_spawn_netinject_xenstore(self):
        """File injection is skipped when an injection-capable guest agent
        is found in the mounted image.
        """
        db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
        self._tee_executed = False
        def _mount_handler(cmd, *ignore_args, **ignore_kwargs):
            # When mounting, create real files under the mountpoint to simulate
            # files in the mounted filesystem
            # mount point will be the last item of the command list
            self._tmpdir = cmd[len(cmd) - 1]
            LOG.debug('Creating files in %s to simulate guest agent',
                      self._tmpdir)
            os.makedirs(os.path.join(self._tmpdir, 'usr', 'sbin'))
            # Touch the file using open
            open(os.path.join(self._tmpdir, 'usr', 'sbin',
                              'xe-update-networking'), 'w').close()
            return '', ''
        def _umount_handler(cmd, *ignore_args, **ignore_kwargs):
            # Umount would normally make files in the mounted filesystem
            # disappear, so do that here
            LOG.debug('Removing simulated guest agent files in %s',
                      self._tmpdir)
            os.remove(os.path.join(self._tmpdir, 'usr', 'sbin',
                                   'xe-update-networking'))
            os.rmdir(os.path.join(self._tmpdir, 'usr', 'sbin'))
            os.rmdir(os.path.join(self._tmpdir, 'usr'))
            return '', ''
        def _tee_handler(cmd, *ignore_args, **ignore_kwargs):
            self._tee_executed = True
            return '', ''
        fake_processutils.fake_execute_set_repliers([
            (r'mount', _mount_handler),
            (r'umount', _umount_handler),
            (r'tee.*interfaces', _tee_handler)])
        self._test_spawn('1', 2, 3, check_injection=True)
        # tee must not run in this case, where an injection-capable
        # guest agent is detected
        self.assertFalse(self._tee_executed)
    def test_spawn_injects_auto_disk_config_to_xenstore(self):
        """Spawn calls _inject_auto_disk_config for the new instance."""
        instance = self._create_instance(spawn=False, obj=True)
        self.mox.StubOutWithMock(self.conn._vmops, '_inject_auto_disk_config')
        self.conn._vmops._inject_auto_disk_config(instance, mox.IgnoreArg())
        self.mox.ReplayAll()
        self.conn.spawn(self.context, instance,
                        IMAGE_FIXTURES['1']["image_meta"], [], 'herp', '')
    def test_spawn_vlanmanager(self):
        """Spawning works when networking is provided by VlanManager."""
        self.flags(network_manager='nova.network.manager.VlanManager',
                   vlan_interface='fake0')
        def dummy(*args, **kwargs):
            pass
        self.stubs.Set(vmops.VMOps, '_create_vifs', dummy)
        # Reset network table
        xenapi_fake.reset_table('network')
        # Instance 2 will use vlan network (see db/fakes.py)
        ctxt = self.context.elevated()
        inst2 = self._create_instance(False, obj=True)
        networks = self.network.db.network_get_all(ctxt)
        with mock.patch('nova.objects.network.Network._from_db_object'):
            for network in networks:
                self.network.set_network_host(ctxt, network)
        self.network.allocate_for_instance(ctxt,
                                           instance_id=inst2.id,
                                           instance_uuid=inst2.uuid,
                                           host=CONF.host,
                                           vpn=None,
                                           rxtx_factor=3,
                                           project_id=self.project_id,
                                           macs=None)
        self._test_spawn(IMAGE_MACHINE,
                         IMAGE_KERNEL,
                         IMAGE_RAMDISK,
                         instance_id=inst2.id,
                         create_record=False)
        # TODO(salvatore-orlando): a complete test here would require
        # a check for making sure the bridge for the VM's VIF is
        # consistent with bridge specified in nova db
def test_spawn_with_network_qos(self):
self._create_instance()
for vif_ref in xenapi_fake.get_all('VIF'):
vif_rec = xenapi_fake.get_record('VIF', vif_ref)
self.assertEqual(vif_rec['qos_algorithm_type'], 'ratelimit')
self.assertEqual(vif_rec['qos_algorithm_params']['kbps'],
str(3 * 10 * 1024))
    def test_spawn_ssh_key_injection(self):
        # Test spawning with key_data on an instance. Should use
        # agent file injection.
        self.flags(use_agent_default=True,
                   group='xenserver')
        actual_injected_files = []
        # Record the agent file-injection calls instead of performing them.
        def fake_inject_file(self, method, args):
            path = base64.b64decode(args['b64_path'])
            contents = base64.b64decode(args['b64_contents'])
            actual_injected_files.append((path, contents))
            return jsonutils.dumps({'returncode': '0', 'message': 'success'})
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       '_plugin_agent_inject_file', fake_inject_file)
        # RSA keys are passed through ssh_encrypt_text for the agent.
        def fake_encrypt_text(sshkey, new_pass):
            self.assertEqual("ssh-rsa fake_keydata", sshkey)
            return "fake"
        self.stubs.Set(crypto, 'ssh_encrypt_text', fake_encrypt_text)
        expected_data = ('\n# The following ssh key was injected by '
                         'Nova\nssh-rsa fake_keydata\n')
        injected_files = [('/root/.ssh/authorized_keys', expected_data)]
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64",
                         key_data='ssh-rsa fake_keydata')
        self.assertEqual(actual_injected_files, injected_files)
    def test_spawn_ssh_key_injection_non_rsa(self):
        # Test spawning with key_data on an instance. Should use
        # agent file injection.
        self.flags(use_agent_default=True,
                   group='xenserver')
        actual_injected_files = []
        # Record the agent file-injection calls instead of performing them.
        def fake_inject_file(self, method, args):
            path = base64.b64decode(args['b64_path'])
            contents = base64.b64decode(args['b64_contents'])
            actual_injected_files.append((path, contents))
            return jsonutils.dumps({'returncode': '0', 'message': 'success'})
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       '_plugin_agent_inject_file', fake_inject_file)
        # Non-RSA keys must never be routed through ssh_encrypt_text.
        def fake_encrypt_text(sshkey, new_pass):
            raise NotImplementedError("Should not be called")
        self.stubs.Set(crypto, 'ssh_encrypt_text', fake_encrypt_text)
        expected_data = ('\n# The following ssh key was injected by '
                         'Nova\nssh-dsa fake_keydata\n')
        injected_files = [('/root/.ssh/authorized_keys', expected_data)]
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64",
                         key_data='ssh-dsa fake_keydata')
        self.assertEqual(actual_injected_files, injected_files)
    def test_spawn_injected_files(self):
        # Test spawning with injected_files.
        self.flags(use_agent_default=True,
                   group='xenserver')
        actual_injected_files = []
        # Record the agent file-injection calls instead of performing them.
        def fake_inject_file(self, method, args):
            path = base64.b64decode(args['b64_path'])
            contents = base64.b64decode(args['b64_contents'])
            actual_injected_files.append((path, contents))
            return jsonutils.dumps({'returncode': '0', 'message': 'success'})
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       '_plugin_agent_inject_file', fake_inject_file)
        injected_files = [('/tmp/foo', 'foobar')]
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64",
                         injected_files=injected_files)
        self.check_vm_params_for_linux()
        self.assertEqual(actual_injected_files, injected_files)
    @mock.patch('nova.db.agent_build_get_by_triple')
    def test_spawn_agent_upgrade(self, mock_get):
        """Spawn succeeds when a newer agent build is available."""
        self.flags(use_agent_default=True,
                   group='xenserver')
        mock_get.return_value = {"version": "1.1.0", "architecture": "x86-64",
                                 "hypervisor": "xen", "os": "windows",
                                 "url": "url", "md5hash": "asdf",
                                 'created_at': None, 'updated_at': None,
                                 'deleted_at': None, 'deleted': False,
                                 'id': 1}
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64")
    @mock.patch('nova.db.agent_build_get_by_triple')
    def test_spawn_agent_upgrade_fails_silently(self, mock_get):
        """A failing agent upgrade does not fail the spawn; the error is
        recorded as an instance fault (AgentError).
        """
        mock_get.return_value = {"version": "1.1.0", "architecture": "x86-64",
                                 "hypervisor": "xen", "os": "windows",
                                 "url": "url", "md5hash": "asdf",
                                 'created_at': None, 'updated_at': None,
                                 'deleted_at': None, 'deleted': False,
                                 'id': 1}
        self._test_spawn_fails_silently_with(exception.AgentError,
                method="_plugin_agent_agentupdate", failure="fake_error")
    def test_spawn_with_resetnetwork_alternative_returncode(self):
        """A resetnetwork returncode of '500' is still treated as
        success.
        """
        self.flags(use_agent_default=True,
                   group='xenserver')
        def fake_resetnetwork(self, method, args):
            fake_resetnetwork.called = True
            # NOTE(johngarbutt): as returned by FreeBSD and Gentoo
            return jsonutils.dumps({'returncode': '500',
                                    'message': 'success'})
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       '_plugin_agent_resetnetwork', fake_resetnetwork)
        fake_resetnetwork.called = False
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64")
        self.assertTrue(fake_resetnetwork.called)
    def _test_spawn_fails_silently_with(self, expected_exception_cls,
                                        method="_plugin_agent_version",
                                        failure=None, value=None):
        """Make the given agent call fail and assert the spawn still
        succeeds, while *expected_exception_cls* is recorded as an
        instance fault.
        """
        self.flags(use_agent_default=True,
                   agent_version_timeout=0,
                   group='xenserver')
        # Either raise the given failure or return the given value.
        def fake_agent_call(self, method, args):
            if failure:
                raise xenapi_fake.Failure([failure])
            else:
                return value
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       method, fake_agent_call)
        called = {}
        # Capture the exception passed to add_instance_fault_from_exc.
        def fake_add_instance_fault(*args, **kwargs):
            called["fake_add_instance_fault"] = args[2]
        self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
                       fake_add_instance_fault)
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64")
        actual_exception = called["fake_add_instance_fault"]
        self.assertIsInstance(actual_exception, expected_exception_cls)
def test_spawn_fails_silently_with_agent_timeout(self):
self._test_spawn_fails_silently_with(exception.AgentTimeout,
failure="TIMEOUT:fake")
def test_spawn_fails_silently_with_agent_not_implemented(self):
self._test_spawn_fails_silently_with(exception.AgentNotImplemented,
failure="NOT IMPLEMENTED:fake")
def test_spawn_fails_silently_with_agent_error(self):
self._test_spawn_fails_silently_with(exception.AgentError,
failure="fake_error")
def test_spawn_fails_silently_with_agent_bad_return(self):
error = jsonutils.dumps({'returncode': -1, 'message': 'fake'})
self._test_spawn_fails_silently_with(exception.AgentError,
value=error)
def test_spawn_sets_last_dom_id(self):
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
self.assertEqual(self.vm['domid'],
self.vm['other_config']['last_dom_id'])
    def test_rescue(self):
        """Rescue builds a -rescue VM that re-attaches the original disks.

        The original root disk moves to userdevice 1 (the rescue image
        takes 0), swap/ephemeral disks keep their device numbers, and
        attached volumes (osvol) are not re-attached.
        """
        instance = self._create_instance(spawn=False, obj=True)
        xenapi_fake.create_vm(instance['name'], 'Running')
        session = get_session()
        vm_ref = vm_utils.lookup(session, instance['name'])
        swap_vdi_ref = xenapi_fake.create_vdi('swap', None)
        root_vdi_ref = xenapi_fake.create_vdi('root', None)
        eph1_vdi_ref = xenapi_fake.create_vdi('eph', None)
        eph2_vdi_ref = xenapi_fake.create_vdi('eph', None)
        vol_vdi_ref = xenapi_fake.create_vdi('volume', None)
        xenapi_fake.create_vbd(vm_ref, swap_vdi_ref, userdevice=2)
        xenapi_fake.create_vbd(vm_ref, root_vdi_ref, userdevice=0)
        xenapi_fake.create_vbd(vm_ref, eph1_vdi_ref, userdevice=4)
        xenapi_fake.create_vbd(vm_ref, eph2_vdi_ref, userdevice=5)
        xenapi_fake.create_vbd(vm_ref, vol_vdi_ref, userdevice=6,
                               other_config={'osvol': True})
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        image_meta = {'id': IMAGE_VHD,
                      'disk_format': 'vhd',
                      'properties': {'vm_mode': 'xen'}}
        conn.rescue(self.context, instance, [], image_meta, '')
        vm = xenapi_fake.get_record('VM', vm_ref)
        rescue_name = "%s-rescue" % vm["name_label"]
        rescue_ref = vm_utils.lookup(session, rescue_name)
        rescue_vm = xenapi_fake.get_record('VM', rescue_ref)
        # Map each VDI attached to the rescue VM to its device number.
        vdi_refs = {}
        for vbd_ref in rescue_vm['VBDs']:
            vbd = xenapi_fake.get_record('VBD', vbd_ref)
            vdi_refs[vbd['VDI']] = vbd['userdevice']
        self.assertEqual('1', vdi_refs[root_vdi_ref])
        self.assertEqual('2', vdi_refs[swap_vdi_ref])
        self.assertEqual('4', vdi_refs[eph1_vdi_ref])
        self.assertEqual('5', vdi_refs[eph2_vdi_ref])
        self.assertNotIn(vol_vdi_ref, vdi_refs)
    def test_rescue_preserve_disk_on_failure(self):
        """The original root disk survives a failed rescue setup
        (bug #1227898).
        """
        instance = self._create_instance(obj=True)
        session = get_session()
        image_meta = {'id': IMAGE_VHD,
                      'disk_format': 'vhd',
                      'properties': {'vm_mode': 'xen'}}
        vm_ref = vm_utils.lookup(session, instance['name'])
        vdi_ref, vdi_rec = vm_utils.get_vdi_for_vm_safely(session, vm_ref)
        # raise an error in the spawn setup process and trigger the
        # undo manager logic:
        def fake_start(*args, **kwargs):
            raise test.TestingException('Start Error')
        self.stubs.Set(self.conn._vmops, '_start', fake_start)
        self.assertRaises(test.TestingException, self.conn.rescue,
                          self.context, instance, [], image_meta, '')
        # confirm original disk still exists:
        vdi_ref2, vdi_rec2 = vm_utils.get_vdi_for_vm_safely(session, vm_ref)
        self.assertEqual(vdi_ref, vdi_ref2)
        self.assertEqual(vdi_rec['uuid'], vdi_rec2['uuid'])
    def test_unrescue(self):
        """Unrescue succeeds when a -rescue VM exists for the instance."""
        instance = self._create_instance(obj=True)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        # Unrescue expects the original instance to be powered off
        conn.power_off(instance)
        xenapi_fake.create_vm(instance['name'] + '-rescue', 'Running')
        conn.unrescue(instance, None)
    def test_unrescue_not_in_rescue(self):
        """Unrescuing an instance without a -rescue VM raises
        InstanceNotInRescueMode.
        """
        instance = self._create_instance(obj=True)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        # Ensure that it will not unrescue a non-rescued instance.
        self.assertRaises(exception.InstanceNotInRescueMode, conn.unrescue,
                          instance, None)
def test_finish_revert_migration(self):
instance = self._create_instance()
class VMOpsMock(object):
def __init__(self):
self.finish_revert_migration_called = False
def finish_revert_migration(self, context, instance, block_info,
power_on):
self.finish_revert_migration_called = True
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn._vmops = VMOpsMock()
conn.finish_revert_migration(self.context, instance, None)
self.assertTrue(conn._vmops.finish_revert_migration_called)
def test_reboot_hard(self):
instance = self._create_instance()
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.reboot(self.context, instance, None, "HARD")
    def test_poll_rebooting_instances(self):
        """poll_rebooting_instances issues a reboot via the compute API."""
        self.mox.StubOutWithMock(compute_api.API, 'reboot')
        compute_api.API.reboot(mox.IgnoreArg(), mox.IgnoreArg(),
                               mox.IgnoreArg())
        self.mox.ReplayAll()
        instance = self._create_instance()
        instances = [instance]
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        conn.poll_rebooting_instances(60, instances)
def test_reboot_soft(self):
instance = self._create_instance()
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.reboot(self.context, instance, None, "SOFT")
def test_reboot_halted(self):
session = get_session()
instance = self._create_instance(spawn=False)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
xenapi_fake.create_vm(instance['name'], 'Halted')
conn.reboot(self.context, instance, None, "SOFT")
vm_ref = vm_utils.lookup(session, instance['name'])
vm = xenapi_fake.get_record('VM', vm_ref)
self.assertEqual(vm['power_state'], 'Running')
def test_reboot_unknown_state(self):
instance = self._create_instance(spawn=False)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
xenapi_fake.create_vm(instance['name'], 'Unknown')
self.assertRaises(xenapi_fake.Failure, conn.reboot, self.context,
instance, None, "SOFT")
    def test_reboot_rescued(self):
        """Rebooting a rescued instance looks up the VM with
        check_rescue=True.
        """
        instance = self._create_instance()
        instance['vm_state'] = vm_states.RESCUED
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        real_result = vm_utils.lookup(conn._session, instance['name'])
        self.mox.StubOutWithMock(vm_utils, 'lookup')
        vm_utils.lookup(conn._session, instance['name'],
                        True).AndReturn(real_result)
        self.mox.ReplayAll()
        conn.reboot(self.context, instance, None, "SOFT")
def test_get_console_output_succeeds(self):
def fake_get_console_output(instance):
self.assertEqual("instance", instance)
return "console_log"
self.stubs.Set(self.conn._vmops, 'get_console_output',
fake_get_console_output)
self.assertEqual(self.conn.get_console_output('context', "instance"),
"console_log")
    def _test_maintenance_mode(self, find_host, find_aggregate):
        """Drive host_maintenance_mode and verify the instance is
        live-migrated away.

        *find_host* / *find_aggregate* control whether the host-lookup
        and aggregate-lookup stubs succeed.
        """
        real_call_xenapi = self.conn._session.call_xenapi
        instance = self._create_instance(spawn=True)
        api_calls = {}
        # Record all the xenapi calls, and return a fake list of hosts
        # for the host.get_all call
        def fake_call_xenapi(method, *args):
            api_calls[method] = args
            if method == 'host.get_all':
                return ['foo', 'bar', 'baz']
            return real_call_xenapi(method, *args)
        self.stubs.Set(self.conn._session, 'call_xenapi', fake_call_xenapi)
        def fake_aggregate_get(context, host, key):
            if find_aggregate:
                return [test_aggregate.fake_aggregate]
            else:
                return []
        self.stubs.Set(db, 'aggregate_get_by_host',
                       fake_aggregate_get)
        def fake_host_find(context, session, src, dst):
            if find_host:
                return 'bar'
            else:
                raise exception.NoValidHost("I saw this one coming...")
        self.stubs.Set(host, '_host_find', fake_host_find)
        result = self.conn.host_maintenance_mode('bar', 'on_maintenance')
        self.assertEqual(result, 'on_maintenance')
        # We expect the VM.pool_migrate call to have been called to
        # migrate our instance to the 'bar' host
        vm_ref = vm_utils.lookup(self.conn._session, instance['name'])
        host_ref = "foo"
        expected = (vm_ref, host_ref, {"live": "true"})
        self.assertEqual(api_calls.get('VM.pool_migrate'), expected)
        instance = db.instance_get_by_uuid(self.context, instance['uuid'])
        self.assertEqual(instance['vm_state'], vm_states.ACTIVE)
        self.assertEqual(instance['task_state'], task_states.MIGRATING)
def test_maintenance_mode(self):
self._test_maintenance_mode(True, True)
def test_maintenance_mode_no_host(self):
self.assertRaises(exception.NoValidHost,
self._test_maintenance_mode, False, True)
def test_maintenance_mode_no_aggregate(self):
self.assertRaises(exception.NotFound,
self._test_maintenance_mode, True, False)
    def test_uuid_find(self):
        """host._uuid_find maps an instance name back to its uuid."""
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        fake_inst = fake_instance.fake_db_instance(id=123)
        fake_inst2 = fake_instance.fake_db_instance(id=456)
        db.instance_get_all_by_host(self.context, fake_inst['host'],
                                    columns_to_join=None,
                                    use_slave=False
                                    ).AndReturn([fake_inst, fake_inst2])
        self.mox.ReplayAll()
        expected_name = CONF.instance_name_template % fake_inst['id']
        inst_uuid = host._uuid_find(self.context, fake_inst['host'],
                                    expected_name)
        self.assertEqual(inst_uuid, fake_inst['uuid'])
    def test_session_virtapi(self):
        """On a slave session, _get_host_uuid consults the aggregate DB."""
        was = {'called': False}
        def fake_aggregate_get_by_host(self, *args, **kwargs):
            was['called'] = True
            raise test.TestingException()
        self.stubs.Set(db, "aggregate_get_by_host",
                       fake_aggregate_get_by_host)
        self.stubs.Set(self.conn._session, "is_slave", True)
        self.assertRaises(test.TestingException,
                          self.conn._session._get_host_uuid)
        self.assertTrue(was['called'])
    def test_session_handles_aggregate_metadata(self):
        """On a slave session, the host uuid is read from the aggregate's
        metadetails entry for this host.
        """
        def fake_aggregate_get(context, host, key):
            agg = copy.copy(test_aggregate.fake_aggregate)
            agg['metadetails'][CONF.host] = 'this_should_be_metadata'
            return [agg]
        self.stubs.Set(db, 'aggregate_get_by_host',
                       fake_aggregate_get)
        self.stubs.Set(self.conn._session, "is_slave", True)
        self.assertEqual('this_should_be_metadata',
                         self.conn._session._get_host_uuid())
def test_per_instance_usage_running(self):
instance = self._create_instance(spawn=True)
flavor = objects.Flavor.get_by_id(self.context, 3)
expected = {instance['uuid']: {'memory_mb': flavor['memory_mb'],
'uuid': instance['uuid']}}
actual = self.conn.get_per_instance_usage()
self.assertEqual(expected, actual)
# Paused instances still consume resources:
self.conn.pause(instance)
actual = self.conn.get_per_instance_usage()
self.assertEqual(expected, actual)
def test_per_instance_usage_suspended(self):
# Suspended instances do not consume memory:
instance = self._create_instance(spawn=True)
self.conn.suspend(self.context, instance)
actual = self.conn.get_per_instance_usage()
self.assertEqual({}, actual)
def test_per_instance_usage_halted(self):
instance = self._create_instance(spawn=True, obj=True)
self.conn.power_off(instance)
actual = self.conn.get_per_instance_usage()
self.assertEqual({}, actual)
    def _create_instance(self, spawn=True, obj=False, **attrs):
        """Create (and by default spawn) a test instance.

        :param spawn: also spawn the instance through the driver.
        :param obj: return the Instance object instead of a primitive.
        :param attrs: overrides merged into the default instance values.
        """
        instance_values = {
            'uuid': str(uuid.uuid4()),
            'display_name': 'host-',
            'project_id': self.project_id,
            'user_id': self.user_id,
            'image_ref': 1,
            'kernel_id': 2,
            'ramdisk_id': 3,
            'root_gb': 80,
            'ephemeral_gb': 0,
            'instance_type_id': '3',  # m1.large
            'os_type': 'linux',
            'vm_mode': 'hvm',
            'architecture': 'x86-64'}
        instance_values.update(attrs)
        instance = create_instance_with_system_metadata(self.context,
                                                        instance_values)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs)
        image_meta = {'id': IMAGE_VHD,
                      'disk_format': 'vhd'}
        if spawn:
            self.conn.spawn(self.context, instance, image_meta, [], 'herp',
                            network_info)
        if obj:
            return instance
        return base.obj_to_primitive(instance)
    def test_destroy_clean_up_kernel_and_ramdisk(self):
        """destroy() removes the VM and its kernel/ramdisk files."""
        def fake_lookup_kernel_ramdisk(session, vm_ref):
            return "kernel", "ramdisk"
        self.stubs.Set(vm_utils, "lookup_kernel_ramdisk",
                       fake_lookup_kernel_ramdisk)
        # Verify destroy_kernel_ramdisk gets the looked-up pair.
        def fake_destroy_kernel_ramdisk(session, instance, kernel, ramdisk):
            fake_destroy_kernel_ramdisk.called = True
            self.assertEqual("kernel", kernel)
            self.assertEqual("ramdisk", ramdisk)
        fake_destroy_kernel_ramdisk.called = False
        self.stubs.Set(vm_utils, "destroy_kernel_ramdisk",
                       fake_destroy_kernel_ramdisk)
        instance = self._create_instance(spawn=True, obj=True)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs)
        self.conn.destroy(self.context, instance, network_info)
        vm_ref = vm_utils.lookup(self.conn._session, instance['name'])
        self.assertIsNone(vm_ref)
        self.assertTrue(fake_destroy_kernel_ramdisk.called)
class XenAPIDiffieHellmanTestCase(test.NoDBTestCase):
    """Exercise the SimpleDH Diffie-Hellman key-exchange helper."""

    def setUp(self):
        super(XenAPIDiffieHellmanTestCase, self).setUp()
        self.alice = agent.SimpleDH()
        self.bob = agent.SimpleDH()

    def test_shared(self):
        # Both parties must derive the same shared secret from each
        # other's public keys.
        shared_by_alice = self.alice.compute_shared(self.bob.get_public())
        shared_by_bob = self.bob.compute_shared(self.alice.get_public())
        self.assertEqual(shared_by_alice, shared_by_bob)

    def _test_encryption(self, message):
        # Round-trip: alice encrypts, bob decrypts, message is preserved.
        ciphertext = self.alice.encrypt(message)
        self.assertFalse(ciphertext.endswith('\n'))
        self.assertEqual(self.bob.decrypt(ciphertext), message)

    def test_encrypt_simple_message(self):
        self._test_encryption('This is a simple message.')

    def test_encrypt_message_with_newlines_at_end(self):
        self._test_encryption('This message has a newline at the end.\n')

    def test_encrypt_many_newlines_at_end(self):
        self._test_encryption('Message with lotsa newlines.\n\n\n')

    def test_encrypt_newlines_inside_message(self):
        self._test_encryption('Message\nwith\ninterior\nnewlines.')

    def test_encrypt_with_leading_newlines(self):
        self._test_encryption('\n\nMessage with leading newlines.')

    def test_encrypt_really_long_message(self):
        self._test_encryption('abcd' * 1024)
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIMigrateInstance(stubs.XenAPITestBase):
    """Unit test for verifying migration-related actions."""
    # Some of these tests take real file locks; run them serialized.
    REQUIRES_LOCKING = True
    def setUp(self):
        super(XenAPIMigrateInstance, self).setUp()
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        self.flags(firewall_driver='nova.virt.xenapi.firewall.'
                   'Dom0IptablesFirewallDriver')
        # Fake out the XenAPI session, DB instance API and networking so
        # no real hypervisor or database is needed.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        db_fakes.stub_out_db_instance_api(self.stubs)
        xenapi_fake.create_network('fake', 'fake_br1')
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)
        self.instance_values = {
            'project_id': self.project_id,
            'user_id': self.user_id,
            'image_ref': 1,
            'kernel_id': None,
            'ramdisk_id': None,
            'root_gb': 80,
            'ephemeral_gb': 0,
            'instance_type_id': '3',  # m1.large
            'os_type': 'linux',
            'architecture': 'x86-64'}
        migration_values = {
            'source_compute': 'nova-compute',
            'dest_compute': 'nova-compute',
            'dest_host': '10.127.5.114',
            'status': 'post-migrating',
            'instance_uuid': '15f23e6a-cc6e-4d22-b651-d9bdaac316f7',
            'old_instance_type_id': 5,
            'new_instance_type_id': 1
        }
        self.migration = db.migration_create(
            context.get_admin_context(), migration_values)
        fake_processutils.stub_out_processutils_execute(self.stubs)
        stubs.stub_out_migration_methods(self.stubs)
        stubs.stubout_get_this_vm_uuid(self.stubs)
        def fake_inject_instance_metadata(self, instance, vm):
            pass
        self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
                       fake_inject_instance_metadata)
    def test_migrate_disk_and_power_off(self):
        # Happy path: migrating with an adequate root disk must not raise.
        instance = db.instance_create(self.context, self.instance_values)
        xenapi_fake.create_vm(instance['name'], 'Running')
        flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=80,
                                             ephemeral_gb=0)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vm_ref = vm_utils.lookup(conn._session, instance['name'])
        self.mox.StubOutWithMock(volume_utils, 'is_booted_from_volume')
        volume_utils.is_booted_from_volume(conn._session, vm_ref)
        self.mox.ReplayAll()
        conn.migrate_disk_and_power_off(self.context, instance,
                                        '127.0.0.1', flavor, None)
    def test_migrate_disk_and_power_off_passes_exceptions(self):
        # Failures in the resize-up path must surface as MigrationError.
        instance = db.instance_create(self.context, self.instance_values)
        xenapi_fake.create_vm(instance['name'], 'Running')
        flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=80,
                                             ephemeral_gb=0)
        def fake_raise(*args, **kwargs):
            raise exception.MigrationError(reason='test failure')
        self.stubs.Set(vmops.VMOps, "_migrate_disk_resizing_up", fake_raise)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.MigrationError,
                          conn.migrate_disk_and_power_off,
                          self.context, instance,
                          '127.0.0.1', flavor, None)
    def test_migrate_disk_and_power_off_throws_on_zero_gb_resize_down(self):
        # Resizing an 80 GB root disk down to 0 GB is rejected.
        instance = db.instance_create(self.context, self.instance_values)
        flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=0,
                                             ephemeral_gb=0)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.ResizeError,
                          conn.migrate_disk_and_power_off,
                          self.context, instance,
                          'fake_dest', flavor, None)
    def test_migrate_disk_and_power_off_with_zero_gb_old_and_new_works(self):
        # 0 GB -> 0 GB is not a resize down, so the migration proceeds.
        flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=0,
                                             ephemeral_gb=0)
        values = copy.copy(self.instance_values)
        values["root_gb"] = 0
        values["ephemeral_gb"] = 0
        instance = db.instance_create(self.context, values)
        xenapi_fake.create_vm(instance['name'], 'Running')
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vm_ref = vm_utils.lookup(conn._session, instance['name'])
        self.mox.StubOutWithMock(volume_utils, 'is_booted_from_volume')
        volume_utils.is_booted_from_volume(conn._session, vm_ref)
        self.mox.ReplayAll()
        conn.migrate_disk_and_power_off(self.context, instance,
                                        '127.0.0.1', flavor, None)
    def _test_revert_migrate(self, power_on):
        # finish_migration followed by finish_revert_migration: checks the
        # VDI got resized, the VM was started only when power_on was
        # requested, and the revert hook actually ran.
        instance = create_instance_with_system_metadata(self.context,
                                                        self.instance_values)
        self.called = False
        self.fake_vm_start_called = False
        self.fake_finish_revert_migration_called = False
        # NOTE(review): shadows the imported `context` module for the rest
        # of this method; only used as an opaque context argument below.
        context = 'fake_context'
        def fake_vm_start(*args, **kwargs):
            self.fake_vm_start_called = True
        def fake_vdi_resize(*args, **kwargs):
            self.called = True
        def fake_finish_revert_migration(*args, **kwargs):
            self.fake_finish_revert_migration_called = True
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       "VDI_resize_online", fake_vdi_resize)
        self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
        self.stubs.Set(vmops.VMOps, 'finish_revert_migration',
                       fake_finish_revert_migration)
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
                              product_version=(4, 0, 0),
                              product_brand='XenServer')
        self.mox.StubOutWithMock(volume_utils, 'is_booted_from_volume')
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs)
        image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
        # NOTE(review): `base` shadows the nova.objects `base` module
        # import for the remainder of this method.
        base = xenapi_fake.create_vdi('hurr', 'fake')
        base_uuid = xenapi_fake.get_record('VDI', base)['uuid']
        cow = xenapi_fake.create_vdi('durr', 'fake')
        cow_uuid = xenapi_fake.get_record('VDI', cow)['uuid']
        conn.finish_migration(self.context, self.migration, instance,
                              dict(base_copy=base_uuid, cow=cow_uuid),
                              network_info, image_meta, resize_instance=True,
                              block_device_info=None, power_on=power_on)
        self.assertEqual(self.called, True)
        self.assertEqual(self.fake_vm_start_called, power_on)
        conn.finish_revert_migration(context, instance, network_info)
        self.assertEqual(self.fake_finish_revert_migration_called, True)
    def test_revert_migrate_power_on(self):
        self._test_revert_migrate(True)
    def test_revert_migrate_power_off(self):
        self._test_revert_migrate(False)
    def _test_finish_migrate(self, power_on):
        # finish_migration alone: VDI must be resized, and the VM must be
        # started only when power_on is requested.
        instance = create_instance_with_system_metadata(self.context,
                                                        self.instance_values)
        self.called = False
        self.fake_vm_start_called = False
        def fake_vm_start(*args, **kwargs):
            self.fake_vm_start_called = True
        def fake_vdi_resize(*args, **kwargs):
            self.called = True
        self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       "VDI_resize_online", fake_vdi_resize)
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
                              product_version=(4, 0, 0),
                              product_brand='XenServer')
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs)
        image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
        conn.finish_migration(self.context, self.migration, instance,
                              dict(base_copy='hurr', cow='durr'),
                              network_info, image_meta, resize_instance=True,
                              block_device_info=None, power_on=power_on)
        self.assertEqual(self.called, True)
        self.assertEqual(self.fake_vm_start_called, power_on)
    def test_finish_migrate_power_on(self):
        self._test_finish_migrate(True)
    def test_finish_migrate_power_off(self):
        self._test_finish_migrate(False)
    def test_finish_migrate_no_local_storage(self):
        # With 0 GB disks there is nothing to resize: VDI_resize_online
        # must never be invoked.
        values = copy.copy(self.instance_values)
        values["root_gb"] = 0
        values["ephemeral_gb"] = 0
        instance = create_instance_with_system_metadata(self.context, values)
        def fake_vdi_resize(*args, **kwargs):
            raise Exception("This shouldn't be called")
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       "VDI_resize_online", fake_vdi_resize)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs)
        image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
        conn.finish_migration(self.context, self.migration, instance,
                              dict(base_copy='hurr', cow='durr'),
                              network_info, image_meta, resize_instance=True)
    def test_finish_migrate_no_resize_vdi(self):
        # resize_instance=False: VDI_resize_online must never be invoked.
        instance = create_instance_with_system_metadata(self.context,
                                                        self.instance_values)
        def fake_vdi_resize(*args, **kwargs):
            raise Exception("This shouldn't be called")
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       "VDI_resize_online", fake_vdi_resize)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs)
        # Resize instance would be determined by the compute call
        image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
        conn.finish_migration(self.context, self.migration, instance,
                              dict(base_copy='hurr', cow='durr'),
                              network_info, image_meta, resize_instance=False)
    @stub_vm_utils_with_vdi_attached_here
    def test_migrate_too_many_partitions_no_resize_down(self):
        # More than one partition makes a resize down unsafe; the driver
        # must roll the instance back via InstanceFaultRollback.
        instance_values = self.instance_values
        instance = db.instance_create(self.context, instance_values)
        xenapi_fake.create_vm(instance['name'], 'Running')
        flavor = db.flavor_get_by_name(self.context, 'm1.small')
        flavor = fake_flavor.fake_flavor_obj(self.context, **flavor)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        def fake_get_partitions(partition):
            return [(1, 2, 3, 4, "", ""), (1, 2, 3, 4, "", "")]
        self.stubs.Set(vm_utils, '_get_partitions', fake_get_partitions)
        self.assertRaises(exception.InstanceFaultRollback,
                          conn.migrate_disk_and_power_off,
                          self.context, instance,
                          '127.0.0.1', flavor, None)
    @stub_vm_utils_with_vdi_attached_here
    def test_migrate_bad_fs_type_no_resize_down(self):
        # An unsupported filesystem type (ext2 here) triggers rollback.
        instance_values = self.instance_values
        instance = db.instance_create(self.context, instance_values)
        xenapi_fake.create_vm(instance['name'], 'Running')
        flavor = db.flavor_get_by_name(self.context, 'm1.small')
        flavor = fake_flavor.fake_flavor_obj(self.context, **flavor)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        def fake_get_partitions(partition):
            return [(1, 2, 3, "ext2", "", "boot")]
        self.stubs.Set(vm_utils, '_get_partitions', fake_get_partitions)
        self.assertRaises(exception.InstanceFaultRollback,
                          conn.migrate_disk_and_power_off,
                          self.context, instance,
                          '127.0.0.1', flavor, None)
    def test_migrate_rollback_when_resize_down_fs_fails(self):
        # When migrating the resized VHD fails, the new VDI must be
        # destroyed and the original VM restored; the instance is saved
        # three times and left at 60.0% progress.
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vmops = conn._vmops
        self.mox.StubOutWithMock(vmops, '_resize_ensure_vm_is_shutdown')
        self.mox.StubOutWithMock(vmops, '_apply_orig_vm_name_label')
        self.mox.StubOutWithMock(vm_utils, 'resize_disk')
        self.mox.StubOutWithMock(vm_utils, 'migrate_vhd')
        self.mox.StubOutWithMock(vm_utils, 'destroy_vdi')
        self.mox.StubOutWithMock(vm_utils, 'get_vdi_for_vm_safely')
        self.mox.StubOutWithMock(vmops, '_restore_orig_vm_and_cleanup_orphan')
        instance = objects.Instance(context=self.context,
                                    auto_disk_config=True, uuid='uuid')
        instance.obj_reset_changes()
        vm_ref = "vm_ref"
        dest = "dest"
        flavor = "type"
        sr_path = "sr_path"
        # Recorded mox expectations, in the exact order the rollback path
        # is expected to call them.
        vmops._resize_ensure_vm_is_shutdown(instance, vm_ref)
        vmops._apply_orig_vm_name_label(instance, vm_ref)
        old_vdi_ref = "old_ref"
        vm_utils.get_vdi_for_vm_safely(vmops._session, vm_ref).AndReturn(
            (old_vdi_ref, None))
        new_vdi_ref = "new_ref"
        new_vdi_uuid = "new_uuid"
        vm_utils.resize_disk(vmops._session, instance, old_vdi_ref,
                             flavor).AndReturn((new_vdi_ref, new_vdi_uuid))
        vm_utils.migrate_vhd(vmops._session, instance, new_vdi_uuid, dest,
                             sr_path, 0).AndRaise(
                                 exception.ResizeError(reason="asdf"))
        vm_utils.destroy_vdi(vmops._session, new_vdi_ref)
        vmops._restore_orig_vm_and_cleanup_orphan(instance)
        self.mox.ReplayAll()
        with mock.patch.object(instance, 'save') as mock_save:
            self.assertRaises(exception.InstanceFaultRollback,
                              vmops._migrate_disk_resizing_down, self.context,
                              instance, dest, flavor, vm_ref, sr_path)
            self.assertEqual(3, mock_save.call_count)
            self.assertEqual(60.0, instance.progress)
    def test_resize_ensure_vm_is_shutdown_cleanly(self):
        # A successful clean shutdown: hard_shutdown_vm is never called.
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vmops = conn._vmops
        fake_instance = {'uuid': 'uuid'}
        self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
        self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
        self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
        vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
        vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
                                   "ref").AndReturn(True)
        self.mox.ReplayAll()
        vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
    def test_resize_ensure_vm_is_shutdown_forced(self):
        # Falls back to a hard shutdown when the clean shutdown fails.
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vmops = conn._vmops
        fake_instance = {'uuid': 'uuid'}
        self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
        self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
        self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
        vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
        vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
                                   "ref").AndReturn(False)
        vm_utils.hard_shutdown_vm(vmops._session, fake_instance,
                                  "ref").AndReturn(True)
        self.mox.ReplayAll()
        vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
    def test_resize_ensure_vm_is_shutdown_fails(self):
        # ResizeError when neither clean nor hard shutdown succeeds.
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vmops = conn._vmops
        fake_instance = {'uuid': 'uuid'}
        self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
        self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
        self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
        vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
        vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
                                   "ref").AndReturn(False)
        vm_utils.hard_shutdown_vm(vmops._session, fake_instance,
                                  "ref").AndReturn(False)
        self.mox.ReplayAll()
        self.assertRaises(exception.ResizeError,
            vmops._resize_ensure_vm_is_shutdown, fake_instance, "ref")
    def test_resize_ensure_vm_is_shutdown_already_shutdown(self):
        # No shutdown attempts are made when the VM is already halted.
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vmops = conn._vmops
        fake_instance = {'uuid': 'uuid'}
        self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
        self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
        self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
        vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(True)
        self.mox.ReplayAll()
        vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
class XenAPIImageTypeTestCase(test.NoDBTestCase):
    """Tests for the vm_utils.ImageType enumeration helper."""

    def test_to_string(self):
        # A type id maps back to its canonical string name.
        self.assertEqual(
            vm_utils.ImageType.KERNEL_STR,
            vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL))

    def _assert_role(self, expected_role, image_type_id):
        actual_role = vm_utils.ImageType.get_role(image_type_id)
        self.assertEqual(expected_role, actual_role)

    def test_get_image_role_kernel(self):
        self._assert_role('kernel', vm_utils.ImageType.KERNEL)

    def test_get_image_role_ramdisk(self):
        self._assert_role('ramdisk', vm_utils.ImageType.RAMDISK)

    def test_get_image_role_disk(self):
        self._assert_role('root', vm_utils.ImageType.DISK)

    def test_get_image_role_disk_raw(self):
        self._assert_role('root', vm_utils.ImageType.DISK_RAW)

    def test_get_image_role_disk_vhd(self):
        self._assert_role('root', vm_utils.ImageType.DISK_VHD)
class XenAPIDetermineDiskImageTestCase(test.NoDBTestCase):
    """Checks detection of the ImageType from image metadata."""

    def assert_disk_type(self, image_meta, expected_disk_type):
        self.assertEqual(expected_disk_type,
                         vm_utils.determine_disk_image_type(image_meta))

    def _meta_for(self, disk_format):
        # Build an ImageMeta object carrying only the disk_format field.
        return objects.ImageMeta.from_dict({'disk_format': disk_format})

    def test_machine(self):
        self.assert_disk_type(self._meta_for('ami'),
                              vm_utils.ImageType.DISK)

    def test_raw(self):
        self.assert_disk_type(self._meta_for('raw'),
                              vm_utils.ImageType.DISK_RAW)

    def test_vhd(self):
        self.assert_disk_type(self._meta_for('vhd'),
                              vm_utils.ImageType.DISK_VHD)
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIHostTestCase(stubs.XenAPITestBase):
    """Tests HostState, which holds metrics from XenServer that get
    reported back to the Schedulers.
    """
    def setUp(self):
        super(XenAPIHostTestCase, self).setUp()
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        # Host stats below come from the canned values in the xenapi
        # fake session module.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.context = context.get_admin_context()
        self.flags(use_local=True, group='conductor')
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.instance = fake_instance.fake_db_instance(name='foo')
    def test_host_state(self):
        # Cached stats (refresh=False) must reflect the fake plugin data.
        stats = self.conn.host_state.get_host_stats(False)
        # Values from fake.create_local_srs (ext SR)
        self.assertEqual(stats['disk_total'], 40000)
        self.assertEqual(stats['disk_used'], 20000)
        # Values from fake._plugin_xenhost_host_data
        self.assertEqual(stats['host_memory_total'], 10)
        self.assertEqual(stats['host_memory_overhead'], 20)
        self.assertEqual(stats['host_memory_free'], 30)
        self.assertEqual(stats['host_memory_free_computed'], 40)
        self.assertEqual(stats['hypervisor_hostname'], 'fake-xenhost')
        self.assertEqual(stats['host_cpu_info']['cpu_count'], 4)
        self.assertThat({
            'vendor': 'GenuineIntel',
            'model': 'Intel(R) Xeon(R) CPU X3430 @ 2.40GHz',
            'topology': {
                'sockets': 1,
                'cores': 4,
                'threads': 1,
            },
            'features': [
                'fpu', 'de', 'tsc', 'msr', 'pae', 'mce',
                'cx8', 'apic', 'sep', 'mtrr', 'mca',
                'cmov', 'pat', 'clflush', 'acpi', 'mmx',
                'fxsr', 'sse', 'sse2', 'ss', 'ht',
                'nx', 'constant_tsc', 'nonstop_tsc',
                'aperfmperf', 'pni', 'vmx', 'est', 'ssse3',
                'sse4_1', 'sse4_2', 'popcnt', 'hypervisor',
                'ida', 'tpr_shadow', 'vnmi', 'flexpriority',
                'ept', 'vpid',
            ]},
            matchers.DictMatches(stats['cpu_model']))
        # No VMs running
        self.assertEqual(stats['vcpus_used'], 0)
    def test_host_state_vcpus_used(self):
        # vcpus_used must track running VMs when stats are refreshed.
        stats = self.conn.host_state.get_host_stats(True)
        self.assertEqual(stats['vcpus_used'], 0)
        xenapi_fake.create_vm(self.instance['name'], 'Running')
        stats = self.conn.host_state.get_host_stats(True)
        self.assertEqual(stats['vcpus_used'], 4)
    def test_pci_passthrough_devices(self):
        # The fake host exposes two PCI passthrough devices.
        stats = self.conn.host_state.get_host_stats(False)
        self.assertEqual(len(stats['pci_passthrough_devices']), 2)
    def test_host_state_missing_sr(self):
        # Must trigger construction of 'host_state' property
        # before introducing the stub which raises the error
        hs = self.conn.host_state
        def fake_safe_find_sr(session):
            raise exception.StorageRepositoryNotFound('not there')
        self.stubs.Set(vm_utils, 'safe_find_sr', fake_safe_find_sr)
        self.assertRaises(exception.StorageRepositoryNotFound,
                          hs.get_host_stats,
                          refresh=True)
    def _test_host_action(self, method, action, expected=None):
        # Invoke a host API taking (host, action); the fake session echoes
        # back, so the result should equal `expected` (or the action).
        result = method('host', action)
        if not expected:
            expected = action
        self.assertEqual(result, expected)
    def _test_host_action_no_param(self, method, action, expected=None):
        # Same as _test_host_action for host APIs without a host argument.
        result = method(action)
        if not expected:
            expected = action
        self.assertEqual(result, expected)
    def test_host_reboot(self):
        self._test_host_action_no_param(self.conn.host_power_action, 'reboot')
    def test_host_shutdown(self):
        self._test_host_action_no_param(self.conn.host_power_action,
            'shutdown')
    def test_host_startup(self):
        # Powering a host back up is not supported by this driver.
        self.assertRaises(NotImplementedError,
                          self.conn.host_power_action, 'startup')
    def test_host_maintenance_on(self):
        self._test_host_action(self.conn.host_maintenance_mode,
                               True, 'on_maintenance')
    def test_host_maintenance_off(self):
        self._test_host_action(self.conn.host_maintenance_mode,
                               False, 'off_maintenance')
    def test_set_enable_host_enable(self):
        # Enabling the host must also clear the service's disabled flag.
        _create_service_entries(self.context, values={'nova': ['fake-mini']})
        self._test_host_action_no_param(self.conn.set_host_enabled,
                                        True, 'enabled')
        service = db.service_get_by_host_and_binary(self.context, 'fake-mini',
                                                    'nova-compute')
        self.assertEqual(service.disabled, False)
    def test_set_enable_host_disable(self):
        # Disabling the host must also set the service's disabled flag.
        _create_service_entries(self.context, values={'nova': ['fake-mini']})
        self._test_host_action_no_param(self.conn.set_host_enabled,
                                        False, 'disabled')
        service = db.service_get_by_host_and_binary(self.context, 'fake-mini',
                                                    'nova-compute')
        self.assertEqual(service.disabled, True)
    def test_get_host_uptime(self):
        result = self.conn.get_host_uptime()
        self.assertEqual(result, 'fake uptime')
    def test_supported_instances_is_included_in_host_state(self):
        stats = self.conn.host_state.get_host_stats(False)
        self.assertIn('supported_instances', stats)
    def test_supported_instances_is_calculated_by_to_supported_instances(self):
        def to_supported_instances(somedata):
            return "SOMERETURNVALUE"
        self.stubs.Set(host, 'to_supported_instances', to_supported_instances)
        stats = self.conn.host_state.get_host_stats(False)
        self.assertEqual("SOMERETURNVALUE", stats['supported_instances'])
    def test_update_stats_caches_hostname(self):
        # The hypervisor hostname reported by the first refresh must stay
        # cached even if the host later reports a different one.
        self.mox.StubOutWithMock(host, 'call_xenhost')
        self.mox.StubOutWithMock(vm_utils, 'scan_default_sr')
        self.mox.StubOutWithMock(vm_utils, 'list_vms')
        self.mox.StubOutWithMock(self.conn._session, 'call_xenapi')
        data = {'disk_total': 0,
                'disk_used': 0,
                'disk_available': 0,
                'supported_instances': 0,
                'host_capabilities': [],
                'host_hostname': 'foo',
                'vcpus_used': 0,
                }
        sr_rec = {
            'physical_size': 0,
            'physical_utilisation': 0,
            'virtual_allocation': 0,
        }
        for i in range(3):
            host.call_xenhost(mox.IgnoreArg(), 'host_data', {}).AndReturn(data)
            vm_utils.scan_default_sr(self.conn._session).AndReturn("ref")
            vm_utils.list_vms(self.conn._session).AndReturn([])
            self.conn._session.call_xenapi('SR.get_record', "ref").AndReturn(
                sr_rec)
            if i == 2:
                # On the third call (the second below) change the hostname
                data = dict(data, host_hostname='bar')
        self.mox.ReplayAll()
        stats = self.conn.host_state.get_host_stats(refresh=True)
        self.assertEqual('foo', stats['hypervisor_hostname'])
        stats = self.conn.host_state.get_host_stats(refresh=True)
        self.assertEqual('foo', stats['hypervisor_hostname'])
class ToSupportedInstancesTestCase(test.NoDBTestCase):
    """Tests parsing of host capability strings into (arch, hv, mode)."""

    def test_default_return_value(self):
        # None capabilities yield an empty list.
        self.assertEqual([], host.to_supported_instances(None))

    def test_return_value(self):
        self.assertEqual(
            [(arch.X86_64, hv_type.XEN, 'xen')],
            host.to_supported_instances([u'xen-3.0-x86_64']))

    def test_invalid_values_do_not_break(self):
        # Unparseable entries ('spam') are skipped, not fatal.
        self.assertEqual(
            [(arch.X86_64, hv_type.XEN, 'xen')],
            host.to_supported_instances([u'xen-3.0-x86_64', 'spam']))

    def test_multiple_values(self):
        expected = [(arch.X86_64, hv_type.XEN, 'xen'),
                    (arch.I686, hv_type.XEN, 'hvm')]
        self.assertEqual(
            expected,
            host.to_supported_instances([u'xen-3.0-x86_64', 'hvm-3.0-x86_32']))
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
    """Tests the fail-safe checks applied before auto-partitioning a disk.

    _attach_disks() may only repartition/resize the root disk when the
    instance has auto_disk_config=True *and* the virt-layer fail-safes
    pass (exactly one partition, numbered 1, with an ext* filesystem).
    """
    def setUp(self):
        super(XenAPIAutoDiskConfigTestCase, self).setUp()
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        self.flags(firewall_driver='nova.virt.xenapi.firewall.'
                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.instance_values = {
            'project_id': self.project_id,
            'user_id': self.user_id,
            'image_ref': 1,
            'kernel_id': 2,
            'ramdisk_id': 3,
            'root_gb': 80,
            'ephemeral_gb': 0,
            'instance_type_id': '3',  # m1.large
            'os_type': 'linux',
            'architecture': 'x86-64'}
        self.context = context.RequestContext(self.user_id, self.project_id)
        # VBD creation needs a real Xen host; stub it to a no-op.
        def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
                            vbd_type='disk', read_only=False, bootable=True,
                            osvol=False):
            pass
        self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)
    def assertIsPartitionCalled(self, called):
        """Attach disks for a fresh instance and assert whether the
        partition/filesystem resize helper was invoked.
        """
        marker = {"partition_called": False}
        def fake_resize_part_and_fs(dev, start, old_sectors, new_sectors,
                                    flags):
            marker["partition_called"] = True
        self.stubs.Set(vm_utils, "_resize_part_and_fs",
                       fake_resize_part_and_fs)
        context.RequestContext(self.user_id, self.project_id)
        session = get_session()
        disk_image_type = vm_utils.ImageType.DISK_VHD
        instance = create_instance_with_system_metadata(self.context,
                                                        self.instance_values)
        vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
        vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')
        vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
        vdis = {'root': {'uuid': vdi_uuid, 'ref': vdi_ref}}
        image_meta = {'id': 'null',
                      'disk_format': 'vhd',
                      'properties': {'vm_mode': 'xen'}}
        self.conn._vmops._attach_disks(instance, image_meta, vm_ref,
                instance['name'], vdis, disk_image_type, "fake_nw_inf")
        self.assertEqual(marker["partition_called"], called)
    def test_instance_not_auto_disk_config(self):
        """Should not partition unless instance is marked as
        auto_disk_config.
        """
        self.instance_values['auto_disk_config'] = False
        self.assertIsPartitionCalled(False)
    @stub_vm_utils_with_vdi_attached_here
    def test_instance_auto_disk_config_fails_safe_two_partitions(self):
        # Should not partition unless fail safes pass.
        self.instance_values['auto_disk_config'] = True
        def fake_get_partitions(dev):
            # BUGFIX: the second tuple previously read `'ext4' ""`
            # (implicit string concatenation), silently producing a
            # 5-tuple; restored the comma so both entries are the
            # 6-field partition tuples used elsewhere in this file.
            return [(1, 0, 100, 'ext4', "", ""),
                    (2, 100, 200, 'ext4', "", "")]
        self.stubs.Set(vm_utils, "_get_partitions",
                       fake_get_partitions)
        self.assertIsPartitionCalled(False)
    @stub_vm_utils_with_vdi_attached_here
    def test_instance_auto_disk_config_fails_safe_badly_numbered(self):
        # Should not partition unless fail safes pass.
        self.instance_values['auto_disk_config'] = True
        def fake_get_partitions(dev):
            # Partition numbered 2 with no partition 1: badly numbered.
            return [(2, 100, 200, 'ext4', "", "")]
        self.stubs.Set(vm_utils, "_get_partitions",
                       fake_get_partitions)
        self.assertIsPartitionCalled(False)
    @stub_vm_utils_with_vdi_attached_here
    def test_instance_auto_disk_config_fails_safe_bad_fstype(self):
        # Should not partition unless fail safes pass.
        self.instance_values['auto_disk_config'] = True
        def fake_get_partitions(dev):
            # 'asdf' is not a resizable filesystem type.
            return [(1, 100, 200, 'asdf', "", "")]
        self.stubs.Set(vm_utils, "_get_partitions",
                       fake_get_partitions)
        self.assertIsPartitionCalled(False)
    @stub_vm_utils_with_vdi_attached_here
    def test_instance_auto_disk_config_passes_fail_safes(self):
        """Should partition if instance is marked as auto_disk_config=True and
        virt-layer specific fail-safe checks pass.
        """
        self.instance_values['auto_disk_config'] = True
        def fake_get_partitions(dev):
            return [(1, 0, 100, 'ext4', "", "boot")]
        self.stubs.Set(vm_utils, "_get_partitions",
                       fake_get_partitions)
        self.assertIsPartitionCalled(True)
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIGenerateLocal(stubs.XenAPITestBase):
"""Test generating of local disks, like swap and ephemeral."""
def setUp(self):
super(XenAPIGenerateLocal, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
db_fakes.stub_out_db_instance_api(self.stubs)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.user_id = 'fake'
self.project_id = 'fake'
self.instance_values = {
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'root_gb': 80,
'ephemeral_gb': 0,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
self.context = context.RequestContext(self.user_id, self.project_id)
def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
vbd_type='disk', read_only=False, bootable=True,
osvol=False, empty=False, unpluggable=True):
return session.call_xenapi('VBD.create', {'VM': vm_ref,
'VDI': vdi_ref})
self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)
def assertCalled(self, instance,
disk_image_type=vm_utils.ImageType.DISK_VHD):
context.RequestContext(self.user_id, self.project_id)
session = get_session()
vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')
vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
vdi_key = 'root'
if disk_image_type == vm_utils.ImageType.DISK_ISO:
vdi_key = 'iso'
vdis = {vdi_key: {'uuid': vdi_uuid, 'ref': vdi_ref}}
self.called = False
image_meta = {'id': 'null',
'disk_format': 'vhd',
'properties': {'vm_mode': 'xen'}}
self.conn._vmops._attach_disks(instance, image_meta, vm_ref,
instance['name'], vdis, disk_image_type, "fake_nw_inf")
self.assertTrue(self.called)
def test_generate_swap(self):
# Test swap disk generation.
instance_values = dict(self.instance_values, instance_type_id=5)
instance = create_instance_with_system_metadata(self.context,
instance_values)
def fake_generate_swap(*args, **kwargs):
self.called = True
self.stubs.Set(vm_utils, 'generate_swap', fake_generate_swap)
self.assertCalled(instance)
def test_generate_ephemeral(self):
# Test ephemeral disk generation.
instance_values = dict(self.instance_values, instance_type_id=4)
instance = create_instance_with_system_metadata(self.context,
instance_values)
def fake_generate_ephemeral(*args):
self.called = True
self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
self.assertCalled(instance)
def test_generate_iso_blank_root_disk(self):
instance_values = dict(self.instance_values, instance_type_id=4)
instance_values.pop('kernel_id')
instance_values.pop('ramdisk_id')
instance = create_instance_with_system_metadata(self.context,
instance_values)
def fake_generate_ephemeral(*args):
pass
self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
def fake_generate_iso(*args):
self.called = True
self.stubs.Set(vm_utils, 'generate_iso_blank_root_disk',
fake_generate_iso)
self.assertCalled(instance, vm_utils.ImageType.DISK_ISO)
class XenAPIBWCountersTestCase(stubs.XenAPITestBaseNoDB):
    """Exercise get_all_bw_counters() against canned VM/bandwidth data."""

    FAKE_VMS = {
        'test1:ref': {'name_label': 'test1',
                      'other_config': {'nova_uuid': 'hash'},
                      'domid': '12',
                      '_vifmap': {'0': "a:b:c:d...",
                                  '1': "e:f:12:q..."}},
        'test2:ref': {'name_label': 'test2',
                      'other_config': {'nova_uuid': 'hash'},
                      'domid': '42',
                      '_vifmap': {'0': "a:3:c:d...",
                                  '1': "e:f:42:q..."}},
    }

    def setUp(self):
        super(XenAPIBWCountersTestCase, self).setUp()
        self.stubs.Set(vm_utils, 'list_vms',
                       XenAPIBWCountersTestCase._fake_list_vms)
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        self.flags(firewall_driver='nova.virt.xenapi.firewall.'
                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        def _fake_get_vif_device_map(vm_rec):
            return vm_rec['_vifmap']
        self.stubs.Set(self.conn._vmops, "_get_vif_device_map",
                       _fake_get_vif_device_map)

    @classmethod
    def _fake_list_vms(cls, session):
        # Iterate the canned VM records instead of querying XenAPI.
        return six.iteritems(cls.FAKE_VMS)

    @staticmethod
    def _fake_fetch_bandwidth_mt(session):
        # Simulates XenServer returning no bandwidth data at all.
        return {}

    @staticmethod
    def _fake_fetch_bandwidth(session):
        # Bandwidth keyed by domid, then by VIF device number.
        return {'42': {'0': {'bw_in': 21024, 'bw_out': 22048},
                       '1': {'bw_in': 231337, 'bw_out': 221212121}},
                '12': {'0': {'bw_in': 1024, 'bw_out': 2048},
                       '1': {'bw_in': 31337, 'bw_out': 21212121}},
                }

    def test_get_all_bw_counters(self):
        # Each of the two VMs has two VIFs -> four counter entries.
        instances = [dict(name='test1', uuid='1-2-3'),
                     dict(name='test2', uuid='4-5-6')]
        self.stubs.Set(vm_utils, 'fetch_bandwidth',
                       self._fake_fetch_bandwidth)
        result = self.conn.get_all_bw_counters(instances)
        self.assertEqual(4, len(result))
        expected = [dict(uuid='1-2-3', mac_address="a:b:c:d...",
                         bw_in=1024, bw_out=2048),
                    dict(uuid='1-2-3', mac_address="e:f:12:q...",
                         bw_in=31337, bw_out=21212121),
                    dict(uuid='4-5-6', mac_address="a:3:c:d...",
                         bw_in=21024, bw_out=22048),
                    dict(uuid='4-5-6', mac_address="e:f:42:q...",
                         bw_in=231337, bw_out=221212121)]
        for counter in expected:
            self.assertIn(counter, result)

    def test_get_all_bw_counters_in_failure_case(self):
        """Test that get_all_bw_conters returns an empty list when
        no data returned from Xenserver. c.f. bug #910045.
        """
        instances = [dict(name='instance-0001', uuid='1-2-3-4-5')]
        self.stubs.Set(vm_utils, 'fetch_bandwidth',
                       self._fake_fetch_bandwidth_mt)
        result = self.conn.get_all_bw_counters(instances)
        self.assertEqual([], result)
# TODO(salvatore-orlando): this class and
# nova.tests.unit.virt.test_libvirt.IPTablesFirewallDriverTestCase
# share a lot of code. Consider abstracting common code in a base
# class for firewall driver testing.
#
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
    """Tests for the Dom0IptablesFirewallDriver used by the XenAPI driver.

    Exercises iptables rule generation for security groups, multi-NIC
    instances and provider firewall rules against a fake XenAPI session.
    The fake session (see setUp) records the rules the driver emits into
    ``self._out_rules``.
    """

    REQUIRES_LOCKING = True

    # Canned iptables-save output fed to the driver as the pre-existing
    # ruleset; non-nova rules here must survive the driver untouched.
    _in_rules = [
      '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
      '*nat',
      ':PREROUTING ACCEPT [1170:189210]',
      ':INPUT ACCEPT [844:71028]',
      ':OUTPUT ACCEPT [5149:405186]',
      ':POSTROUTING ACCEPT [5063:386098]',
      '# Completed on Mon Dec  6 11:54:13 2010',
      '# Generated by iptables-save v1.4.4 on Mon Dec  6 11:54:13 2010',
      '*mangle',
      ':INPUT ACCEPT [969615:281627771]',
      ':FORWARD ACCEPT [0:0]',
      ':OUTPUT ACCEPT [915599:63811649]',
      ':nova-block-ipv4 - [0:0]',
      '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
      '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
      ',ESTABLISHED -j ACCEPT ',
      '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
      '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
      '[0:0] -A FORWARD -o virbr0 -j REJECT '
      '--reject-with icmp-port-unreachable ',
      '[0:0] -A FORWARD -i virbr0 -j REJECT '
      '--reject-with icmp-port-unreachable ',
      'COMMIT',
      '# Completed on Mon Dec  6 11:54:13 2010',
      '# Generated by iptables-save v1.4.4 on Mon Dec  6 11:54:13 2010',
      '*filter',
      ':INPUT ACCEPT [969615:281627771]',
      ':FORWARD ACCEPT [0:0]',
      ':OUTPUT ACCEPT [915599:63811649]',
      ':nova-block-ipv4 - [0:0]',
      '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
      '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
      ',ESTABLISHED -j ACCEPT ',
      '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
      '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
      '[0:0] -A FORWARD -o virbr0 -j REJECT '
      '--reject-with icmp-port-unreachable ',
      '[0:0] -A FORWARD -i virbr0 -j REJECT '
      '--reject-with icmp-port-unreachable ',
      'COMMIT',
      '# Completed on Mon Dec  6 11:54:13 2010',
    ]

    _in6_filter_rules = [
      '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
      '*filter',
      ':INPUT ACCEPT [349155:75810423]',
      ':FORWARD ACCEPT [0:0]',
      ':OUTPUT ACCEPT [349256:75777230]',
      'COMMIT',
      '# Completed on Tue Jan 18 23:47:56 2011',
    ]

    def setUp(self):
        super(XenAPIDom0IptablesFirewallTestCase, self).setUp()
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        self.flags(instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        self.user_id = 'mappin'
        self.project_id = 'fake'
        # test_case=self lets the fake session write emitted rules into
        # self._out_rules for the assertions below.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForFirewallTests,
                              test_case=self)
        self.context = context.RequestContext(self.user_id, self.project_id)
        self.network = importutils.import_object(CONF.network_manager)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.fw = self.conn._vmops.firewall_driver

    def _create_instance_ref(self):
        """Create and return a minimal instance DB record."""
        return db.instance_create(self.context,
                                  {'user_id': self.user_id,
                                   'project_id': self.project_id,
                                   'instance_type_id': 1})

    def _create_test_security_group(self):
        """Create a security group with ICMP and TCP 80-81 rules."""
        admin_ctxt = context.get_admin_context()
        secgroup = db.security_group_create(admin_ctxt,
                                {'user_id': self.user_id,
                                 'project_id': self.project_id,
                                 'name': 'testgroup',
                                 'description': 'test group'})
        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'icmp',
                                       'from_port': -1,
                                       'to_port': -1,
                                       'cidr': '192.168.11.0/24'})
        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'icmp',
                                       'from_port': 8,
                                       'to_port': -1,
                                       'cidr': '192.168.11.0/24'})
        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'tcp',
                                       'from_port': 80,
                                       'to_port': 81,
                                       'cidr': '192.168.10.0/24'})
        return secgroup

    def _validate_security_group(self):
        """Assert that the driver output preserves pre-existing rules and
        contains the chains/rules for the test security group.
        """
        # NOTE: list comprehension / any() instead of len(filter(...)) so
        # these checks also work on Python 3, where filter() returns an
        # iterator that has no len().
        in_rules = [l for l in self._in_rules if not l.startswith('#')]
        for rule in in_rules:
            if 'nova' not in rule:
                self.assertIn(rule, self._out_rules,
                              'Rule went missing: %s' % rule)

        instance_chain = None
        for rule in self._out_rules:
            # This is pretty crude, but it'll do for now
            # last two octets change
            if re.search(r'-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
                instance_chain = rule.split(' ')[-1]
                break
        self.assertTrue(instance_chain, "The instance chain wasn't added")
        security_group_chain = None
        for rule in self._out_rules:
            # This is pretty crude, but it'll do for now
            if '-A %s -j' % instance_chain in rule:
                security_group_chain = rule.split(' ')[-1]
                break
        self.assertTrue(security_group_chain,
                        "The security group chain wasn't added")
        regex = re.compile(r'\[0\:0\] -A .* -j ACCEPT -p icmp'
                           ' -s 192.168.11.0/24')
        self.assertTrue(any(regex.match(rule) for rule in self._out_rules),
                        "ICMP acceptance rule wasn't added")
        regex = re.compile(r'\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp'
                           ' --icmp-type 8 -s 192.168.11.0/24')
        self.assertTrue(any(regex.match(rule) for rule in self._out_rules),
                        "ICMP Echo Request acceptance rule wasn't added")
        regex = re.compile(r'\[0\:0\] -A .* -j ACCEPT -p tcp --dport 80:81'
                           ' -s 192.168.10.0/24')
        self.assertTrue(any(regex.match(rule) for rule in self._out_rules),
                        "TCP port 80/81 acceptance rule wasn't added")

    def test_static_filters(self):
        instance_ref = self._create_instance_ref()
        src_instance_ref = self._create_instance_ref()
        admin_ctxt = context.get_admin_context()
        secgroup = self._create_test_security_group()

        src_secgroup = db.security_group_create(admin_ctxt,
                                        {'user_id': self.user_id,
                                         'project_id': self.project_id,
                                         'name': 'testsourcegroup',
                                         'description': 'src group'})
        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'tcp',
                                       'from_port': 80,
                                       'to_port': 81,
                                       'group_id': src_secgroup['id']})

        db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
                                       secgroup['id'])
        db.instance_add_security_group(admin_ctxt, src_instance_ref['uuid'],
                                       src_secgroup['id'])
        instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
        src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id'])

        network_model = fake_network.fake_get_instance_nw_info(self.stubs, 1)

        from nova.compute import utils as compute_utils  # noqa
        self.stubs.Set(compute_utils, 'get_nw_info_for_instance',
                       lambda instance: network_model)

        self.fw.prepare_instance_filter(instance_ref, network_model)
        self.fw.apply_instance_filter(instance_ref, network_model)

        self._validate_security_group()
        # Extra test for TCP acceptance rules
        for ip in network_model.fixed_ips():
            if ip['version'] != 4:
                continue
            regex = re.compile(r'\[0\:0\] -A .* -j ACCEPT -p tcp'
                               ' --dport 80:81 -s %s' % ip['address'])
            self.assertTrue(any(regex.match(rule)
                                for rule in self._out_rules),
                            "TCP port 80/81 acceptance rule wasn't added")

        db.instance_destroy(admin_ctxt, instance_ref['uuid'])

    def test_filters_for_instance_with_ip_v6(self):
        self.flags(use_ipv6=True)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
        rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
        self.assertEqual(len(rulesv4), 2)
        self.assertEqual(len(rulesv6), 1)

    def test_filters_for_instance_without_ip_v6(self):
        self.flags(use_ipv6=False)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
        rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
        self.assertEqual(len(rulesv4), 2)
        self.assertEqual(len(rulesv6), 0)

    def test_multinic_iptables(self):
        ipv4_rules_per_addr = 1
        ipv4_addr_per_network = 2
        ipv6_rules_per_addr = 1
        ipv6_addr_per_network = 1
        networks_count = 5
        instance_ref = self._create_instance_ref()
        _get_instance_nw_info = fake_network.fake_get_instance_nw_info
        network_info = _get_instance_nw_info(self.stubs,
                                             networks_count,
                                             ipv4_addr_per_network)
        network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
            '1.1.1.1'
        ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
        ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
        inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
                                                      network_info)
        self.fw.prepare_instance_filter(instance_ref, network_info)
        ipv4 = self.fw.iptables.ipv4['filter'].rules
        ipv6 = self.fw.iptables.ipv6['filter'].rules
        ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
        ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
        # Extra rules are for the DHCP request
        rules = (ipv4_rules_per_addr * ipv4_addr_per_network *
                 networks_count) + 2
        self.assertEqual(ipv4_network_rules, rules)
        self.assertEqual(ipv6_network_rules,
                  ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)

    def test_do_refresh_security_group_rules(self):
        admin_ctxt = context.get_admin_context()
        instance_ref = self._create_instance_ref()
        network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
        secgroup = self._create_test_security_group()
        db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
                                       secgroup['id'])
        self.fw.prepare_instance_filter(instance_ref, network_info)
        self.fw.instance_info[instance_ref['id']] = (instance_ref,
                                                     network_info)
        self._validate_security_group()
        # add a rule to the security group
        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'udp',
                                       'from_port': 200,
                                       'to_port': 299,
                                       'cidr': '192.168.99.0/24'})
        # validate the extra rule
        self.fw.refresh_security_group_rules(secgroup)
        regex = re.compile(r'\[0\:0\] -A .* -j ACCEPT -p udp --dport 200:299'
                           ' -s 192.168.99.0/24')
        self.assertTrue(any(regex.match(rule) for rule in self._out_rules),
                        "Rules were not updated properly. "
                        "The rule for UDP acceptance is missing")

    def test_provider_firewall_rules(self):
        # setup basic instance data
        instance_ref = self._create_instance_ref()
        # FRAGILE: as in libvirt tests
        # peeks at how the firewall names chains
        chain_name = 'inst-%s' % instance_ref['id']

        network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
        self.fw.prepare_instance_filter(instance_ref, network_info)
        self.assertIn('provider', self.fw.iptables.ipv4['filter'].chains)
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                      if rule.chain == 'provider']
        self.assertEqual(0, len(rules))

        admin_ctxt = context.get_admin_context()
        # add a rule and send the update message, check for 1 rule
        db.provider_fw_rule_create(admin_ctxt,
                                   {'protocol': 'tcp',
                                    'cidr': '10.99.99.99/32',
                                    'from_port': 1,
                                    'to_port': 65535})
        self.fw.refresh_provider_fw_rules()
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                      if rule.chain == 'provider']
        self.assertEqual(1, len(rules))

        # Add another, refresh, and make sure number of rules goes to two
        provider_fw1 = db.provider_fw_rule_create(admin_ctxt,
                                                  {'protocol': 'udp',
                                                   'cidr': '10.99.99.99/32',
                                                   'from_port': 1,
                                                   'to_port': 65535})
        self.fw.refresh_provider_fw_rules()
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                      if rule.chain == 'provider']
        self.assertEqual(2, len(rules))

        # create the instance filter and make sure it has a jump rule
        self.fw.prepare_instance_filter(instance_ref, network_info)
        self.fw.apply_instance_filter(instance_ref, network_info)
        inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                           if rule.chain == chain_name]
        jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
        provjump_rules = []
        # IptablesTable doesn't make rules unique internally
        for rule in jump_rules:
            if 'provider' in rule.rule and rule not in provjump_rules:
                provjump_rules.append(rule)
        self.assertEqual(1, len(provjump_rules))

        # remove a rule from the db, cast to compute to refresh rule
        db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id'])
        self.fw.refresh_provider_fw_rules()
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                      if rule.chain == 'provider']
        self.assertEqual(1, len(rules))
class XenAPISRSelectionTestCase(stubs.XenAPITestBaseNoDB):
    """Unit tests checking that vm_utils.safe_find_sr picks the right SR."""

    def test_safe_find_sr_raise_exception(self):
        # A filter that matches nothing must raise StorageRepositoryNotFound.
        self.flags(sr_matching_filter='yadayadayada', group='xenserver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        session = get_session()
        self.assertRaises(exception.StorageRepositoryNotFound,
                          vm_utils.safe_find_sr, session)

    def test_safe_find_sr_local_storage(self):
        # The default local-storage SR should be found.
        self.flags(sr_matching_filter='other-config:i18n-key=local-storage',
                   group='xenserver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        session = get_session()
        # This test is only guaranteed if there is one host in the pool
        hosts = xenapi_fake.get_all('host')
        self.assertEqual(1, len(hosts))
        host_ref = hosts[0]
        # Walk the PBDs on the host to find the local-storage SR ourselves,
        # then check safe_find_sr agrees.
        local_sr = None
        for pbd_ref in xenapi_fake.get_all('PBD'):
            pbd_rec = xenapi_fake.get_record('PBD', pbd_ref)
            if pbd_rec['host'] != host_ref:
                continue
            sr_rec = xenapi_fake.get_record('SR', pbd_rec['SR'])
            if sr_rec['other_config']['i18n-key'] == 'local-storage':
                local_sr = pbd_rec['SR']
        found_sr = vm_utils.safe_find_sr(session)
        self.assertEqual(local_sr, found_sr)

    def test_safe_find_sr_by_other_criteria(self):
        # A custom other-config filter should match a matching SR.
        self.flags(sr_matching_filter='other-config:my_fake_sr=true',
                   group='xenserver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        session = get_session()
        host_ref = xenapi_fake.get_all('host')[0]
        local_sr = xenapi_fake.create_sr(name_label='Fake Storage',
                                         type='lvm',
                                         other_config={'my_fake_sr': 'true'},
                                         host_ref=host_ref)
        found_sr = vm_utils.safe_find_sr(session)
        self.assertEqual(local_sr, found_sr)

    def test_safe_find_sr_default(self):
        # With the default-sr filter, the pool's default SR wins regardless
        # of any other-config settings.
        self.flags(sr_matching_filter='default-sr:true',
                   group='xenserver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        session = get_session()
        pool_ref = session.call_xenapi('pool.get_all')[0]
        found_sr = vm_utils.safe_find_sr(session)
        self.assertEqual(session.call_xenapi('pool.get_default_SR', pool_ref),
                         found_sr)
def _create_service_entries(context, values=None):
    """Create nova-compute service DB rows for an AZ -> hosts mapping.

    :param context: request context used for the DB calls
    :param values: mapping of availability-zone name to a list of host
                   names; defaults to two zones with three hosts in total
    :returns: the mapping that was used, handy for later lookups
    """
    # NOTE: a literal dict default would be a shared mutable default
    # argument; use None and build the default per call instead.
    if values is None:
        values = {'avail_zone1': ['fake_host1', 'fake_host2'],
                  'avail_zone2': ['fake_host3']}
    for avail_zone, hosts in six.iteritems(values):
        for service_host in hosts:
            db.service_create(context,
                              {'host': service_host,
                               'binary': 'nova-compute',
                               'topic': 'compute',
                               'report_count': 0})
    return values
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIAggregateTestCase(stubs.XenAPITestBase):
    """Unit tests for aggregate operations (XenAPI resource pools)."""

    def setUp(self):
        super(XenAPIAggregateTestCase, self).setUp()
        self.flags(connection_url='http://test_url',
                   connection_username='test_user',
                   connection_password='test_pass',
                   group='xenserver')
        self.flags(instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver',
                   host='host',
                   compute_driver='xenapi.XenAPIDriver',
                   default_availability_zone='avail_zone1')
        self.flags(use_local=True, group='conductor')
        host_ref = xenapi_fake.get_all('host')[0]
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.context = context.get_admin_context()
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.compute = importutils.import_object(CONF.compute_manager)
        self.api = compute_api.AggregateAPI()
        values = {'name': 'test_aggr',
                  'metadata': {'availability_zone': 'test_zone',
                               pool_states.POOL_FLAG: 'XenAPI'}}
        self.aggr = objects.Aggregate(context=self.context, id=1,
                                      **values)
        # Metadata describing an active pool whose master is this host.
        self.fake_metadata = {pool_states.POOL_FLAG: 'XenAPI',
                              'master_compute': 'host',
                              'availability_zone': 'fake_zone',
                              pool_states.KEY: pool_states.ACTIVE,
                              'host': xenapi_fake.get_record('host',
                                                             host_ref)['uuid']}

    def test_pool_add_to_aggregate_called_by_driver(self):

        calls = []

        def pool_add_to_aggregate(context, aggregate, host, slave_info=None):
            self.assertEqual("CONTEXT", context)
            self.assertEqual("AGGREGATE", aggregate)
            self.assertEqual("HOST", host)
            self.assertEqual("SLAVEINFO", slave_info)
            calls.append(pool_add_to_aggregate)
        self.stubs.Set(self.conn._pool,
                       "add_to_aggregate",
                       pool_add_to_aggregate)

        self.conn.add_to_aggregate("CONTEXT", "AGGREGATE", "HOST",
                                   slave_info="SLAVEINFO")

        self.assertIn(pool_add_to_aggregate, calls)

    def test_pool_remove_from_aggregate_called_by_driver(self):

        calls = []

        def pool_remove_from_aggregate(context, aggregate, host,
                                       slave_info=None):
            self.assertEqual("CONTEXT", context)
            self.assertEqual("AGGREGATE", aggregate)
            self.assertEqual("HOST", host)
            self.assertEqual("SLAVEINFO", slave_info)
            calls.append(pool_remove_from_aggregate)
        self.stubs.Set(self.conn._pool,
                       "remove_from_aggregate",
                       pool_remove_from_aggregate)

        self.conn.remove_from_aggregate("CONTEXT", "AGGREGATE", "HOST",
                                        slave_info="SLAVEINFO")

        self.assertIn(pool_remove_from_aggregate, calls)

    def test_add_to_aggregate_for_first_host_sets_metadata(self):
        def fake_init_pool(id, name):
            fake_init_pool.called = True
        self.stubs.Set(self.conn._pool, "_init_pool", fake_init_pool)

        aggregate = self._aggregate_setup()
        self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
        result = db.aggregate_get(self.context, aggregate['id'])
        self.assertTrue(fake_init_pool.called)
        self.assertThat(self.fake_metadata,
                        matchers.DictMatches(result['metadetails']))

    def test_join_slave(self):
        # Ensure join_slave gets called when the request gets to master.
        def fake_join_slave(id, compute_uuid, host, url, user, password):
            fake_join_slave.called = True
        self.stubs.Set(self.conn._pool, "_join_slave", fake_join_slave)

        aggregate = self._aggregate_setup(hosts=['host', 'host2'],
                                          metadata=self.fake_metadata)
        self.conn._pool.add_to_aggregate(self.context, aggregate, "host2",
                                         dict(compute_uuid='fake_uuid',
                                         url='fake_url',
                                         user='fake_user',
                                         passwd='fake_pass',
                                         xenhost_uuid='fake_uuid'))
        self.assertTrue(fake_join_slave.called)

    def test_add_to_aggregate_first_host(self):
        def fake_pool_set_name_label(self, session, pool_ref, name):
            fake_pool_set_name_label.called = True
        self.stubs.Set(xenapi_fake.SessionBase, "pool_set_name_label",
                       fake_pool_set_name_label)
        self.conn._session.call_xenapi("pool.create", {"name": "asdf"})

        metadata = {'availability_zone': 'fake_zone',
                    pool_states.POOL_FLAG: "XenAPI",
                    pool_states.KEY: pool_states.CREATED}

        aggregate = objects.Aggregate(context=self.context)
        aggregate.name = 'fake_aggregate'
        aggregate.metadata = dict(metadata)
        aggregate.create()
        aggregate.add_host('host')
        self.assertEqual(["host"], aggregate.hosts)
        self.assertEqual(metadata, aggregate.metadata)

        self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
        self.assertTrue(fake_pool_set_name_label.called)

    def test_remove_from_aggregate_called(self):
        def fake_remove_from_aggregate(context, aggregate, host):
            fake_remove_from_aggregate.called = True
        self.stubs.Set(self.conn._pool,
                       "remove_from_aggregate",
                       fake_remove_from_aggregate)

        self.conn.remove_from_aggregate(None, None, None)
        self.assertTrue(fake_remove_from_aggregate.called)

    def test_remove_from_empty_aggregate(self):
        result = self._aggregate_setup()
        self.assertRaises(exception.InvalidAggregateActionDelete,
                          self.conn._pool.remove_from_aggregate,
                          self.context, result, "test_host")

    def test_remove_slave(self):
        # Ensure eject slave gets called.
        def fake_eject_slave(id, compute_uuid, host_uuid):
            fake_eject_slave.called = True
        self.stubs.Set(self.conn._pool, "_eject_slave", fake_eject_slave)

        self.fake_metadata['host2'] = 'fake_host2_uuid'
        aggregate = self._aggregate_setup(hosts=['host', 'host2'],
                metadata=self.fake_metadata, aggr_state=pool_states.ACTIVE)
        self.conn._pool.remove_from_aggregate(self.context, aggregate, "host2")
        self.assertTrue(fake_eject_slave.called)

    def test_remove_master_solo(self):
        # Ensure metadata are cleared after removal.
        def fake_clear_pool(id):
            fake_clear_pool.called = True
        self.stubs.Set(self.conn._pool, "_clear_pool", fake_clear_pool)

        aggregate = self._aggregate_setup(metadata=self.fake_metadata)
        self.conn._pool.remove_from_aggregate(self.context, aggregate, "host")
        result = db.aggregate_get(self.context, aggregate['id'])
        self.assertTrue(fake_clear_pool.called)
        self.assertThat({'availability_zone': 'fake_zone',
                pool_states.POOL_FLAG: 'XenAPI',
                pool_states.KEY: pool_states.ACTIVE},
                matchers.DictMatches(result['metadetails']))

    def test_remote_master_non_empty_pool(self):
        # Ensure AggregateError is raised if removing the master.
        aggregate = self._aggregate_setup(hosts=['host', 'host2'],
                                          metadata=self.fake_metadata)

        self.assertRaises(exception.InvalidAggregateActionDelete,
                          self.conn._pool.remove_from_aggregate,
                          self.context, aggregate, "host")

    def _aggregate_setup(self, aggr_name='fake_aggregate',
                         aggr_zone='fake_zone',
                         aggr_state=pool_states.CREATED,
                         hosts=['host'], metadata=None):
        """Create and return an Aggregate object in the given state."""
        aggregate = objects.Aggregate(context=self.context)
        aggregate.name = aggr_name
        aggregate.metadata = {'availability_zone': aggr_zone,
                              pool_states.POOL_FLAG: 'XenAPI',
                              pool_states.KEY: aggr_state,
                              }
        if metadata:
            aggregate.metadata.update(metadata)
        aggregate.create()
        for aggregate_host in hosts:
            aggregate.add_host(aggregate_host)
        return aggregate

    def test_add_host_to_aggregate_invalid_changing_status(self):
        """Ensure InvalidAggregateActionAdd is raised when adding host while
        aggregate is not ready.
        """
        aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
        ex = self.assertRaises(exception.InvalidAggregateActionAdd,
                               self.conn.add_to_aggregate, self.context,
                               aggregate, 'host')
        self.assertIn('setup in progress', str(ex))

    def test_add_host_to_aggregate_invalid_dismissed_status(self):
        """Ensure InvalidAggregateActionAdd is raised when aggregate is
        deleted.
        """
        aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
        ex = self.assertRaises(exception.InvalidAggregateActionAdd,
                               self.conn.add_to_aggregate, self.context,
                               aggregate, 'fake_host')
        self.assertIn('aggregate deleted', str(ex))

    def test_add_host_to_aggregate_invalid_error_status(self):
        """Ensure InvalidAggregateActionAdd is raised when aggregate is
        in error.
        """
        aggregate = self._aggregate_setup(aggr_state=pool_states.ERROR)
        ex = self.assertRaises(exception.InvalidAggregateActionAdd,
                               self.conn.add_to_aggregate, self.context,
                               aggregate, 'fake_host')
        self.assertIn('aggregate in error', str(ex))

    def test_remove_host_from_aggregate_error(self):
        # Ensure we can remove a host from an aggregate even if in error.
        values = _create_service_entries(self.context)
        # NOTE: dict views are not indexable on Python 3; materialize the
        # keys before subscripting.
        fake_zone = list(values.keys())[0]
        aggr = self.api.create_aggregate(self.context,
                                         'fake_aggregate', fake_zone)
        # let's mock the fact that the aggregate is ready!
        metadata = {pool_states.POOL_FLAG: "XenAPI",
                    pool_states.KEY: pool_states.ACTIVE}
        db.aggregate_metadata_add(self.context, aggr['id'], metadata)
        for aggregate_host in values[fake_zone]:
            aggr = self.api.add_host_to_aggregate(self.context,
                                                  aggr['id'], aggregate_host)
        # let's mock the fact that the aggregate is in error!
        expected = self.api.remove_host_from_aggregate(self.context,
                                                       aggr['id'],
                                                       values[fake_zone][0])
        self.assertEqual(len(aggr['hosts']) - 1, len(expected['hosts']))
        self.assertEqual(expected['metadata'][pool_states.KEY],
                         pool_states.ACTIVE)

    def test_remove_host_from_aggregate_invalid_dismissed_status(self):
        """Ensure InvalidAggregateActionDelete is raised when aggregate is
        deleted.
        """
        aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
        self.assertRaises(exception.InvalidAggregateActionDelete,
                          self.conn.remove_from_aggregate, self.context,
                          aggregate, 'fake_host')

    def test_remove_host_from_aggregate_invalid_changing_status(self):
        """Ensure InvalidAggregateActionDelete is raised when aggregate is
        changing.
        """
        aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
        self.assertRaises(exception.InvalidAggregateActionDelete,
                          self.conn.remove_from_aggregate, self.context,
                          aggregate, 'fake_host')

    def test_add_aggregate_host_raise_err(self):
        # Ensure the undo operation works correctly on add.
        def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
            raise exception.AggregateError(
                    aggregate_id='', action='', reason='')
        self.stubs.Set(self.compute.driver, "add_to_aggregate",
                       fake_driver_add_to_aggregate)
        metadata = {pool_states.POOL_FLAG: "XenAPI",
                    pool_states.KEY: pool_states.ACTIVE}
        self.aggr.metadata = metadata
        self.aggr.hosts = ['fake_host']

        self.assertRaises(exception.AggregateError,
                          self.compute.add_aggregate_host,
                          self.context, host="fake_host",
                          aggregate=self.aggr,
                          slave_info=None)
        self.assertEqual(self.aggr.metadata[pool_states.KEY],
                         pool_states.ERROR)
        self.assertEqual(self.aggr.hosts, ['fake_host'])
class MockComputeAPI(object):
    """Records compute RPC API calls so tests can inspect them later.

    Each call is appended to ``_mock_calls`` as a tuple whose first item
    is the bound method that was invoked, followed by the arguments.
    """

    def __init__(self):
        self._mock_calls = []

    def add_aggregate_host(self, ctxt, aggregate,
                           host_param, host, slave_info):
        call = (self.add_aggregate_host, ctxt, aggregate,
                host_param, host, slave_info)
        self._mock_calls.append(call)

    def remove_aggregate_host(self, ctxt, aggregate_id, host_param,
                              host, slave_info):
        call = (self.remove_aggregate_host, ctxt, aggregate_id,
                host_param, host, slave_info)
        self._mock_calls.append(call)
class StubDependencies(object):
    """Stub dependencies for ResourcePool.

    Provides canned answers for the pool helpers and records RPC traffic
    through a MockComputeAPI instance.
    """

    def __init__(self):
        self.compute_rpcapi = MockComputeAPI()

    def _is_hv_pool(self, *_args):
        # Pretend every aggregate is a hypervisor pool.
        return True

    def _get_metadata(self, *_args):
        # Minimal pool metadata with 'master' as the master compute.
        metadata = {pool_states.KEY: {},
                    'master_compute': 'master'}
        return metadata

    def _create_slave_info(self, *_args):
        return "SLAVE_INFO"
class ResourcePoolWithStubs(StubDependencies, pool.ResourcePool):
    """A ResourcePool whose collaborators are replaced by StubDependencies.

    MRO puts StubDependencies first, so its stub helpers and the
    MockComputeAPI rpcapi shadow the real ResourcePool implementations.
    """
class HypervisorPoolTestCase(test.NoDBTestCase):
    """Check that pool membership changes requested on a slave are
    forwarded to the master compute over the (mocked) RPC API.
    """

    fake_aggregate = {
        'id': 98,
        'hosts': [],
        'metadata': {
            'master_compute': 'master',
            pool_states.POOL_FLAG: {},
            pool_states.KEY: {}
        }
    }

    def test_slave_asks_master_to_add_slave_to_pool(self):
        slave = ResourcePoolWithStubs()
        slave.add_to_aggregate("CONTEXT", self.fake_aggregate, "slave")
        expected = (slave.compute_rpcapi.add_aggregate_host,
                    "CONTEXT", jsonutils.to_primitive(self.fake_aggregate),
                    "slave", "master", "SLAVE_INFO")
        self.assertIn(expected, slave.compute_rpcapi._mock_calls)

    def test_slave_asks_master_to_remove_slave_from_pool(self):
        slave = ResourcePoolWithStubs()
        slave.remove_from_aggregate("CONTEXT", self.fake_aggregate, "slave")
        expected = (slave.compute_rpcapi.remove_aggregate_host,
                    "CONTEXT", 98, "slave", "master", "SLAVE_INFO")
        self.assertIn(expected, slave.compute_rpcapi._mock_calls)
class SwapXapiHostTestCase(test.NoDBTestCase):
    """Tests for pool.swap_xapi_host: host replacement in XenAPI URLs."""

    def _assert_swapped(self, expected, url):
        # Helper: swapping the host in `url` must yield `expected`.
        self.assertEqual(expected,
                         pool.swap_xapi_host(url, 'otherserver'))

    def test_swapping(self):
        self._assert_swapped("http://otherserver:8765/somepath",
                             "http://someserver:8765/somepath")

    def test_no_port(self):
        self._assert_swapped("http://otherserver/somepath",
                             "http://someserver/somepath")

    def test_no_path(self):
        self._assert_swapped("http://otherserver",
                             "http://someserver")
class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
"""Unit tests for live_migration."""
def setUp(self):
super(XenAPILiveMigrateTestCase, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver',
host='host')
db_fakes.stub_out_db_instance_api(self.stubs)
self.context = context.get_admin_context()
def test_live_migration_calls_vmops(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_live_migrate(context, instance_ref, dest, post_method,
recover_method, block_migration, migrate_data):
fake_live_migrate.called = True
self.stubs.Set(self.conn._vmops, "live_migrate", fake_live_migrate)
self.conn.live_migration(None, None, None, None, None)
self.assertTrue(fake_live_migrate.called)
def test_pre_live_migration(self):
# ensure method is present
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.conn.pre_live_migration(None, None, None, None, None)
def test_post_live_migration_at_destination(self):
# ensure method is present
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
fake_instance = {"name": "name"}
fake_network_info = "network_info"
def fake_fw(instance, network_info):
self.assertEqual(instance, fake_instance)
self.assertEqual(network_info, fake_network_info)
fake_fw.call_count += 1
def fake_create_kernel_and_ramdisk(context, session, instance,
name_label):
return "fake-kernel-file", "fake-ramdisk-file"
fake_fw.call_count = 0
_vmops = self.conn._vmops
self.stubs.Set(_vmops.firewall_driver,
'setup_basic_filtering', fake_fw)
self.stubs.Set(_vmops.firewall_driver,
'prepare_instance_filter', fake_fw)
self.stubs.Set(_vmops.firewall_driver,
'apply_instance_filter', fake_fw)
self.stubs.Set(vm_utils, "create_kernel_and_ramdisk",
fake_create_kernel_and_ramdisk)
def fake_get_vm_opaque_ref(instance):
fake_get_vm_opaque_ref.called = True
self.stubs.Set(_vmops, "_get_vm_opaque_ref", fake_get_vm_opaque_ref)
fake_get_vm_opaque_ref.called = False
def fake_strip_base_mirror_from_vdis(session, vm_ref):
fake_strip_base_mirror_from_vdis.called = True
self.stubs.Set(vm_utils, "strip_base_mirror_from_vdis",
fake_strip_base_mirror_from_vdis)
fake_strip_base_mirror_from_vdis.called = False
self.conn.post_live_migration_at_destination(None, fake_instance,
fake_network_info, None)
self.assertEqual(fake_fw.call_count, 3)
self.assertTrue(fake_get_vm_opaque_ref.called)
self.assertTrue(fake_strip_base_mirror_from_vdis.called)
def test_check_can_live_migrate_destination_with_block_migration(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf")
expected = {'block_migration': True,
'migrate_data': {
'migrate_send_data': "fake_migrate_data",
'destination_sr_ref': 'asdf'
}
}
result = self.conn.check_can_live_migrate_destination(self.context,
{'host': 'host'},
{}, {},
True, False)
self.assertEqual(expected, result)
def test_check_live_migrate_destination_verifies_ip(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
for pif_ref in xenapi_fake.get_all('PIF'):
pif_rec = xenapi_fake.get_record('PIF', pif_ref)
pif_rec['IP'] = ''
pif_rec['IPv6'] = ''
self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf")
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_destination,
self.context, {'host': 'host'},
{}, {},
True, False)
def test_check_can_live_migrate_destination_block_migration_fails(self):
stubs.stubout_session(self.stubs,
stubs.FakeSessionForFailedMigrateTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_destination,
self.context, {'host': 'host'},
{}, {},
True, False)
def _add_default_live_migrate_stubs(self, conn):
def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
pass
def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
return []
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
def fake_lookup_kernel_ramdisk(session, vm):
return ("fake_PV_kernel", "fake_PV_ramdisk")
self.stubs.Set(conn._vmops, "_generate_vdi_map",
fake_generate_vdi_map)
self.stubs.Set(conn._vmops, "_get_iscsi_srs",
fake_get_iscsi_srs)
self.stubs.Set(conn._vmops, "_get_vm_opaque_ref",
fake_get_vm_opaque_ref)
self.stubs.Set(vm_utils, "lookup_kernel_ramdisk",
fake_lookup_kernel_ramdisk)
def test_check_can_live_migrate_source_with_block_migrate(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
dest_check_data = {'block_migration': True,
'migrate_data': {
'destination_sr_ref': None,
'migrate_send_data': None
}}
result = self.conn.check_can_live_migrate_source(self.context,
{'host': 'host'},
dest_check_data)
self.assertEqual(dest_check_data, result)
    def test_check_can_live_migrate_source_with_block_migrate_iscsi(self):
        """Source check passes when the iSCSI relocation plugin is available."""
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self._add_default_live_migrate_stubs(self.conn)
        # Pretend the instance has one iSCSI-backed SR attached.
        def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
            return ['sr_ref']
        self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
                       fake_get_iscsi_srs)
        # The plugin probe reports relocation support ("true").
        def fake_make_plugin_call(plugin, method, **args):
            return "true"
        self.stubs.Set(self.conn._vmops, "_make_plugin_call",
                       fake_make_plugin_call)
        dest_check_data = {'block_migration': True,
                           'migrate_data': {
                               'destination_sr_ref': None,
                               'migrate_send_data': None
                           }}
        result = self.conn.check_can_live_migrate_source(self.context,
                                                         {'host': 'host'},
                                                         dest_check_data)
        # The check is expected to pass the destination data through untouched.
        self.assertEqual(dest_check_data, result)
    def test_check_can_live_migrate_source_with_block_iscsi_fails(self):
        """Source check raises when the iSCSI relocation plugin is missing."""
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self._add_default_live_migrate_stubs(self.conn)
        # One iSCSI SR is attached, but the plugin probe reports an error.
        def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
            return ['sr_ref']
        self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
                       fake_get_iscsi_srs)
        def fake_make_plugin_call(plugin, method, **args):
            return {'returncode': 'error', 'message': 'Plugin not found'}
        self.stubs.Set(self.conn._vmops, "_make_plugin_call",
                       fake_make_plugin_call)
        self.assertRaises(exception.MigrationError,
                          self.conn.check_can_live_migrate_source,
                          self.context, {'host': 'host'},
                          {})
def test_check_can_live_migrate_source_with_block_migrate_fails(self):
stubs.stubout_session(self.stubs,
stubs.FakeSessionForFailedMigrateTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
dest_check_data = {'block_migration': True,
'migrate_data': {
'destination_sr_ref': None,
'migrate_send_data': None
}}
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_source,
self.context,
{'host': 'host'},
dest_check_data)
def test_check_can_live_migrate_works(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_aggregate_get_by_host(context, host, key=None):
self.assertEqual(CONF.host, host)
return [dict(test_aggregate.fake_aggregate,
metadetails={"host": "test_host_uuid"})]
self.stubs.Set(db, "aggregate_get_by_host",
fake_aggregate_get_by_host)
self.conn.check_can_live_migrate_destination(self.context,
{'host': 'host'}, False, False)
def test_check_can_live_migrate_fails(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_aggregate_get_by_host(context, host, key=None):
self.assertEqual(CONF.host, host)
return [dict(test_aggregate.fake_aggregate,
metadetails={"dest_other": "test_host_uuid"})]
self.stubs.Set(db, "aggregate_get_by_host",
fake_aggregate_get_by_host)
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_destination,
self.context, {'host': 'host'}, None, None)
def test_live_migration(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
fake_get_vm_opaque_ref)
def fake_get_host_opaque_ref(context, destination_hostname):
return "fake_host"
self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
fake_get_host_opaque_ref)
def post_method(context, instance, destination_hostname,
block_migration, migrate_data):
post_method.called = True
self.conn.live_migration(self.conn, None, None, post_method, None)
self.assertTrue(post_method.called, "post_method.called")
    def test_live_migration_on_failure(self):
        """An error mid-migration calls recover_method and re-raises."""
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        def fake_get_vm_opaque_ref(instance):
            return "fake_vm"
        self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
                       fake_get_vm_opaque_ref)
        def fake_get_host_opaque_ref(context, destination_hostname):
            return "fake_host"
        self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
                       fake_get_host_opaque_ref)
        # Force the underlying XenAPI call to blow up during migration.
        def fake_call_xenapi(*args):
            raise NotImplementedError()
        self.stubs.Set(self.conn._vmops._session, "call_xenapi",
                       fake_call_xenapi)
        def recover_method(context, instance, destination_hostname,
                           block_migration):
            recover_method.called = True
        self.assertRaises(NotImplementedError, self.conn.live_migration,
                          self.conn, None, None, None, recover_method)
        self.assertTrue(recover_method.called, "recover_method.called")
def test_live_migration_calls_post_migration(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
def post_method(context, instance, destination_hostname,
block_migration, migrate_data):
post_method.called = True
# pass block_migration = True and migrate data
migrate_data = {"destination_sr_ref": "foo",
"migrate_send_data": "bar"}
self.conn.live_migration(self.conn, None, None, post_method, None,
True, migrate_data)
self.assertTrue(post_method.called, "post_method.called")
    def test_live_migration_block_cleans_srs(self):
        """Block migration forgets attached iSCSI SRs after a successful move."""
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self._add_default_live_migrate_stubs(self.conn)
        def fake_get_iscsi_srs(context, instance):
            return ['sr_ref']
        self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
                       fake_get_iscsi_srs)
        # Record whether the SR clean-up hook actually ran.
        def fake_forget_sr(context, instance):
            fake_forget_sr.called = True
        self.stubs.Set(volume_utils, "forget_sr",
                       fake_forget_sr)
        def post_method(context, instance, destination_hostname,
                        block_migration, migrate_data):
            post_method.called = True
        migrate_data = {"destination_sr_ref": "foo",
                        "migrate_send_data": "bar"}
        self.conn.live_migration(self.conn, None, None, post_method, None,
                                 True, migrate_data)
        self.assertTrue(post_method.called, "post_method.called")
        self.assertTrue(fake_forget_sr.called, "forget_sr.called")
def test_live_migration_with_block_migration_raises_invalid_param(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
def recover_method(context, instance, destination_hostname,
block_migration):
recover_method.called = True
# pass block_migration = True and no migrate data
self.assertRaises(exception.InvalidParameterValue,
self.conn.live_migration, self.conn,
None, None, None, recover_method, True, None)
self.assertTrue(recover_method.called, "recover_method.called")
def test_live_migration_with_block_migration_fails_migrate_send(self):
stubs.stubout_session(self.stubs,
stubs.FakeSessionForFailedMigrateTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
def recover_method(context, instance, destination_hostname,
block_migration):
recover_method.called = True
# pass block_migration = True and migrate data
migrate_data = dict(destination_sr_ref='foo', migrate_send_data='bar')
self.assertRaises(exception.MigrationError,
self.conn.live_migration, self.conn,
None, None, None, recover_method, True, migrate_data)
self.assertTrue(recover_method.called, "recover_method.called")
    def test_live_migrate_block_migration_xapi_call_parameters(self):
        """Verify the arguments handed to XenAPI's VM.migrate_send."""
        fake_vdi_map = object()
        class Session(xenapi_fake.SessionBase):
            # NOTE: 'self_' is the fake session instance; 'self' (captured
            # by closure) is the test case, so assertions run in-session.
            def VM_migrate_send(self_, session, vmref, migrate_data, islive,
                                vdi_map, vif_map, options):
                self.assertEqual('SOMEDATA', migrate_data)
                self.assertEqual(fake_vdi_map, vdi_map)
        stubs.stubout_session(self.stubs, Session)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self._add_default_live_migrate_stubs(conn)
        def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
            return fake_vdi_map
        self.stubs.Set(conn._vmops, "_generate_vdi_map",
                       fake_generate_vdi_map)
        def dummy_callback(*args, **kwargs):
            pass
        conn.live_migration(
            self.context, instance=dict(name='ignore'), dest=None,
            post_method=dummy_callback, recover_method=dummy_callback,
            block_migration="SOMEDATA",
            migrate_data=dict(migrate_send_data='SOMEDATA',
                              destination_sr_ref="TARGET_SR_OPAQUE_REF"))
    def test_live_migrate_pool_migration_xapi_call_parameters(self):
        """Verify the arguments handed to XenAPI's VM.pool_migrate."""
        class Session(xenapi_fake.SessionBase):
            # Raising IOError proves this exact call path was reached;
            # 'self_' is the fake session, 'self' (closure) the test case.
            def VM_pool_migrate(self_, session, vm_ref, host_ref, options):
                self.assertEqual("fake_ref", host_ref)
                self.assertEqual({"live": "true"}, options)
                raise IOError()
        stubs.stubout_session(self.stubs, Session)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self._add_default_live_migrate_stubs(conn)
        def fake_get_host_opaque_ref(context, destination):
            return "fake_ref"
        self.stubs.Set(conn._vmops, "_get_host_opaque_ref",
                       fake_get_host_opaque_ref)
        def dummy_callback(*args, **kwargs):
            pass
        self.assertRaises(IOError, conn.live_migration,
            self.context, instance=dict(name='ignore'), dest=None,
            post_method=dummy_callback, recover_method=dummy_callback,
            block_migration=False, migrate_data={})
def test_generate_vdi_map(self):
stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vm_ref = "fake_vm_ref"
def fake_find_sr(_session):
self.assertEqual(conn._session, _session)
return "source_sr_ref"
self.stubs.Set(vm_utils, "safe_find_sr", fake_find_sr)
def fake_get_instance_vdis_for_sr(_session, _vm_ref, _sr_ref):
self.assertEqual(conn._session, _session)
self.assertEqual(vm_ref, _vm_ref)
self.assertEqual("source_sr_ref", _sr_ref)
return ["vdi0", "vdi1"]
self.stubs.Set(vm_utils, "get_instance_vdis_for_sr",
fake_get_instance_vdis_for_sr)
result = conn._vmops._generate_vdi_map("dest_sr_ref", vm_ref)
self.assertEqual({"vdi0": "dest_sr_ref",
"vdi1": "dest_sr_ref"}, result)
def test_rollback_live_migration_at_destination(self):
stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
with mock.patch.object(conn, "destroy") as mock_destroy:
conn.rollback_live_migration_at_destination("context",
"instance", [], {'block_device_mapping': []})
self.assertFalse(mock_destroy.called)
class XenAPIInjectMetadataTestCase(stubs.XenAPITestBaseNoDB):
    """Verify injection of instance metadata into the XenStore.

    ``self.xenstore`` models the two stores touched by vmops: 'persist'
    (the param-xenstore written via ``_add_to_param_xenstore``) and
    'ephem' (the live xenstore written via ``_write_to_xenstore``), so
    each test can assert directly on the resulting contents.
    """
    def setUp(self):
        super(XenAPIInjectMetadataTestCase, self).setUp()
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        self.flags(firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        # In-memory stand-in for the two xenstore views (see class docstring).
        self.xenstore = dict(persist={}, ephem={})
        self.called_fake_get_vm_opaque_ref = False
        def fake_get_vm_opaque_ref(inst, instance):
            self.called_fake_get_vm_opaque_ref = True
            # The "not_found" uuid simulates a VM that no longer exists.
            if instance["uuid"] == "not_found":
                raise exception.NotFound
            self.assertEqual(instance, {'uuid': 'fake'})
            return 'vm_ref'
        def fake_add_to_param_xenstore(inst, vm_ref, key, val):
            self.assertEqual(vm_ref, 'vm_ref')
            self.xenstore['persist'][key] = val
        def fake_remove_from_param_xenstore(inst, vm_ref, key):
            self.assertEqual(vm_ref, 'vm_ref')
            if key in self.xenstore['persist']:
                del self.xenstore['persist'][key]
        def fake_write_to_xenstore(inst, instance, path, value, vm_ref=None):
            self.assertEqual(instance, {'uuid': 'fake'})
            self.assertEqual(vm_ref, 'vm_ref')
            # Live xenstore values are stored JSON-encoded, matching vmops.
            self.xenstore['ephem'][path] = jsonutils.dumps(value)
        def fake_delete_from_xenstore(inst, instance, path, vm_ref=None):
            self.assertEqual(instance, {'uuid': 'fake'})
            self.assertEqual(vm_ref, 'vm_ref')
            if path in self.xenstore['ephem']:
                del self.xenstore['ephem'][path]
        self.stubs.Set(vmops.VMOps, '_get_vm_opaque_ref',
                       fake_get_vm_opaque_ref)
        self.stubs.Set(vmops.VMOps, '_add_to_param_xenstore',
                       fake_add_to_param_xenstore)
        self.stubs.Set(vmops.VMOps, '_remove_from_param_xenstore',
                       fake_remove_from_param_xenstore)
        self.stubs.Set(vmops.VMOps, '_write_to_xenstore',
                       fake_write_to_xenstore)
        self.stubs.Set(vmops.VMOps, '_delete_from_xenstore',
                       fake_delete_from_xenstore)
    def test_inject_instance_metadata(self):
        """User metadata lands in 'persist' (keys sanitized); system
        metadata is excluded entirely."""
        # Add some system_metadata to ensure it doesn't get added
        # to xenstore
        instance = dict(metadata=[{'key': 'a', 'value': 1},
                                  {'key': 'b', 'value': 2},
                                  {'key': 'c', 'value': 3},
                                  # Check xenstore key sanitizing
                                  {'key': 'hi.there', 'value': 4},
                                  {'key': 'hi!t.e/e', 'value': 5}],
                        # Check xenstore key sanitizing
                        system_metadata=[{'key': 'sys_a', 'value': 1},
                                         {'key': 'sys_b', 'value': 2},
                                         {'key': 'sys_c', 'value': 3}],
                        uuid='fake')
        self.conn._vmops._inject_instance_metadata(instance, 'vm_ref')
        self.assertEqual(self.xenstore, {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                'vm-data/user-metadata/hi_there': '4',
                'vm-data/user-metadata/hi_t_e_e': '5',
                },
            'ephem': {},
            })
    def test_change_instance_metadata_add(self):
        """Adding a key ('+' diff) writes it, sanitized, to both stores."""
        # Test XenStore key sanitizing here, too.
        diff = {'test.key': ['+', 4]}
        instance = {'uuid': 'fake'}
        self.xenstore = {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                },
            }
        self.conn._vmops.change_instance_metadata(instance, diff)
        self.assertEqual(self.xenstore, {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                'vm-data/user-metadata/test_key': '4',
                },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                'vm-data/user-metadata/test_key': '4',
                },
            })
    def test_change_instance_metadata_update(self):
        """A '+' diff on an existing key overwrites its value in both stores."""
        diff = dict(b=['+', 4])
        instance = {'uuid': 'fake'}
        self.xenstore = {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                },
            }
        self.conn._vmops.change_instance_metadata(instance, diff)
        self.assertEqual(self.xenstore, {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '4',
                'vm-data/user-metadata/c': '3',
                },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '4',
                'vm-data/user-metadata/c': '3',
                },
            })
    def test_change_instance_metadata_delete(self):
        """A '-' diff removes the key from both stores."""
        diff = dict(b=['-'])
        instance = {'uuid': 'fake'}
        self.xenstore = {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                },
            }
        self.conn._vmops.change_instance_metadata(instance, diff)
        self.assertEqual(self.xenstore, {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/c': '3',
                },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/c': '3',
                },
            })
    def test_change_instance_metadata_not_found(self):
        """A vanished VM is tolerated: the lookup runs but nothing is written."""
        instance = {'uuid': 'not_found'}
        self.conn._vmops.change_instance_metadata(instance, "fake_diff")
        self.assertTrue(self.called_fake_get_vm_opaque_ref)
class XenAPISessionTestCase(test.NoDBTestCase):
    """Unit tests for the XenAPI session wrapper: connection creation,
    product-version parsing and host plugin version verification."""
    def _get_mock_xapisession(self, software_version):
        """Build a session whose reported software version dict is canned,
        skipping the real (side-effecting) __init__."""
        class MockXapiSession(xenapi_session.XenAPISession):
            def __init__(_ignore):
                "Skip the superclass's dirty init"
            def _get_software_version(_ignore):
                return software_version
        return MockXapiSession()
    def test_local_session(self):
        """A unix:// URL must use the local (xapi_local) transport."""
        session = self._get_mock_xapisession({})
        session.is_local_connection = True
        session.XenAPI = self.mox.CreateMockAnything()
        session.XenAPI.xapi_local().AndReturn("local_connection")
        self.mox.ReplayAll()
        self.assertEqual("local_connection",
                         session._create_session("unix://local"))
    def test_remote_session(self):
        """A network URL must use the remote Session transport."""
        session = self._get_mock_xapisession({})
        session.is_local_connection = False
        session.XenAPI = self.mox.CreateMockAnything()
        session.XenAPI.Session("url").AndReturn("remote_connection")
        self.mox.ReplayAll()
        self.assertEqual("remote_connection", session._create_session("url"))
    def test_get_product_version_product_brand_does_not_fail(self):
        """Without a product_brand (e.g. XCP), fall back to platform_version
        and a None brand rather than failing."""
        session = self._get_mock_xapisession({
            'build_number': '0',
            'date': '2012-08-03',
            'hostname': 'komainu',
            'linux': '3.2.0-27-generic',
            'network_backend': 'bridge',
            'platform_name': 'XCP_Kronos',
            'platform_version': '1.6.0',
            'xapi': '1.3',
            'xen': '4.1.2',
            'xencenter_max': '1.10',
            'xencenter_min': '1.10'
        })
        self.assertEqual(
            ((1, 6, 0), None),
            session._get_product_version_and_brand()
        )
    def test_get_product_version_product_brand_xs_6(self):
        """XenServer reports product_version/product_brand directly."""
        session = self._get_mock_xapisession({
            'product_brand': 'XenServer',
            'product_version': '6.0.50',
            'platform_version': '0.0.1'
        })
        self.assertEqual(
            ((6, 0, 50), 'XenServer'),
            session._get_product_version_and_brand()
        )
    def test_verify_plugin_version_same(self):
        """Exact plugin version match passes verification."""
        session = self._get_mock_xapisession({})
        session.PLUGIN_REQUIRED_VERSION = '2.4'
        self.mox.StubOutWithMock(session, 'call_plugin_serialized')
        session.call_plugin_serialized('nova_plugin_version', 'get_version',
                                       ).AndReturn("2.4")
        self.mox.ReplayAll()
        session._verify_plugin_version()
    def test_verify_plugin_version_compatible(self):
        """A newer minor version (2.5 vs required 2.4) is accepted."""
        session = self._get_mock_xapisession({})
        session.XenAPI = xenapi_fake.FakeXenAPI()
        session.PLUGIN_REQUIRED_VERSION = '2.4'
        self.mox.StubOutWithMock(session, 'call_plugin_serialized')
        session.call_plugin_serialized('nova_plugin_version', 'get_version',
                                       ).AndReturn("2.5")
        self.mox.ReplayAll()
        session._verify_plugin_version()
    def test_verify_plugin_version_bad_maj(self):
        """A different major version (3.0 vs required 2.4) is rejected."""
        session = self._get_mock_xapisession({})
        session.XenAPI = xenapi_fake.FakeXenAPI()
        session.PLUGIN_REQUIRED_VERSION = '2.4'
        self.mox.StubOutWithMock(session, 'call_plugin_serialized')
        session.call_plugin_serialized('nova_plugin_version', 'get_version',
                                       ).AndReturn("3.0")
        self.mox.ReplayAll()
        self.assertRaises(xenapi_fake.Failure, session._verify_plugin_version)
    def test_verify_plugin_version_bad_min(self):
        """An older minor version (2.3 vs required 2.4) is rejected."""
        session = self._get_mock_xapisession({})
        session.XenAPI = xenapi_fake.FakeXenAPI()
        session.PLUGIN_REQUIRED_VERSION = '2.4'
        self.mox.StubOutWithMock(session, 'call_plugin_serialized')
        session.call_plugin_serialized('nova_plugin_version', 'get_version',
                                       ).AndReturn("2.3")
        self.mox.ReplayAll()
        self.assertRaises(xenapi_fake.Failure, session._verify_plugin_version)
    def test_verify_current_version_matches(self):
        """The required version constant must track the shipped plugin file."""
        session = self._get_mock_xapisession({})
        # Import the plugin to extract its version
        path = os.path.dirname(__file__)
        rel_path_elem = "../../../../../plugins/xenserver/xenapi/etc/xapi.d/" \
                        "plugins/nova_plugin_version"
        for elem in rel_path_elem.split('/'):
            path = os.path.join(path, elem)
        path = os.path.realpath(path)
        plugin_version = None
        with open(path) as plugin_file:
            for line in plugin_file:
                if "PLUGIN_VERSION = " in line:
                    # Strip the 'PLUGIN_VERSION = ' prefix and the quotes.
                    plugin_version = line.strip()[17:].strip('"')
        self.assertEqual(session.PLUGIN_REQUIRED_VERSION,
                         plugin_version)
class XenAPIFakeTestCase(test.NoDBTestCase):
    """Exercise the query matcher used by the fake XenAPI layer."""
    def test_query_matches(self):
        """Well-formed field queries evaluate against a record as expected."""
        record = {'a': '1', 'b': '2', 'c_d': '3'}
        cases = {'field "a"="1"': True,
                 'field "b"="2"': True,
                 'field "b"="4"': False,
                 'not field "b"="4"': True,
                 'field "a"="1" and field "b"="4"': False,
                 'field "a"="1" or field "b"="4"': True,
                 'field "c__d"="3"': True,
                 'field \'b\'=\'2\'': True,
                 }
        for query, expected in cases.items():
            fail_msg = "for test '%s'" % query
            self.assertEqual(xenapi_fake._query_matches(record, query),
                             expected, fail_msg)
    def test_query_bad_format(self):
        """Malformed queries never match anything."""
        record = {'a': '1', 'b': '2', 'c': '3'}
        for query in ['"a"="1" or "b"="4"',
                      'a=1',
                      ]:
            fail_msg = "for test '%s'" % query
            self.assertFalse(xenapi_fake._query_matches(record, query),
                             fail_msg)
| {
"content_hash": "ddcaadeb60ab5ff195a2e2507da9a944",
"timestamp": "",
"source": "github",
"line_count": 4164,
"max_line_length": 79,
"avg_line_length": 42.50504322766571,
"alnum_prop": 0.5620455277386985,
"repo_name": "apporc/nova",
"id": "284fd7df30b73a56b4666c5315b5d87b7cf0c914",
"size": "177611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/unit/virt/xenapi/test_xenapi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16560867"
},
{
"name": "Shell",
"bytes": "24210"
},
{
"name": "Smarty",
"bytes": "335237"
}
],
"symlink_target": ""
} |
import json
import logging
import ochopod
import os
import tempfile
import threading
import time
import shutil
from argparse import ArgumentParser
from copy import deepcopy
from ochopod.api import Binding, LifeCycle, Model, Tool
from ochopod.core.core import Coordinator
from ochopod.core.fsm import diagnostic, shutdown, spin_lock
from ochopod.core.utils import shell
from ochopod.models.reactive import Actor as Reactive
from os import path
from pykka import ThreadingFuture
from pykka.exceptions import Timeout, ActorDeadError
from flask import Flask, request
from requests import post
from urlparse import urlparse
from werkzeug.exceptions import default_exceptions, HTTPException
#: Module-level logger shared by this Marathon binding ('ochopod' channel).
logger = logging.getLogger('ochopod')
class Marathon(Binding):
"""
Mesosphere/Marathon framework abstract binding, providing some basic environment variable translation (especially
the port mappings). We run a Flask micro-server to handle leader or CLI requests.
The pod requires configuration settings from the environment variables. All settings are simple key/value
pairs prefixed by *ochopod*. These are optional settings you may specify (e.g you can set them in your application
configuration):
- *ochopod_cluster*: identifier for the cluster to run this pod under (e.g "database" or "web-server"
for instance, defaulted to the Marathon application identifier if not specified).
- *ochopod_debug*: turns debug logging on if set to "true".
- *ochopod_namespace*: namespace as dot separated tokens (e.g "my-app.staging"), defaulted to "marathon".
- *ochopod_port*: pod control port on which we listen for HTTP requests, defaulted to 8080.
- *ochopod_zk*: location of ZK ensemble, default to an empty string. This string must be a well formed ZK URL
for instance zk://127.0.0.1:2181
The following payload is registered by the pod at boot time:
- **cluster**: the pod cluster
- **namespace**: the pod namespace
- **binding**: set to *mesos+marathon*
- **ports**: exposed ports, as a dict
- **port**: local control port
- **debug**: true if debug logging is on
- **application**: controlling Marathon application identifier
- **task**: underlying Mesos task identifier
- **seq**: unique pod index within the cluster
- **node**: resource id of the underlying node running the container.
- **ip**: local IPv4 for the resource on which the pod is running.
- **public**: externally reachable resource IPv4 (used for the CLI or 3rd party integrations if applicable).
- **zk**: connection string for our ZK ensemble.
"""
    def get_node_details(self):
        """Return node-specific hints for this binding.

        Concrete subclasses must override this to supply the 'ip', 'node'
        and 'public' values merged into the pod hints during boot() (the
        base class has no way to discover them itself).

        :raises NotImplementedError: always, in this abstract base.
        """
        raise NotImplementedError
def boot(self, lifecycle, model=Reactive, tools=None, local=False):
#
# - quick check to make sure we get the right implementations
#
assert issubclass(model, Model), 'model must derive from ochopod.api.Model'
assert issubclass(lifecycle, LifeCycle), 'lifecycle must derive from ochopod.api.LifeCycle'
#
# - instantiate our flask endpoint
# - default to a json handler for all HTTP errors (including an unexpected 500)
#
def _handler(error):
http = error.code if isinstance(error, HTTPException) else 500
return '{}', http, {'Content-Type': 'application/json; charset=utf-8'}
web = Flask(__name__)
for code in default_exceptions.iterkeys():
web.error_handler_spec[None][code] = _handler
#
# - default presets in case we run outside of marathon (local vm testing)
# - any environment variable prefixed with "ochopod." is of interest for us (e.g this is what the user puts
# in the marathon application configuration for instance)
# - the other settings come from marathon (namely the port bindings & application/task identifiers)
# - the MESOS_TASK_ID is important to keep around to enable task deletion via the marathon REST API
#
env = \
{
'ochopod_application': '',
'ochopod_cluster': 'default',
'ochopod_debug': 'true',
'ochopod_local': 'false',
'ochopod_namespace': 'marathon',
'ochopod_port': '8080',
'ochopod_start': 'true',
'ochopod_task': '',
'ochopod_zk': '',
'PORT_8080': '8080'
}
env.update(os.environ)
ochopod.enable_cli_log(debug=env['ochopod_debug'] == 'true')
try:
#
# - grab our environment variables (which are set by the marathon executor)
# - extract the mesos PORT_* bindings and construct a small remapping dict
#
ports = {}
logger.debug('environment ->\n%s' % '\n'.join(['\t%s -> %s' % (k, v) for k, v in env.items()]))
for key, val in env.items():
if key.startswith('PORT_'):
ports[key[5:]] = int(val)
#
# - keep any "ochopod_" environment variable & trim its prefix
# - default all our settings, especially the mandatory ones
# - the ip and zookeeper are defaulted to localhost to enable easy testing
#
hints = {k[8:]: v for k, v in env.items() if k.startswith('ochopod_')}
if local or hints['local'] == 'true':
#
# - we are running in local mode (e.g on a dev workstation)
# - default everything to localhost
#
logger.info('running in local mode (make sure you run a standalone zookeeper)')
hints.update(
{
'fwk': 'marathon (debug)',
'ip': '127.0.0.1',
'node': 'local',
'ports': ports,
'public': '127.0.0.1',
'zk': '127.0.0.1:2181'
})
else:
#
# - extend our hints
# - add the application + task
#
hints.update(
{
'application': env['MARATHON_APP_ID'][1:],
'fwk': 'marathon',
'ip': '',
'node': '',
'ports': ports,
'public': '',
'task': env['MESOS_TASK_ID'],
'zk': ''
})
#
# - use whatever subclass is implementing us to infer 'ip', 'node' and 'public'
#
hints.update(self.get_node_details())
#
# - lookup for the zookeeper connection string from environment variable or on disk
# - we have to look into different places depending on how mesos was installed
#
def _1():
#
# - most recent DCOS release
# - $MESOS_MASTER is located in /opt/mesosphere/etc/mesos-slave-common
# - the snippet in there is prefixed by MESOS_MASTER=zk://<ip:port>/mesos
#
logger.debug('checking /opt/mesosphere/etc/mesos-slave-common...')
_, lines = shell("grep MESOS_MASTER /opt/mesosphere/etc/mesos-slave-common")
return lines[0][13:]
def _2():
#
# - same as above except for slightly older DCOS releases
# - $MESOS_MASTER is located in /opt/mesosphere/etc/mesos-slave
#
logger.debug('checking /opt/mesosphere/etc/mesos-slave...')
_, lines = shell("grep MESOS_MASTER /opt/mesosphere/etc/mesos-slave")
return lines[0][13:]
def _3():
#
# - a regular package install will write the slave settings under /etc/mesos/zk (the snippet in
# there looks like zk://10.0.0.56:2181/mesos)
#
logger.debug('checking /etc/mesos/zk...')
_, lines = shell("cat /etc/mesos/zk")
return lines[0]
def _4():
#
# - look for ZK from environment variables
# - user can pass down ZK using $ochopod_zk
# - this last-resort situation is used mostly for debugging
#
logger.debug('checking $ochopod_zk environment variable...')
return env['ochopod_zk']
#
# - depending on how the slave has been installed we might have to look in various places
# to find out what our zookeeper connection string is
# - use urlparse to keep the host:port part of the URL (possibly including a login+password)
#
for method in [_1, _2, _3, _4]:
try:
hints['zk'] = urlparse(method()).netloc
break
except Exception:
pass
#
# - the cluster must be fully qualified with a namespace (which is defaulted anyway)
#
assert hints['zk'], 'unable to determine where zookeeper is located (unsupported/bogus mesos setup ?)'
assert hints['cluster'] and hints['namespace'], 'no cluster and/or namespace defined (user error ?)'
#
# - load the tools
#
if tools:
tools = {tool.tag: tool for tool in [clz() for clz in tools if issubclass(clz, Tool)] if tool.tag}
logger.info('supporting tools %s' % ', '.join(tools.keys()))
#
# - start the life-cycle actor which will pass our hints (as a json object) to its underlying sub-process
# - start our coordinator which will connect to zookeeper and attempt to lead the cluster
# - upon grabbing the lock the model actor will start and implement the configuration process
# - the hints are a convenient bag for any data that may change at runtime and needs to be returned (via
# the HTTP POST /info request)
# - what's being registered in zookeeper is immutable though and decorated with additional details by
# the coordinator (especially the pod index which is derived from zookeeper)
#
latch = ThreadingFuture()
logger.info('starting %s.%s (marathon) @ %s' % (hints['namespace'], hints['cluster'], hints['node']))
breadcrumbs = deepcopy(hints)
hints['metrics'] = {}
hints['dependencies'] = model.depends_on
env.update({'ochopod': json.dumps(hints)})
executor = lifecycle.start(env, latch, hints)
coordinator = Coordinator.start(
hints['zk'].split(','),
hints['namespace'],
hints['cluster'],
int(hints['port']),
breadcrumbs,
model,
hints)
#
# - external hook forcing a coordinator reset
# - this will force a re-connection to zookeeper and pod registration
# - please note this will not impact the pod lifecycle (e.g the underlying sub-process will be
# left running)
#
@web.route('/reset', methods=['POST'])
def _reset():
logger.debug('http in -> /reset')
coordinator.tell({'request': 'reset'})
return '{}', 200, {'Content-Type': 'application/json; charset=utf-8'}
#
# - external hook exposing information about our pod
# - this is a subset of what's registered in zookeeper at boot-time
# - the data is dynamic and updated from time to time by the model and executor actors
# - from @pferro -> the pod's dependencies defined in the model are now added as well
#
@web.route('/info', methods=['POST'])
def _info():
logger.debug('http in -> /info')
keys = \
[
'application',
'dependencies',
'ip',
'metrics',
'node',
'port',
'ports',
'process',
'public',
'state',
'status',
'task'
]
subset = dict(filter(lambda i: i[0] in keys, hints.iteritems()))
return json.dumps(subset), 200, {'Content-Type': 'application/json; charset=utf-8'}
#
# - external hook exposing our circular log
# - reverse and dump ochopod.log as a json array
#
@web.route('/log', methods=['POST'])
def _log():
logger.debug('http in -> /log')
with open(ochopod.LOG, 'r+') as log:
lines = [line for line in log]
return json.dumps(lines), 200, {'Content-Type': 'application/json; charset=utf-8'}
#
# - RPC call to run a custom tool within the pod
#
@web.route('/exec', methods=['POST'])
def _exec():
logger.debug('http in -> /exec')
#
# - make sure the command (first token in the X-Shell header) maps to a tool
# - if no match abort on a 404
#
line = request.headers['X-Shell']
tokens = line.split(' ')
cmd = tokens[0]
if not tools or cmd not in tools:
return '{}', 404, {'Content-Type': 'application/json; charset=utf-8'}
code = 1
tool = tools[cmd]
#
# - make sure the parser does not sys.exit()
#
class _Parser(ArgumentParser):
def exit(self, status=0, message=None):
raise ValueError(message)
#
# - prep a temporary directory
# - invoke define_cmdline_parsing()
# - switch off parsing if NotImplementedError is raised
#
use_parser = 1
parser = _Parser(prog=tool.tag)
try:
tool.define_cmdline_parsing(parser)
except NotImplementedError:
use_parser = 0
tmp = tempfile.mkdtemp()
try:
#
# - parse the command line
# - upload any attachment
#
args = parser.parse_args(tokens[1:]) if use_parser else ' '.join(tokens[1:])
for tag, upload in request.files.items():
where = path.join(tmp, tag)
logger.debug('uploading %s @ %s' % (tag, tmp))
upload.save(where)
#
# - run the tool method
# - pass the temporary directory as well
#
logger.info('invoking "%s"' % line)
code, lines = tool.body(args, tmp)
except ValueError as failure:
lines = [parser.format_help() if failure.message is None else failure.message]
except Exception as failure:
lines = ['unexpected failure -> %s' % failure]
finally:
#
# - make sure to cleanup our temporary directory
#
shutil.rmtree(tmp)
out = \
{
'code': code,
'stdout': lines
}
return json.dumps(out), 200, {'Content-Type': 'application/json; charset=utf-8'}
#
# - web-hook used to receive requests from the leader or the CLI tools
# - those requests are passed down to the executor actor
# - any non HTTP 200 response is a failure
# - failure to acknowledge within the specified timeout will result in a HTTP 408 (REQUEST TIMEOUT)
# - attempting to send a control request to a dead pod will result in a HTTP 410 (GONE)
#
@web.route('/control/<task>', methods=['POST'])
@web.route('/control/<task>/<timeout>', methods=['POST'])
def _control(task, timeout='60'):
    """
    Control gateway: forwards a lifecycle request ('check', 'on', 'off', 'ok',
    'kill' or 'signal') to the executor actor and relays its JSON response.

    HTTP 400 for an unsupported task, 408 if the actor does not answer within
    *timeout* seconds, 410 if the executor actor is already dead.
    """
    logger.debug('http in -> /control/%s' % task)
    if task not in ['check', 'on', 'off', 'ok', 'kill', 'signal']:
        #
        # - fail on a HTTP 400 if the request is not supported
        #
        return '{}', 400, {'Content-Type': 'application/json; charset=utf-8'}
    try:
        ts = time.time()
        latch = ThreadingFuture()
        executor.tell({'request': task, 'latch': latch, 'data': request.data})
        js, code = latch.get(timeout=int(timeout))
        #
        # - time.time() returns seconds; convert to milliseconds so the
        #   "(%d ms)" log line is accurate (previously logged raw seconds)
        #
        ms = 1000 * (time.time() - ts)
        logger.debug('http out -> HTTP %s (%d ms)' % (code, ms))
        return json.dumps(js), code, {'Content-Type': 'application/json; charset=utf-8'}
    except Timeout:
        #
        # - we failed to match the specified timeout
        # - gracefully fail on a HTTP 408
        #
        return '{}', 408, {'Content-Type': 'application/json; charset=utf-8'}
    except ActorDeadError:
        #
        # - the executor has been shutdown (probably after a /control/kill)
        # - gracefully fail on a HTTP 410
        #
        return '{}', 410, {'Content-Type': 'application/json; charset=utf-8'}
#
# - internal hook required to shutdown the web-server
# - it's not possible to do it outside of a request handler
# - make sure this calls only comes from localhost (todo)
#
@web.route('/terminate', methods=['POST'])
def _terminate():
    """Shut the embedded werkzeug server down (only callable from a handler)."""
    # Werkzeug exposes its shutdown hook through the WSGI environment, which
    # is why this has to live inside a request handler.
    stop_server = request.environ.get('werkzeug.server.shutdown')
    stop_server()
    return '{}', 200, {'Content-Type': 'application/json; charset=utf-8'}
#
# - run werkzeug from a separate thread to avoid blocking the main one
# - we'll have to shut it down using a dedicated HTTP POST
#
class _Runner(threading.Thread):
    # Hosts the werkzeug server on its own thread so the main thread stays
    # free to block on the pod lifecycle latch; port comes from the hints.
    def run(self):
        web.run(host='0.0.0.0', port=int(hints['port']), threaded=True)
try:
    #
    # - block on the lifecycle actor until it goes down (usually after a /control/kill request)
    #
    _Runner().start()
    spin_lock(latch)
    logger.debug('pod is dead, idling')
    while 1:
        #
        # - simply idle forever (since the framework would restart any container that terminates)
        # - /log and /hints HTTP requests will succeed (and show the pod as being killed)
        # - any control request will now fail
        #
        time.sleep(60.0)
finally:
    #
    # - when we exit the block first shutdown our executor (which may probably be already down)
    # - then shutdown the coordinator to un-register from zookeeper
    # - finally ask werkzeug to shutdown via a REST call
    #
    shutdown(executor)
    shutdown(coordinator)
    post('http://127.0.0.1:%s/terminate' % env['ochopod_port'])
#
# - the handlers below belong to an enclosing try: that starts earlier in the
#   file (before this chunk) and wraps the whole bootstrap sequence
#
except KeyboardInterrupt:
    logger.fatal('CTRL-C pressed')
except Exception as failure:
    logger.fatal('unexpected condition -> %s' % diagnostic(failure))
| {
"content_hash": "2f038af5895e4a4191a8b7dd962f8c9c",
"timestamp": "",
"source": "github",
"line_count": 499,
"max_line_length": 118,
"avg_line_length": 42.00400801603207,
"alnum_prop": 0.5025286259541984,
"repo_name": "autodesk-cloud/ochopod",
"id": "9a005e990abdb52914eecebb9b5944a77acb8266",
"size": "21565",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ochopod/frameworks/marathon.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "136308"
}
],
"symlink_target": ""
} |
from scorpion.sharedobj import *
from scorpion.sql import *
from scorpion.sqlparser import *
from scorpion.db import *
parsed = parse_sql("select hr, avg(temp) from readings group by hr")
parsed.where.append("sensor in %s")
params = [('1', '2', '18')]
db = connect("intel")
obj = SharedObj(db, parsed=parsed, params=params)
for t in obj.get_filter_rows(['2004-03-05 12:00:00']):
print t
| {
"content_hash": "f7008c238265b47453af4a95624814bd",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 68,
"avg_line_length": 24.625,
"alnum_prop": 0.6979695431472082,
"repo_name": "sirrice/scorpion",
"id": "c92c983f106a41b11d0a0ddf6d509d88618ef7a1",
"size": "394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/sharedobj.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "389180"
},
{
"name": "R",
"bytes": "3041"
},
{
"name": "Shell",
"bytes": "1322"
}
],
"symlink_target": ""
} |
from automationclient import base
from automationclient import utils
class ListExtResource(base.Resource):
    @property
    def summary(self):
        """First line of the extension description, '??' when it is empty.

        Multi-line descriptions are truncated to their first line with a
        trailing ellipsis.
        """
        text = self.description.strip()
        if not text:
            return '??'
        first_line, newline, _remainder = text.partition("\n")
        return first_line + "..." if newline else first_line
class ListExtManager(base.Manager):
    # Responses from /extensions are materialized as ListExtResource objects.
    resource_class = ListExtResource

    def show_all(self):
        """Return every available extension from the /extensions endpoint."""
        return self._list("/extensions", 'extensions')
@utils.service_type('automation')
def do_list_extensions(client, _args):
    """
    List all the os-api extensions that are available.
    """
    columns = ["Name", "Summary", "Alias", "Updated"]
    utils.print_list(client.list_extensions.show_all(), columns)
| {
"content_hash": "a2dc1e593ed3d34669405feadc96a771",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 54,
"avg_line_length": 25.59375,
"alnum_prop": 0.6214896214896215,
"repo_name": "StackOps/python-automationclient",
"id": "a694cfe9f3a08b0d1b20c3b2fee1f9f91620feed",
"size": "1449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "automationclient/v1_1/contrib/list_extensions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arc",
"bytes": "533"
},
{
"name": "CSS",
"bytes": "4176"
},
{
"name": "Python",
"bytes": "428565"
},
{
"name": "Shell",
"bytes": "12215"
}
],
"symlink_target": ""
} |
import os
import platform
import unittest
from textwrap import dedent
from nose.plugins.attrib import attr
from parameterized.parameterized import parameterized
from conans.client.build.cmake import CMake
from conans.model.version import Version
from conans.test.utils.deprecation import catch_deprecation_warning
from conans.test.utils.tools import TestClient
conanfile_py = """
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
build_policy="missing"
def package_info(self):
self.cpp_info.cxxflags = ["MyFlag1", "MyFlag2"]
self.cpp_info.cflags = ["-load", "C:\some\path"]
self.cpp_info.defines = ['MY_DEF=My" \string', 'MY_DEF2=My${} other \string']
"""
chatconanfile_py = """
from conans import ConanFile
class ChatConan(ConanFile):
name = "Chat"
version = "0.1"
requires = "Hello/0.1@lasote/testing"
build_policy="missing"
def package_info(self):
self.cpp_info.cxxflags = ["MyChatFlag1", "MyChatFlag2"]
"""
conanfile = """[requires]
Hello/0.1@lasote/testing
"""
cmake = """set(CMAKE_CXX_COMPILER_WORKS 1)
set(CMAKE_CXX_ABI_COMPILED 1)
project(MyHello CXX)
cmake_minimum_required(VERSION 2.8.12)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()
message(STATUS "CMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}")
message(STATUS "CONAN_CXX_FLAGS=${CONAN_CXX_FLAGS}")
message(STATUS "CMAKE_C_FLAGS=${CMAKE_C_FLAGS}")
message(STATUS "CONAN_C_FLAGS=${CONAN_C_FLAGS}")
message(STATUS "HELLO_CXX_FLAGS=${HELLO_FLAGS}")
message(STATUS "CHAT_CXX_FLAGS=${CHAT_FLAGS}")
message(STATUS "CONAN_DEFINES_HELLO=${CONAN_DEFINES_HELLO}")
message(STATUS "HELLO_DEFINES=${HELLO_DEFINES}")
"""
@attr("slow")
class CMakeFlagsTest(unittest.TestCase):
    """Integration tests for cxxflags/cflags/defines propagation through the
    conan 'cmake' generator, both in global mode and in TARGETS mode, plus the
    cppstd / fPIC adjustments done by conan_basic_setup()."""

    # Extract the value of a "-- NAME=VALUE" status line from cmake output and
    # assert it carries no stray quoting.
    def _get_line(self, text, begin):
        lines = str(text).splitlines()
        begin = "-- %s=" % begin
        line = [l for l in lines if l.startswith(begin)][0]
        flags = line[len(begin):].strip()
        self.assertNotIn("'", flags)
        self.assertNotIn('"', flags)
        return flags

    # Builds and runs a real executable to check defines with $/${} survive
    # all the way to the compiler, with and without TARGETS mode.
    @parameterized.expand([(True, ), (False, )])
    def build_app_test(self, targets):
        client = TestClient()
        conanfile_py = """
from conans import ConanFile
class HelloConan(ConanFile):
    name = "Hello"
    version = "0.1"
    def package_info(self):
        self.cpp_info.defines = [r'MY_DEF=My${} $string', r'MY_DEF2=My$ other string']
"""
        client.save({"conanfile.py": conanfile_py})
        client.run("create . lasote/testing")
        consumer = """from conans import ConanFile, CMake
import os
class App(ConanFile):
    settings = "os", "compiler", "arch", "build_type"
    requires = "Hello/0.1@lasote/testing"
    generators = "cmake"
    def build(self):
        cmake = CMake(self)
        cmake.configure()
        cmake.build()
        self.run(os.sep.join([".", "bin", "myapp"]))
"""
        cmake_app = """set(CMAKE_CXX_COMPILER_WORKS 1)
set(CMAKE_CXX_ABI_COMPILED 1)
project(MyHello CXX)
cmake_minimum_required(VERSION 2.8.12)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup(%s)
add_executable(myapp myapp.cpp)
conan_target_link_libraries(myapp)
""" % ("TARGETS" if targets else "")
        myapp = r"""#include <iostream>
#define STRINGIFY(x) #x
#define STRINGIFYMACRO(y) STRINGIFY(y)
int main(){
    std::cout << "Msg1: " << STRINGIFYMACRO(MY_DEF) << "\n";
    std::cout << "Msg2: " << STRINGIFYMACRO(MY_DEF2) << "\n";
}"""
        client.save({"conanfile.py": consumer,
                     "CMakeLists.txt": cmake_app,
                     "myapp.cpp": myapp
                     }, clean_first=True)
        client.run("install .")
        client.run("build .")
        self.assertIn("Msg1: My${} $string", client.out)
        self.assertIn("Msg2: My$ other string", client.out)

    # Global (non-TARGETS) mode: flags/defines land in CMAKE_*_FLAGS.
    def flags_test(self):
        client = TestClient()
        client.save({"conanfile.py": conanfile_py})
        client.run("export . lasote/testing")
        client.save({"conanfile.txt": conanfile,
                     "CMakeLists.txt": cmake}, clean_first=True)
        client.run('install . -g cmake')
        generator = '-G "Visual Studio 15 Win64"' if platform.system() == "Windows" else ""
        client.runner("cmake . %s" % generator, cwd=client.current_folder)
        cmake_cxx_flags = self._get_line(client.user_io.out, "CMAKE_CXX_FLAGS")
        self.assertTrue(cmake_cxx_flags.endswith("MyFlag1 MyFlag2"))
        self.assertIn("CONAN_CXX_FLAGS=MyFlag1 MyFlag2", client.out)
        self.assertIn("CMAKE_C_FLAGS= -load C:\some\path", client.out)
        self.assertIn("CONAN_C_FLAGS=-load C:\some\path ", client.out)
        self.assertIn('CONAN_DEFINES_HELLO=-DMY_DEF=My" \string;-DMY_DEF2=My${} other \string',
                      client.out)

    # Flags from a transitive dependency (Hello via Chat) must be appended.
    def transitive_flags_test(self):
        client = TestClient()
        client.save({"conanfile.py": conanfile_py})
        client.run("export . lasote/testing")
        client.save({"conanfile.py": chatconanfile_py}, clean_first=True)
        client.run("export . lasote/testing")
        client.save({"conanfile.txt": conanfile.replace("Hello", "Chat"),
                     "CMakeLists.txt": cmake}, clean_first=True)
        client.run('install . -g cmake')
        generator = '-G "Visual Studio 15 Win64"' if platform.system() == "Windows" else ""
        client.runner("cmake . %s" % generator, cwd=client.current_folder)
        cmake_cxx_flags = self._get_line(client.user_io.out, "CMAKE_CXX_FLAGS")
        self.assertTrue(cmake_cxx_flags.endswith("MyFlag1 MyFlag2 MyChatFlag1 MyChatFlag2"))
        self.assertIn("CONAN_CXX_FLAGS=MyFlag1 MyFlag2 MyChatFlag1 MyChatFlag2",
                      client.user_io.out)

    # TARGETS mode: flags go to the INTERFACE properties of CONAN_PKG::Hello,
    # not to the global CMAKE_CXX_FLAGS.
    def targets_flags_test(self):
        client = TestClient()
        client.save({"conanfile.py": conanfile_py})
        client.run("export . lasote/testing")
        cmake_targets = cmake.replace("conan_basic_setup()",
                                      "conan_basic_setup(TARGETS)\n"
                                      "get_target_property(HELLO_FLAGS CONAN_PKG::Hello"
                                      " INTERFACE_COMPILE_OPTIONS)\n"
                                      "get_target_property(HELLO_DEFINES CONAN_PKG::Hello"
                                      " INTERFACE_COMPILE_DEFINITIONS)")
        client.save({"conanfile.txt": conanfile,
                     "CMakeLists.txt": cmake_targets},
                    clean_first=True)
        client.run('install . -g cmake')
        generator = '-G "Visual Studio 15 Win64"' if platform.system() == "Windows" else ""
        client.runner("cmake . %s" % generator, cwd=client.current_folder)
        cmake_cxx_flags = self._get_line(client.out, "CMAKE_CXX_FLAGS")
        self.assertNotIn("My", cmake_cxx_flags)
        self.assertIn("CONAN_CXX_FLAGS=MyFlag1 MyFlag2", client.out)
        self.assertIn("HELLO_CXX_FLAGS=-load;C:\some\path;MyFlag1;MyFlag2;"
                      "$<$<CONFIG:Release>:;>;$<$<CONFIG:RelWithDebInfo>:;>;"
                      "$<$<CONFIG:MinSizeRel>:;>;$<$<CONFIG:Debug>:;>", client.out)
        self.assertIn('HELLO_DEFINES=MY_DEF=My" \string;MY_DEF2=My${} other \string;', client.out)

    # TARGETS mode with an extra flag supplied on the cmake command line.
    def targets_own_flags_test(self):
        client = TestClient()
        client.save({"conanfile.py": conanfile_py.replace('version = "0.1"',
                                                          'version = "0.1"\n'
                                                          '    settings = "compiler"')})
        client.run("export . lasote/testing")
        cmake_targets = cmake.replace("conan_basic_setup()",
                                      "conan_basic_setup(TARGETS)\n"
                                      "get_target_property(HELLO_FLAGS CONAN_PKG::Hello"
                                      " INTERFACE_COMPILE_OPTIONS)\n"
                                      "get_target_property(HELLO_DEFINES CONAN_PKG::Hello"
                                      " INTERFACE_COMPILE_DEFINITIONS)")
        client.save({"conanfile.txt": conanfile,
                     "CMakeLists.txt": cmake_targets},
                    clean_first=True)
        client.run('install . -g cmake')
        generator = '-G "Visual Studio 15 Win64"' if platform.system() == "Windows" else ""
        client.runner("cmake . %s -DCONAN_CXX_FLAGS=CmdCXXFlag" % generator,
                      cwd=client.current_folder)
        cmake_cxx_flags = self._get_line(client.user_io.out, "CMAKE_CXX_FLAGS")
        self.assertNotIn("My", cmake_cxx_flags)
        self.assertIn("CmdCXXFlag", cmake_cxx_flags)
        self.assertIn("CONAN_CXX_FLAGS=MyFlag1 MyFlag2 CmdCXXFlag", client.user_io.out)
        self.assertIn("HELLO_CXX_FLAGS=-load;C:\some\path;MyFlag1;MyFlag2;"
                      "$<$<CONFIG:Release>:;>;$<$<CONFIG:RelWithDebInfo>:;>;"
                      "$<$<CONFIG:MinSizeRel>:;>;$<$<CONFIG:Debug>:;>", client.out)
        self.assertIn('HELLO_DEFINES=MY_DEF=My" \string;MY_DEF2=My${} other \string;', client.out)

    # TARGETS mode across a transitive dependency: each package keeps its own
    # interface flags (Hello vs Chat).
    def transitive_targets_flags_test(self):
        client = TestClient()
        client.save({"conanfile.py": conanfile_py})
        client.run("export . lasote/testing")
        client.save({"conanfile.py": chatconanfile_py}, clean_first=True)
        client.run("export . lasote/testing")
        cmake_targets = cmake.replace("conan_basic_setup()",
                                      "conan_basic_setup(TARGETS)\n"
                                      "get_target_property(HELLO_FLAGS CONAN_PKG::Hello"
                                      " INTERFACE_COMPILE_OPTIONS)\n"
                                      "get_target_property(CHAT_FLAGS CONAN_PKG::Chat"
                                      " INTERFACE_COMPILE_OPTIONS)\n"
                                      "get_target_property(HELLO_DEFINES CONAN_PKG::Hello"
                                      " INTERFACE_COMPILE_DEFINITIONS)")
        client.save({"conanfile.txt": conanfile.replace("Hello", "Chat"),
                     "CMakeLists.txt": cmake_targets},
                    clean_first=True)
        client.run('install . -g cmake')
        generator = '-G "Visual Studio 15 Win64"' if platform.system() == "Windows" else ""
        client.runner("cmake . %s" % generator, cwd=client.current_folder)
        cmake_cxx_flags = self._get_line(client.user_io.out, "CMAKE_CXX_FLAGS")
        self.assertNotIn("My", cmake_cxx_flags)
        self.assertIn("CONAN_CXX_FLAGS=MyFlag1 MyFlag2 MyChatFlag1 MyChatFlag2",
                      client.user_io.out)
        self.assertIn("HELLO_CXX_FLAGS=-load;C:\some\path;MyFlag1;MyFlag2;"
                      "$<$<CONFIG:Release>:;>;$<$<CONFIG:RelWithDebInfo>:;>;"
                      "$<$<CONFIG:MinSizeRel>:;>;$<$<CONFIG:Debug>:;>", client.out)
        self.assertIn("CHAT_CXX_FLAGS=MyChatFlag1;MyChatFlag2;"
                      "$<$<CONFIG:Release>:;>;$<$<CONFIG:RelWithDebInfo>:;>;"
                      "$<$<CONFIG:MinSizeRel>:;>;$<$<CONFIG:Debug>:;>", client.out)
        self.assertIn('HELLO_DEFINES=MY_DEF=My" \string;MY_DEF2=My${} other \string;', client.out)

    # CMake helper must be constructible whatever subset of settings exists.
    def cmake_test_needed_settings(self):
        conanfile = """
import os
from conans import ConanFile, CMake
class MyLib(ConanFile):
    name = "MyLib"
    version = "0.1"
    %s
    def build(self):
        cmake = CMake(self)
"""
        for settings_line in ('', 'settings="arch"', 'settings="compiler"'):
            client = TestClient()
            client.save({"conanfile.py": conanfile % settings_line})
            client.run("install .")
            client.run("build .")

    # The shared option must map to the BUILD_SHARED_LIBS cmake definition.
    def cmake_shared_flag_test(self):
        conanfile = """
import os
from conans import ConanFile, CMake
class MyLib(ConanFile):
    name = "MyLib"
    version = "0.1"
    options = {"shared": [True, False]}
    default_options= "shared=%s"
    settings = "arch", "compiler"
    def build(self):
        cmake = CMake(self)
        if self.options.shared:
            assert(cmake.definitions["BUILD_SHARED_LIBS"] == "ON")
        else:
            assert(cmake.definitions["BUILD_SHARED_LIBS"] == "OFF")
"""
        client = TestClient()
        client.save({"conanfile.py": conanfile % "True"})
        client.run("build .", assert_error=True)
        self.assertIn("conanbuildinfo.txt file not found", client.user_io.out)
        client.run("install .")
        client.run("build .")
        client.save({"conanfile.py": conanfile % "False"}, clean_first=True)
        client.run("install .")
        client.run("build .")

    # The cppstd setting must actually reach the compiler (gnu98 fails to
    # build a C++14 source, gnu14/14 succeed with/without GNU extensions).
    def std_flag_applied_test(self):
        conanfile = """
import os
from conans import ConanFile, CMake
class MyLib(ConanFile):
    name = "MyLib"
    version = "0.1"
    settings = "arch", "compiler", "cppstd"
    generators = "cmake"
    def build(self):
        cmake = CMake(self)
        cmake.configure()
        cmake.build()
"""
        client = TestClient()
        client.save({"conanfile.py": conanfile,
                     "mylib.cpp": "auto myfunc(){return 3;}", # c++14 feature
                     "CMakeLists.txt": """
set(CMAKE_CXX_COMPILER_WORKS 1)
set(CMAKE_CXX_ABI_COMPILED 1)
project(MyHello CXX)
cmake_minimum_required(VERSION 2.8.12)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()
add_library(mylib mylib.cpp)
target_link_libraries(mylib ${CONAN_LIBS})
"""})
        if platform.system() != "Windows":
            with catch_deprecation_warning(self):
                client.run("install . --install-folder=build -s cppstd=gnu98")
            client.run("build . --build-folder=build", assert_error=True)
            self.assertIn("Error in build()", client.out)
        # Now specify c++14
        with catch_deprecation_warning(self):
            client.run("install . --install-folder=build -s cppstd=gnu14")
        client.run("build . --build-folder=build")
        self.assertIn("CPP STANDARD: 14 WITH EXTENSIONS ON", client.out)
        libname = "libmylib.a" if platform.system() != "Windows" else "mylib.lib"
        libpath = os.path.join(client.current_folder, "build", "lib", libname)
        self.assertTrue(os.path.exists(libpath))
        with catch_deprecation_warning(self):
            client.run("install . --install-folder=build -s cppstd=14")
        client.run("build . --build-folder=build")
        self.assertIn("CPP STANDARD: 14 WITH EXTENSIONS OFF", client.out)
        self.assertNotIn("Conan setting CXX_FLAGS flags", client.out)
        libname = "libmylib.a" if platform.system() != "Windows" else "mylib.lib"
        libpath = os.path.join(client.current_folder, "build", "lib", libname)
        self.assertTrue(os.path.exists(libpath))

    def standard_20_as_cxx_flag_test(self):
        # CMake (1-Jun-2018) do not support the 20 flag in CMAKE_CXX_STANDARD var
        conanfile = """
import os
from conans import ConanFile, CMake
class MyLib(ConanFile):
    name = "MyLib"
    version = "0.1"
    settings = "arch", "compiler", "cppstd"
    exports_sources = "CMakeLists.txt"
    generators = "cmake"
    def build(self):
        cmake = CMake(self)
        cmake.configure()
"""
        cmakelists = """
set(CMAKE_CXX_COMPILER_WORKS 1)
set(CMAKE_CXX_ABI_COMPILED 1)
project(MyHello CXX)
cmake_minimum_required(VERSION 2.8.12)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_set_std()
"""
        client = TestClient()
        client.save({"conanfile.py": conanfile,
                     "CMakeLists.txt": cmakelists})
        def conan_set_std_branch():
            # Replicate logic from cmake_common definition of 'macro(conan_set_std)'
            cmake_version = CMake.get_version()
            return cmake_version < Version("3.12")
        with catch_deprecation_warning(self):
            client.run("create . user/channel -s cppstd=gnu20 -s compiler=gcc -s compiler.version=8 "
                       "-s compiler.libcxx=libstdc++11")
        if conan_set_std_branch():
            self.assertIn("Conan setting CXX_FLAGS flags: -std=gnu++2a", client.out)
        else:
            self.assertIn("Conan setting CPP STANDARD: 20 WITH EXTENSIONS ON", client.out)
        with catch_deprecation_warning(self):
            client.run("create . user/channel -s cppstd=20 -s compiler=gcc -s compiler.version=8 "
                       "-s compiler.libcxx=libstdc++11")
        if conan_set_std_branch():
            self.assertIn("Conan setting CXX_FLAGS flags: -std=c++2a", client.out)
        else:
            self.assertIn("Conan setting CPP STANDARD: 20 WITH EXTENSIONS OFF", client.out)

    # fPIC handling: honored case-sensitively and skippable via SKIP_FPIC.
    def fpic_applied_test(self):
        conanfile = """
import os
from conans import ConanFile, CMake
class MyLib(ConanFile):
    name = "MyLib"
    version = "0.1"
    settings = "arch", "compiler"
    options = {"fPIC": [True, False]}
    default_options = "fPIC=False"
    generators = "cmake"
    exports_sources = "CMakeLists.txt"
    def build(self):
        cmake = CMake(self)
        cmake.configure()
"""
        cmakelists = """
set(CMAKE_CXX_COMPILER_WORKS 1)
set(CMAKE_CXX_ABI_COMPILED 1)
project(MyHello CXX)
cmake_minimum_required(VERSION 2.8.12)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()
"""
        client = TestClient()
        client.save({"conanfile.py": conanfile,
                     "CMakeLists.txt": cmakelists})
        client.run("create . user/channel -o MyLib:fPIC=True")
        self.assertIn("Conan: Adjusting fPIC flag (ON)", client.out)
        client.run("create . user/channel -o MyLib:fPIC=False")
        self.assertIn("Conan: Adjusting fPIC flag (OFF)", client.out)
        client.save({"conanfile.py": conanfile.replace("fPIC", "fpic")}, clean_first=False)
        client.run("create . user/channel -o MyLib:fpic=True")
        self.assertNotIn("Conan: Adjusting fPIC flag (ON)", client.out)
        # Skip fpic adjustements in basic setup
        tmp = cmakelists.replace("conan_basic_setup()", "conan_basic_setup(SKIP_FPIC)")
        client.save({"CMakeLists.txt": tmp, "conanfile.py": conanfile}, clean_first=True)
        client.run("create . user/channel -o MyLib:fPIC=True")
        self.assertNotIn("Conan: Adjusting fPIC flag", client.out)

    def header_only_generator_test(self):
        """ Test cmake.install() is possible although Generator could not be deduced from
        settings
        """
        conanfile = dedent("""
            from conans import ConanFile, CMake
            class TestConan(ConanFile):
                name = "kk"
                version = "1.0"
                exports = "*"
                def package(self):
                    cmake = CMake(self)
                    self.output.info("Configure command: %s" % cmake.command_line)
                    cmake.configure()
                    cmake.install()
            """)
        cmakelists = dedent("""
            cmake_minimum_required(VERSION 3.3)
            project(test)
            install(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/include"
                    DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}")
            """)
        client = TestClient()
        client.save({"conanfile.py": conanfile, "CMakeLists.txt": cmakelists, "include/file.h": ""})
        client.run("create . danimtb/testing")
        if platform.system() == "Windows":
            self.assertIn("WARN: CMake generator could not be deduced from settings", client.out)
            self.assertIn('Configure command: -DCONAN_EXPORTED="1" -DCONAN_IN_LOCAL_CACHE="ON" '
                          '-DCMAKE_INSTALL_PREFIX=', client.out)
        else:
            self.assertIn('Configure command: -G "Unix Makefiles" -DCONAN_EXPORTED="1" '
                          '-DCONAN_IN_LOCAL_CACHE="ON" -DCMAKE_INSTALL_PREFIX=', client.out)
| {
"content_hash": "94cb10ec752b6d409a860ac6b6991ca1",
"timestamp": "",
"source": "github",
"line_count": 482,
"max_line_length": 101,
"avg_line_length": 40.892116182572614,
"alnum_prop": 0.5909690512430238,
"repo_name": "memsharded/conan",
"id": "eb2b43a0858554e580865612b90a87e1c354f4f3",
"size": "19710",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "conans/test/functional/build_helpers/cmake_flags_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1100"
},
{
"name": "C",
"bytes": "264"
},
{
"name": "C++",
"bytes": "425"
},
{
"name": "CMake",
"bytes": "447"
},
{
"name": "Groovy",
"bytes": "12586"
},
{
"name": "Python",
"bytes": "4334185"
},
{
"name": "Shell",
"bytes": "1864"
}
],
"symlink_target": ""
} |
"""
CLASS: ContactHandler.py
FOR: This Class is responsible for handling Contact Us Page.
CREATED: 21 December 2013
MODIFIED: 21 December 2013
LOGS:
"""
# Import Statements
from BaseHandler import *
import Cookies
import EmailHandler
# Terms Class
class ContactHandler(BaseHandler):
    """Renders the Contact Us page and dispatches submitted messages."""

    def get(self):
        # Logged-in visitors see the personalized form, everyone else the
        # anonymous one.
        if Cookies.validUserCookie(self.request.cookies.get('User')):
            self.render("contact.html")
        else:
            self.render('contactAnon.html')

    def post(self):
        form_kind = self.request.get('contactForm')
        if form_kind == "Anonymous":
            EmailHandler.contactUsAnon(self.request.get('email'),
                                       self.request.get('subject'),
                                       self.request.get('message'),
                                       self.request.remote_addr)
            self.render('contactAnonSuccess.html')
        elif form_kind == "LoggedIn":
            subject = self.request.get('subject')
            body = self.request.get('message')
            sender_ip = self.request.remote_addr
            user = Cookies.userFromCookie(self.request.cookies.get('User'))
            if not user:
                # Cookie claimed a logged-in session but did not resolve to a
                # user: log it and bounce back to the contact page.
                logging.critical("ERROR: Someone's Trying Something Funny w/ Contact Form")
                self.redirect('/contact')
            else:
                EmailHandler.contactUs(user.personName, user.email, user.key.id(),
                                       subject, body, sender_ip)
                self.render('contactSuccess.html')
| {
"content_hash": "a14ce9409ec31d23570f7230572d7d9a",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 111,
"avg_line_length": 32.04,
"alnum_prop": 0.5973782771535581,
"repo_name": "mathee92/unirentalz",
"id": "7421be61d07b676aa59e46fe2a5958a09c8d94be",
"size": "1602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "playground/tv/ContactHandler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "301153"
},
{
"name": "JavaScript",
"bytes": "332881"
},
{
"name": "Python",
"bytes": "338982"
}
],
"symlink_target": ""
} |
"""
The `~certbot_dns_route53.dns_route53` plugin automates the process of
completing a ``dns-01`` challenge (`~acme.challenges.DNS01`) by creating, and
subsequently removing, TXT records using the Amazon Web Services Route 53 API.
.. note::
The plugin is not installed by default. It can be installed by heading to
`certbot.eff.org <https://certbot.eff.org/instructions#wildcard>`_, choosing your system and
selecting the Wildcard tab.
Named Arguments
---------------
======================================== =====================================
``--dns-route53-propagation-seconds`` The number of seconds to wait for DNS
to propagate before asking the ACME
server to verify the DNS record.
(Default: 10)
======================================== =====================================
Credentials
-----------
Use of this plugin requires a configuration file containing Amazon Web Services
API credentials for an account with the following permissions:
* ``route53:ListHostedZones``
* ``route53:GetChange``
* ``route53:ChangeResourceRecordSets``
These permissions can be captured in an AWS policy like the one below. Amazon
provides `information about managing access <https://docs.aws.amazon.com/Route53
/latest/DeveloperGuide/access-control-overview.html>`_ and `information about
the required permissions <https://docs.aws.amazon.com/Route53/latest
/DeveloperGuide/r53-api-permissions-ref.html>`_
.. code-block:: json
:name: sample-aws-policy.json
:caption: Example AWS policy file:
{
"Version": "2012-10-17",
"Id": "certbot-dns-route53 sample policy",
"Statement": [
{
"Effect": "Allow",
"Action": [
"route53:ListHostedZones",
"route53:GetChange"
],
"Resource": [
"*"
]
},
{
"Effect" : "Allow",
"Action" : [
"route53:ChangeResourceRecordSets"
],
"Resource" : [
"arn:aws:route53:::hostedzone/YOURHOSTEDZONEID"
]
}
]
}
The `access keys <https://docs.aws.amazon.com/general/latest/gr
/aws-sec-cred-types.html#access-keys-and-secret-access-keys>`_ for an account
with these permissions must be supplied in one of the following ways, which are
discussed in more detail in the Boto3 library's documentation about `configuring
credentials <https://boto3.readthedocs.io/en/latest/guide/configuration.html
#best-practices-for-configuring-credentials>`_.
* Using the ``AWS_ACCESS_KEY_ID`` and ``AWS_SECRET_ACCESS_KEY`` environment
variables.
* Using a credentials configuration file at the default location,
``~/.aws/config``.
* Using a credentials configuration file at a path supplied using the
``AWS_CONFIG_FILE`` environment variable.
.. code-block:: ini
:name: config.ini
:caption: Example credentials config file:
[default]
aws_access_key_id=AKIAIOSFODNN7EXAMPLE
aws_secret_access_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
.. caution::
You should protect these API credentials as you would a password. Users who
can read this file can use these credentials to issue some types of API calls
on your behalf, limited by the permissions assigned to the account. Users who
can cause Certbot to run using these credentials can complete a ``dns-01``
challenge to acquire new certificates or revoke existing certificates for
domains these credentials are authorized to manage.
Examples
--------
.. code-block:: bash
:caption: To acquire a certificate for ``example.com``
certbot certonly \\
--dns-route53 \\
-d example.com
.. code-block:: bash
:caption: To acquire a single certificate for both ``example.com`` and
``www.example.com``
certbot certonly \\
--dns-route53 \\
-d example.com \\
-d www.example.com
.. code-block:: bash
:caption: To acquire a certificate for ``example.com``, waiting 30 seconds
for DNS propagation
certbot certonly \\
--dns-route53 \\
--dns-route53-propagation-seconds 30 \\
-d example.com
"""
| {
"content_hash": "aa8da92b7791ba9dfddf8efcd40b361e",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 95,
"avg_line_length": 34.91129032258065,
"alnum_prop": 0.6225456225456225,
"repo_name": "letsencrypt/letsencrypt",
"id": "1b59f5620a2175fdd6f71e1806edefc93da72a73",
"size": "4329",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "certbot-dns-route53/certbot_dns_route53/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "50702"
},
{
"name": "Augeas",
"bytes": "5062"
},
{
"name": "Batchfile",
"bytes": "35037"
},
{
"name": "DIGITAL Command Language",
"bytes": "133"
},
{
"name": "Groff",
"bytes": "222"
},
{
"name": "Makefile",
"bytes": "37309"
},
{
"name": "Nginx",
"bytes": "4274"
},
{
"name": "Python",
"bytes": "1336185"
},
{
"name": "Shell",
"bytes": "147823"
}
],
"symlink_target": ""
} |
"""
Created on Fri Aug 4 11:52:11 2017
@author: lracuna
"""
from vision.camera import *
from vision.plane import Plane
import vision.error_functions as ef
import gdescent.hpoints_gradient5 as gd5
from ippe import homo2d
## CREATE A SIMULATED CAMERA
cam = Camera()
cam.set_K(fx = 800,fy = 800,cx = 640,cy = 480)
cam.set_width_heigth(1280,960)
## DEFINE CAMERA POSE LOOKING STRAIGTH DOWN INTO THE PLANE MODEL
#cam.set_R_axisAngle(1.0, 0.0, 0.0, np.deg2rad(180.0))
#cam.set_t(0.0,-0.0,0.5, frame='world')
cam.set_R_axisAngle(1.0, 1.0, 0.0, np.deg2rad(130.0))
cam.set_t(0.0,-0.4,2.0, frame='world')
## Define a Display plane
pl = Plane(origin=np.array([0, 0, 0]), normal = np.array([0, 0, 1]), size=(0.3,0.3), n = (2,2))
pl.random(n =5, r = 0.001, min_sep = 0.001)
## CREATE A SET OF IMAGE POINTS FOR VALIDATION OF THE HOMOGRAPHY ESTIMATION
validation_plane = Plane(origin=np.array([0, 0, 0]), normal = np.array([0, 0, 1]), size=(0.5,0.5), n = (4,4))
validation_plane.uniform()
## we create the gradient for the point distribution
# NOTE(review): 'normalize' and 'n' below are never read in the visible part
# of this script — presumably leftovers from the commented-out metrics.
normalize= False
n = 0.000001 #condition number norm
gradient = gd5.create_gradient(metric='condition_number')
#gradient = gd5.create_gradient(metric='volker_metric')
#gradient = gd5.create_gradient(metric='pnorm_condition_number')
objectPoints_des = pl.get_points()
# NOTE(review): 'alpha' is also unused in this chunk — TODO confirm.
alpha=0.2
imagePoints_des = np.array(cam.project(objectPoints_des, False))
# Accumulators for the optimization loop below.
objectPoints_list = list()
imagePoints_list = list()
transfer_error_list = list()
condition_number_list = list()
normalized_condition_number_list = list()
new_objectPoints = objectPoints_des
# Gradient-descent loop: nudges the 5 object points to optimize the chosen
# metric, then estimates the homography error under image noise each step.
for i in range(1000):
    objectPoints = np.copy(new_objectPoints)
    gradient = gd5.evaluate_gradient(gradient,objectPoints, np.array(cam.P))
    #gradient = gd5.normalize_gradient(gradient)
    new_objectPoints = gd5.update_points(gradient, objectPoints)#, limit = 3)
    new_imagePoints = np.array(cam.project(new_objectPoints, False))
    objectPoints_list.append(new_objectPoints)
    imagePoints_list.append(new_imagePoints)
    #plt.cla()
    plt.figure('Image Points')
    plt.ion()
    if i==0:
        plt.cla()
        cam.plot_plane(pl)
        plt.plot(imagePoints_des[0],imagePoints_des[1],'x',color = 'black',)
        plt.xlim(0,1280)
        plt.ylim(0,960)
        plt.gca().invert_yaxis()
        plt.axes().set_aspect('equal', 'datalim')
    plt.cla()
    cam.plot_plane(pl)
    plt.plot(new_imagePoints[0],new_imagePoints[1],'.',color = 'blue',)
    plt.pause(0.01)
    plt.figure('Object Points')
    plt.ion()
    if i==0:
        plt.cla()
        plt.plot(objectPoints_des[0],objectPoints_des[1],'x',color = 'black',)
        plt.axes().set_aspect('equal', 'datalim')
    plt.plot(new_objectPoints[0],new_objectPoints[1],'.',color = 'blue',)
    plt.pause(0.01)
    Xo = np.copy(new_objectPoints[[0,1,3],:]) #without the z coordinate (plane)
    Xi = np.copy(new_imagePoints)
    Hnoisy,A_t_ref,H_t = homo2d.homography2d(Xo,Xi)
    Aideal_norm = ef.calculate_A_matrix(Xo,Xi)
    x1,y1,x2,y2,x3,y3,x4,y4,x5,y5 = gd5.extract_objectpoints_vars(new_objectPoints)
    mat_cond_autrograd = gd5.matrix_condition_number_autograd(x1,y1,x2,y2,x3,y3,x4,y4,x5,y5,np.array(cam.P))
    #volkerMetric = ef.volker_metric(Aideal)
    #mat_cond = ef.get_matrix_pnorm_condition_number(Aideal)
    #mat_cond = get_matrix_conditioning_number(Aideal)
    #condition_number_list.append()
    ##HOMOGRAPHY ERRORS
    ## TRUE VALUE OF HOMOGRAPHY OBTAINED FROM CAMERA PARAMETERS
    Hcam = cam.homography_from_Rt()
    homography_iters = 100
    ##We add noise to the image points and calculate the noisy homography
    # Monte-Carlo estimate of the transfer error averaged over noisy trials.
    transfer_error_sum = 0
    for j in range(homography_iters):
        new_imagePoints_noisy = cam.addnoise_imagePoints(new_imagePoints, mean = 0, sd = 2)
        #Noisy homography calculation
        Xo = new_objectPoints[[0,1,3],:]
        Xi = new_imagePoints_noisy
        Hnoisy,A_t_ref,H_t = homo2d.homography2d(Xo,Xi)
        Hnoisy = Hnoisy/Hnoisy[2,2]
        ## ERRORS FOR THE NOISY HOMOGRAPHY
        ## VALIDATION OBJECT POINTS
        validation_objectPoints =validation_plane.get_points()
        validation_imagePoints = np.array(cam.project(validation_objectPoints, False))
        Xo = np.copy(validation_objectPoints)
        Xo = np.delete(Xo, 2, axis=0)
        Xi = np.copy(validation_imagePoints)
        transfer_error_sum += ef.validation_points_error(Xi, Xo, Hnoisy)
    transfer_error_list.append(transfer_error_sum/homography_iters)
    plt.figure("Average Transfer error")
    plt.cla()
    plt.ion()
    plt.plot(transfer_error_list)
    plt.pause(0.01)
    print "Iteration: ", i
    print "Mat cond Autograd: ", mat_cond_autrograd
    #print "Mat cond:", mat_cond
    #print "Volker Metric:", volkerMetric
    print "dx1,dy1 :", gradient.dx1_eval,gradient.dy1_eval
    print "dx2,dy2 :", gradient.dx2_eval,gradient.dy2_eval
    print "dx3,dy3 :", gradient.dx3_eval,gradient.dy3_eval
    print "dx4,dy4 :", gradient.dx4_eval,gradient.dy4_eval
    print "dx5,dy5 :", gradient.dx5_eval,gradient.dy5_eval
    print "------------------------------------------------------"
# Overlay the final image-point configuration in red.
plt.figure('Image Points')
plt.plot(new_imagePoints[0],new_imagePoints[1],'.',color = 'red',)
| {
"content_hash": "4c73d654e5efaa9308132e8176826a0c",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 110,
"avg_line_length": 29.403508771929825,
"alnum_prop": 0.6883452665075577,
"repo_name": "raultron/ivs_sim",
"id": "59c82ffc235d65384b2448cbd1328439d2362060",
"size": "5075",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/5points_gradientdescent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "281576"
}
],
"symlink_target": ""
} |
print "server initializing..."
import SimpleHTTPServer
import SocketServer
print "packages imported..."
PORT = 8000
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = SocketServer.TCPServer(("", PORT), Handler)
print "serving at port", PORT
httpd.serve_forever() | {
"content_hash": "84144e8f6ccda7abe877b9e414a9b5d8",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 51,
"avg_line_length": 19.714285714285715,
"alnum_prop": 0.7789855072463768,
"repo_name": "tomshen/draft-table",
"id": "4a8e1e23248447ae2e6147ae23cfdc5a69718037",
"size": "276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34751"
},
{
"name": "CoffeeScript",
"bytes": "14496"
},
{
"name": "JavaScript",
"bytes": "944870"
},
{
"name": "Python",
"bytes": "276"
},
{
"name": "Shell",
"bytes": "117"
}
],
"symlink_target": ""
} |
"""
Basic SparseCFProjection with associated sparse CFs and output,
response, and learning function. If sparse component cannot be imported,
SparseCFProjection will fall back to a basic dense CFProjection.
CFSOF and CFSLF Plugin function allow any single CF output function to
be applied to the sparse CFs, but may suffer a serious performance
loss. For real work, such functions should be implemented at the
Cython or C++ level.
"""
import numpy as np
import math
from scipy.ndimage.filters import gaussian_filter
import param
from copy import copy
import topo
from topo.base.cf import CFProjection, NullCFError, _create_mask, simple_vectorize
from topo import pattern
from imagen import patterngenerator
from imagen.patterngenerator import PatternGenerator
from topo.base.functionfamily import TransferFn, IdentityTF
from topo.base.functionfamily import LearningFn, Hebbian
from topo.base.functionfamily import ResponseFn, DotProduct
from topo.base.sheetcoords import Slice
use_sparse = True
try:
    import sparse
except ImportError:
    # The compiled sparse component is optional; without it this module
    # transparently substitutes the dense CFProjection.  Catch only
    # ImportError so genuine errors inside the extension still surface.
    use_sparse = False

# dtype used for all sparse weight storage.
sparse_type = np.float32
class CFSPLF_Plugin(param.Parameterized):
    """CFSPLearningFunction applying the specified single_cf_fn to each Sparse CF."""

    single_cf_fn = param.ClassSelector(LearningFn,default=Hebbian(),doc="""
        Accepts a LearningFn that will be applied to each CF individually.""")

    def constant_sum_connection_rate(self,n_units,learning_rate):
        """
        Return the learning rate for a single connection, dividing the
        total rate evenly among all units in the connection field.
        """
        return float(learning_rate)/n_units

    def __call__(self, projection, **params):
        """Apply the specified single_cf_fn to every sparse CF."""
        per_conn_rate = self.constant_sum_connection_rate(projection.n_units,
                                                          projection.learning_rate)
        # Hoist lookups that are invariant across CFs out of the loop.
        cf_fn = self.single_cf_fn
        src_activity = projection.src.activity
        dest_activity = projection.dest.activity
        for cf in projection.flatcfs:
            dense = cf.weights
            cf_fn(cf.get_input_matrix(src_activity),
                  dest_activity.flat[cf.oned_idx], dense, per_conn_rate)
            # Keep pruned (masked-out) connections at zero before writing back.
            dense *= cf.mask
            cf.weights = dense
class CFSPOF_Plugin(param.Parameterized):
    """
    Applies the specified single_cf_fn to each SparseCF in the SparseCFProjection.
    """

    single_cf_fn = param.ClassSelector(TransferFn,default=IdentityTF(),
        doc="Accepts a TransferFn that will be applied to each CF individually.")

    def __call__(self, projection, **params):
        """Apply single_cf_fn in place to each CF's dense weights, skipping
        the no-op IdentityTF case entirely."""
        if type(self.single_cf_fn) is not IdentityTF:
            single_cf_fn = self.single_cf_fn
            for cf in projection.flatcfs:
                # cf.weights is a property returning a *fresh* dense copy on
                # every access.  The transfer function must therefore be
                # applied to the same copy that is written back; the
                # original code called single_cf_fn(cf.weights), mutating a
                # second, discarded copy, so the function had no effect.
                temp_weights = cf.weights
                single_cf_fn(temp_weights)
                cf.weights = temp_weights
                # Totals are stale after modifying the weights.
                del cf.norm_total
class CFSPOF_Prune(CFSPOF_Plugin):
    """
    Prunes specified percentage of connections from CFs in SparseCFProjection
    at specified interval.
    """

    interval = param.Number(default=1000,bounds=(0,None),doc="""
        Time interval at which pruning step will be applied.""")

    percentile = param.Number(default=10.0,bounds=(0,100),doc="""
        Percentile boundary below which connections will be pruned.""")

    def __call__(self, projection, **params):
        time = math.ceil(topo.sim.time())
        if (time == 0):
            # Record the starting connection count once, for reporting.
            if not hasattr(self,"initial_conns"):
                self.initial_conns = {}
            self.initial_conns[projection.name] = projection.n_conns()
        elif (time % self.interval) == 0:
            for cf in projection.flatcfs:
                temp_weights = cf.weights
                # Threshold at the requested percentile of the *nonzero*
                # weights, then zero every weight at or below it.
                # (Renamed from `percentile`, which shadowed the parameter.)
                threshold = np.percentile(temp_weights[temp_weights.nonzero()],self.percentile)
                temp_weights[np.where(temp_weights<=threshold)] = 0.0
                cf.weights = temp_weights
            # Drop the zeroed entries from the sparse storage.
            projection.weights.prune()
            self.message("%s has %f%% of initial connections" % (projection.name, (float(projection.n_conns())/self.initial_conns[projection.name])*100))
class CFSPOF_SproutRetract(CFSPOF_Plugin):
    """
    Sprouting and retraction weights output function. At a preset time
    interval, the function removes and adds connections based on a
    piecewise function, which determines the number of connections to
    alter and the sprouting and retraction ratios, eventually allowing
    connections to converge on the target_sparsity. The function
    ensures the full turnover_rate is applied at the maximal distances
    from the target sparsity, i.e. at 0% and 100% density. As the
    projection approaches the target sparsity, it will asymptote, but a
    residual turnover will ensure that a fixed amount of connections
    will continue to sprout and retract.

    Retraction deletes the x lowest weights, while sprouting applies a
    convolution with a Gaussian kernel to the existing connections,
    growing connections at locations with the highest probabilities.

    Still experimental and not scientifically validated.
    """

    interval = param.Number(default=1000,bounds=(0,None),doc="""
        Time interval between sprout/retract steps.""")

    residual_turnover = param.Number(default=0.01,bounds=(0,1.0),doc="""
        Constant turnover rate independent of current sparsity.""")

    turnover_rate = param.Number(default=0.1,bounds=(0,1.0),doc="""
        Percentage of weights to change per interval, assuming
        currently fully dense and target is fully sparse.""")

    target_sparsity = param.Number(default=0.15,bounds=(0,1.0),doc="""
        Sparsity level at which sprouting and retraction cancel out.""")

    kernel_sigma = param.Number(default=1.0,bounds=(0.0,10.0),doc="""
        Gaussian spatial variance for weights to diffuse per interval.""")

    disk_mask = param.Boolean(default=True,doc="""
        Limits connection sprouting to a disk.""")

    def __call__(self, projection, **params):
        # Runs the sprout/retract step every `interval` simulation steps;
        # the whole sparse weight matrix is rebuilt from per-CF dense
        # copies via (row, col, value) triplets.
        time = math.ceil(topo.sim.time())
        if self.disk_mask:
            self.disk = pattern.Disk(size=1.0,smoothing=0.0)
        # Get CF and src sheet shapes
        cf_x,cf_y = projection.dest.activity.shape
        src_x,src_y = projection.src.activity.shape
        # Initialize sparse triplet arrays
        # (sized for one column of CFs worth of full source coverage).
        y_array = np.zeros((src_x*src_y*cf_y),dtype=np.int32)
        x_array = np.zeros((src_x*src_y*cf_y),dtype=np.int32)
        val_array = np.zeros((src_x*src_y*cf_y),dtype=sparse_type)
        # Create new sparse matrix to accumulate into
        sum_sparse = sparse.csarray_float(projection.src.activity.shape,projection.dest.activity.shape)
        # Counters for logging
        sprout_sum = 0; prune_sum = 0; unit_total = 0
        self.mask_total = 0
        if (time == 0):
            # Record initial connection counts once, for the log message.
            if not hasattr(self,"initial_conns"):
                self.initial_conns = {}
            self.initial_conns[projection.name] = projection.n_conns()
        elif (time % self.interval) == 0:
            idx=0
            for cidx,cf in enumerate(projection.flatcfs):
                # Dense copy of this CF; mask of currently-empty slots.
                temp_weights = cf.weights
                dense_unit_mask = (1.0 - (temp_weights>0.0))
                dim1,dim2 = temp_weights.shape
                # Decide how many connections to add/remove for this CF.
                sprout_count,prune_idx,nnz = self.calc_ratios(temp_weights)
                self.prune(temp_weights,prune_idx)
                nnz_pp = np.count_nonzero(temp_weights)
                prune_sum += (nnz_pp-nnz)
                self.sprout(temp_weights,dense_unit_mask,sprout_count)
                nnz_ps = np.count_nonzero(temp_weights)
                sprout_sum += nnz_ps - nnz_pp
                unit_total += nnz_ps
                # Populate sparse array chunk
                temp_sparse = sparse.csarray_float(projection.src.activity.shape,projection.dest.activity.shape)
                x1,x2,y1,y2 = cf.input_sheet_slice.tolist()
                for cnx in range(dim1):
                    val_array[idx:idx+dim2] = temp_weights[cnx,:]
                    x_val = (x1+cnx) * src_y + y1
                    x_array[idx:idx+dim2] = range(x_val,x_val+dim2)
                    y_array[idx:idx+dim2] = cidx
                    idx += dim2
                # Populate combined sparse array with sparse array chunk
                # (flushed once per column of cf_y CFs to bound memory).
                if (cidx+1)%cf_y == 0:
                    nnz_idx = val_array.nonzero()
                    temp_sparse.setTriplets(x_array[nnz_idx],y_array[nnz_idx],val_array[nnz_idx])
                    sum_sparse += temp_sparse
                    x_array *= 0; y_array *= 0; val_array *= 0.0
                    idx=0
            projection.weights = sum_sparse
            del temp_sparse, sum_sparse
            projection.weights.compress()
            self.message("%s pruned by %d and sprouted %d, connection is now %f%% dense" % (projection.name,prune_sum,sprout_sum,(float(unit_total)/self.mask_total)*100))

    def sprout(self, temp_weights, mask, sprout_count):
        """
        Applies a Gaussian blur to the existing connection field,
        selecting the n units with the highest probabilities to sprout
        new connections, where n is set by the sprout_count. New
        connections are initialized at the minimal strength of the
        current CF.
        """
        dim1,dim2 = temp_weights.shape
        init_weight = temp_weights[temp_weights.nonzero()].min()
        blurred_weights = gaussian_filter(temp_weights, sigma=self.kernel_sigma)
        # Rescale the blurred field and randomize, restricted to empty slots.
        blurred_weights = (blurred_weights - blurred_weights.min()) / blurred_weights.max()
        sprout_prob_map = (blurred_weights * np.random.rand(dim1,dim2)) * mask
        if self.disk_mask:
            sprout_prob_map *= self.disk(xdensity=dim2,ydensity=dim1)
        # Take the sprout_count highest-probability empty slots.
        sprout_inds = np.unravel_index(np.argsort(sprout_prob_map.flatten())[-sprout_count:],(dim1,dim2))
        temp_weights[sprout_inds] = init_weight

    def prune(self, temp_weights, prune_idx):
        """
        Retracts n connections with the lowest weights, where n is
        determined by the piecewise linear function in the calc_ratios
        method.
        """
        # prune_idx indexes into the full sorted (zeros included) weight
        # list, so zeros sort first and only real weights below the
        # threshold are removed.
        sorted_weights = np.sort(temp_weights.flatten())
        threshold = sorted_weights[prune_idx]
        temp_weights[temp_weights < threshold] = 0.0

    def calc_ratios(self,temp_weights):
        """
        Uses a piecewise linear function to determine the unit
        proportion of sprouting and retraction and the associated
        turnover rates.

        Above the target sparsity the sprout/retract ratio scales
        linearly up to maximal density, i.e. at full density 100% of
        the turnover is put into retraction while at full sparsity
        all the turnover is put into sprouting new connections. At
        the target density sprouting and retraction are equal.

        The turnover is determined also determined by the piecewise
        linear function. At maximal distance from the target sparsity,
        i.e. at full sparsity or density, the full turnover rate will
        be used and as the target sparsity is approached from either
        side this term decays to zero. Therefore, a residual turnover
        is introduced to ensure that even at the target sparsity some
        connections continue to sprout and retract.
        """
        dim1,dim2 = temp_weights.shape
        if self.disk_mask:
            masked_units = len(self.disk(xdensity=dim2,ydensity=dim1).nonzero()[0])
        else:
            masked_units = dim1*dim2
        self.mask_total += masked_units
        max_units = dim1*dim2
        nnz = np.count_nonzero(temp_weights)
        cf_sparsity = nnz / float(masked_units)
        # Signed distance from the target, normalized to [-1, 1].
        delta_sparsity = cf_sparsity - self.target_sparsity
        if delta_sparsity > 0:
            relative_sparsity = delta_sparsity/(1.0 - self.target_sparsity)
        else:
            relative_sparsity = delta_sparsity/self.target_sparsity
        # Total number of units to modify, broken down into units for pruning and sprouting
        delta_units = (abs(self.turnover_rate * relative_sparsity) + self.residual_turnover) * masked_units
        prune_factor = 0.5 + (0.5*relative_sparsity)
        prune_count = int(delta_units * prune_factor)
        prune_idx = (max_units-nnz)+prune_count
        sprout_count = int(delta_units * (1-prune_factor))
        return sprout_count, prune_idx, nnz
class CFSPRF_Plugin(param.Parameterized):
    """
    Generic large-scale response function based on a simple single-CF function.

    Applies the single_cf_fn to each CF in turn. For the default single_cf_fn
    of DotProduct(), does a basic dot product of each CF with the corresponding
    slice of the input array. This function is likely to be slow to run, but
    it is easy to extend with any arbitrary single-CF response function.

    The single_cf_fn must be a function f(X,W) that takes two identically
    shaped matrices X (the input) and W (the CF weights) and computes a scalar
    activation value based on those weights.
    """

    single_cf_fn = param.ClassSelector(ResponseFn,default=DotProduct(),doc="""
        Accepts a ResponseFn that will be applied to each CF individually.""")

    def __call__(self, projection, **params):
        # Hoist loop-invariant lookups.
        cf_fn = self.single_cf_fn
        src_activity = projection.src.activity
        flat_activity = projection.activity.flat
        for unit_idx, cf in enumerate(projection.flatcfs):
            input_patch = cf.input_sheet_slice.submatrix(src_activity)
            flat_activity[unit_idx] = cf_fn(input_patch, cf.weights)
        # Scale the whole response by the projection strength.
        projection.activity *= projection.strength
def compute_sparse_joint_norm_totals(projlist,active_units_mask=True):
    """
    Compute norm_total for each CF in each projection from a group to be
    normalized jointly.

    Each projection without a valid cached total has its norm_total
    recomputed from its sparse weights; the per-projection totals are
    then summed and the joint sum is copied onto every projection.

    Note: active_units_mask is accepted for interface compatibility with
    other norm-total functions but is currently unused here.
    """
    # Assumes that all Projections in the list have the same r,c size
    assert len(projlist)>=1
    for p in projlist:
        if not p.has_norm_total:
            p.norm_total *= 0.0
            p.weights.CFWeightTotals(p.norm_total)
            p.has_norm_total=True
    # Sum the per-projection totals.  (The original pre-initialized
    # joint_sum with np.zeros and immediately overwrote it -- dead code,
    # removed.)
    joint_sum = np.add.reduce([proj.norm_total for proj in projlist],dtype=np.float64)
    for p in projlist:
        p.norm_total = joint_sum.copy()
def CFPOF_DivisiveNormalizeL1_Sparse(projection):
    """
    Sparse CF Projection output function applying L1 divisive normalization
    to individual CFs.
    """
    totals = projection.norm_total
    # Recompute per-CF weight totals unless a valid cached total exists.
    if not projection.has_norm_total:
        totals *= 0.0
        projection.weights.CFWeightTotals(totals)
    projection.weights.DivisiveNormalizeL1(totals)
    # Normalizing invalidates the cached totals.
    projection.has_norm_total = False
def CFPLF_Hebbian_Sparse(projection):
    """
    Sparse CF Projection learning function applying Hebbian learning
    to the weights in a projection.
    """
    # Share the projection-wide learning rate evenly among all units.
    per_conn_rate = projection.learning_rate / projection.n_units
    projection.norm_total *= 0.0
    projection.weights.Hebbian(projection.src.activity,
                               projection.dest.activity,
                               projection.norm_total,
                               per_conn_rate)
    # norm_total was passed to the kernel; mark it as valid.
    projection.has_norm_total = True
def CFPLF_Hebbian_Sparse_opt(projection):
    """
    Sparse CF Projection learning function, which calls an optimized Hebbian
    learning function while skipping over inactive units.
    """
    # Share the projection-wide learning rate evenly among all units.
    per_conn_rate = projection.learning_rate / projection.n_units
    projection.norm_total *= 0.0
    projection.weights.Hebbian_opt(projection.src.activity,
                                   projection.dest.activity,
                                   projection.norm_total,
                                   per_conn_rate,
                                   projection.initialized)
    # norm_total was passed to the kernel; mark it as valid.
    projection.has_norm_total = True
def CFPRF_DotProduct_Sparse(projection):
    """
    Sparse CF Projection response function calculating the dot-product
    between incoming activities and CF weights.
    """
    # Delegate to the sparse-matrix kernel, handing it the strength,
    # the buffered input activity and the output activity array.
    projection.weights.DotProduct(projection.strength,
                                  projection.input_buffer,
                                  projection.activity)
def CFPRF_DotProduct_Sparse_opt(projection):
    """
    Sparse CF Projection response function calculating the dot-product
    between incoming activities and CF weights. Optimization skips
    inactive units if a certain percentage of neurons is inactive.
    """
    activity = projection.src.activity
    # Force true division: under Python 2 the original int/int division
    # always yielded 0, so the optimized path was taken unconditionally.
    # `.size` also avoids the needless flatten() copy.
    nnz_ratio = np.count_nonzero(activity) / float(activity.size)
    # NOTE(review): unlike CFPRF_DotProduct_Sparse this reads src.activity
    # rather than input_buffer, bypassing any input_fns applied in
    # activate() -- confirm this is intended.
    if nnz_ratio < 0.1:
        projection.weights.DotProduct_opt(projection.strength, activity, projection.activity)
    else:
        projection.weights.DotProduct(projection.strength, activity, projection.activity)
class SparseConnectionField(param.Parameterized):
    """
    A set of weights on one input Sheet.

    Each ConnectionField contributes to the activity of one unit on
    the output sheet, and is normally used as part of a Projection
    including many other ConnectionFields.
    """

    # ALERT: need bounds, more docs
    x = param.Number(default=0.0,doc="Sheet X coordinate of CF")

    y = param.Number(default=0.0,doc="Sheet Y coordinate of CF")

    weights_generator = param.ClassSelector(PatternGenerator,
        default=patterngenerator.Constant(),constant=True,doc="""
        Generates initial weights values.""")

    min_matrix_radius=param.Integer(default=1)

    output_fns = param.HookList(default=[],class_=TransferFn,precedence=0.08,doc="""
        Optional function(s) to apply to the pattern array after it has been created.
        Can be used for normalization, thresholding, etc.""")

    # Class attribute to switch to legacy weight generation if False
    independent_weight_generation = True

    def get_bounds(self,input_sheet=None):
        # Return the sheet-coordinate bounds of this CF, computed on the
        # given input sheet (or the CF's own input sheet by default).
        # (style) `input_sheet is not None` would be the idiomatic test.
        if not input_sheet == None:
            return self.input_sheet_slice.compute_bounds(input_sheet)
        else:
            return self.input_sheet_slice.compute_bounds(self.input_sheet)

    def __get_shape_mask(self):
        # Render the projection's cf_shape pattern, centered on the sheet
        # origin, at the source sheet's density.
        # NOTE(review): ydensity is read from src.xdensity -- likely a
        # typo for src.ydensity; harmless only for square densities.
        cf_shape = self.projection.cf_shape
        bounds = self.projection.bounds_template
        xdensity = self.projection.src.xdensity
        ydensity = self.projection.src.xdensity
        center_r,center_c = self.projection.src.sheet2matrixidx(0,0)
        center_x,center_y = self.projection.src.matrixidx2sheet(center_r,center_c)
        cf_mask = cf_shape(x=center_x,y=center_y,bounds=bounds,xdensity=xdensity,ydensity=ydensity)
        return cf_mask

    shape_mask = property(__get_shape_mask)

    def __get_norm_total(self):
        # Per-unit total is stored on the projection's norm_total array.
        return self.projection.norm_total[self.matrix_idx[0],self.matrix_idx[1]]

    def __set_norm_total(self,new_norm_total):
        self.projection.norm_total[self.matrix_idx[0],self.matrix_idx[1]] = new_norm_total

    def __del_norm_total(self):
        # Deleting resets this unit's total to zero rather than removing it.
        self.projection.norm_total[self.matrix_idx[0],self.matrix_idx[1]] = 0.0

    norm_total = property(__get_norm_total,__set_norm_total,__del_norm_total)

    def __get_mask(self):
        # Boolean array marking which entries of this CF are nonzero in
        # the projection's sparse weight matrix.
        # NOTE: np.bool was removed in numpy>=1.24; plain `bool` is the
        # modern spelling.
        x1,x2,y1,y2 = self.input_sheet_slice.tolist()
        mask = np.zeros((x2-x1,y2-y1),dtype=np.bool)
        inds = np.ravel_multi_index(np.mgrid[x1:x2,y1:y2],self.projection.src.shape).flatten()
        nz_flat = self.projection.weights[inds,self.oned_idx].toarray()
        nz_inds = nz_flat.reshape(x2-x1,y2-y1).nonzero()
        mask[nz_inds] = True
        return mask

    mask = property(__get_mask,
        """
        The mask property returns an array of bools representing the
        zero weights in the CF weights array.

        It is useful when applying additive functions on the weights
        array, to ensure zero values are not accidentally overwritten.

        The mask cannot be changed via the property, only by changing
        the weights directly.
        """)

    def __get_weights(self):
        """
        get_weights accesses the sparse CF matrix and returns the CF
        in dense form.
        """
        # Note: returns a *fresh* dense array on every access; in-place
        # edits are lost unless assigned back via the property setter.
        x1,x2,y1,y2 = self.src_slice
        inds = np.ravel_multi_index(np.mgrid[x1:x2,y1:y2],self.projection.src.shape).flatten()
        return self.projection.weights[inds,self.oned_idx].toarray().reshape(x2-x1,y2-y1)

    def __set_weights(self,arr):
        """
        Takes an input array, which has to match the CF shape, and
        creates an mgrid of the appropriate size, adds the proper
        offsets and passes the values and indices to the sparse matrix
        representation.
        """
        x1,x2,y1,y2 = self.src_slice
        (dim1,dim2) = arr.shape
        assert (dim1,dim2) == (x2-x1,y2-y1), "Array does not match CF shape."
        (x,y) = np.mgrid[0:dim1,0:dim2] # Create mgrid of CF size
        x_ind = np.array(x)+x1; y_ind = np.array(y) + y1; # Add slice offsets
        row_inds = np.ravel_multi_index((x_ind,y_ind),self.projection.src.shape).flatten().astype(np.int32)
        col_inds = np.array([self.oned_idx]*len(row_inds),dtype=np.int32)
        self.projection.weights.put(arr[x,y].flatten(),row_inds,col_inds)

    weights = property(__get_weights,__set_weights)

    def __init__(self,template,input_sheet,projection,label=None,**params):
        """
        Initializes the CF object and stores meta information about the CF's
        shape and position in the SparseCFProjection to allow for easier
        initialization.
        """
        super(SparseConnectionField,self).__init__(**params)

        self.input_sheet = input_sheet
        self.projection = projection
        self.label = label

        # Position of this CF's unit in the destination sheet, both as a
        # (row, col) pair and as a flat index into the sparse columns.
        self.matrix_idx = self.projection.dest.sheet2matrixidx(self.x,self.y)
        self.oned_idx = self.matrix_idx[0] * self.projection.dest.shape[1] + self.matrix_idx[1]

        # copy the template so the caller's Slice is not modified below
        template = copy(template)

        if not isinstance(template,Slice):
            template = Slice(template,self.input_sheet,force_odd=True,
                             min_matrix_radius=self.min_matrix_radius)

        self.weights_slice = self._create_input_sheet_slice(template)
        self.src_slice = tuple(self.input_sheet_slice.tolist())

    def _init_weights(self,mask_template):
        # Generate this CF's initial dense weights from weights_generator,
        # masked to the CF shape and cropped to the sheet.
        # NOTE(review): when mask_template is not already an array, the
        # _create_mask result assigned to `mask` is immediately
        # overwritten by the submatrix line below -- looks like a dead
        # store; confirm intended behavior.
        if not hasattr(mask_template,'view'):
            mask = _create_mask(mask_template,
                                self.weights_slice.compute_bounds(
                                    self.input_sheet),
                                self.input_sheet,True,0.5)

        mask = self.weights_slice.submatrix(mask_template)
        mask = np.array(mask,copy=1)

        pattern_params = dict(x=self.x,y=self.y,
                              bounds=self.get_bounds(self.input_sheet),
                              xdensity=self.input_sheet.xdensity,
                              ydensity=self.input_sheet.ydensity,
                              mask=mask)

        # Use a deterministic, per-CF-named generation when time-controlled
        # dynamic parameters are in effect (reproducible random streams).
        controlled_weights = (param.Dynamic.time_dependent
                              and isinstance(param.Dynamic.time_fn,
                                             param.Time)
                              and self.independent_weight_generation)

        if controlled_weights:
            with param.Dynamic.time_fn as t:
                t(0) # Initialize at time zero.
                # Controls random streams
                label = '' if self.label is None else self.label
                name = "%s_CF (%.5f, %.5f)" % (label, self.x, self.y)
                w = self.weights_generator(**dict(pattern_params,
                                                  name=name))
        else:
            w = self.weights_generator(**pattern_params)

        w = w.astype(sparse_type)

        for of in self.output_fns:
            of(w)

        return w

    def _create_input_sheet_slice(self,template):
        """
        Create the input_sheet_slice, which provides the appropriate
        Slice for this CF on the input_sheet (as well as providing
        this CF's exact bounds).

        Also creates the weights_slice, which provides the Slice for
        this weights matrix (in case it must be cropped at an edge).
        """
        # copy required because the template gets modified here but
        # needs to be used again
        input_sheet_slice = copy(template)
        input_sheet_slice.positionedcrop(self.x,self.y,self.input_sheet)
        input_sheet_slice.crop_to_sheet(self.input_sheet)

        # weights matrix cannot have a zero-sized dimension (could
        # happen at this stage because of cropping)
        nrows,ncols = input_sheet_slice.shape_on_sheet()
        if nrows<1 or ncols<1:
            raise NullCFError(self.x,self.y,self.input_sheet,nrows,ncols)

        self.input_sheet_slice = input_sheet_slice

        # not copied because we don't use again
        template.positionlesscrop(self.x,self.y,self.input_sheet)
        return template

    def get_input_matrix(self, activity):
        # View of the source activity restricted to this CF's slice.
        return self.input_sheet_slice.submatrix(activity)
class SparseCFProjection(CFProjection):
    """
    A projection composed of SparseConnectionFields from a Sheet into
    a ProjectionSheet.

    SparseCFProjection computes its activity using a response_fn which
    can either be an optimized function implemented as part of the
    sparse matrix class or an unoptimized function, which requests the
    weights in dense format. The initial contents of the
    SparseConnectionFields mapping from the input Sheet into the
    target ProjectionSheet are controlled by the weights_generator,
    cf_shape, and weights_output_fn parameters, while the location of
    the ConnectionField is controlled by the coord_mapper parameter.

    Any subclass has to implement the interface activate(self) that
    computes the response from the input and stores it in the activity
    array.
    """

    cf_type = param.Parameter(default=SparseConnectionField,doc="""
        Type of ConnectionField to use when creating individual CFs.""")

    learning_fn = param.Callable(default=CFPLF_Hebbian_Sparse,doc="""
        Function for computing changes to the weights based on one activation step.""")

    response_fn = param.Callable(default=CFPRF_DotProduct_Sparse,doc="""
        Function for computing the Projection response to an input pattern.""")

    weights_output_fns = param.HookList(default=[CFPOF_DivisiveNormalizeL1_Sparse],doc="""
        Functions applied to each CF after learning.""")

    initialized = param.Boolean(default=False)

    def __init__(self,initialize_cfs=True,**params):
        """
        Initialize the Projection with a set of cf_type objects
        (typically SparseConnectionFields), each located at the
        location in the source sheet corresponding to the unit in the
        target sheet. The cf_type objects are stored in the 'cfs'
        array.

        The nominal_bounds_template specified may be altered: the
        bounds must be fitted to the Sheet's matrix, and the weights
        matrix must have odd dimensions. These altered bounds are
        passed to the individual connection fields.

        A mask for the weights matrix is constructed. The shape is
        specified by cf_shape; the size defaults to the size
        of the nominal_bounds_template.
        """
        # Deliberately skips CFProjection.__init__ (calls its parent),
        # since the dense CF-array setup there is replaced below.
        super(CFProjection,self).__init__(**params)

        self.weights_generator.set_dynamic_time_fn(None,sublistattr='generators')
        # get the actual bounds_template by adjusting a copy of the
        # nominal_bounds_template to ensure an odd slice, and to be
        # cropped to sheet if necessary
        self._slice_template = Slice(copy(self.nominal_bounds_template),
                                     self.src,force_odd=True,
                                     min_matrix_radius=self.min_matrix_radius)

        self.bounds_template = self._slice_template.compute_bounds(self.src)

        self.mask_template = _create_mask(self.cf_shape,self.bounds_template,
                                          self.src,self.autosize_mask,
                                          self.mask_threshold)

        self.n_units = self._calc_n_units()

        self.activity = np.array(self.dest.activity)
        self.norm_total = np.array(self.dest.activity,dtype=np.float64)
        self.has_norm_total = False

        if initialize_cfs:
            self._create_cfs()

        if self.apply_output_fns_init:
            self.apply_learn_output_fns()

        ### JCALERT! We might want to change the default value of the
        ### input value to self.src.activity; but it fails, raising a
        ### type error. It probably has to be clarified why this is
        ### happening
        self.input_buffer = None

    def __getstate__(self):
        """
        Method to support pickling of sparse weights object.
        """
        # The sparse matrix itself is not picklable; store its triplet
        # representation and shape instead.
        state_dict = self.__dict__.copy()
        state_dict['triplets'] = state_dict['weights'].getTriplets()
        state_dict['weight_shape'] = (self.src.activity.shape,self.dest.activity.shape)
        del state_dict['weights']
        return state_dict

    def __setstate__(self,state_dict):
        """
        Method to support unpickling of sparse weights object.
        """
        # Rebuild the sparse matrix from the pickled triplets.
        self.__dict__.update(state_dict)
        self.weights = sparse.csarray_float(self.weight_shape[0],self.weight_shape[1])
        rowInds, colInds, values = self.triplets
        self.weights.setTriplets(rowInds,colInds,values)
        del self.triplets
        del self.weight_shape

    def _create_cfs(self):
        """
        Creates the CF objects, initializing the weights one by one
        and adding them to the sparse weights object in chunks.
        """
        vectorized_create_cf = simple_vectorize(self._create_cf)
        self.cfs = vectorized_create_cf(*self._generate_coords())
        self.flatcfs = list(self.cfs.flat)
        self.weights = sparse.csarray_float(self.src.activity.shape,self.dest.activity.shape)

        cf_x,cf_y = self.dest.activity.shape
        src_x,src_y = self.src.activity.shape

        # Reusable triplet buffers, sized for one row of CFs covering the
        # whole source sheet.
        y_array = np.zeros((src_x*src_y*cf_y),dtype=np.int32)
        x_array = np.zeros((src_x*src_y*cf_y),dtype=np.int32)
        val_array = np.zeros((src_x*src_y*cf_y),dtype=np.float32)

        # Iterate over the CFs, flushing one sparse chunk per dest row to
        # bound the temporary memory used.
        for x in range(cf_x):
            temp_sparse = sparse.csarray_float(self.src.activity.shape,self.dest.activity.shape)
            idx = 0
            for y in range(cf_y):
                x1,x2,y1,y2 = self.cfs[x][y].input_sheet_slice.tolist()
                if self.same_cf_shape_for_all_cfs:
                    mask_template = self.mask_template
                else:
                    mask_template = _create_mask(self.cf_shape,self.bounds_template,
                                                 self.src,self.autosize_mask,
                                                 self.mask_threshold)
                weights = self.cfs[x][y]._init_weights(mask_template)
                cn_x,cn_y = weights.shape
                y_val = x * cf_y + y
                # Pack this CF's dense weights into the triplet buffers.
                for cnx in range(cn_x):
                    val_array[idx:idx+cn_y] = weights[cnx,:]
                    x_val = (x1+cnx) * src_y + y1
                    x_array[idx:idx+cn_y] = range(x_val,x_val+cn_y)
                    y_array[idx:idx+cn_y] = y_val
                    idx += cn_y
            # Only nonzero entries go into the sparse matrix.
            nnz_idx = val_array.nonzero()
            temp_sparse.setTriplets(x_array[nnz_idx],y_array[nnz_idx],val_array[nnz_idx])
            self.weights += temp_sparse
            x_array *= 0; y_array *= 0; val_array *= 0.0
            del temp_sparse
        self.weights.compress()
        self.debug("Sparse projection %r loaded" % self.name)

    def _create_cf(self,x,y):
        """
        Create a ConnectionField at x,y in the src sheet.
        """
        label = self.hash_format.format(name=self.name,
                                        src=self.src.name,
                                        dest=self.dest.name)
        try:
            CF = self.cf_type(template=self._slice_template,
                              projection=self,input_sheet=self.src,x=x,y=y,
                              weights_generator=self.weights_generator,
                              min_matrix_radius=self.min_matrix_radius,
                              label=label)
        except NullCFError:
            if self.allow_null_cfs:
                CF = None
            else:
                raise
        return CF

    def get_sheet_mask(self):
        # All units considered active for masking purposes.
        return np.ones(self.activity.shape, dtype=self.activity.dtype)

    def get_active_units_mask(self):
        # All units considered active for masking purposes.
        return np.ones(self.activity.shape, dtype=self.activity.dtype)

    def activate(self,input_activity):
        """Activate using the specified response_fn and output_fn."""
        if self.input_fns:
            input_activity = input_activity.copy()
            for iaf in self.input_fns:
                iaf(input_activity)
        self.input_buffer = input_activity
        self.activity *= 0.0
        self.response_fn(self)
        for of in self.output_fns:
            of(self.activity)

    def learn(self):
        """
        For a SparseCFProjection, learn consists of calling the learning_fn.
        """
        # Learning is performed if the input_buffer has already been set,
        # i.e. there is an input to the Projection.  Use an identity
        # check: once set, input_buffer is a numpy array, and
        # `array != None` is an elementwise comparison whose truth value
        # is ambiguous under modern numpy.
        if self.input_buffer is not None:
            self.learning_fn(self)

    def apply_learn_output_fns(self,active_units_mask=True):
        """
        Apply the weights_output_fns to each unit.
        """
        for of in self.weights_output_fns: of(self)

    def n_bytes(self):
        """
        Estimates the size on the basis of the number non-zeros in the
        sparse matrix, asssuming indices and values are stored using
        32-bit integers and floats respectively.
        """
        return self.n_conns() * (3 * 4)

    def n_conns(self):
        """
        Returns number of nonzero weights.
        """
        return self.weights.getnnz()
if not use_sparse:
    # Without the compiled sparse extension, transparently substitute the
    # dense CFProjection so existing model scripts keep working.
    print "WARNING: Sparse component could not be imported, replacing SparseCFProjection with regular CFProjection"
    def SparseCFProjection(*args, **kwargs): # pyflakes:ignore (optimized version provided)
        return CFProjection(*args,**kwargs)
# Registry of the public sparse components defined in this module.
sparse_components = [CFSPLF_Plugin,
                     CFSPOF_Plugin,
                     CFSPOF_Prune,
                     CFSPOF_SproutRetract,
                     CFSPRF_Plugin,
                     compute_sparse_joint_norm_totals,
                     CFPOF_DivisiveNormalizeL1_Sparse,
                     CFPLF_Hebbian_Sparse,
                     CFPLF_Hebbian_Sparse_opt,
                     CFPRF_DotProduct_Sparse,
                     CFPRF_DotProduct_Sparse_opt,
                     SparseConnectionField,
                     SparseCFProjection]

# __all__ must contain attribute *names* (strings); listing the objects
# themselves makes `from ... import *` raise TypeError.
__all__ = [component.__name__ for component in sparse_components]
| {
"content_hash": "796e3cb7d9052b5c2590c68492b00cca",
"timestamp": "",
"source": "github",
"line_count": 886,
"max_line_length": 170,
"avg_line_length": 38.85665914221219,
"alnum_prop": 0.6299706625613617,
"repo_name": "mjabri/topographica",
"id": "a1af9cea6b392c34c7982eca727e09c044abd54c",
"size": "34427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "topo/sparse/sparsecf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "14889"
},
{
"name": "C++",
"bytes": "5714"
},
{
"name": "Elixir",
"bytes": "202"
},
{
"name": "JavaScript",
"bytes": "122"
},
{
"name": "Makefile",
"bytes": "15490"
},
{
"name": "Python",
"bytes": "1878339"
},
{
"name": "Shell",
"bytes": "1577"
},
{
"name": "TeX",
"bytes": "253834"
}
],
"symlink_target": ""
} |
print("Hello World!")
print("Hello Again")
print("I like typing this")
print("This is fun")
print("Yay! Printing")
print("I'd much rather you not")
print('I "said" do not touch this.')
| {
"content_hash": "6a80e1ec246348bd120ebb22f6af19ee",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 36,
"avg_line_length": 26.571428571428573,
"alnum_prop": 0.6827956989247311,
"repo_name": "petervdb/eLearning",
"id": "acc44b3ce7e8dad79c46f659dc79b51faa3421ce",
"size": "243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/hardway/ex1.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "7715"
},
{
"name": "JavaScript",
"bytes": "663"
},
{
"name": "Perl",
"bytes": "163"
},
{
"name": "Python",
"bytes": "72813"
},
{
"name": "Shell",
"bytes": "155"
},
{
"name": "Smarty",
"bytes": "3017"
}
],
"symlink_target": ""
} |
"""
**Timeprocessing
Profils temporels (journalier, hebdomadaire, annuel)
Exemple d'utilisation
profil_hebdo(df, func='mean')
profil_journalier(df, func='max')
profil_annuel(df, func=numpy.std)
"""
import numpy as np
import calendar as cal
def _get_funky(func):
"""Renvoie une fonction numpy correspondant au nom passé en paramètre,
sinon renvoie la fonction elle-même"""
if isinstance(func, str):
try:
func = getattr(np, func)
except:
raise NameError("Nom de fonction non comprise")
return func
def profil_journalier(df, func='mean'):
    """Compute the daily profile of *df*.

    Groups the rows by hour of day (the DataFrame index must be a
    time series, see e.g. the ``xair`` module) and aggregates each
    hourly bucket with *func*.

    Parameters:
        df: DataFrame indexed by a time series.
        func: numpy function name ('mean', 'max', ...) or the function
            itself (np.mean, np.max, ...).

    Returns:
        DataFrame with one aggregated row per hour of the day (0..23).
    """
    aggregator = _get_funky(func)
    by_hour = df.groupby(lambda timestamp: timestamp.hour)
    return by_hour.aggregate(aggregator)
def profil_hebdo(df, func='mean'):
    """Compute the weekly profile of *df*.

    Groups the rows by day of the week (the DataFrame index must be a
    time series, see e.g. the ``xair`` module) and aggregates each
    daily bucket with *func*.

    Parameters:
        df: DataFrame indexed by a time series.
        func: numpy function name ('mean', 'max', ...) or the function
            itself (np.mean, np.max, ...).

    Returns:
        DataFrame with one aggregated row per day of the week, indexed
        by day name (Monday .. Sunday).
    """
    func = _get_funky(func)
    # Bug fix: `x.weekday` (no parentheses) is a *bound method* on each
    # timestamp -- unlike the `.hour`/`.month` properties used by the
    # sibling functions -- so grouping keyed on distinct method objects
    # and put every row in its own group.  Call it to get the 0..6 int.
    res = df.groupby(lambda x: x.weekday()).aggregate(func)
    # Label with day names by mapping the actual group keys instead of
    # assuming all 7 days are present in the data (a partial week would
    # previously have raised a length-mismatch error).
    res.index = [cal.day_name[i] for i in res.index]
    return res
def profil_annuel(df, func='mean'):
    """Compute the annual profile of *df*.

    Groups the rows by month (the DataFrame index must be a time
    series, see e.g. the ``xair`` module) and aggregates each monthly
    bucket with *func*.

    Parameters:
        df: DataFrame indexed by a time series.
        func: numpy function name ('mean', 'max', ...) or the function
            itself (np.mean, np.max, ...).

    Returns:
        DataFrame with one aggregated row per month, indexed by month
        name (January .. December).
    """
    func = _get_funky(func)
    res = df.groupby(lambda x: x.month).aggregate(func)
    # Label with month names by mapping the actual group keys (1..12)
    # instead of assuming a full year of data; the original
    # `range(1, 13)` relabeling raised a length-mismatch error whenever
    # some months were absent.
    res.index = [cal.month_name[i] for i in res.index]
    return res
def strtime_help():
    """Print a help message describing the strftime/strptime format
    directives (see the `time` module documentation)."""
    # Wording fix: the standard-library docs say "representation", not
    # "representativity", for %c/%x/%X.
    print("""
%a Locale's abbreviated weekday name.
%A Locale's full weekday name.
%b Locale's abbreviated month name.
%B Locale's full month name.
%c Locale's appropriate date and time representation.
%d Day of the month as a decimal number [01,31].
%H Hour (24-hour clock) as a decimal number [00,23].
%I Hour (12-hour clock) as a decimal number [01,12].
%j Day of the year as a decimal number [001,366].
%m Month as a decimal number [01,12].
%M Minute as a decimal number [00,59].
%p Locale's equivalent of either AM or PM. (1)
%S Second as a decimal number [00,61]. (2)
%U Week number of the year (Sunday as the first day of the week) as a decimal number [00,53]. All days in a new year preceding the first Sunday are considered to be in week 0.
%w Weekday as a decimal number [0(Sunday),6].
%W Week number of the year (Monday as the first day of the week) as a decimal number [00,53]. All days in a new year preceding the first Monday are considered to be in week 0.
%x Locale's appropriate date representation.
%X Locale's appropriate time representation.
%y Year without century as a decimal number [00,99].
%Y Year with century as a decimal number.
%Z Time zone name (no characters if no time zone exists).
%% A literal "%" character.
""")
| {
"content_hash": "490ce8b03b886ee087eee41601880a08",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 176,
"avg_line_length": 31.66949152542373,
"alnum_prop": 0.6711265721166711,
"repo_name": "airpaca/pyair",
"id": "d7f1ddc326fb6db7a3f737fc05b398274c5c0cb9",
"size": "3801",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyair/date.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63221"
}
],
"symlink_target": ""
} |
"""
Classes and interfaces for producing tree structures that represent
the internal organization of a text. This task is known as X{parsing}
the text, and the resulting tree structures are called the text's
X{parses}. Typically, the text is a single sentence, and the tree
structure represents the syntactic structure of the sentence.
However, parsers can also be used in other domains. For example,
parsers can be used to derive the morphological structure of the
morphemes that make up a word, or to derive the discourse structure
for a set of utterances.
Sometimes, a single piece of text can be represented by more than one
tree structure. Texts represented by more than one tree structure are
called X{ambiguous} texts. Note that there are actually two ways in
which a text can be ambiguous:
- The text has multiple correct parses.
- There is not enough information to decide which of several
candidate parses is correct.
However, the parser module does I{not} distinguish these two types of
ambiguity.
The parser module defines C{ParserI}, a standard interface for parsing
texts; and two simple implementations of that interface,
C{ShiftReduceParser} and C{RecursiveDescentParser}. It also contains
three sub-modules for specialized kinds of parsing:
- C{nltk.parser.chart} defines chart parsing, which uses dynamic
programming to efficiently parse texts.
- C{nltk.parser.chunk} defines chunk parsing, which identifies
non-overlapping linguistic groups in a text.
- C{nltk.parser.probabilistic} defines probabilistic parsing, which
associates a probability with each parse.
@group Interfaces: ParserI
@group Parsers: ShiftReduceParser, SteppingShiftReduceParser,
RecursiveDescentParser, SteppingRecursiveDescentParser
@sort: ParserI, ShiftReduceParser, SteppingShiftReduceParser,
RecursiveDescentParser, SteppingRecursiveDescentParser,
demo, chart, chunk, probabilistic
@see: C{nltk.cfg}
"""
from nltk import TaskI, PropertyIndirectionMixIn
from nltk.tree import Tree, ImmutableTree
from nltk.token import Token
from nltk.cfg import Nonterminal, CFG, CFGProduction, nonterminals
from nltk.chktype import chktype
import types
##//////////////////////////////////////////////////////
## Parser Interface
##//////////////////////////////////////////////////////
class ParserI(TaskI):
    """
    A processing class for deriving trees that represent possible
    structures for a sequence of tokens.  These tree structures are
    known as X{parses}.  Typically, parsers are used to derive syntax
    trees for sentences.  But parsers can also be used to derive other
    kinds of tree structure, such as morphological trees and discourse
    structures.

    @inprop: C{SUBTOKENS}: The list of subtokens to be parsed.
    @outprop: C{TREE}: The parse tree.  I{(generated by L{parse})}
    @outprop: C{TREES}: A list of possible parse trees.
        I{(generated by L{parse_n})}
    """
    def parse(self, token):
        """
        Derive a parse tree that represents the structure of the given
        token's C{SUBTOKENS}, and output it to the token's C{TREE}
        property.  If no parse is found, then output C{None}.  If
        multiple parses are found, then output the best parse.

        The parsed trees derive a structure for the subtokens, but do
        not modify them.  In particular, the leaves of the subtree
        should be equal to the list of subtokens.

        @param token: The token whose subtokens should be parsed.
        @type token: L{Token}
        """
        raise NotImplementedError()
    def get_parse(self, token):
        """
        @return: A parse tree that represents the structure of the
        given token's C{SUBTOKENS}.  If no parse is found, then return
        C{None}.

        @rtype: L{Tree}
        @param token: The token whose subtokens should be parsed.
        @type token: L{Token}
        """
        # Consistency fix: raise like parse() instead of silently
        # returning None from an unimplemented interface method.
        raise NotImplementedError()
    def get_parse_list(self, token):
        """
        @return: A list of the parse trees that could represent the
        structure of the given token's C{SUBTOKENS}.  When possible,
        this list should be sorted from most likely to least likely.

        @rtype: C{list} of L{Tree}
        @param token: The token whose subtokens should be parsed.
        @type token: L{Token}
        """
        raise NotImplementedError()
    def get_parse_probs(self, token):
        """
        @return: A probability distribution over the parse trees that
        could represent the structure of the given token's
        C{SUBTOKENS}.

        @rtype: L{ProbDistI}
        @param token: The token whose subtokens should be parsed.
        @type token: L{Token}
        """
        raise NotImplementedError()
    def get_parse_dict(self, token):
        """
        @return: A dictionary mapping from parse trees that could
        represent the structure of the given token's C{SUBTOKENS} to
        numeric scores.

        @rtype: C{dict}
        @param token: The token whose subtokens should be parsed.
        @type token: L{Token}
        """
        # Bug fix: this method was accidentally also named
        # `get_parse_list`, silently shadowing the list-returning
        # method above; renamed to match its documented dict contract.
        raise NotImplementedError()
##//////////////////////////////////////////////////////
## Abstract Base Class for Parsers
##//////////////////////////////////////////////////////
class AbstractParser(ParserI, PropertyIndirectionMixIn):
    """
    An abstract base class for parsers.  C{AbstractParser} provides
    a default implementation for:

      - L{parse} (based on C{get_parse})
      - L{get_parse_list} (based on C{get_parse})
      - L{get_parse} (based on C{get_parse_list})

    Note that subclasses must override either C{get_parse} or
    C{get_parse_list} (or both), to avoid infinite recursion.
    """
    def __init__(self, **property_names):
        """
        Construct a new parser.

        @type property_names: C{dict}
        @param property_names: A dictionary that can be used to override
            the default property names.  Each entry maps from a
            default property name to a new property name.
        """
        # Make sure we're not directly instantiated:
        if self.__class__ == AbstractParser:
            # Call form (not the Python-2-only `raise E, "msg"` syntax):
            # identical behavior, and forward-compatible.
            raise AssertionError("Abstract classes can't be instantiated")
        PropertyIndirectionMixIn.__init__(self, **property_names)
    def parse(self, token):
        # Store the single best parse on the token's TREE property.
        TREE = self.property('TREE')
        token[TREE] = self.get_parse(token)
    def get_parse(self, token):
        # Default: first element of get_parse_list(), or None if empty.
        trees = self.get_parse_list(token)
        if len(trees) == 0: return None
        else: return trees[0]
    def get_parse_list(self, token):
        # Default: singleton list around get_parse(), or [] if no parse.
        tree = self.get_parse(token)
        if tree is None: return []
        else: return [tree]
##//////////////////////////////////////////////////////
## Shift/Reduce Parser
##//////////////////////////////////////////////////////
class ShiftReduceParser(AbstractParser):
    """
    A simple bottom-up CFG parser that uses two operations, "shift"
    and "reduce", to find a single parse for a text.

    C{ShiftReduceParser} maintains a stack, which records the
    structure of a portion of the text.  This stack is a list of
    C{Token}s and C{Tree}s that collectively cover a portion of
    the text.  For example, while parsing the sentence "the dog saw
    the man" with a typical grammar, C{ShiftReduceParser} will produce
    the following stack, which covers "the dog saw"::

       [(NP: (Det: <'the'>) (N: <'dog'>)), (V: <'saw'>)]

    C{ShiftReduceParser} attempts to extend the stack to cover the
    entire text, and to combine the stack elements into a single tree,
    producing a complete parse for the sentence.

    Initially, the stack is empty.  It is extended to cover the text,
    from left to right, by repeatedly applying two operations:

      - X{shift} moves a token from the beginning of the text to the
        end of the stack.
      - X{reduce} uses a CFG production to combine the rightmost stack
        elements into a single C{Tree}.

    Often, more than one operation can be performed on a given stack.
    In this case, C{ShiftReduceParser} uses the following heuristics
    to decide which operation to perform:

      - Only shift if no reductions are available.
      - If multiple reductions are available, then apply the reduction
        whose CFG production is listed earliest in the grammar.

    Note that these heuristics are not guaranteed to choose an
    operation that leads to a parse of the text.  Also, if multiple
    parses exist, C{ShiftReduceParser} will return at most one of
    them.

    @see: C{nltk.cfg}
    @inprop: C{SUBTOKENS}: The list of subtokens to be parsed.
    @inprop: C{LEAF}: The string content of the subtokens.
    @outprop: C{TREE}: The parse tree.  I{(generated by L{parse})}
    @outprop: C{TREES}: A list of possible parse trees.
        I{(generated by L{parse_n})}
    """
    def __init__(self, grammar, trace=0, **property_names):
        """
        Create a new C{ShiftReduceParser}, that uses C{grammar} to
        parse texts.

        @type grammar: C{CFG}
        @param grammar: The grammar used to parse texts.
        @type trace: C{int}
        @param trace: The level of tracing that should be used when
            parsing a text.  C{0} will generate no tracing output;
            and higher numbers will produce more verbose tracing
            output.
        @type property_names: C{dict}
        @param property_names: A dictionary that can be used to override
            the default property names.  Each entry maps from a
            default property name to a new property name.
        """
        assert chktype(1, grammar, CFG)
        assert chktype(2, trace, types.IntType)
        self._grammar = grammar
        self._trace = trace
        AbstractParser.__init__(self, **property_names)
        # Warn at construction time about productions that can never
        # fire under this parser's reduction heuristics.
        self._check_grammar()
    def grammar(self):
        """
        @return: The grammar used to parse texts.
        @rtype: C{CFG}
        """
        return self._grammar
    def set_grammar(self, grammar):
        """
        Change the grammar used to parse texts.

        @param grammar: The new grammar.
        @type grammar: C{CFG}
        """
        assert chktype(1, grammar, CFG)
        self._grammar = grammar
    def get_parse(self, token):
        # See ParserI for the method contract: returns a single parse
        # Tree for token[SUBTOKENS], or None if parsing fails.
        assert chktype(1, token, [Token], (Token))
        SUBTOKENS = self.property('SUBTOKENS')
        LEAF = self.property('LEAF')
        # initialize the stack.
        stack = []
        # Copy the subtoken list: _shift consumes remaining_text
        # destructively, and we must not mutate the caller's token.
        remaining_text = token[SUBTOKENS][:]
        # Trace output.
        if self._trace:
            leaves = [tok[LEAF] for tok in token[SUBTOKENS]]
            print 'Parsing %r' % ' '.join(leaves)
            self._trace_stack(stack, remaining_text)
        # iterate through the text, pushing the token's type onto
        # the stack, then reducing the stack.
        while len(remaining_text) > 0:
            self._shift(stack, remaining_text)
            # Greedily reduce as long as any production matches; this
            # implements the "only shift if no reductions" heuristic.
            while self._reduce(stack, remaining_text): pass
        # Did we reduce everything?
        if len(stack) != 1: return None
        # Did we end up with the right category?
        if stack[0].node != self._grammar.start().symbol():
            return None
        # We parsed successfully!
        return stack[0]
    def _shift(self, stack, remaining_text):
        """
        Move a token from the beginning of C{remaining_text} to the
        end of C{stack}.  Both arguments are modified in place.

        @type stack: C{list} of C{Token} and C{Tree}
        @param stack: A list of C{Token}s and C{Tree}s, encoding
            the structure of the text that has been parsed so far.
        @type remaining_text: C{list} of C{Token}
        @param remaining_text: The portion of the text that is not yet
            covered by C{stack}.
        @rtype: C{None}
        """
        stack.append(remaining_text[0])
        remaining_text.remove(remaining_text[0])
        if self._trace: self._trace_shift(stack, remaining_text)
    def _match_rhs(self, rhs, rightmost_stack):
        """
        @rtype: C{boolean}
        @return: true if the right hand side of a CFG production
            matches the rightmost elements of the stack.  C{rhs}
            matches C{rightmost_stack} if they are the same length,
            and each element of C{rhs} matches the corresponding
            element of C{rightmost_stack}.  A nonterminal element of
            C{rhs} matches any C{Tree} whose node value is equal
            to the nonterminal's symbol.  A terminal element of C{rhs}
            matches any C{Token} whose type is equal to the terminal.
        @type rhs: C{list} of (terminal and C{Nonterminal})
        @param rhs: The right hand side of a CFG production.
        @type rightmost_stack: C{list} of (C{Token} and C{Tree})
        @param rightmost_stack: The rightmost elements of the parser's
            stack.
        """
        if len(rightmost_stack) != len(rhs): return 0
        for i in range(len(rightmost_stack)):
            if isinstance(rightmost_stack[i], Tree):
                # A completed subtree must match a nonterminal symbol.
                if not isinstance(rhs[i], Nonterminal): return 0
                if rightmost_stack[i].node != rhs[i].symbol(): return 0
            else:
                # A raw token must match a terminal.
                if isinstance(rhs[i], Nonterminal): return 0
                # NOTE(review): this reads the literal 'TEXT' property,
                # while the rest of the class goes through
                # self.property('LEAF') -- looks inconsistent; confirm
                # against how property indirection is configured.
                if rightmost_stack[i]['TEXT'] != rhs[i]: return 0
        return 1
    def _reduce(self, stack, remaining_text, production=None):
        """
        Find a CFG production whose right hand side matches the
        rightmost stack elements; and combine those stack elements
        into a single C{Tree}, with the node specified by the
        production's left-hand side.  If more than one CFG production
        matches the stack, then use the production that is listed
        earliest in the grammar.  The new C{Tree} replaces the
        elements in the stack.

        @rtype: C{CFGProduction} or C{None}
        @return: If a reduction is performed, then return the CFG
            production that the reduction is based on; otherwise,
            return false.
        @type stack: C{list} of C{Token} and C{Tree}
        @param stack: A list of C{Token}s and C{Tree}s, encoding
            the structure of the text that has been parsed so far.
        @type remaining_text: C{list} of C{Token}
        @param remaining_text: The portion of the text that is not yet
            covered by C{stack}.
        """
        # If no specific production was requested, try them all, in
        # grammar order (earliest listed production wins).
        if production is None: productions = self._grammar.productions()
        else: productions = [production]
        # Try each production, in order.
        for production in productions:
            rhslen = len(production.rhs())
            # check if the RHS of a production matches the top of the stack
            if self._match_rhs(production.rhs(), stack[-rhslen:]):
                # combine the tree to reflect the reduction
                tree = Tree(production.lhs().symbol(), stack[-rhslen:])
                stack[-rhslen:] = [tree]
                # We reduced something
                if self._trace:
                    self._trace_reduce(stack, production, remaining_text)
                return production
        # We didn't reduce anything
        return None
    def trace(self, trace=2):
        """
        Set the level of tracing output that should be generated when
        parsing a text.

        @type trace: C{int}
        @param trace: The trace level.  A trace level of C{0} will
            generate no tracing output; and higher trace levels will
            produce more verbose tracing output.
        @rtype: C{None}
        """
        assert chktype(1, trace, types.IntType)
        # 1: just show shifts.
        # 2: show shifts & reduces
        # 3: display which tokens & productions are shifted/reduced
        self._trace = trace
    def _trace_stack(self, stack, remaining_text, marker=' '):
        """
        Print trace output displaying the given stack and text.

        @rtype: C{None}
        @param marker: A character that is printed to the left of the
            stack.  This is used with trace level 2 to print 'S'
            before shifted stacks and 'R' before reduced stacks.
        """
        LEAF = self.property('LEAF')
        # NOTE(review): the local name `str` shadows the builtin within
        # this method; harmless here, but worth renaming eventually.
        str = ' '+marker+' [ '
        for elt in stack:
            if isinstance(elt, Tree):
                str += `Nonterminal(elt.node)` + ' '
            else:
                str += `elt[LEAF]` + ' '
        # '*' separates the parsed stack from the unparsed remainder.
        str += '* ' + ' '.join([`s[LEAF]` for s in remaining_text]) + ']'
        print str
    def _trace_shift(self, stack, remaining_text):
        """
        Print trace output displaying that a token has been shifted.

        @rtype: C{None}
        """
        if self._trace > 2: print 'Shift %r:' % stack[-1]
        if self._trace == 2: self._trace_stack(stack, remaining_text, 'S')
        elif self._trace > 0: self._trace_stack(stack, remaining_text)
    def _trace_reduce(self, stack, production, remaining_text):
        """
        Print trace output displaying that C{production} was used to
        reduce C{stack}.

        @rtype: C{None}
        """
        if self._trace > 2:
            rhs = ' '.join([`s` for s in production.rhs()])
            print 'Reduce %r <- %s' % (production.lhs(), rhs)
        if self._trace == 2: self._trace_stack(stack, remaining_text, 'R')
        elif self._trace > 1: self._trace_stack(stack, remaining_text)
    def _check_grammar(self):
        """
        Check to make sure that all of the CFG productions are
        potentially useful.  If any productions can never be used,
        then print a warning.

        @rtype: C{None}
        """
        productions = self._grammar.productions()
        # Any production whose RHS is an extension of another production's RHS
        # will never be used.
        # NOTE(review): the check below tests whether a *later*
        # production's RHS is a prefix of an earlier one's; since
        # reductions match the rightmost stack elements, a suffix test
        # may have been intended -- confirm before relying on this
        # warning.
        for i in range(len(productions)):
            for j in range(i+1, len(productions)):
                rhs1 = productions[i].rhs()
                rhs2 = productions[j].rhs()
                if rhs1[:len(rhs2)] == rhs2:
                    print 'Warning: %r will never be used' % productions[i]
##//////////////////////////////////////////////////////
## Recursive Descent Parser
##//////////////////////////////////////////////////////
class RecursiveDescentParser(AbstractParser):
    """
    A simple top-down CFG parser that parses texts by recursively
    expanding the fringe of a C{Tree}, and matching it against a
    text.

    C{RecursiveDescentParser} uses a list of tree locations called a
    X{frontier} to remember which subtrees have not yet been expanded
    and which leaves have not yet been matched against the text.  Each
    tree location consists of a list of child indices specifying the
    path from the root of the tree to a subtree or a leaf; see the
    reference documentation for C{Tree} for more information
    about tree locations.

    When the parser begins parsing a text, it constructs a tree
    containing only the start symbol, and a frontier containing the
    location of the tree's root node.  It then extends the tree to
    cover the text, using the following recursive procedure:

      - If the frontier is empty, and the text is covered by the tree,
        then return the tree as a possible parse.
      - If the frontier is empty, and the text is not covered by the
        tree, then return no parses.
      - If the first element of the frontier is a subtree, then
        use CFG productions to X{expand} it.  For each applicable
        production, add the expanded subtree's children to the
        frontier, and recursively find all parses that can be
        generated by the new tree and frontier.
      - If the first element of the frontier is a token, then X{match}
        it against the next token from the text.  Remove the token
        from the frontier, and recursively find all parses that can be
        generated by the new tree and frontier.

    @see: C{nltk.cfg}
    @inprop: C{SUBTOKENS}: The list of subtokens to be parsed.
    @inprop: C{LEAF}: The string content of the subtokens.
    @outprop: C{TREE}: The parse tree.  I{(generated by L{parse})}
    @outprop: C{TREES}: A list of possible parse trees.
        I{(generated by L{parse_n})}
    """
    def __init__(self, grammar, trace=0, **property_names):
        """
        Create a new C{RecursiveDescentParser}, that uses C{grammar}
        to parse texts.

        @type grammar: C{CFG}
        @param grammar: The grammar used to parse texts.
        @type trace: C{int}
        @param trace: The level of tracing that should be used when
            parsing a text.  C{0} will generate no tracing output;
            and higher numbers will produce more verbose tracing
            output.
        @type property_names: C{dict}
        @param property_names: A dictionary that can be used to override
            the default property names.  Each entry maps from a
            default property name to a new property name.
        """
        assert chktype(1, grammar, CFG)
        assert chktype(2, trace, types.IntType)
        self._grammar = grammar
        self._trace = trace
        AbstractParser.__init__(self, **property_names)
    def grammar(self):
        """
        @return: The grammar used to parse texts.
        @rtype: C{CFG}
        """
        return self._grammar
    def set_grammar(self, grammar):
        """
        Change the grammar used to parse texts.

        @param grammar: The new grammar.
        @type grammar: C{CFG}
        """
        assert chktype(1, grammar, CFG)
        self._grammar = grammar
    def get_parse_list(self, token):
        # Inherit docs from ParserI
        assert chktype(1, token, Token)
        SUBTOKENS = self.property('SUBTOKENS')
        # Start a recursive descent parse, with an initial tree
        # containing just the start symbol.
        start = self._grammar.start().symbol()
        initial_tree = Tree(start, [])
        # The root location () is the only unexpanded element so far.
        frontier = [()]
        text = token[SUBTOKENS]
        if self._trace:
            self._trace_start(initial_tree, frontier, text)
        parses = self._parse(text, initial_tree, frontier)
        # Return the parses.
        return parses
    def _parse(self, remaining_text, tree, frontier):
        """
        Recursively expand and match each elements of C{tree}
        specified by C{frontier}, to cover C{remaining_text}.  Return
        a list of all parses found.

        @return: A list of all parses that can be generated by
            matching and expanding the elements of C{tree}
            specified by C{frontier}.
        @rtype: C{list} of C{Tree}
        @type tree: C{Tree}
        @param tree: A partial structure for the text that is
            currently being parsed.  The elements of C{tree}
            that are specified by C{frontier} have not yet been
            expanded or matched.
        @type remaining_text: C{list} of C{Token}s
        @param remaining_text: The portion of the text that is not yet
            covered by C{tree}.
        @type frontier: C{list} of C{tuple} of C{int}
        @param frontier: A list of the locations within C{tree} of
            all subtrees that have not yet been expanded, and all
            leaves that have not yet been matched.  This list sorted
            in left-to-right order of location within the tree.
        """
        # If the tree covers the text, and there's nothing left to
        # expand, then we've found a complete parse; return it.
        if len(remaining_text) == 0 and len(frontier) == 0:
            if self._trace:
                self._trace_succeed(tree, frontier)
            return [tree]
        # If there's still text, but nothing left to expand, we failed.
        elif len(frontier) == 0:
            if self._trace:
                self._trace_backtrack(tree, frontier)
            return []
        # If the next element on the frontier is a tree, expand it.
        elif isinstance(tree[frontier[0]], Tree):
            return self._expand(remaining_text, tree, frontier)
        # If the next element on the frontier is a token, match it.
        else:
            return self._match(remaining_text, tree, frontier)
    def _match(self, rtext, tree, frontier):
        """
        @rtype: C{list} of C{Tree}
        @return: a list of all parses that can be generated by
            matching the first element of C{frontier} against the
            first token in C{rtext}.  In particular, if the first
            element of C{frontier} has the same type as the first
            token in C{rtext}, then substitute the token into
            C{tree}; and return all parses that can be generated by
            matching and expanding the remaining elements of
            C{frontier}.  If the first element of C{frontier} does not
            have the same type as the first token in C{rtext}, then
            return empty list.
        @type tree: C{Tree}
        @param tree: A partial structure for the text that is
            currently being parsed.  The elements of C{tree}
            that are specified by C{frontier} have not yet been
            expanded or matched.
        @type rtext: C{list} of C{Token}s
        @param rtext: The portion of the text that is not yet
            covered by C{tree}.
        @type frontier: C{list} of C{tuple} of C{int}
        @param frontier: A list of the locations within C{tree} of
            all subtrees that have not yet been expanded, and all
            leaves that have not yet been matched.
        """
        LEAF = self.property('LEAF')
        tree_leaf = tree[frontier[0]][LEAF]
        # NOTE(review): the expected leaf is read via
        # self.property('LEAF') but the input token via the literal
        # 'TEXT' key -- looks inconsistent; confirm against the
        # property-indirection configuration.
        if (len(rtext) > 0 and tree_leaf == rtext[0]['TEXT']):
            # If it's a terminal that matches text[0], then substitute
            # in the token, and continue parsing.
            newtree = tree.copy(deep=True)
            newtree[frontier[0]] = rtext[0]
            if self._trace:
                self._trace_match(newtree, frontier[1:], rtext[0])
            return self._parse(rtext[1:], newtree, frontier[1:])
        else:
            # If it's a non-matching terminal, fail.
            if self._trace:
                self._trace_backtrack(tree, frontier, rtext[:1])
            return []
    def _expand(self, remaining_text, tree, frontier, production=None):
        """
        @rtype: C{list} of C{Tree}
        @return: A list of all parses that can be generated by
            expanding the first element of C{frontier} with
            C{production}.  In particular, if the first element of
            C{frontier} is a subtree whose node type is equal to
            C{production}'s left hand side, then add a child to that
            subtree for each element of C{production}'s right hand
            side; and return all parses that can be generated by
            matching and expanding the remaining elements of
            C{frontier}.  If the first element of C{frontier} is not a
            subtree whose node type is equal to C{production}'s left
            hand side, then return an empty list.  If C{production} is
            not specified, then return a list of all parses that can
            be generated by expanding the first element of C{frontier}
            with I{any} CFG production.
        @type tree: C{Tree}
        @param tree: A partial structure for the text that is
            currently being parsed.  The elements of C{tree}
            that are specified by C{frontier} have not yet been
            expanded or matched.
        @type remaining_text: C{list} of C{Token}s
        @param remaining_text: The portion of the text that is not yet
            covered by C{tree}.
        @type frontier: C{list} of C{tuple} of C{int}
        @param frontier: A list of the locations within C{tree} of
            all subtrees that have not yet been expanded, and all
            leaves that have not yet been matched.
        """
        LEAF = self.property('LEAF')
        if production is None: productions = self._grammar.productions()
        else: productions = [production]
        parses = []
        for production in productions:
            lhs = production.lhs().symbol()
            if lhs == tree[frontier[0]].node:
                subtree = self._production_to_tree(production)
                # Expanding the root replaces the whole tree; any other
                # location is patched into a deep copy so sibling
                # expansions don't interfere with each other.
                if frontier[0] == ():
                    newtree = subtree
                else:
                    newtree = tree.copy(deep=True)
                    newtree[frontier[0]] = subtree
                # The new children become the leftmost frontier entries.
                new_frontier = [frontier[0]+(i,) for i in
                                range(len(production.rhs()))]
                if self._trace:
                    self._trace_expand(newtree, new_frontier, production)
                parses += self._parse(remaining_text, newtree,
                                      new_frontier + frontier[1:])
        return parses
    def _production_to_tree(self, production):
        """
        @rtype: C{Tree}
        @return: The C{Tree} that is licensed by C{production}.
            In particular, given the production::

                C{[M{lhs} -> M{elt[1]} ... M{elt[n]}]}

            Return a tree token that has a node C{M{lhs}.symbol}, and
            C{M{n}} children.  For each nonterminal element
            C{M{elt[i]}} in the production, the tree token has a
            childless subtree with node value C{M{elt[i]}.symbol}; and
            for each terminal element C{M{elt[j]}}, the tree token has
            a leaf token with type C{M{elt[j]}}.
        @param production: The CFG production that licenses the tree
            token that should be returned.
        @type production: C{CFGProduction}
        """
        LEAF = self.property('LEAF')
        children = []
        for elt in production.rhs():
            if isinstance(elt, Nonterminal):
                children.append(Tree(elt.symbol(), []))
            else:
                # This will be matched.
                children.append(Token({LEAF: elt}))
        return Tree(production.lhs().symbol(), children)
    def trace(self, trace=2):
        """
        Set the level of tracing output that should be generated when
        parsing a text.

        @type trace: C{int}
        @param trace: The trace level.  A trace level of C{0} will
            generate no tracing output; and higher trace levels will
            produce more verbose tracing output.
        @rtype: C{None}
        """
        assert chktype(1, trace, types.IntType)
        self._trace = trace
    def _trace_fringe(self, tree, treeloc=None):
        """
        Print trace output displaying the fringe of C{tree}.  The
        fringe of C{tree} consists of all of its leaves and all of
        its childless subtrees.  C{treeloc}, when given, marks the
        current frontier position with a '*'.

        @rtype: C{None}
        """
        LEAF = self.property('LEAF')
        if treeloc == (): print "*",
        if isinstance(tree, Tree):
            if len(tree) == 0: print `Nonterminal(tree.node)`,
            for i in range(len(tree)):
                if treeloc is not None and i == treeloc[0]:
                    self._trace_fringe(tree[i], treeloc[1:])
                else:
                    self._trace_fringe(tree[i])
        else:
            print `tree`,
    def _trace_tree(self, tree, frontier, operation):
        """
        Print trace output displaying the parser's current state.

        @param operation: A character identifying the operation that
            generated the current state.
        @rtype: C{None}
        """
        if self._trace == 2: print ' %c [' % operation,
        else: print ' [',
        if len(frontier) > 0: self._trace_fringe(tree, frontier[0])
        else: self._trace_fringe(tree)
        print ']'
    def _trace_start(self, tree, frontier, text):
        # NOTE(review): reads the literal 'TEXT' property; see _match.
        print 'Parsing %r' % ' '.join([tok['TEXT'] for tok in text])
        if self._trace > 2: print 'Start:'
        if self._trace > 1: self._trace_tree(tree, frontier, ' ')
    def _trace_expand(self, tree, frontier, production):
        # Trace a single expansion step ('E' marker at level 2).
        if self._trace > 2: print 'Expand: %s' % production
        if self._trace > 1: self._trace_tree(tree, frontier, 'E')
    def _trace_match(self, tree, frontier, tok):
        # Trace a single match step ('M' marker at level 2).
        if self._trace > 2: print 'Match: %r' % tok
        if self._trace > 1: self._trace_tree(tree, frontier, 'M')
    def _trace_succeed(self, tree, frontier):
        # Trace a completed parse ('+' marker at level 2).
        if self._trace > 2: print 'GOOD PARSE:'
        if self._trace == 1: print 'Found a parse:\n%s' % tree
        if self._trace > 1: self._trace_tree(tree, frontier, '+')
    def _trace_backtrack(self, tree, frontier, toks=None):
        # Trace a dead end (only shown at the most verbose level).
        if self._trace > 2:
            if toks: print 'Backtrack: %r match failed' % toks[0]
            else: print 'Backtrack'
##//////////////////////////////////////////////////////
## Stepping Shift/Reduce Parser
##//////////////////////////////////////////////////////
class SteppingShiftReduceParser(ShiftReduceParser):
"""
    A C{ShiftReduceParser} that allows you to step through the parsing
process, performing a single operation at a time. It also allows
you to change the parser's grammar midway through parsing a text.
The C{initialize} method is used to start parsing a text.
C{shift} performs a single shift operation, and C{reduce} performs
a single reduce operation. C{step} will perform a single reduce
operation if possible; otherwise, it will perform a single shift
operation. C{parses} returns the set of parses that have been
found by the parser.
@ivar _history: A list of C{(stack, remaining_text)} pairs,
containing all of the previous states of the parser. This
history is used to implement the C{undo} operation.
@see: C{nltk.cfg}
@inprop: C{SUBTOKENS}: The list of subtokens to be parsed.
@inprop: C{LEAF}: The string content of the subtokens.
@outprop: C{TREE}: The parse tree. I{(generated by L{parse})}
@outprop: C{TREES}: A list of possible parse trees.
I{(generated by L{parse_n})}
"""
def __init__(self, grammar, trace=0, **property_names):
assert chktype(1, grammar, CFG)
assert chktype(2, trace, types.IntType)
self._grammar = grammar
self._trace = trace
self._stack = None
self._remaining_text = None
self._history = []
AbstractParser.__init__(self, **property_names)
def get_parse_list(self, token):
assert chktype(1, token, Token)
self.initialize(token)
while self.step(): pass
return self.parses()
def stack(self):
"""
@return: The parser's stack.
@rtype: C{list} of C{Token} and C{Tree}
"""
return self._stack
def remaining_text(self):
"""
@return: The portion of the text that is not yet covered by the
stack.
@rtype: C{list} of C{Token}
"""
return self._remaining_text
def initialize(self, token):
"""
Start parsing a given text. This sets the parser's stack to
C{[]} and sets its remaining text to C{token['SUBTOKENS']}.
"""
assert chktype(1, token, Token)
SUBTOKENS = self.property('SUBTOKENS')
self._stack = []
self._remaining_text = token[SUBTOKENS][:]
self._history = []
def step(self):
"""
Perform a single parsing operation. If a reduction is
possible, then perform that reduction, and return the
production that it is based on. Otherwise, if a shift is
possible, then perform it, and return 1. Otherwise,
return 0.
@return: 0 if no operation was performed; 1 if a shift was
performed; and the CFG production used to reduce if a
reduction was performed.
@rtype: C{CFGProduction} or C{boolean}
"""
return self.reduce() or self.shift()
def shift(self):
"""
Move a token from the beginning of the remaining text to the
end of the stack. If there are no more tokens in the
remaining text, then do nothing.
@return: True if the shift operation was successful.
@rtype: C{boolean}
"""
if len(self._remaining_text) == 0: return 0
self._history.append( (self._stack[:], self._remaining_text[:]) )
self._shift(self._stack, self._remaining_text)
return 1
def reduce(self, production=None):
"""
Use C{production} to combine the rightmost stack elements into
a single C{Tree}. If C{production} does not match the
rightmost stack elements, then do nothing.
@return: The production used to reduce the stack, if a
reduction was performed. If no reduction was performed,
return C{None}.
@rtype: C{CFGProduction} or C{None}
"""
assert chktype(1, production, CFGProduction, types.NoneType)
self._history.append( (self._stack[:], self._remaining_text[:]) )
return_val = self._reduce(self._stack, self._remaining_text,
production)
if not return_val: self._history.pop()
return return_val
def undo(self):
"""
Return the parser to its state before the most recent
shift or reduce operation. Calling C{undo} repeatedly return
the parser to successively earlier states. If no shift or
reduce operations have been performed, C{undo} will make no
changes.
@return: true if an operation was successfully undone.
@rtype: C{boolean}
"""
if len(self._history) == 0: return 0
(self._stack, self._remaining_text) = self._history.pop()
return 1
def reducible_productions(self):
"""
@return: A list of the productions for which reductions are
available for the current parser state.
@rtype: C{list} of C{CFGProduction}
"""
productions = []
for production in self._grammar.productions():
rhslen = len(production.rhs())
if self._match_rhs(production.rhs(), self._stack[-rhslen:]):
productions.append(production)
return productions
def parses(self):
"""
@return: A list of the parses that have been found by this
parser so far.
@rtype: C{list} of C{Tree}
"""
if len(self._remaining_text) != 0: return []
if len(self._stack) != 1: return []
if self._stack[0].node != self._grammar.start().symbol():
return []
return self._stack
##//////////////////////////////////////////////////////
## Stepping Recursive Descent Parser
##//////////////////////////////////////////////////////
class SteppingRecursiveDescentParser(RecursiveDescentParser):
    """
    A C{RecursiveDescentParser} that allows you to step through the
    parsing process, performing a single operation at a time.

    The C{initialize} method is used to start parsing a text.
    C{expand} expands the first element on the frontier using a single
    CFG production, and C{match} matches the first element on the
    frontier against the next text token.  C{backtrack} undoes the most
    recent expand or match operation.  C{step} performs a single
    expand, match, or backtrack operation.  C{parses} returns the set
    of parses that have been found by the parser.

    @ivar _history: A list of C{(rtext, tree, frontier)} triples,
        containing the previous states of the parser.  This history is
        used to implement the C{backtrack} operation.
    @ivar _tried_e: A record of all productions that have been tried
        for a given tree.  This record is used by C{expand} to perform
        the next untried production.
    @ivar _tried_m: A record of what tokens have been matched for a
        given tree.  This record is used by C{step} to decide whether
        or not to match a token.
    @see: C{nltk.cfg}
    @inprop: C{SUBTOKENS}: The list of subtokens to be parsed.
    @inprop: C{LEAF}: The string content of the subtokens.
    @outprop: C{TREE}: The parse tree.  I{(generated by L{parse})}
    @outprop: C{TREES}: A list of possible parse trees.
        I{(generated by L{parse_n})}
    """
    def __init__(self, grammar, trace=0, **property_names):
        assert chktype(1, grammar, CFG)
        assert chktype(2, trace, types.IntType)
        self._grammar = grammar
        self._trace = trace
        # Parser state: remaining text, partial tree, and the frontier
        # (tree positions not yet expanded/matched).
        self._rtext = None
        self._tree = None
        self._frontier = [()]
        # _tried_e/_tried_m are keyed by a frozen copy of the current
        # tree; see _freeze below.
        self._tried_e = {}
        self._tried_m = {}
        self._history = []
        self._parses = []
        AbstractParser.__init__(self, **property_names)

    # [XX] TEMPORARY HACK WARNING!  This should be replaced with
    # something nicer when we get the chance.
    def _freeze(self, tree):
        # Produce an immutable (hashable) copy of the tree so it can be
        # used as a dictionary key in _tried_e/_tried_m.
        c = tree.copy()
        for pos in c.treepositions('leaves'):
            c[pos] = c[pos].freeze()
        return ImmutableTree.convert(c)

    def get_parse_list(self, token):
        assert chktype(1, token, Token)
        TREES = self.property('TREES')  # NOTE(review): unused local
        self.initialize(token)
        # Run to exhaustion: step() returns None when nothing is left
        # to match, expand, or backtrack.
        while self.step() is not None: pass
        return self.parses()

    def initialize(self, token):
        """
        Start parsing a given text.  This sets the parser's tree to
        the start symbol, its frontier to the root node, and its
        remaining text to C{token['SUBTOKENS']}.
        """
        assert chktype(1, token, Token)
        SUBTOKENS = self.property('SUBTOKENS')
        self._rtext = token[SUBTOKENS]
        start = self._grammar.start().symbol()
        self._tree = Tree(start, [])
        self._frontier = [()]
        self._tried_e = {}
        self._tried_m = {}
        self._history = []
        self._parses = []
        if self._trace:
            self._trace_start(self._tree, self._frontier, self._rtext)

    def remaining_text(self):
        """
        @return: The portion of the text that is not yet covered by the
            tree.
        @rtype: C{list} of C{Token}
        """
        return self._rtext

    def frontier(self):
        """
        @return: A list of the tree locations of all subtrees that
            have not yet been expanded, and all leaves that have not
            yet been matched.
        @rtype: C{list} of C{tuple} of C{int}
        """
        return self._frontier

    def tree(self):
        """
        @return: A partial structure for the text that is
            currently being parsed.  The elements specified by the
            frontier have not yet been expanded or matched.
        @rtype: C{Tree}
        """
        return self._tree

    def step(self):
        """
        Perform a single parsing operation.  If an untried match is
        possible, then perform the match, and return the matched
        token.  If an untried expansion is possible, then perform the
        expansion, and return the production that it is based on.  If
        backtracking is possible, then backtrack, and return 1.
        Otherwise, return 0.

        @return: 0 if no operation was performed; a token if a match
            was performed; a production if an expansion was performed;
            and 1 if a backtrack operation was performed.
        @rtype: C{CFGProduction} or C{Token} or C{boolean}
        """
        # Try matching (if we haven't already)
        if self.untried_match():
            token = self.match()
            if token is not None: return token

        # Try expanding.
        production = self.expand()
        if production is not None: return production

        # Try backtracking
        if self.backtrack():
            self._trace_backtrack(self._tree, self._frontier)
            return 1

        # Nothing left to do.
        return None

    def expand(self, production=None):
        """
        Expand the first element of the frontier.  In particular, if
        the first element of the frontier is a subtree whose node type
        is equal to C{production}'s left hand side, then add a child
        to that subtree for each element of C{production}'s right hand
        side.  If C{production} is not specified, then use the first
        untried expandable production.  If all expandable productions
        have been tried, do nothing.

        @return: The production used to expand the frontier, if an
            expansion was performed.  If no expansion was performed,
            return C{None}.
        @rtype: C{CFGProduction} or C{None}
        """
        LEAF = self.property('LEAF')  # NOTE(review): unused local
        assert chktype(1, production, CFGProduction, types.NoneType)

        # Make sure we *can* expand.
        if len(self._frontier) == 0:
            return None
        if not isinstance(self._tree[self._frontier[0]],
                          Tree):
            return None

        # If they didn't specify a production, check all untried ones.
        if production is None:
            productions = self.untried_expandable_productions()
        else: productions = [production]

        parses = []  # NOTE(review): unused local
        for prod in productions:
            # Record that we've tried this production now.
            self._tried_e.setdefault(self._freeze(self._tree), []).append(prod)

            # Try expanding.
            if self._expand(self._rtext, self._tree, self._frontier, prod):
                return prod

        # We didn't expand anything.
        return None

    def match(self):
        """
        Match the first element of the frontier.  In particular, if
        the first element of the frontier has the same type as the
        next text token, then substitute the text token into the tree.

        @return: The token matched, if a match operation was
            performed.  If no match was performed, return C{None}
        @rtype: C{Token} or C{None}
        """
        LEAF = self.property('LEAF')  # NOTE(review): unused local

        # Record that we've tried matching this token.
        tok = self._rtext[0]
        self._tried_m.setdefault(self._freeze(self._tree), []).append(tok)

        # Make sure we *can* match.
        if len(self._frontier) == 0:
            return None
        if isinstance(self._tree[self._frontier[0]], Tree):
            return None

        if self._match(self._rtext, self._tree, self._frontier):
            # Return the token we just matched.
            return self._history[-1][0][0]
        else:
            return None

    def backtrack(self):
        """
        Return the parser to its state before the most recent
        match or expand operation.  Calling C{undo} repeatedly return
        the parser to successively earlier states.  If no match or
        expand operations have been performed, C{undo} will make no
        changes.

        @return: true if an operation was successfully undone.
        @rtype: C{boolean}
        """
        if len(self._history) == 0: return 0
        (self._rtext, self._tree, self._frontier) = self._history.pop()
        return 1

    def expandable_productions(self):
        """
        @return: A list of all the productions for which expansions
            are available for the current parser state.
        @rtype: C{list} of C{CFGProduction}
        """
        # Make sure we *can* expand.
        if len(self._frontier) == 0: return []
        frontier_child = self._tree[self._frontier[0]]
        if (len(self._frontier) == 0 or
            not isinstance(frontier_child, Tree)):
            return []

        return [p for p in self._grammar.productions()
                if p.lhs().symbol() == frontier_child.node]

    def untried_expandable_productions(self):
        """
        @return: A list of all the untried productions for which
            expansions are available for the current parser state.
        @rtype: C{list} of C{CFGProduction}
        """
        LEAF = self.property('LEAF')  # NOTE(review): unused local
        tried_expansions = self._tried_e.get(self._freeze(self._tree), [])
        return [p for p in self.expandable_productions()
                if p not in tried_expansions]

    def untried_match(self):
        """
        @return: Whether the first element of the frontier is a token
            that has not yet been matched.
        @rtype: C{boolean}
        """
        LEAF = self.property('LEAF')  # NOTE(review): unused local
        if len(self._rtext) == 0: return 0
        tried_matches = self._tried_m.get(self._freeze(self._tree), [])
        return (self._rtext[0] not in tried_matches)

    def currently_complete(self):
        """
        @return: Whether the parser's current state represents a
            complete parse.
        @rtype: C{boolean}
        """
        return (len(self._frontier) == 0 and len(self._rtext) == 0)

    def _parse(self, remaining_text, tree, frontier):
        """
        A stub version of C{_parse} that sets the parsers current
        state to the given arguments.  In C{RecursiveDescentParser},
        the C{_parse} method is used to recursively continue parsing a
        text.  C{SteppingRecursiveDescentParser} overrides it to
        capture these recursive calls.  It records the parser's old
        state in the history (to allow for backtracking), and updates
        the parser's new state using the given arguments.  Finally, it
        returns C{[1]}, which is used by C{match} and C{expand} to
        detect whether their operations were successful.

        @return: C{[1]}
        @rtype: C{list} of C{int}
        """
        self._history.append( (self._rtext, self._tree, self._frontier) )
        self._rtext = remaining_text
        self._tree = tree
        self._frontier = frontier

        # Is it a good parse? If so, record it.
        if (len(frontier) == 0 and len(remaining_text) == 0):
            self._parses.append(tree)
            self._trace_succeed(self._tree, self._frontier)

        return [1]

    def parses(self):
        """
        @return: A list of the parses that have been found by this
            parser so far.
        @rtype: C{list} of C{Tree}
        """
        return self._parses
##//////////////////////////////////////////////////////
## Demonstration Code
##//////////////////////////////////////////////////////
def demo():
"""
A demonstration of the parsers defined by nltk.parser. The user
is prompted to select which parser to run, and that parser is run
on an example sentence with a simple grammar.
"""
# Define some nonterminals
S, VP, NP, PP = nonterminals('S, VP, NP, PP')
V, N, P, Name, Det = nonterminals('V, N, P, Name, Det')
# Define a grammar.
productions = (
# Syntactic Productions
CFGProduction(S, [NP, 'saw', NP]),
CFGProduction(S, [NP, VP]),
CFGProduction(NP, [Det, N]),
CFGProduction(VP, [V, NP, PP]),
CFGProduction(NP, [Det, N, PP]),
CFGProduction(PP, [P, NP]),
# Lexical Productions
CFGProduction(NP, ['I']), CFGProduction(Det, ['the']),
CFGProduction(Det, ['a']), CFGProduction(N, ['man']),
CFGProduction(V, ['saw']), CFGProduction(P, ['in']),
CFGProduction(P, ['with']), CFGProduction(N, ['park']),
CFGProduction(N, ['dog']), CFGProduction(N, ['telescope'])
)
grammar = CFG(S, productions)
# Tokenize a sample sentence.
sent = Token(TEXT='I saw a man in the park')
from nltk.tokenizer import WhitespaceTokenizer
WhitespaceTokenizer(SUBTOKENS='WORDS').tokenize(sent)
# Define a list of parsers.
parsers = [ShiftReduceParser(grammar, LEAF='TEXT', SUBTOKENS='WORDS'),
RecursiveDescentParser(grammar, LEAF='TEXT', SUBTOKENS='WORDS'),
SteppingShiftReduceParser(grammar, LEAF='TEXT', SUBTOKENS='WORDS'),
SteppingRecursiveDescentParser(grammar, LEAF='TEXT', SUBTOKENS='WORDS')]
# Ask the user to choose a parser.
import sys
print 'Choose a parser:'
for i in range(len(parsers)):
print ' %d. %s' % (i+1, parsers[i].__class__.__name__)
print '=> ',
try: parser = parsers[int(sys.stdin.readline())-1]
except: print 'Bad input'; return
# Run the parser.
parser.trace()
for p in parser.get_parse_list(sent):
print p
if __name__ == '__main__': demo()
| {
"content_hash": "f23b3c001091ef08ebbb6863a66afb47",
"timestamp": "",
"source": "github",
"line_count": 1330,
"max_line_length": 87,
"avg_line_length": 39.36766917293233,
"alnum_prop": 0.5941480929735098,
"repo_name": "ronaldahmed/SLAM-for-ugv",
"id": "4bfbdb2dfa3a4fca2589df3c1787c191f09044eb",
"size": "52728",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neural-navigation-with-lstm/MARCO/nltk/parser/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "50665"
},
{
"name": "C++",
"bytes": "9297"
},
{
"name": "M",
"bytes": "494"
},
{
"name": "Makefile",
"bytes": "15281"
},
{
"name": "Matlab",
"bytes": "113735"
},
{
"name": "Python",
"bytes": "2870079"
}
],
"symlink_target": ""
} |
from twisted.internet import protocol
from twisted.protocols.basic import LineReceiver
from twisted.python import log
from signing.processor import NoSuchCommand, WrongNumberOfArguments
class InvalidSignature(Exception):
    """Error indicating that a received line failed signature validation.

    NOTE(review): not raised anywhere in this module; presumably intended
    for validator implementations -- confirm against callers.
    """
    pass
class SignedProtocol(LineReceiver):
    """
    Simple line receiver that receives a public identifier upon connection.
    All subsequent lines of data are verified to be signed (based on the
    implementation of self.signer) before being split and passed to the
    processor.  Any response is signed with signer, and returned to the
    client.
    """
    delimiter = '\n'

    def __init__(self, processor, signer, validator):
        self.processor = processor
        self.signer = signer
        self.validator = validator

    def connectionMade(self):
        # No identifier yet: the very first line of the connection is the
        # client's public key.
        log.msg('received connection')
        self.clientkey = None

    def lineReceived(self, line):
        log.msg('received line %s' % line)
        if self.clientkey is None:
            # Handshake: remember the client's key and expose it to the
            # processor as an implicit first argument.
            self.clientkey = line
            self.processor.preargs = [self.clientkey]
            return
        if self.validator.validates(self.clientkey, line):
            payload = self.validator.removeSignature(self.clientkey, line)
            self.validDataReceived(payload)
        else:
            log.msg('client %s failed validation of %s' % (self.clientkey, line))
            self.sendSignedLine('invalid')

    def validDataReceived(self, line):
        """
        Break the line up into command and arguments, and attempt to execute it.
        """
        log.msg('received valid data %s' % line)
        words = line.split()
        # First word is the command, the rest is its argument list.
        d = self.processor.process(words[0], words[1:])
        d.addErrback(self.passClientErrors)
        d.addCallback(self.sendSignedLine)
        d.addErrback(log.err)
        return d

    def passClientErrors(self, failure):
        # Client-caused errors are reported back as text; anything else
        # propagates (trap re-raises unexpected failure types).
        log.msg(failure.value)
        failure.trap(NoSuchCommand, WrongNumberOfArguments)
        return repr(failure.value)

    def sendSignedLine(self, data):
        # Empty responses are suppressed rather than signed and sent.
        if data != '':
            self.sendLine(self.signer.sign(data))
class SignedProtocolFactory(protocol.Factory):
    """Factory producing SignedProtocol instances that share a single
    processor/signer/validator triple."""

    protocol = SignedProtocol

    def __init__(self, processor, signer, validator):
        self.params = (processor, signer, validator)

    def buildProtocol(self, *args):
        # Ignore the address argument; every connection gets the same
        # collaborators.
        instance = self.protocol(*self.params)
        instance.factory = self
        return instance
| {
"content_hash": "a1fdc185b775feb9e7146aa8842365f6",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 88,
"avg_line_length": 33.47142857142857,
"alnum_prop": 0.6598378147673922,
"repo_name": "nkrowlan/signing-server",
"id": "460c93eac207c40e3076848daac8464c22132ce3",
"size": "2403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "signing/signedprotocol.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19087"
}
],
"symlink_target": ""
} |
from datetime import datetime
from .virtualtimescheduler import VirtualTimeScheduler
class HistoricalScheduler(VirtualTimeScheduler):
    """Virtual time scheduler whose absolute time is a ``datetime`` and
    whose relative time is a ``timedelta``."""

    def __init__(self, initial_clock=None, comparer=None):
        """Create a new historical scheduler.

        Keyword arguments:
        initial_clock -- Initial value for the clock (defaults to the epoch).
        comparer -- Comparer used to determine causality of events based on
            absolute time (defaults to plain datetime ordering)."""
        start = initial_clock or datetime.fromtimestamp(0)

        def default_comparer(a, b):
            # Three-way comparison: -1, 0 or 1.
            return (a > b) - (a < b)

        super(HistoricalScheduler, self).__init__(
            start, comparer or default_comparer)

    def now(self):
        """Notion of time for this scheduler: tasks adhere to the virtual
        clock value."""
        return self.clock

    @staticmethod
    def add(absolute, relative):
        """Return the absolute virtual time ``absolute`` shifted by the
        relative ``timedelta`` value ``relative``."""
        return absolute + relative

    def to_datetime_offset(self, absolute):
        """Absolute virtual time is already a datetime; return it as-is."""
        return absolute

    def to_relative(self, timespan):
        """Relative virtual time is already a timedelta; return it as-is."""
        return timespan
| {
"content_hash": "86878cb82581f7fa28dfcf68b14cd0d8",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 80,
"avg_line_length": 32.67796610169491,
"alnum_prop": 0.6633817427385892,
"repo_name": "dbrattli/RxPY",
"id": "861cb9b1fdacfbdfa3d1405e5beab63f43755ecb",
"size": "1928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rx/concurrency/historicalscheduler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1334787"
}
],
"symlink_target": ""
} |
"""
This package contains functions for reading and writing HDF5 tables that are
not meant to be used directly, but instead are available as readers/writers in
`astropy.table`. See :ref:`table_io` for more details.
"""
import os
import warnings
import numpy as np
# NOTE: Do not import anything from astropy.table here.
# https://github.com/astropy/astropy/issues/6604
from astropy.utils.exceptions import AstropyUserWarning, AstropyDeprecationWarning
HDF5_SIGNATURE = b'\x89HDF\r\n\x1a\n'
META_KEY = '__table_column_meta__'
__all__ = ['read_table_hdf5', 'write_table_hdf5']
def meta_path(path):
    """Return the name of the sidecar dataset that stores serialized
    metadata for the table at ``path``."""
    return f'{path}.{META_KEY}'
def _find_all_structured_arrays(handle):
    """
    Find all structured arrays in an HDF5 file
    """
    import h5py

    found = []

    def _visitor(name, obj):
        # Structured (compound) datasets have a 'V' (void) dtype kind.
        if isinstance(obj, h5py.Dataset) and obj.dtype.kind == 'V':
            found.append(name)

    handle.visititems(_visitor)
    return found
def is_hdf5(origin, filepath, fileobj, *args, **kwargs):
    """Identify HDF5 input by file signature, filename extension, or
    h5py object type (in that order of preference)."""
    if fileobj is not None:
        # Peek at the first 8 bytes, restoring the stream position.
        pos = fileobj.tell()
        try:
            head = fileobj.read(8)
        finally:
            fileobj.seek(pos)
        return head == HDF5_SIGNATURE

    if filepath is not None:
        return filepath.endswith(('.hdf5', '.h5'))

    # No file to inspect: fall back to checking whether the first
    # positional argument is an h5py object.
    try:
        import h5py
    except ImportError:
        return False
    return isinstance(args[0], (h5py.File, h5py.Group, h5py.Dataset))
def read_table_hdf5(input, path=None, character_as_bytes=True):
    """
    Read a Table object from an HDF5 file

    This requires `h5py <http://www.h5py.org/>`_ to be installed. If more than one
    table is present in the HDF5 file or group, the first table is read in and
    a warning is displayed.

    Parameters
    ----------
    input : str or :class:`h5py:File` or :class:`h5py:Group` or
        :class:`h5py:Dataset` If a string, the filename to read the table from.
        If an h5py object, either the file or the group object to read the
        table from.
    path : str
        The path from which to read the table inside the HDF5 file.
        This should be relative to the input file or group.
    character_as_bytes : bool
        If `True` then Table columns are left as bytes.
        If `False` then Table columns are converted to unicode.
    """
    try:
        import h5py
    except ImportError:
        raise Exception("h5py is required to read and write HDF5 files")

    # This function is iterative, and only gets to writing the file when
    # the input is an hdf5 Group. Moreover, the input variable is changed in
    # place.
    # Here, we save its value to be used at the end when the conditions are
    # right.
    input_save = input
    if isinstance(input, (h5py.File, h5py.Group)):

        # If a path was specified, follow the path
        if path is not None:
            try:
                input = input[path]
            except (KeyError, ValueError):
                raise OSError(f"Path {path} does not exist")

        # `input` is now either a group or a dataset. If it is a group, we
        # will search for all structured arrays inside the group, and if there
        # is one we can proceed otherwise an error is raised. If it is a
        # dataset, we just proceed with the reading.
        if isinstance(input, h5py.Group):

            # Find all structured arrays in group
            arrays = _find_all_structured_arrays(input)

            if len(arrays) == 0:
                raise ValueError("no table found in HDF5 group {}".
                                 format(path))
            elif len(arrays) > 0:
                # Pick the first table found; warn when the choice was
                # ambiguous (multiple tables, no explicit path).
                path = arrays[0] if path is None else path + '/' + arrays[0]
                if len(arrays) > 1:
                    warnings.warn("path= was not specified but multiple tables"
                                  " are present, reading in first available"
                                  " table (path={})".format(path),
                                  AstropyUserWarning)
                return read_table_hdf5(input, path=path)

    elif not isinstance(input, h5py.Dataset):

        # If a file object was passed, then we need to extract the filename
        # because h5py cannot properly read in file objects.
        if hasattr(input, 'read'):
            try:
                input = input.name
            except AttributeError:
                raise TypeError("h5py can only open regular files")

        # Open the file for reading, and recursively call read_table_hdf5 with
        # the file object and the path.
        f = h5py.File(input, 'r')

        try:
            return read_table_hdf5(f, path=path, character_as_bytes=character_as_bytes)
        finally:
            f.close()

    # If we are here, `input` should be a Dataset object, which we can now
    # convert to a Table.

    # Create a Table object
    from astropy.table import Table, meta, serialize

    table = Table(np.array(input))

    # Read the meta-data from the file. For back-compatibility, we can read
    # the old file format where the serialized metadata were saved in the
    # attributes of the HDF5 dataset.
    # In the new format, instead, metadata are stored in a new dataset in the
    # same file. This is introduced in Astropy 3.0
    old_version_meta = META_KEY in input.attrs
    new_version_meta = path is not None and meta_path(path) in input_save
    if old_version_meta or new_version_meta:
        if new_version_meta:
            header = meta.get_header_from_yaml(
                h.decode('utf-8') for h in input_save[meta_path(path)])
        elif old_version_meta:
            header = meta.get_header_from_yaml(
                h.decode('utf-8') for h in input.attrs[META_KEY])
        if 'meta' in list(header.keys()):
            table.meta = header['meta']

        header_cols = dict((x['name'], x) for x in header['datatype'])
        for col in table.columns.values():
            for attr in ('description', 'format', 'unit', 'meta'):
                if attr in header_cols[col.name]:
                    setattr(col, attr, header_cols[col.name][attr])

        # Construct new table with mixins, using tbl.meta['__serialized_columns__']
        # as guidance.
        table = serialize._construct_mixins_from_columns(table)

    else:
        # Read the meta-data from the file
        table.meta.update(input.attrs)

    if not character_as_bytes:
        table.convert_bytestring_to_unicode()

    return table
def _encode_mixins(tbl):
    """Encode a Table ``tbl`` that may have mixin columns to a Table with only
    astropy Columns + appropriate meta-data to allow subsequent decoding.
    """
    from astropy.table import serialize
    from astropy.table.table import has_info_class
    from astropy import units as u
    from astropy.utils.data_info import MixinInfo, serialize_context_as

    # Without PyYAML, only Quantity mixins can be serialized (HDF5 already
    # supports those natively); any other mixin column is a hard error.
    # This keeps backward compatibility: YAML is not required just to
    # write a Quantity.
    try:
        import yaml  # noqa
    except ImportError:
        for col in tbl.itercols():
            is_unsupported_mixin = (has_info_class(col, MixinInfo)
                                    and col.__class__ is not u.Quantity)
            if is_unsupported_mixin:
                raise TypeError("cannot write type {} column '{}' "
                                "to HDF5 without PyYAML installed."
                                .format(col.__class__.__name__, col.info.name))

    # Represent mixins as plain Columns; the serialization context adds the
    # meta-data needed for later decoding.
    with serialize_context_as('hdf5'):
        return serialize.represent_mixins_as_columns(tbl)
def write_table_hdf5(table, output, path=None, compression=False,
                     append=False, overwrite=False, serialize_meta=False):
    """
    Write a Table object to an HDF5 file

    This requires `h5py <http://www.h5py.org/>`_ to be installed.

    Parameters
    ----------
    table : `~astropy.table.Table`
        Data table that is to be written to file.
    output : str or :class:`h5py:File` or :class:`h5py:Group`
        If a string, the filename to write the table to. If an h5py object,
        either the file or the group object to write the table to.
    path : str
        The path to which to write the table inside the HDF5 file.
        This should be relative to the input file or group.
        If not specified, defaults to ``__astropy_table__``.
    compression : bool or str or int
        Whether to compress the table inside the HDF5 file. If set to `True`,
        ``'gzip'`` compression is used. If a string is specified, it should be
        one of ``'gzip'``, ``'szip'``, or ``'lzf'``. If an integer is
        specified (in the range 0-9), ``'gzip'`` compression is used, and the
        integer denotes the compression level.
    append : bool
        Whether to append the table to an existing HDF5 file.
    overwrite : bool
        Whether to overwrite any existing file without warning.
        If ``append=True`` and ``overwrite=True`` then only the dataset will be
        replaced; the file/group will not be overwritten.
    serialize_meta : bool
        Whether to serialize rich table meta-data (units, formats,
        descriptions, mixins) into a sidecar dataset.
    """
    from astropy.table import meta
    try:
        import h5py
    except ImportError:
        raise Exception("h5py is required to read and write HDF5 files")

    if path is None:
        # table is just an arbitrary, hardcoded string here.
        path = '__astropy_table__'
    elif path.endswith('/'):
        raise ValueError("table path should end with table name, not /")

    # Split the path into the enclosing group and the dataset name.
    if '/' in path:
        group, name = path.rsplit('/', 1)
    else:
        group, name = None, path

    if isinstance(output, (h5py.File, h5py.Group)):
        # Using the default path inside a non-empty file/group is almost
        # certainly a caller mistake -- require an explicit path.
        if len(list(output.keys())) > 0 and name == '__astropy_table__':
            raise ValueError("table path should always be set via the "
                             "path= argument when writing to existing "
                             "files")
        elif name == '__astropy_table__':
            warnings.warn("table path was not set via the path= argument; "
                          "using default path {}".format(path))

        if group:
            try:
                output_group = output[group]
            except (KeyError, ValueError):
                output_group = output.create_group(group)
        else:
            output_group = output

    elif isinstance(output, str):

        if os.path.exists(output) and not append:
            if overwrite and not append:
                os.remove(output)
            else:
                raise OSError(f"File exists: {output}")

        # Open the file for appending or writing
        f = h5py.File(output, 'a' if append else 'w')

        # Recursively call the write function
        try:
            return write_table_hdf5(table, f, path=path,
                                    compression=compression, append=append,
                                    overwrite=overwrite,
                                    serialize_meta=serialize_meta)
        finally:
            f.close()

    else:
        raise TypeError('output should be a string or an h5py File or '
                        'Group object')

    # Check whether table already exists
    if name in output_group:
        if append and overwrite:
            # Delete only the dataset itself
            del output_group[name]
        else:
            raise OSError(f"Table {path} already exists")

    # Encode any mixin columns as plain columns + appropriate metadata
    table = _encode_mixins(table)

    # Table with numpy unicode strings can't be written in HDF5 so
    # to write such a table a copy of table is made containing columns as
    # bytestrings.  Now this copy of the table can be written in HDF5.
    if any(col.info.dtype.kind == 'U' for col in table.itercols()):
        table = table.copy(copy_data=False)
        table.convert_unicode_to_bytestring()

    # Warn if information will be lost when serialize_meta=False.  This is
    # hardcoded to the set difference between column info attributes and what
    # HDF5 can store natively (name, dtype) with no meta.
    if serialize_meta is False:
        for col in table.itercols():
            for attr in ('unit', 'format', 'description', 'meta'):
                if getattr(col.info, attr, None) not in (None, {}):
                    warnings.warn("table contains column(s) with defined 'unit', 'format',"
                                  " 'description', or 'meta' info attributes. These will"
                                  " be dropped since serialize_meta=False.",
                                  AstropyUserWarning)

    # Write the table to the file
    if compression:
        if compression is True:
            compression = 'gzip'
        dset = output_group.create_dataset(name, data=table.as_array(),
                                           compression=compression)
    else:
        dset = output_group.create_dataset(name, data=table.as_array())

    if serialize_meta:
        header_yaml = meta.get_yaml_from_table(table)

        header_encoded = [h.encode('utf-8') for h in header_yaml]
        output_group.create_dataset(meta_path(name),
                                    data=header_encoded)

    else:
        # Write the Table meta dict key:value pairs to the file as HDF5
        # attributes.  This works only for a limited set of scalar data types
        # like numbers, strings, etc., but not any complex types.  This path
        # also ignores column meta like unit or format.
        for key in table.meta:
            val = table.meta[key]

            try:
                dset.attrs[key] = val
            except TypeError:
                warnings.warn("Attribute `{}` of type {} cannot be written to "
                              "HDF5 files - skipping. (Consider specifying "
                              "serialize_meta=True to write all meta data)".format(key, type(val)),
                              AstropyUserWarning)
def register_hdf5():
    """
    Register HDF5 with Unified I/O.
    """
    from astropy.io import registry as io_registry
    from astropy.table import Table

    fmt = 'hdf5'
    io_registry.register_reader(fmt, Table, read_table_hdf5)
    io_registry.register_writer(fmt, Table, write_table_hdf5)
    io_registry.register_identifier(fmt, Table, is_hdf5)
| {
"content_hash": "9d5bf9759b9cb7bccc266acdeb452b07",
"timestamp": "",
"source": "github",
"line_count": 385,
"max_line_length": 99,
"avg_line_length": 37.99220779220779,
"alnum_prop": 0.6021740616667806,
"repo_name": "stargaser/astropy",
"id": "0312fdbd236e522631a1e689205cee9391e6d6d6",
"size": "14691",
"binary": false,
"copies": "1",
"ref": "refs/heads/placeholder",
"path": "astropy/io/misc/hdf5.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "444651"
},
{
"name": "C++",
"bytes": "1057"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Objective-C",
"bytes": "615"
},
{
"name": "Python",
"bytes": "9898387"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
import curses
class helper:
    """Thin convenience wrapper around a curses window.

    Presents (x, y) coordinate order to callers and translates to the
    (y, x) order that the underlying curses API expects.
    """

    def __init__(self, screen):
        self.screen = screen

    def get_character(self):
        """Block until a key is pressed and return its key code."""
        return self.screen.getch()

    def write_string(self, x, y, string):
        """Draw *string* starting at column *x*, row *y*."""
        # curses takes (row, column), i.e. (y, x).
        self.screen.addstr(y, x, string)

    def clear(self):
        """Erase the entire window buffer."""
        self.screen.clear()

    def refresh(self):
        """Flush pending drawing operations to the terminal."""
        self.screen.refresh()

    def move_cursor(self, x, y):
        """Place the cursor at column *x*, row *y*."""
        self.screen.move(y, x)
class screen:
    """Context manager for a curses session.

    Entering initializes curses, puts the terminal into interactive mode
    (keypad keys enabled, cbreak, no echo) and returns a ``helper``
    wrapping the main window; exiting restores the terminal state even
    if the body raised.
    """
    def __enter__(self):
        self.screen = curses.initscr()
        # Interpret special keys (arrows, function keys) as single values.
        self.screen.keypad( 1 )
        # Deliver keypresses immediately instead of waiting for Enter.
        curses.cbreak()
        # Do not echo typed characters back to the screen.
        curses.noecho()
        return helper( self.screen )
    def __exit__(self, type, value, traceback):
        # Undo __enter__'s changes and hand the terminal back to the shell.
        self.screen.keypad( 0 )
        curses.echo()
        curses.nocbreak()
        curses.endwin()
| {
"content_hash": "6f97466b072c4ef19242503dd81c9b1f",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 47,
"avg_line_length": 19.743589743589745,
"alnum_prop": 0.5584415584415584,
"repo_name": "Verdex/TNH-Level-Editor",
"id": "89ceafa05c5c28663b3f303c9ff12cf9bdf2f8b0",
"size": "772",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/render/curses_interface.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "102"
},
{
"name": "Python",
"bytes": "2869"
},
{
"name": "Rust",
"bytes": "12853"
}
],
"symlink_target": ""
} |
from openerp import models, fields, api, _
from openerp.tools.safe_eval import safe_eval as eval
from openerp.exceptions import UserError
class AccountInvoiceRefund(models.TransientModel):
    """Wizard that creates a refund (credit note) for selected invoices.

    Depending on ``filter_refund`` the refund is left as a draft
    ('refund'), validated and reconciled against the original invoice
    ('cancel'), or reconciled and followed by a fresh draft copy of the
    original invoice ('modify').
    """
    _name = "account.invoice.refund"
    _description = "Invoice Refund"
    @api.model
    def _get_reason(self):
        """Default for ``description``: the name of the active invoice, if any."""
        context = dict(self._context or {})
        active_id = context.get('active_id', False)
        if active_id:
            inv = self.env['account.invoice'].browse(active_id)
            return inv.name
        return ''
    # Document date to put on the refund itself.
    date_invoice = fields.Date(string='Refund Date', default=fields.Date.context_today, required=True)
    # Optional accounting date for the refund's journal entry.
    date = fields.Date(string='Accounting Date')
    description = fields.Char(string='Reason', required=True, default=_get_reason)
    # Computed flag (see _get_refund_only); per its label, the view uses it to
    # hide refund methods when the invoice is partially paid.
    refund_only = fields.Boolean(string='Technical field to hide filter_refund in case invoice is partially paid', compute='_get_refund_only')
    filter_refund = fields.Selection([('refund', 'Create a draft refund'), ('cancel', 'Cancel: create refund and reconcile'), ('modify', 'Modify: create refund, reconcile and create a new draft invoice')],
                            default='refund', string='Refund Method', required=True, help='Refund base on this type. You can not Modify and Cancel if the invoice is already reconciled')
    @api.depends('date_invoice')
    @api.one
    def _get_refund_only(self):
        """Set ``refund_only`` when the active invoice has payments but is not fully paid."""
        invoice_id = self.env['account.invoice'].browse(self._context.get('active_id',False))
        if len(invoice_id.payment_move_line_ids) != 0 and invoice_id.state != 'paid':
            self.refund_only = True
        else:
            self.refund_only = False
    @api.multi
    def compute_refund(self, mode='refund'):
        """Create refund(s) for the invoices in ``context['active_ids']``.

        :param mode: 'refund' (draft refund only), 'cancel' (refund,
            validate and reconcile) or 'modify' (like 'cancel', plus a new
            draft copy of the original invoice).
        :return: a window-action dict restricted to the created invoices,
            or True when no action XML id could be determined.
        :raises UserError: for draft/proforma/cancelled invoices, or when
            trying to cancel/modify an already reconciled invoice.
        """
        inv_obj = self.env['account.invoice']
        inv_tax_obj = self.env['account.invoice.tax']
        inv_line_obj = self.env['account.invoice.line']
        context = dict(self._context or {})
        xml_id = False
        for form in self:
            created_inv = []
            date = False
            description = False
            for inv in inv_obj.browse(context.get('active_ids')):
                # Refunds only make sense for posted invoices.
                if inv.state in ['draft', 'proforma2', 'cancel']:
                    raise UserError(_('Cannot refund draft/proforma/cancelled invoice.'))
                if inv.reconciled and mode in ('cancel', 'modify'):
                    raise UserError(_('Cannot refund invoice which is already reconciled, invoice should be unreconciled first. You can only refund this invoice.'))
                date = form.date or False
                description = form.description or inv.name
                refund = inv.refund(form.date_invoice, date, description, inv.journal_id.id)
                refund.compute_taxes()
                created_inv.append(refund.id)
                if mode in ('cancel', 'modify'):
                    # Collect the original invoice's move lines on the invoice
                    # account so they can be reconciled with the refund's.
                    movelines = inv.move_id.line_ids
                    to_reconcile_ids = {}
                    to_reconcile_lines = self.env['account.move.line']
                    for line in movelines:
                        if line.account_id.id == inv.account_id.id:
                            to_reconcile_lines += line
                            to_reconcile_ids.setdefault(line.account_id.id, []).append(line.id)
                        if line.reconciled:
                            line.remove_move_reconcile()
                    # Validate the refund, then reconcile it with the invoice.
                    refund.signal_workflow('invoice_open')
                    for tmpline in refund.move_id.line_ids:
                        if tmpline.account_id.id == inv.account_id.id:
                            to_reconcile_lines += tmpline
                    to_reconcile_lines.reconcile()
                    if mode == 'modify':
                        # Rebuild a fresh draft copy of the original invoice.
                        invoice = inv.read(
                            ['name', 'type', 'number', 'reference',
                             'comment', 'date_due', 'partner_id',
                             'partner_insite', 'partner_contact',
                             'partner_ref', 'payment_term_id', 'account_id',
                             'currency_id', 'invoice_line_ids', 'tax_line_ids',
                             'journal_id', 'date'])
                        invoice = invoice[0]
                        del invoice['id']
                        invoice_lines = inv_line_obj.browse(invoice['invoice_line_ids'])
                        invoice_lines = inv_obj.with_context(mode='modify')._refund_cleanup_lines(invoice_lines)
                        tax_lines = inv_tax_obj.browse(invoice['tax_line_ids'])
                        tax_lines = inv_obj._refund_cleanup_lines(tax_lines)
                        invoice.update({
                            'type': inv.type,
                            'date_invoice': form.date_invoice,
                            'state': 'draft',
                            'number': False,
                            'invoice_line_ids': invoice_lines,
                            'tax_line_ids': tax_lines,
                            'date': date,
                            'name': description,
                            'origin': inv.origin,
                            'fiscal_position_id': inv.fiscal_position_id.id,
                        })
                        # read() returns many2one values as (id, name) pairs;
                        # keep only the ids for create().
                        for field in ('partner_id', 'account_id', 'currency_id',
                                      'payment_term_id', 'journal_id'):
                            invoice[field] = invoice[field] and invoice[field][0]
                        inv_refund = inv_obj.create(invoice)
                        if inv_refund.payment_term_id.id:
                            inv_refund._onchange_payment_term_date_invoice()
                        created_inv.append(inv_refund.id)
                xml_id = (inv.type in ['out_refund', 'out_invoice']) and 'action_invoice_tree1' or \
                    (inv.type in ['in_refund', 'in_invoice']) and 'action_invoice_tree2'
                # Put the reason in the chatter
                subject = _("Invoice refund")
                body = description
                refund.message_post(body=body, subject=subject)
        if xml_id:
            result = self.env.ref('account.%s' % (xml_id)).read()[0]
            invoice_domain = eval(result['domain'])
            invoice_domain.append(('id', 'in', created_inv))
            result['domain'] = invoice_domain
            return result
        return True
    @api.multi
    def invoice_refund(self):
        """Button handler: run :meth:`compute_refund` with the chosen method."""
        data_refund = self.read(['filter_refund'])[0]['filter_refund']
        return self.compute_refund(data_refund)
| {
"content_hash": "dda6daad491e0ae1a4c85623a4f00320",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 205,
"avg_line_length": 52.023622047244096,
"alnum_prop": 0.5199031330407144,
"repo_name": "vileopratama/vitech",
"id": "33cba1f0984b0be3d2c61380d376b23c2684fe96",
"size": "6632",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "src/addons/account/wizard/account_invoice_refund.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "2125999"
},
{
"name": "HTML",
"bytes": "252393"
},
{
"name": "Java",
"bytes": "1840167"
},
{
"name": "JavaScript",
"bytes": "6176224"
},
{
"name": "Makefile",
"bytes": "19072"
},
{
"name": "Mako",
"bytes": "7659"
},
{
"name": "NSIS",
"bytes": "16782"
},
{
"name": "Python",
"bytes": "9438805"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "22312"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "11489"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Snapshot.identifier'
db.add_column(u'backup_snapshot', 'identifier',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'Snapshot.volume'
db.add_column(u'backup_snapshot', 'volume',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='backups', null=True, on_delete=models.SET_NULL, to=orm['physical.Volume']),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Snapshot.identifier'
db.delete_column(u'backup_snapshot', 'identifier')
# Deleting field 'Snapshot.volume'
db.delete_column(u'backup_snapshot', 'volume_id')
models = {
u'backup.backupgroup': {
'Meta': {'object_name': 'BackupGroup'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'backup.snapshot': {
'Meta': {'object_name': 'Snapshot'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'end_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'backup_environment'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Environment']"}),
'error': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
'export_path': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'backups'", 'null': 'True', 'to': u"orm['backup.BackupGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'backup_instance'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Instance']"}),
'is_automatic': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'purge_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'size': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'snapshopt_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'snapshot_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'start_at': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.IntegerField', [], {}),
'type': ('django.db.models.fields.IntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'volume': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'backups'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Volume']"})
},
u'physical.databaseinfra': {
'Meta': {'object_name': 'DatabaseInfra'},
'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'endpoint_dns': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_vm_created': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'name_stamp': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Plan']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'physical.diskoffering': {
'Meta': {'object_name': 'DiskOffering'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.engine': {
'Meta': {'ordering': "(u'engine_type__name', u'version')", 'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
'engine_upgrade_option': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Engine']"}),
'has_users': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'read_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'write_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'physical.enginetype': {
'Meta': {'ordering': "(u'name',)", 'object_name': 'EngineType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_in_memory': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environment': {
'Meta': {'object_name': 'Environment'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'migrate_environment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Environment']"}),
'min_of_zones': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.host': {
'Meta': {'object_name': 'Host'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'future_host': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'offering': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Offering']", 'null': 'True'}),
'os_description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'physical.instance': {
'Meta': {'unique_together': "((u'address', u'port'),)", 'object_name': 'Instance'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.DatabaseInfra']"}),
'dns': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'future_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Instance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'port': ('django.db.models.fields.IntegerField', [], {}),
'read_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shard': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'total_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'physical.offering': {
'Meta': {'object_name': 'Offering'},
'cpus': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'offerings'", 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'memory_size_mb': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.parameter': {
'Meta': {'ordering': "(u'engine_type__name', u'name')", 'unique_together': "((u'name', u'engine_type'),)", 'object_name': 'Parameter'},
'allowed_values': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'custom_method': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'dynamic': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'enginetype'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.plan': {
'Meta': {'object_name': 'Plan'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'plans'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'to': u"orm['physical.Engine']"}),
'engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'plans'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
'has_persistence': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_ha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_db_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'migrate_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Plan']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'provider': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'replication_topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'replication_topology'", 'null': 'True', 'to': u"orm['physical.ReplicationTopology']"}),
'stronger_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'main_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'weaker_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'weaker_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"})
},
u'physical.replicationtopology': {
'Meta': {'object_name': 'ReplicationTopology'},
'can_change_parameters': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_clone_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_reinstall_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_resize_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_switch_master': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_upgrade_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'class_path': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'replication_topologies'", 'symmetrical': 'False', 'to': u"orm['physical.Engine']"}),
'has_horizontal_scalability': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'replication_topologies'", 'blank': 'True', 'to': u"orm['physical.Parameter']"}),
'script': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'replication_topologies'", 'null': 'True', 'to': u"orm['physical.Script']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.script': {
'Meta': {'object_name': 'Script'},
'configuration': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initialization': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'start_database': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'start_replication': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.volume': {
'Meta': {'object_name': 'Volume'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'volumes'", 'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'total_size_kb': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used_size_kb': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['backup'] | {
"content_hash": "589ef8f84056979b45f428ab4921ef85",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 227,
"avg_line_length": 92.58921161825727,
"alnum_prop": 0.5614860625616205,
"repo_name": "globocom/database-as-a-service",
"id": "7a195b542ec250118e5c7fab8e5fc78bcc73da75",
"size": "22338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbaas/backup/migrations/0010_auto__add_field_snapshot_identifier__add_field_snapshot_volume.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "243568"
},
{
"name": "Dockerfile",
"bytes": "1372"
},
{
"name": "HTML",
"bytes": "310401"
},
{
"name": "JavaScript",
"bytes": "988830"
},
{
"name": "Makefile",
"bytes": "5199"
},
{
"name": "Python",
"bytes": "9674426"
},
{
"name": "Shell",
"bytes": "215115"
}
],
"symlink_target": ""
} |
import datetime
import json
import logging
import logging.config
import boto3
import watchtower
import etl.monitor
from etl.config import get_config_value
from etl.logs.formatter import JsonFormatter
# Module-level logger; the NullHandler avoids "no handler could be found"
# warnings when the host application has not configured logging.
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def add_cloudwatch_logging(prefix: str) -> None:
    """
    Attach a CloudWatch handler (with JSON formatting) to the root logger.

    Args:
        prefix: Top-level group of CloudWatch stream.
    """
    log_group = get_config_value("arthur_settings.logging.cloudwatch.log_group")
    today = datetime.datetime.utcnow()
    stream_name = f"{prefix}/{today.year}/{today.month}/{today.day}/{etl.monitor.Monitor.etl_id}"
    logger.info(f"Starting logging to CloudWatch stream '{log_group}/{stream_name}'")

    cloudwatch_handler = watchtower.CloudWatchLogHandler(
        boto3_client=boto3.client("logs"),
        log_group_name=log_group,
        log_group_retention_days=180,
        log_stream_name=stream_name,
        send_interval=10,
    )
    cloudwatch_handler.setLevel(
        get_config_value("arthur_settings.logging.cloudwatch.log_level")
    )
    # The extra "str()" gets around the meta class approach to store the etl_id.
    cloudwatch_handler.setFormatter(JsonFormatter(prefix, str(etl.monitor.Monitor.etl_id)))
    logging.getLogger().addHandler(cloudwatch_handler)
def tail_logs(prefix: str, start_time: datetime.datetime, filter_warnings=False) -> None:
    """
    Fetch log lines from CloudWatch, filtering for `prefix` as the environment.

    Args:
        prefix: Top-level group of CloudWatch stream.
        start_time: How far to go back when loading log lines.
        filter_warnings: Boolean to decide whether to keep only warnings (and above).
    """
    log_group = get_config_value("arthur_settings.logging.cloudwatch.log_group")
    logger.info(f"Searching log streams '{log_group}/{prefix}/*' (starting at '{start_time})'")

    pages = boto3.client("logs").get_paginator("filter_log_events").paginate(
        logGroupName=log_group,
        logStreamNamePrefix=prefix,
        startTime=int(start_time.timestamp() * 1000.0),
    )
    for page in pages:
        for event in page["events"]:
            message = json.loads(event["message"])
            # Drop DEBUG/INFO records when the caller only wants warnings.
            if filter_warnings and message["log_level"] in ("DEBUG", "INFO"):
                continue
            stream_name = event["logStreamName"]
            print(f"{stream_name} {message['gmtime']} {message['log_level']} {message['message']}")
            if "metrics" in message:
                print(f"{stream_name} {message['gmtime']} (metrics) {message['metrics']}")
| {
"content_hash": "a7ca11fe9d35f21ad183ec348be96768",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 99,
"avg_line_length": 38.27777777777778,
"alnum_prop": 0.6748911465892597,
"repo_name": "harrystech/arthur-redshift-etl",
"id": "647cd96f077dfd9bda402d30d3564ebfc762c9c0",
"size": "2756",
"binary": false,
"copies": "1",
"ref": "refs/heads/next",
"path": "python/etl/logs/cloudwatch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1894"
},
{
"name": "Dockerfile",
"bytes": "3430"
},
{
"name": "HTML",
"bytes": "1551"
},
{
"name": "JavaScript",
"bytes": "5280"
},
{
"name": "PLpgSQL",
"bytes": "536"
},
{
"name": "Python",
"bytes": "578354"
},
{
"name": "Ruby",
"bytes": "82"
},
{
"name": "Shell",
"bytes": "87818"
}
],
"symlink_target": ""
} |
from openre.agent.decorators import action
import logging
@action(namespace='server')
def ping(event):
    """Liveness check: always answers 'pong'."""
    return 'pong'
@action(namespace='server')
def exception(event):
    """Raise deliberately, to exercise the agent's error propagation."""
    raise Exception('Test exception')
@action(namespace='server')
def check_args(event, *args, **kwargs):
    """Echo back the received positional and keyword arguments."""
    return {'args': args, 'kwargs': kwargs}
@action(namespace='server')
def debug(event):
    # Log event.data at DEBUG level (lazy %-formatting: only rendered if enabled).
    logging.debug('Debug message: %s', event.data)
@action(namespace='server')
def error(event):
    # Log event.data at ERROR level (lazy %-formatting: only rendered if enabled).
    logging.error('Error message: %s', event.data)
@action(namespace='server')
def warn(event):
    # Log event.data at WARNING level.
    # Fix: use logging.warning -- logging.warn is a deprecated alias and emits
    # a DeprecationWarning on modern Pythons; the log output is unchanged.
    logging.warning('Warn message: %s', event.data)
| {
"content_hash": "5e05bbeb2b8d1e0446a295176fa8d1a0",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 50,
"avg_line_length": 23.653846153846153,
"alnum_prop": 0.7040650406504065,
"repo_name": "openre/openre",
"id": "8e142c763fdb8d924ffaccc126ec69589aa927d1",
"size": "640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openre/agent/server/action/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "17920"
},
{
"name": "Python",
"bytes": "389791"
}
],
"symlink_target": ""
} |
import os
import urllib
from google.appengine.api import users
from google.appengine.ext import ndb
import jinja2
import webapp2
import models
#
# Jinja2 environment that loads templates relative to this file's directory,
# with HTML autoescaping enabled.
JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    extensions=['jinja2.ext.autoescape'],
    autoescape=True)

# A constant representing the default guestbook name.
DEFAULT_GUESTBOOK_NAME = 'default_guestbook'
def guestbook_key(guestbook_name=DEFAULT_GUESTBOOK_NAME):
    """Return the Datastore ancestor key for the guestbook *guestbook_name*.

    All greetings of one guestbook share this key as their parent, so that
    ancestor queries over the book are strongly consistent.
    """
    return ndb.Key('Guestbook', guestbook_name)
# Las clases que heredan de RequestHandler se encargan de procesar
# la solicitud y construir una respuesta.
class MainPage( webapp2.RequestHandler ):
    """Guestbook request handler.

    GET renders the 10 most recent greetings of the requested guestbook;
    POST stores a new greeting in that guestbook and redirects back to GET.
    """

    def get(self):
        self.response.write('<html><body>')
        # Bug fix: read the guestbook name from the query string (falling back
        # to the default), matching post() and the redirect it issues.  The
        # previous code assigned the literal string 'guestbook_name', so the
        # listing never showed the book that post() had written to.
        guestbook_name = self.request.get('guestbook_name',
                                          DEFAULT_GUESTBOOK_NAME)

        # Ancestor queries, as shown here, are strongly consistent with the
        # High Replication Datastore.  Queries that span entity groups are
        # only eventually consistent: without the ancestor, a greeting that
        # was just written might not appear in this query yet.
        greetings_query = models.Greeting.query(
            ancestor=guestbook_key(guestbook_name)).order(-models.Greeting.date)
        greetings = greetings_query.fetch(10)

        for greeting in greetings:
            if greeting.author:
                self.response.write(
                        '<b>%s</b> escribío:'.decode('utf-8') % greeting.author.nickname())
            else:
                self.response.write( 'Un anonimo escribío:'.decode('utf-8') )
            self.response.write('<blockquote>%s</blockquote>' % greeting.content)

        if users.get_current_user():
            url = users.create_logout_url(self.request.uri)
            url_linktext = 'Logout'
        else:
            url = users.create_login_url(self.request.uri)
            url_linktext = 'Login'

        template_values = {
            'greetings': greetings,
            'guestbook_name': urllib.quote_plus(guestbook_name),
            'url': url,
            'url_linktext': url_linktext,
        }

        template = JINJA_ENVIRONMENT.get_template('/app/views/index.html')
        self.response.write(template.render(template_values))

    def post(self):
        # Set the same parent key on each Greeting so that all greetings of a
        # guestbook live in one entity group.  Queries within a single entity
        # group are strongly consistent; however, the write rate for a single
        # entity group should be limited to ~1/second.
        guestbook_name = self.request.get('guestbook_name', DEFAULT_GUESTBOOK_NAME)
        greeting = models.Greeting( parent=guestbook_key(guestbook_name) )

        if users.get_current_user():
            greeting.author = users.get_current_user()

        greeting.content = self.request.get('content')
        greeting.put()

        query_params = {'guestbook_name': guestbook_name}
        self.redirect('/?' + urllib.urlencode(query_params))
# A WSGIApplication instance that routes incoming requests to a handler
# based on the URL.
application = webapp2.WSGIApplication([
    ('/', MainPage),
    ('/sign', MainPage),
], debug=True)
| {
"content_hash": "34c3f7f1c8494b811c92ac4683f09195",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 86,
"avg_line_length": 34.065934065934066,
"alnum_prop": 0.7303225806451613,
"repo_name": "luillyfe/GAE",
"id": "6255b04d5e87fbf665e88e5ebc6071a9d2ef76b5",
"size": "3130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "17066"
},
{
"name": "JavaScript",
"bytes": "19972"
},
{
"name": "Python",
"bytes": "937220"
}
],
"symlink_target": ""
} |
"""
set up for wire scan for HF mode
"""
import bluesky.plans as bp
from bluesky.callbacks import LiveRaster
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import time
import epics
import os
import numpy
#matplotlib.pyplot.ticklabel_format(style='plain')
def get_stock_md():
    """Snapshot the beamline state for the scan's Start-document metadata.

    Reads the current energy, HF sample-stage position, white-beam slit,
    HFM mirror and SSA slit positions, and packs them into a nested dict.
    """
    return {
        'beamline_status': {'energy': energy.energy.position},
        'initial_sample_position': {'hf_stage_x': hf_stage.x.position,
                                    'hf_stage_y': hf_stage.y.position,
                                    'hf_stage_z': hf_stage.z.position},
        'wb_slits': {'v_gap': slt_wb.v_gap.position,
                     'h_gap': slt_wb.h_gap.position,
                     'v_cen': slt_wb.v_cen.position,
                     'h_cen': slt_wb.h_cen.position},
        'hfm': {'y': hfm.y.position,
                'bend': hfm.bend.position},
        'ssa_slits': {'v_gap': slt_ssa.v_gap.position,
                      'h_gap': slt_ssa.h_gap.position,
                      'v_cen': slt_ssa.v_cen.position,
                      'h_cen': slt_ssa.h_cen.position},
    }
def get_stock_md_xfm():
    """Snapshot the beamline state for XFM-stage scans.

    Same as get_stock_md(), but records the 27a sample stage (``stage``)
    coordinates instead of the HF stage.
    """
    return {
        'beamline_status': {'energy': energy.energy.position},
        'initial_sample_position': {'stage27a_x': stage.x.position,
                                    'stage27a_y': stage.y.position,
                                    'stage27a_z': stage.z.position},
        'wb_slits': {'v_gap': slt_wb.v_gap.position,
                     'h_gap': slt_wb.h_gap.position,
                     'v_cen': slt_wb.v_cen.position,
                     'h_cen': slt_wb.h_cen.position},
        'hfm': {'y': hfm.y.position,
                'bend': hfm.bend.position},
        'ssa_slits': {'v_gap': slt_ssa.v_gap.position,
                      'h_gap': slt_ssa.h_gap.position,
                      'v_cen': slt_ssa.v_cen.position,
                      'h_cen': slt_ssa.h_cen.position},
    }
def hf2dwire(*, xstart, xnumstep, xstepsize,
             zstart, znumstep, zstepsize,
             acqtime, numrois=1, i0map_show=True, itmap_show=False,
             energy=None, u_detune=None):
    '''
    2D grid scan of a wire target, rastering hf_stage.x against hf_stage.z.

    input:
        xstart, xnumstep, xstepsize (float)
        zstart, znumstep, zstepsize (float)
        acqtime (float): acquisition time to be set for both xspress3 and F460
        numrois (integer): number of ROIs to display in the live raster scans.
                           This is for display ONLY; the number of ROIs saved depends on how many
                           are enabled/set in the read_attrs.  Normally only the raw XRF spectra
                           matter, which are all saved and later used for fitting.
        i0map_show (boolean): when True, display a live raster of i0 (currently commented out below)
        itmap_show (boolean): when True, display a live raster of the transmission diode
                              (currently commented out below)
        energy (float): target energy; use with caution, hdcm might become misaligned
        u_detune (float): amount of undulator detune, in keV
    '''
    # Record relevant metadata in the Start document (get_stock_md is defined in this file).
    md = get_stock_md()

    # Set up the detector: preamp exposure, xspress3 frame time, total frame count.
    # TODO do this with configure
    current_preamp.exp_time.put(acqtime-0.09)
    xs.settings.acquire_time.put(acqtime)
    xs.total_points.put((xnumstep+1)*(znumstep+1))

#    det = [current_preamp, xs]
    det = [xs]

    # Set up the live callbacks (ROI raster maps + live table).
    livecallbacks = []
    livetableitem = [hf_stage.x, hf_stage.z, 'current_preamp_ch0', 'current_preamp_ch2', 'xs_channel1_rois_roi01_value']

    xstop = xstart + xnumstep*xstepsize
    zstop = zstart + znumstep*zstepsize

    print('xstop = '+str(xstop))
    print('zstop = '+str(zstop))

    for roi_idx in range(numrois):
        roi_name = 'roi{:02}'.format(roi_idx+1)
        roi_key = getattr(xs.channel1.rois, roi_name).value.name
        livetableitem.append(roi_key)

#        livetableitem.append('saturn_mca_rois_roi'+str(roi_idx)+'_net_count')
#        livetableitem.append('saturn_mca_rois_roi'+str(roi_idx)+'_count')
#        #roimap = LiveRaster((xnumstep, znumstep), 'saturn_mca_rois_roi'+str(roi_idx)+'_net_count', clim=None, cmap='viridis', xlabel='x', ylabel='y', extent=None)

        colormap = 'inferno'  # previous set = 'viridis'; NOTE(review): unused, cmap below is hard-coded

#        roimap = LiveRaster((znumstep, xnumstep), 'saturn_mca_rois_roi'+str(roi_idx)+'_count', clim=None, cmap='inferno',
#                            xlabel='x (mm)', ylabel='y (mm)', extent=[xstart, xstop, zstop, zstart])
#        roimap = myLiveRaster((znumstep+1, xnumstep+1), roi_key, clim=None, cmap='inferno', aspect='equal',
#                              xlabel='x (mm)', ylabel='y (mm)', extent=[xstart, xstop, zstop, zstart])
        roimap = LiveRaster((znumstep+1, xnumstep+1), roi_key, clim=None, cmap='inferno', aspect=0.01,
                            xlabel='x (mm)', ylabel='y (mm)', extent=[xstart, xstop, zstop, zstart])

#        liveplotfig = plt.figure('through focus')
#        roiplot = LivePlot(roi_key,x=hf_stage.x.name, fig=liveplotfig)

        livecallbacks.append(roimap)
#        livecallbacks.append(roiplot)

#    if i0map_show is True:
#        i0map = myLiveRaster((znumstep+1, xnumstep+1), 'current_preamp_ch2', clim=None, cmap='inferno',
#                             xlabel='x (mm)', ylabel='y (mm)', extent=[xstart, xstop, zstop, zstart])
#        livecallbacks.append(i0map)

#    if itmap_show is True:
#        itmap = myLiveRaster((znumstep+1, xnumstep+1), 'current_preamp_ch0', clim=None, cmap='inferno',
#                             xlabel='x (mm)', ylabel='y (mm)', extent=[xstart, xstop, zstop, zstart])
#        livecallbacks.append(itmap)

    # commented out liveTable in 2D scan for now until the prolonged time issue is resolved
    # NOTE(review): LiveTable is not imported in this file; presumably provided by an
    # earlier profile startup file -- confirm.
    livecallbacks.append(LiveTable(livetableitem))

    # Optionally move the mono/undulator before scanning.
    if energy is not None:
        if u_detune is not None:
            # TODO maybe do this with set
            # NOTE(review): the keyword argument `energy` (a float) shadows the global
            # energy device, so `energy.detune` and bp.abs_set(energy, energy) operate
            # on the float -- this branch looks broken; confirm before relying on it.
            energy.detune.put(u_detune)
        # TODO fix name shadowing
        yield from bp.abs_set(energy, energy, wait=True)

#    shut_b.open_cmd.put(1)
#    while (shut_b.close_status.get() == 1):
#        epics.poll(.5)
#        shut_b.open_cmd.put(1)

    # Snake grid scan: hf_stage.z is the slow axis, hf_stage.x the fast (snaked) axis.
    hf2dwire_scanplan = bp.grid_scan(det,
                                     hf_stage.z, zstart, zstop, znumstep+1,
                                     hf_stage.x, xstart, xstop, xnumstep+1, True,
                                     md=md)
    hf2dwire_scanplan = bp.subs_wrapper(hf2dwire_scanplan, livecallbacks)
    scaninfo = yield from hf2dwire_scanplan

#    shut_b.close_cmd.put(1)
#    while (shut_b.close_status.get() == 0):
#        epics.poll(.5)
#        shut_b.close_cmd.put(1)

    # Record the scan in the scan log.
    logscan('2dwire')
    return scaninfo
class myLiveRaster(CallbackBase):
    """Bluesky callback that fills in a 2-D raster image as events arrive.

    This simply wraps around an `AxesImage`; the event's ``seq_num`` is used
    to determine which pixel to fill in.

    Parameters
    ----------
    raster_shape : tuple
        The (row, col) shape of the raster
    I : str
        The field to use for the color of the markers
    clim : tuple, optional
        The color limits; autoscaled from the data when omitted
    cmap : str or colormap, optional
        The color map to use
    xlabel, ylabel : str, optional
        Axis labels
    extent : scalars (left, right, bottom, top), optional
        Passed through to ``Axes.imshow``
    """
    def __init__(self, raster_shape, I, *,
                 clim=None, cmap='viridis',
                 xlabel='x', ylabel='y', extent=None):
        fig, ax = plt.subplots()
        self.I = I
        ax.set_xlabel(xlabel)
        ax.set_ylabel(ylabel)
        # Hard-coded aspect ratio -- presumably tuned for this beamline's
        # typical scan ranges; TODO confirm.
        ax.set_aspect(.001)
        self.ax = ax
        self.fig = fig
        # Start with an all-NaN image so unfilled pixels render as blank.
        self._Idata = np.ones(raster_shape) * np.nan
        self._norm = mcolors.Normalize()
        if clim is not None:
            self._norm.vmin, self._norm.vmax = clim
        self.clim = clim
        self.cmap = cmap
        self.raster_shape = raster_shape
        self.im = None
        self.extent = extent

    def start(self, doc):
        # One instance per run: the image artist is created here, so reuse
        # would draw into a stale figure.
        if self.im is not None:
            raise RuntimeError("Can not re-use LiveRaster")
        self._Idata = np.ones(self.raster_shape) * np.nan
        im = self.ax.imshow(self._Idata, norm=self._norm,
                            cmap=self.cmap, interpolation='none',
                            extent=self.extent)
        self.im = im
        self.ax.set_title('scan {uid} [{sid}]'.format(sid=doc['scan_id'],
                                                      uid=doc['uid'][:6]))
        # Per-axis snaking flags from the start document (default: no snaking).
        self.snaking = doc.get('snaking', (False, False))

        cb = self.fig.colorbar(im)
        cb.set_label(self.I)

    def event(self, doc):
        # Ignore events that do not carry the field of interest.
        if self.I not in doc['data']:
            return

        # seq_num is 1-based; unravel it into a (row, col) pixel position.
        seq_num = doc['seq_num'] - 1
        pos = list(np.unravel_index(seq_num, self.raster_shape))
        # When the fast axis is snaked, odd rows are traversed right-to-left.
        if self.snaking[1] and (pos[0] % 2):
            pos[1] = self.raster_shape[1] - pos[1] - 1
        pos = tuple(pos)
        self._Idata[pos] = doc['data'][self.I]
        if self.clim is None:
            # Autoscale the color limits to the data seen so far.
            self.im.set_clim(np.nanmin(self._Idata), np.nanmax(self._Idata))

        self.im.set_array(self._Idata)
def hf2dwire_y(*, xstart, xnumstep, xstepsize,
               zstart, znumstep, zstepsize,
               acqtime, numrois=1, i0map_show=True, itmap_show=False,
               energy=None, u_detune=None):
    '''
    Variant of hf2dwire() that rasters hf_stage.y (instead of hf_stage.x)
    against hf_stage.z.

    NOTE(review): the x* parameter names are inherited from hf2dwire(); here
    they actually drive hf_stage.y.

    input:
        xstart, xnumstep, xstepsize (float): fast-axis (hf_stage.y) parameters
        zstart, znumstep, zstepsize (float)
        acqtime (float): acquisition time to be set for both xspress3 and F460
        numrois (integer): number of ROIs to display in the live raster scans.
                           This is for display ONLY; the number of ROIs saved depends on how many
                           are enabled/set in the read_attrs.  Normally only the raw XRF spectra
                           matter, which are all saved and later used for fitting.
        i0map_show (boolean): when True, display a live raster of i0 (currently commented out below)
        itmap_show (boolean): when True, display a live raster of the transmission diode
                              (currently commented out below)
        energy (float): target energy; use with caution, hdcm might become misaligned
        u_detune (float): amount of undulator detune, in keV
    '''
    # Record relevant metadata in the Start document (get_stock_md is defined in this file).
    md = get_stock_md()

    # Set up the detector: preamp exposure, xspress3 frame time, total frame count.
    # TODO do this with configure
    current_preamp.exp_time.put(acqtime-0.09)
    xs.settings.acquire_time.put(acqtime)
    xs.total_points.put((xnumstep+1)*(znumstep+1))

#    det = [current_preamp, xs]
    det = [xs]

    # Set up the live callbacks (ROI raster maps + live table).
    livecallbacks = []
    livetableitem = [hf_stage.y, hf_stage.z, 'current_preamp_ch0', 'current_preamp_ch2', 'xs_channel1_rois_roi01_value']

    xstop = xstart + xnumstep*xstepsize
    zstop = zstart + znumstep*zstepsize

    print('xstop = '+str(xstop))
    print('zstop = '+str(zstop))

    for roi_idx in range(numrois):
        roi_name = 'roi{:02}'.format(roi_idx+1)
        roi_key = getattr(xs.channel1.rois, roi_name).value.name
        livetableitem.append(roi_key)

#        livetableitem.append('saturn_mca_rois_roi'+str(roi_idx)+'_net_count')
#        livetableitem.append('saturn_mca_rois_roi'+str(roi_idx)+'_count')
#        #roimap = LiveRaster((xnumstep, znumstep), 'saturn_mca_rois_roi'+str(roi_idx)+'_net_count', clim=None, cmap='viridis', xlabel='x', ylabel='y', extent=None)

        colormap = 'inferno'  # previous set = 'viridis'; NOTE(review): unused, cmap below is hard-coded

#        roimap = LiveRaster((znumstep, xnumstep), 'saturn_mca_rois_roi'+str(roi_idx)+'_count', clim=None, cmap='inferno',
#                            xlabel='x (mm)', ylabel='y (mm)', extent=[xstart, xstop, zstop, zstart])
#        roimap = myLiveRaster((znumstep+1, xnumstep+1), roi_key, clim=None, cmap='inferno', aspect='equal',
#                              xlabel='x (mm)', ylabel='y (mm)', extent=[xstart, xstop, zstop, zstart])
        roimap = LiveRaster((znumstep+1, xnumstep+1), roi_key, clim=None, cmap='inferno', aspect=0.01,
                            xlabel='x (mm)', ylabel='y (mm)', extent=[xstart, xstop, zstop, zstart])

#        liveplotfig = plt.figure('through focus')
#        roiplot = LivePlot(roi_key,x=hf_stage.x.name, fig=liveplotfig)

        livecallbacks.append(roimap)
#        livecallbacks.append(roiplot)

#    if i0map_show is True:
#        i0map = myLiveRaster((znumstep+1, xnumstep+1), 'current_preamp_ch2', clim=None, cmap='inferno',
#                             xlabel='x (mm)', ylabel='y (mm)', extent=[xstart, xstop, zstop, zstart])
#        livecallbacks.append(i0map)

#    if itmap_show is True:
#        itmap = myLiveRaster((znumstep+1, xnumstep+1), 'current_preamp_ch0', clim=None, cmap='inferno',
#                             xlabel='x (mm)', ylabel='y (mm)', extent=[xstart, xstop, zstop, zstart])
#        livecallbacks.append(itmap)

    # commented out liveTable in 2D scan for now until the prolonged time issue is resolved
    # NOTE(review): LiveTable is not imported in this file; presumably provided by an
    # earlier profile startup file -- confirm.
    livecallbacks.append(LiveTable(livetableitem))

    # Optionally move the mono/undulator before scanning.
    if energy is not None:
        if u_detune is not None:
            # TODO maybe do this with set
            # NOTE(review): the keyword argument `energy` (a float) shadows the global
            # energy device, so `energy.detune` and bp.abs_set(energy, energy) operate
            # on the float -- this branch looks broken; confirm before relying on it.
            energy.detune.put(u_detune)
        # TODO fix name shadowing
        yield from bp.abs_set(energy, energy, wait=True)

#    shut_b.open_cmd.put(1)
#    while (shut_b.close_status.get() == 1):
#        epics.poll(.5)
#        shut_b.open_cmd.put(1)

    # Snake grid scan: hf_stage.z is the slow axis, hf_stage.y the fast (snaked) axis.
    hf2dwire_scanplan = bp.grid_scan(det,
                                     hf_stage.z, zstart, zstop, znumstep+1,
                                     hf_stage.y, xstart, xstop, xnumstep+1, True,
                                     md=md)
    hf2dwire_scanplan = bp.subs_wrapper( hf2dwire_scanplan, livecallbacks)
    scaninfo = yield from hf2dwire_scanplan

#    shut_b.close_cmd.put(1)
#    while (shut_b.close_status.get() == 0):
#        epics.poll(.5)
#        shut_b.close_cmd.put(1)

    # Record the scan in the scan log.
    logscan('2dwire')
    return scaninfo
| {
"content_hash": "e3c8356b70061db5f33aafcc4e1de557",
"timestamp": "",
"source": "github",
"line_count": 354,
"max_line_length": 164,
"avg_line_length": 42.443502824858754,
"alnum_prop": 0.5510815307820299,
"repo_name": "NSLS-II-SRX/ipython_ophyd",
"id": "db0e669c924d065985943a05e75838da3cfcaa2e",
"size": "15049",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "profile_xf05id1/startup/91-wirescan.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "864"
},
{
"name": "JavaScript",
"bytes": "11618"
},
{
"name": "Python",
"bytes": "974706"
},
{
"name": "Shell",
"bytes": "285"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class VisitorConfig(AppConfig):
    """Django application configuration for the ``visitor`` app."""
    # Dotted Python path of the application this config applies to.
    name = 'visitor'
| {
"content_hash": "147dc1dbdc1d9aea7419b29d65e66507",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 17.8,
"alnum_prop": 0.7528089887640449,
"repo_name": "narupo/visitor",
"id": "c3c678d1b25b86adb3369bcfa0bc3af532dfcf91",
"size": "89",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "13402"
},
{
"name": "Python",
"bytes": "32999"
}
],
"symlink_target": ""
} |
import pandas as pd
import numpy as np
import matplotlib
from matplotlib.dates import num2date, date2num
from matplotlib import pyplot
import scipy.stats
from dcs.analyze import textAnalysis
import traceback
import base64
import dateutil.parser
from StringIO import StringIO # USE for production
# from io import StringIO # ONLY USE for Python 3 (when compiling sphinx documentation)
def filterWithSearchQuery(df, columnIndices, query, isRegex=False):
    """Return the rows of *df* whose selected columns match *query*.

    Each column named by ``columnIndices`` is compared as a string, so a
    floating point value ``2`` matches the pattern ``'2.0'``.  A row is kept
    when the pattern occurs in *any* of the selected columns (and the
    matching cell is not null).  Set ``isRegex=True`` to interpret *query*
    as a regular expression rather than a literal substring.

    Args:
        df (pandas.DataFrame): data frame
        columnIndices (list<int>): indices of columns to include in search
        query (str): search query or regular expression
        isRegex (bool, optional): must be set to ``True`` if searching using regular expression

    Returns:
        pandas.DataFrame: data frame containing search results (all columns included, not just search columns)
    """
    keep = pd.Series(False, index=df.index)
    for colIndex in columnIndices:
        series = df[df.columns[colIndex]]
        hits = series.astype(str).str.contains(query, regex=isRegex)
        for rowIndex in hits[hits].index:
            # astype(str) renders missing values as literal text ('nan'/'None'),
            # which could spuriously match -- accept only non-null cells.
            if pd.notnull(series[rowIndex]):
                keep[rowIndex] = True
    return df[keep]
def histogram(df, columnIndices, options={}):
    """Render a histogram of one or more :class:`pandas.DataFrame` columns.

    The chart is drawn with :func:`matplotlib.axes.Axes.hist` (null values
    dropped per column), exported as a PNG, and Base64-encoded.

    .. note::
        The *options* kwarg may contain:

        * **numberOfBins** : an ``int`` passed to the *bins* argument of ``Axes.hist`` (default 10)
        * **axis** : a ``dict`` fixing the plot window, shaped
          ``{'x': {'start': ..., 'end': ...}, 'y': {'start': ..., 'end': ...}}``

    Args:
        df (pandas.DataFrame): data frame
        columnIndices (list<int>): indices of columns to plot
        options (dict, optional): options dictionary

    Returns:
        dict: ``{'image': <Base64 PNG string>, 'axis': <window dict>}`` where the
        window dict has the same shape as the ``axis`` option above
    """
    pyplot.style.use('ggplot')
    fig = pyplot.figure(figsize=(10, 8))
    ax = fig.add_subplot(111)

    bin_count = 10
    if type(options) is dict:
        requested_bins = options.get("numberOfBins")
        if type(requested_bins) is int:
            bin_count = int(requested_bins)
        window = options.get("axis")
        if type(window) is dict:
            ax.axis([window["x"]["start"], window["x"]["end"],
                     window["y"]["start"], window["y"]["end"]])

    labels = [df.columns[colIndex] for colIndex in columnIndices]
    samples = [df[df.columns[colIndex]].dropna() for colIndex in columnIndices]
    ax.hist(samples, bins=bin_count, label=labels)
    ax.legend(prop={'size': 10})
    ax.set_xlabel("Value")
    ax.set_ylabel("Frequency")

    png_stream = StringIO()
    fig.tight_layout()
    fig.savefig(png_stream, format="png", dpi=300)
    x_min, x_max, y_min, y_max = ax.axis()
    axisInformation = {"x": {"start": x_min, "end": x_max},
                       "y": {"start": y_min, "end": y_max}}
    pyplot.close(fig)
    return {'image': base64.b64encode(png_stream.getvalue()).decode('utf-8'),
            'axis': axisInformation}
def frequency(df, columnIndex, options={}):
    """Uses ``matplotlib`` to generate a horizontal frequency bar chart of the specified :class:`pandas.DataFrame` column

    This function uses the :meth:`pandas.Series.value_counts` method (or :func:`dcs.analyze.textAnalysis`['word_frequencies'] if plotting word frequency)
    to get the (value, frequency) tuples for the specified column. A horizontal bar chart is generated with the :func:`matplotlib.axes.Axes.barh` function,
    and the chart is exported to a PNG image and then encoded into a string using Base64.

    .. note::
        The *options* kwarg can be used to customize the plot and may have the following key-value pairs:

        * **useWords** : a ``bool`` flag which may be set to ``True`` to plot word frequencies instead of row value frequencies for a string column
        * **cutoff** : an ``int`` specifying the top *n* values by frequency to plot, default is 50, maximum is 50

    The function returns a dictionary with the following key-value pairs:

    * **image** : *str* -- Base64 encoded PNG image of the generated plot

    Args:
        df (pandas.DataFrame): data frame
        columnIndex (int): index of the column to plot
        options (dict, optional): options dictionary

    Returns:
        dict: dictionary containing image
    """
    cutoff = 50
    useWords = False
    column = df[df.columns[columnIndex]]
    if type(options) is dict:
        # Word-frequency mode only applies to non-numeric, non-datetime columns.
        if options.get("useWords", False) is True and not issubclass(column.dtype.type, np.datetime64) and not issubclass(column.dtype.type, np.number):
            useWords = True
        if options.get("cutoff", -1) > 0 and options.get("cutoff", -1) <= 50:
            cutoff = int(options["cutoff"])

    values = []
    counts = []
    if useWords:
        # (word, count) tuples from the text-analysis helper; reversed so the
        # most frequent entry ends up at the top of the horizontal bar chart.
        tuples = textAnalysis(column)["word_frequencies"]
        for x in reversed(tuples[:cutoff]):
            # Python 2 code path: decode byte strings for display, replacing
            # undecodable bytes.
            values.append(x[0].decode("utf-8", "replace") if isinstance(x[0], basestring) else x[0])
            counts.append(x[1])
    else:
        tuples = column.value_counts()
        # Walk the top `cutoff` (value, count) pairs from least to most frequent.
        for index in range(min(cutoff - 1, len(tuples) - 1), -1, -1):
            values.append(tuples.index[index].decode("utf-8", "replace") if isinstance(tuples.index[index], basestring) else tuples.index[index])
            counts.append(tuples.iloc[index])

    pyplot.style.use('ggplot')
    fig = pyplot.figure(figsize=(10, 8))
    ax = fig.add_subplot(111)
    ax.set_ylim(-0.5, len(values) - 0.5)
    ax.barh(np.arange(len(values)), counts, tick_label=values, align="center")
    ax.set_xlabel("Frequency")
    ax.set_ylabel("Value")
    stream = StringIO()
    fig.savefig(stream, format="png", dpi=300)
    pyplot.close(fig)
    return {'image': base64.b64encode(stream.getvalue()).decode('utf-8')}
def scatter(df, xIndex, yIndices, options={}):
    """Render a scatter plot of one or more y-columns against an x-column.

    Rows with a null in the x-column or any y-column are dropped.  For every
    y-column a linear trend-line is fitted with :func:`scipy.stats.linregress`,
    and the |R2| value plus the Pearson correlation p-value (from
    :func:`scipy.stats.pearsonr`) are rendered next to the line.  The figure is
    exported as a PNG and Base64-encoded.

    .. note::
        Up to six y-columns are supported for distinct point colors.

    .. note::
        The *options* kwarg may contain:

        * **axis** : a ``dict`` fixing the plot window, shaped
          ``{'x': {'start': ..., 'end': ...}, 'y': {'start': ..., 'end': ...}}``

    Args:
        df (pandas.DataFrame): data frame
        xIndex (int): index of column to plot on x-axis
        yIndices (list<int>): indices of columns to plot on y-axis
        options (dict, optional): options dictionary

    Returns:
        dict: ``{'image': <Base64 PNG string>, 'axis': <window dict>}``

    .. |R2| replace:: R\ :sup:`2`
    """
    xColumn = df.columns[xIndex]
    yColumns = [df.columns[index] for index in yIndices]
    df = df.dropna(subset=[xColumn] + yColumns)

    pyplot.style.use('ggplot')
    fig = pyplot.figure(figsize=(10, 8))
    ax = fig.add_subplot(111)
    if type(options) is dict:
        if "axis" in options and type(options["axis"]) is dict:
            ax.axis([options["axis"]["x"]["start"], options["axis"]["x"]["end"], options["axis"]["y"]["start"], options["axis"]["y"]["end"]])
    ax.set_xlabel(xColumn)
    ax.set_ylabel(yColumns[0] if len(yColumns) == 1 else "")

    # plot data
    colors = ["b", "m", "g", "y", "c", "k"]
    for index, column in enumerate(yColumns):
        ax.scatter(df[xColumn], df[column], c=colors[index % len(colors)], marker="x")
    ax.legend(loc=2, prop={'size': 10})

    # plot trendlines
    for index, column in enumerate(yColumns):
        slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(df[xColumn], df[column])
        # Bug fix: scipy.stats.pearsonr returns (correlation coefficient, p-value);
        # the old code rendered the *coefficient* in the "$p = ...$" label.  Use the
        # actual p-value instead.
        pearson_r, pearson_p = scipy.stats.pearsonr(df[xColumn], df[column])
        lineOfBestFit = df[xColumn] * slope + intercept
        ax.plot(df[xColumn], lineOfBestFit, colors[index % len(colors)] + "-", label="best fit")
        # Annotate at the right-most x position of the fitted line.
        ax.text(df[xColumn].max(), lineOfBestFit[df[xColumn].idxmax()], r' $R^{2} = %.3f$' % (r_value ** 2) + "\n" + r' $p = %.3f$' % pearson_p, fontsize=12)

    stream = StringIO()
    fig.tight_layout()
    fig.savefig(stream, format="png", dpi=300)
    axis = ax.axis()
    axisInformation = {"x": {"start": axis[0], "end": axis[1]}, "y": {"start": axis[2], "end": axis[3]}}
    pyplot.close(fig)
    return {'image': base64.b64encode(stream.getvalue()).decode('utf-8'), 'axis': axisInformation}
def line(df, xIndex, yIndices, options={}):
    """Render a line chart of one or more y-columns against an x-column.

    Rows with a missing x value are dropped first.  The chart is drawn with
    :func:`matplotlib.axes.Axes.plot`, exported as a PNG, and Base64-encoded.

    .. note::
        Up to six y-columns are supported for distinct line colors.

    .. note::
        The *options* kwarg may contain:

        * **axis** : a ``dict`` fixing the plot window, shaped
          ``{'x': {'start': ..., 'end': ...}, 'y': {'start': ..., 'end': ...}}``

    Args:
        df (pandas.DataFrame): data frame
        xIndex (int): index of column to plot on x-axis
        yIndices (list<int>): indices of columns to plot on y-axis
        options (dict, optional): options dictionary

    Returns:
        dict: ``{'image': <Base64 PNG string>, 'axis': <window dict>}`` where the
        window dict has the same shape as the ``axis`` option above
    """
    xColumn = df.columns[xIndex]
    yColumns = [df.columns[position] for position in yIndices]
    df = df.dropna(subset=[xColumn])

    pyplot.style.use('ggplot')
    fig = pyplot.figure(figsize=(10, 8))
    ax = fig.add_subplot(111)

    if type(options) is dict:
        window = options.get("axis")
        if type(window) is dict:
            ax.axis([window["x"]["start"], window["x"]["end"],
                     window["y"]["start"], window["y"]["end"]])

    ax.set_xlabel(xColumn)
    ax.set_ylabel(yColumns[0] if len(yColumns) == 1 else "")

    palette = ["b", "m", "g", "y", "c", "k"]
    for position, column in enumerate(yColumns):
        ax.plot(df[xColumn], df[column], palette[position % len(palette)] + "-")
    ax.legend(loc=2, prop={'size': 10})

    png_stream = StringIO()
    fig.tight_layout()
    fig.savefig(png_stream, format="png", dpi=300)
    x_min, x_max, y_min, y_max = ax.axis()
    axisInformation = {"x": {"start": x_min, "end": x_max},
                       "y": {"start": y_min, "end": y_max}}
    pyplot.close(fig)
    return {'image': base64.b64encode(png_stream.getvalue()).decode('utf-8'),
            'axis': axisInformation}
def date(df, xIndex, yIndices, options={}):
    """Uses ``matplotlib`` to generate a time-series chart of the specified :class:`pandas.DataFrame` column(s)
    This function uses :func:`matplotlib.axes.Axes.plot` function to plot a line chart, exports the chart to a PNG image
    and encodes the image into a string using Base64.
    .. note::
    The function supports plotting multiple columns with respect to one axis, but the number of columns should be limited to 6
    for optimal color assignment of the plot points.
    .. note::
    The *options* kwarg can be used to customize the plot and may have the following key-value pairs:
    * **axis** : a ``dict`` specifying the axis/window settings for the plot with the structure
    ``{'x': {'start': x-axis min, 'end': x-axis max}, 'y': {'start': y-axis min, 'end': y-axis max}}``.
    The values in the **axis** dictionary should be strings that are parseable using :func:`dateutil.parser.parse`
    The function returns a dictionary with the following key-value pairs:
    * **image** : *StringIO.StringIO* – :class:`StringIO.StringIO` object containing Base64 encoded PNG image of generated plot
    * **axis** : *dict* – dictionary containing axis/window settings for the generated plot with the structure
    ``{'x': {'start': x-axis min, 'end': x-axis max}, 'y': {'start': y-axis min, 'end': y-axis max}}``
    The values in the **axis** dictionary are date strings formatted using the :meth:`ISO8601 date format <python:datetime.datetime.isoformat>`
    Args:
    df (pandas.DataFrame): data frame
    xIndex (int): index of column to plot on x-axis
    yIndices (list<int>, optional): indices of columns to plot on y-axis
    options (dict, optional): options dictionary
    Returns:
    dict: dictionary containing image and axis settings"""
    # NOTE(review): options={} is a mutable default argument; it is only read
    # here, never mutated, but `options=None` with a guard would be safer.
    xColumn = df.columns[xIndex]
    yColumns = [df.columns[index] for index in yIndices]
    # Drop rows where the x (date) value is missing; dropna returns a copy,
    # so the caller's frame is not modified by the in-place sort below.
    df = df.dropna(subset=[xColumn])
    df.sort_values(xColumn, inplace=True)
    pyplot.style.use('ggplot')
    fig = pyplot.figure(figsize=(10, 8))
    ax = fig.add_subplot(111)
    if(type(options) is dict):
        if "axis" in options and type(options["axis"]) is dict:
            # Window limits arrive as parseable date strings; convert them to
            # matplotlib's float day numbers before applying.
            xStart = date2num(dateutil.parser.parse(options["axis"]["x"]["start"]))
            xEnd = date2num(dateutil.parser.parse(options["axis"]["x"]["end"]))
            ax.axis([xStart, xEnd, options["axis"]["y"]["start"], options["axis"]["y"]["end"]])
    pyplot.setp( ax.xaxis.get_majorticklabels(), rotation=45 )
    # plot data
    # Cycle through at most 6 distinct line colors (see docstring note).
    colors = ["b", "m", "g", "y", "c", "k"]
    for index, column in enumerate(yColumns):
        ax.plot(df[xColumn], df[column], colors[index % len(colors)] + "-")
    ax.legend(loc=2, prop={'size': 10})
    ax.set_xlabel(xColumn)
    ax.set_ylabel(yColumns[0] if len(yColumns) == 1 else "")
    stream = StringIO()
    fig.tight_layout()
    fig.savefig(stream, format="png", dpi=300)
    # Axes.axis() returns (xmin, xmax, ymin, ymax); x limits are matplotlib
    # day numbers and are converted back to ISO-8601 strings for the caller.
    axis = ax.axis()
    axisInformation = {"x": {"start": num2date(axis[0]).isoformat(), "end": num2date(axis[1]).isoformat()}, "y": {"start": axis[2], "end": axis[3]}}
    pyplot.close(fig)
return {'image': base64.b64encode(stream.getvalue()).decode('utf-8'), 'axis': axisInformation} | {
"content_hash": "ac0a6f246081920cacde4a2e2e1b1421",
"timestamp": "",
"source": "github",
"line_count": 384,
"max_line_length": 185,
"avg_line_length": 41.348958333333336,
"alnum_prop": 0.6964353193097368,
"repo_name": "SeldonIO/seldon-ucl",
"id": "c50a7bbd473c826f061e5be7d7144d76a6da4747",
"size": "15927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dcs/view.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9738"
},
{
"name": "HTML",
"bytes": "76234"
},
{
"name": "JavaScript",
"bytes": "139079"
},
{
"name": "Nginx",
"bytes": "1144"
},
{
"name": "Python",
"bytes": "135462"
},
{
"name": "Shell",
"bytes": "1597"
}
],
"symlink_target": ""
} |
'''
Contains PidSchedstat() class
Typical contents of file /proc/<pid>/schedstat::
765507966 715563 2922
'''
from ast import literal_eval
from logging import getLogger
from os import path as ospath
from .readfile import ReadFile
LOGGER = getLogger(__name__)
class PidSchedStat(ReadFile):
    """Reader for the single-record ``/proc/<pid>/schedstat`` file."""
    FILENAME = ospath.join('proc', '%s', 'schedstat')
    KEY = 'pidschedstat'
    FIELDS = ('run', 'wait', 'num', )
    def normalize(self):
        """Parse the one schedstat record into a ``{field: value}`` dict.

        Returns an empty dict when no content was read.
        """
        LOGGER.debug("Normalize")
        content = self.lines
        if not content:
            return {}
        tokens = content[0].split()
        parsed = (literal_eval(token) for token in tokens)
        return dict(zip(self.FIELDS, parsed))
| {
"content_hash": "7b1b996dfd17afd0a534e8b805abe850",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 74,
"avg_line_length": 20.783783783783782,
"alnum_prop": 0.6085825747724317,
"repo_name": "eccles/lnxproc",
"id": "7a8ed9b53f9de1769f843576c2647ce6b09743ce",
"size": "769",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lnxproc/pidschedstat.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2465"
},
{
"name": "Python",
"bytes": "111336"
},
{
"name": "Shell",
"bytes": "3691"
}
],
"symlink_target": ""
} |
# Sphinx documentation build configuration for the aha.buildout project.
# Lines left commented out show the Sphinx defaults.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
# Imported mid-file so the copyright year below tracks the build date.
from datetime import datetime
project = u'aha.buildout'
copyright = u'%s, Serge Davidov.' % datetime.now().year
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'buildoutdoc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual])
latex_documents = [
    ('index',
     'buildout.tex',
     u'aha.buildout Documentation',
     u'', 'manual'
     ),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| {
"content_hash": "43fe8d8cc4cfaafcc523b0b94f7dbfbf",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 79,
"avg_line_length": 31.24848484848485,
"alnum_prop": 0.7119860356865787,
"repo_name": "a25kk/aha",
"id": "2beb388cb0febe222a96e463cf6076f313d342a5",
"size": "5991",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1032109"
},
{
"name": "Dockerfile",
"bytes": "110"
},
{
"name": "HTML",
"bytes": "368331"
},
{
"name": "JavaScript",
"bytes": "1232863"
},
{
"name": "Makefile",
"bytes": "2549"
},
{
"name": "Python",
"bytes": "52409"
},
{
"name": "Shell",
"bytes": "3270"
}
],
"symlink_target": ""
} |
import random
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.core.mail import send_mail
from django.shortcuts import redirect
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.db.models import Q
from django.views.decorators.csrf import csrf_exempt
from django.template import RequestContext
from annoying.decorators import ajax_request
from annoying.decorators import render_to
from accounts.models import UserProfile
from api.models import WhiteListItem
from tags.models import Tag
from stats.models import MoralData
from common.admin import email_templates
from common.view_helpers import _template_values
from eyebrowse.log import logger
from django.db.models.aggregates import Count
@render_to('common/about.html')
def about(request):
    """Render the static About page."""
    context = _template_values(
        request, page_title="Eyebrowse - About", nav_about='active')
    return context
@render_to('common/faq.html')
def faq(request):
    """Render the static FAQ page."""
    context = _template_values(
        request, page_title="Eyebrowse - FAQ", nav_faq='active')
    return context
@render_to('common/api_docs.html')
def api_docs(request):
    """Render the static API documentation page."""
    context = _template_values(
        request, page_title="Eyebrowse - API Docs", nav_api='active')
    return context
@render_to('common/tutorial.html')
def tutorial(request):
    """Render the tutorial page with the default template context."""
    context = _template_values(request)
    return context
@render_to('common/home.html')
def home(request):
    """Landing page.

    Anonymous visitors get the public home template; authenticated users
    are routed by whether their profile has confirmed consent.
    """
    if not request.user.is_authenticated():
        return _template_values(request, page_title="home", navbar='nav_home')
    user = get_object_or_404(User, username=request.user.username)
    profile = UserProfile.objects.get(user=user)
    if profile.confirmed:
        return redirect('/live_stream/')
    return redirect('/consent')
@login_required
@render_to('common/consent.html')
def consent(request):
    """Render the consent form (login required)."""
    context = _template_values(
        request, page_title="consent", navbar='nav_home')
    return context
@login_required
@render_to('common/getting_started.html')
def getting_started(request):
    """Suggest the five most-whitelisted sites and five most-followed users
    that the requesting user does not already have."""
    current_user = request.user
    own_urls = WhiteListItem.objects.filter(
        user=current_user).values_list('url', flat=True)
    suggested_sites = (WhiteListItem.objects
                       .filter(~Q(url__in=own_urls))
                       .values('url')
                       .annotate(count=Count('url'))
                       .order_by('-count'))[0:5]
    profile = UserProfile.objects.get(user=current_user)
    # Exclude people already followed, plus the user themselves.
    already_followed = list(profile.follows.all().values_list(
        'user__username', flat=True))
    already_followed.append(current_user.username)
    suggested_people = (UserProfile.objects
                        .filter(~Q(user__username__in=already_followed))
                        .annotate(num_followed=Count('followed_by'))
                        .order_by('-num_followed'))[0:5]
    return _template_values(request, page_title="getting started",
                            navbar='nav_home', top_whitelists=suggested_sites,
                            top_people=suggested_people)
@render_to('common/downloads.html')
def downloads(request):
    """Render the downloads page."""
    context = _template_values(
        request, page_title="downloads", navbar='nav_home')
    return context
@login_required
@ajax_request
def consent_accept(request):
    """AJAX endpoint: mark the requesting user's profile as consented.

    Returns ``{'res': 'success'}`` or ``{'res': 'failed'}``.
    """
    if not request.POST.get('consent', None):
        return {'res': 'failed'}
    user = get_object_or_404(User, username=request.user.username)
    profile = UserProfile.objects.get(user=user)
    profile.confirmed = True
    profile.save()
    return {'res': 'success'}
@login_required
@ajax_request
def feedback(request):
    """AJAX endpoint: email the submitted feedback text to the site admins.

    Returns ``{'res': 'success'}`` on success, ``{'res': 'failed'}`` when
    no feedback text was posted.
    """
    feedback = request.POST.get('feedback', None)
    if not feedback:
        return {'res': 'failed'}
    # BUG FIX: str.replace returns a new string; the original code discarded
    # the result, so newlines were never converted to <br> in the email body.
    feedback = feedback.replace('\n', '<br>')
    user = request.user
    subject = email_templates.feedback['subject']
    content = email_templates.feedback['content'] % (user.username, feedback)
    admin_emails = [admin[1] for admin in settings.ADMINS]
    send_mail(subject, content, from_email=user.email,
              recipient_list=admin_emails)
    return {'res': 'success'}
@login_required
@ajax_request
def add_tag(request):
    """AJAX endpoint: create or rename the requesting user's tag for a domain.

    If the user already has a tag on the domain it is renamed (and made
    private); otherwise a new tag is created, reusing the color of any
    same-named tag the user has, or a fresh random color.
    Returns ``{'res': 'success'}`` or ``{'res': 'failed'}``.
    """
    domain = request.POST.get('domain', None)
    name = request.POST.get('tag', None)
    if not domain or not name:
        return {'res': 'failed'}
    user = request.user
    try:
        tags = Tag.objects.filter(user=user, domain=domain)
        if tags.count() > 0:
            tag = tags[0]
            tag.name = name
            tag.is_private = True
            tag.save()
        else:
            color_tags = Tag.objects.filter(user=user, name=name)
            if color_tags.count() > 0:
                color = color_tags[0].color
            else:
                r = lambda: random.randint(0, 255)
                color = '%02X%02X%02X' % (r(), r(), r())
            Tag.objects.get_or_create(
                user=user, domain=domain, name=name, color=color)
    # FIX: was the Python-2-only ``except Exception, e:`` syntax; ``as e``
    # works on Python 2.6+ and 3.  The broad catch is deliberate: tag
    # creation is best-effort and failures are only logged.
    except Exception as e:
        logger.info(e)
    return {'res': 'success'}
@login_required
@ajax_request
def color_tag(request):
    """AJAX endpoint: assign one freshly generated random color to every tag
    of the requesting user carrying the posted name."""
    name = request.POST.get('tag', None)
    rand_byte = lambda: random.randint(0, 255)
    new_color = '%02X%02X%02X' % (rand_byte(), rand_byte(), rand_byte())
    for tag in Tag.objects.filter(user=request.user, name=name):
        tag.color = new_color
        tag.save()
    return {'res': 'success'}
@csrf_exempt
@login_required
@ajax_request
def delete_tag(request):
    """AJAX endpoint: delete the requesting user's tags selected by the
    posted combination of domain, tag name, and/or page url."""
    domain = request.POST.get('domain', None)
    name = request.POST.get('tag', None)
    page_url = request.POST.get('url', None)
    user = request.user
    if domain and name:
        Tag.objects.filter(user=user, domain=domain, name=name).delete()
    elif name and page_url:
        Tag.objects.filter(user=user, common_tag__name=name,
                           page__url=process_url(page_url)).delete()
    elif name:
        Tag.objects.filter(user=user, name=name).delete()
    return {'res': 'success'}
@render_to('google3a0cf4e7f8daa91b.html')
def google_verify(request):
    """Serve the Google site-verification page; no template context needed."""
    return {}
@login_required
@render_to('common/mft_results_827.html')
def mft_results_treatment(request):
    """Score and display a treatment-group Moral Foundations questionnaire.

    On POST: parses the 32 answer fields (letters a-g,q,i-p with suffixes
    1/2), averages six items per foundation, and saves a MoralData row
    flagged ``is_treatment=True``.  On GET: loads the user's previously
    saved scores, if any; otherwise all scores default to 0.
    """
    auth = loy = fair = care = pure = 0
    user = request.user
    if request.POST:
        # Each answer arrives as a string; only its first character carries
        # the numeric response (same handling as the original field reads).
        answers = {}
        for letter in 'abcdefgqijklmnop':
            for part in ('1', '2'):
                field = letter + part
                answers[field] = int(request.POST.get(field)[0])
        def _avg(fields):
            # Six questionnaire items per moral foundation, averaged.
            return float(sum(answers[f] for f in fields)) / 6.0
        auth = _avg(('a1', 'i1', 'm1', 'a2', 'c2', 'g2'))
        loy = _avg(('c1', 'g1', 'q1', 'q2', 'l2', 'p2'))
        care = _avg(('d1', 'j1', 'k1', 'b2', 'f2', 'n2'))
        fair = _avg(('e1', 'n1', 'o1', 'e2', 'k2', 'm2'))
        pure = _avg(('f1', 'l1', 'p1', 'd2', 'i2', 'j2'))
        m = MoralData(authority=auth, loyalty=loy, care=care, fairness=fair,
                      purity=pure, user=user, is_treatment=True)
        m.save()
    else:
        try:
            m = MoralData.objects.get(user=user)
            auth = m.authority
            loy = m.loyalty
            fair = m.fairness
            pure = m.purity
            care = m.care
        except Exception:
            # FIX: was a bare ``except:``.  Lookup stays best-effort (missing
            # or duplicate rows fall back to zeros) but no longer swallows
            # SystemExit/KeyboardInterrupt.
            pass
    return _template_values(
        request, page_title="Moral Questionnaire Results",
        authority=auth, loyalty=loy, fairness=fair, care=care, purity=pure)
@login_required
@render_to('common/mft_results_543.html')
def mft_results_control(request):
    """Score a control-group Moral Foundations questionnaire submission.

    On POST: reads the 32 answer fields, averages six items per moral
    foundation, and persists a MoralData row for the user.
    """
    auth = 0
    loy = 0
    fair = 0
    care = 0
    pure = 0
    user = request.user
    if request.POST:
        # Read every posted answer; only the first character of each value
        # is the numeric response.
        answers = {}
        for letter in 'abcdefgqijklmnop':
            for part in ('1', '2'):
                field = letter + part
                answers[field] = int(request.POST.get(field)[0])
        avg = lambda fields: float(sum(answers[f] for f in fields)) / 6.0
        auth = avg(('a1', 'i1', 'm1', 'a2', 'c2', 'g2'))
        loy = avg(('c1', 'g1', 'q1', 'q2', 'l2', 'p2'))
        care = avg(('d1', 'j1', 'k1', 'b2', 'f2', 'n2'))
        fair = avg(('e1', 'n1', 'o1', 'e2', 'k2', 'm2'))
        pure = avg(('f1', 'l1', 'p1', 'd2', 'i2', 'j2'))
        record = MoralData(authority=auth, loyalty=loy, care=care,
                           fairness=fair, purity=pure, user=user)
        record.save()
    return _template_values(request,
                            page_title="Moral Questionnaire Results")
@login_required
def mft(request, token=None):
    """Render the Moral Foundations questionnaire, or prior results.

    If the user already has a MoralData record, the latest result page
    (treatment or control variant) is rendered instead of the form.
    Otherwise both question banks are presented in random order.

    :param token: opaque token passed through to the template context
    """
    user = request.user
    part_one = {
        "Whether or not someone conformed to the traditions of society.": "a1",
        "Whether or not someone was good at math.": "b1",
        "Whether or not someone showed a lack of loyalty.": "c1",
        "Whether or not someone was cruel.": "d1",
        "Whether or not some people were treated differently than others.": "e1",
        "Whether or not someone did something disgusting.": "f1",
        "Whether or not someone did something to betray his or her group.": "g1",
        "Whether or not someone's action showed love for his or her country.": "q1",
        "Whether or not someone showed a lack of respect for authority.": "i1",
        "Whether or not someone cared for someone weak or vulnerable.": "j1",
        "Whether or not someone suffered emotionally.": "k1",
        "Whether or not an action caused chaos or disorder.": "l1",
        "Whether or not someone acted in a way that God would approve of.": "m1",
        "Whether or not someone was denied his or her rights.": "n1",
        "Whether or not someone acted unfairly.": "o1",
        "Whether or not someone violated standards of purity and decency.": "p1",
    }
    part_two = {
        "If I were a soldier and disagreed with my commanding officer's orders, I would obey anyway because that is my duty.": "a2",
        "Compassion for those who are suffering is the most crucial virtue.": "b2",
        "Respect for authority is something all children need to learn.": "c2",
        "Chastity is an important and valuable virtue.": "d2",
        "Justice is the most important requirement for a society.": "e2",
        "It is better to do good than to do bad.": "f2",
        "Men and women each have different roles to play in society.": "g2",
        "I am proud of my country's history.": "q2",
        "People should not do things that are disgusting, even if no one is harmed.": "i2",
        "I would call some acts wrong on the grounds that they are unnatural.": "j2",
        "I think it's morally wrong that rich children inherit a lot of money while poor children inherit nothing.": "k2",
        "It is more important to be a team player than to express oneself.": "l2",
        "When the government makes laws, the number one principle should be ensuring that everyone is treated fairly.": "m2",
        "One of the worst things a person could do is hurt a defenseless animal.": "n2",
        "It can never be right to kill a human being.": "o2",
        "People should be loyal to their family members, even when they have done something wrong.": "p2",
    }
    # FIX: the original placed each question at a random free index via
    # rejection sampling (retry while the slot is taken), which is expected
    # O(n^2) draws.  random.shuffle yields the same uniform permutation.
    q1_array = [{'question': q, 'class': c} for q, c in part_one.items()]
    random.shuffle(q1_array)
    q2_array = [{'question': q, 'class': c} for q, c in part_two.items()]
    random.shuffle(q2_array)
    try:
        md_objs = MoralData.objects.filter(user=user)
        m = md_objs[len(md_objs) - 1]
        if m.is_treatment:
            return render(request, 'common/mft_results_827.html',
                          {'authority': m.authority, 'loyalty': m.loyalty,
                           'care': m.care, 'fairness': m.fairness,
                           'purity': m.purity})
        else:
            return render(request, 'common/mft_results_543.html')
    except Exception:
        # FIX: was a bare ``except:``.  Falling through to the questionnaire
        # when no prior record exists stays best-effort, but system-exiting
        # exceptions are no longer swallowed.
        pass
    return render(request, 'common/mft.html', {
        'token': token,
        'part_one': q1_array,
        'part_two': q2_array,
    })
# Helper function to parse urls minus query strings
def process_url(url):
    """Return *url* truncated at the first ``'?'`` (strips the query string).

    URLs without a ``'?'`` are returned unchanged.
    """
    # Idiomatic replacement for the original character-scan loop.
    return url.split('?', 1)[0]
| {
"content_hash": "08a9a1fbd7286690aa039a3c84957317",
"timestamp": "",
"source": "github",
"line_count": 420,
"max_line_length": 139,
"avg_line_length": 36.48095238095238,
"alnum_prop": 0.5893486490014358,
"repo_name": "haystack/eyebrowse-server",
"id": "a527d545d6d0b32b7c98eb7f213894a8116e0974",
"size": "15322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eyebrowse/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13941"
},
{
"name": "HTML",
"bytes": "149107"
},
{
"name": "Java",
"bytes": "4327"
},
{
"name": "JavaScript",
"bytes": "82373"
},
{
"name": "Makefile",
"bytes": "1104"
},
{
"name": "PostScript",
"bytes": "1643"
},
{
"name": "Python",
"bytes": "1092543"
},
{
"name": "Shell",
"bytes": "758"
}
],
"symlink_target": ""
} |
"""REST API permissions"""
from rest_framework.permissions import BasePermission
from .models import Member
class GuildPermissions(BasePermission):
    """Guilds: superusers may do anything, staff may list, any authenticated
    user may retrieve; object access additionally requires membership."""
    def has_permission(self, request, view):
        user = request.user
        action = view.action
        return (user.is_superuser
                or (user.is_staff and action == 'list')
                or (user.is_authenticated and action == 'retrieve'))
    def has_object_permission(self, request, view, obj):
        user = request.user
        if user.is_superuser or user.is_staff:
            return True
        return Member.objects.filter(user=user, guild=obj).exists()
class StringPermissions(BasePermission):
    """Strings: world-readable; authenticated users may create; staff may
    destroy; superusers may do anything."""
    def has_permission(self, request, view):
        user = request.user
        action = view.action
        if action in ('list', 'retrieve'):
            return True
        return (user.is_superuser
                or (user.is_staff and action == 'destroy')
                or (user.is_authenticated and action == 'create'))
class MessagePermissions(BasePermission):
    """Messages: read-only actions for staff; anything for superusers."""
    def has_permission(self, request, view):
        user = request.user
        read_only = view.action in ('list', 'retrieve')
        return user.is_superuser or (user.is_staff and read_only)
class UserPermissions(BasePermission):
    """Users: read-only actions for staff; anything for superusers."""
    def has_permission(self, request, view):
        user = request.user
        read_only = view.action in ('list', 'retrieve')
        return user.is_superuser or (user.is_staff and read_only)
class MemberPermissions(BasePermission):
    """Members: superusers may do anything, staff may list, authenticated
    users may retrieve; object access also allows fellow guild members."""
    def has_permission(self, request, view):
        user = request.user
        action = view.action
        return (user.is_superuser
                or (user.is_staff and action == 'list')
                or (user.is_authenticated and action == 'retrieve'))
    def has_object_permission(self, request, view, obj):
        user = request.user
        if user.is_superuser or user.is_staff:
            return True
        return Member.objects.filter(user=user, guild=obj.guild).exists()
class RolePermissions(BasePermission):
    """Roles: superusers may do anything, staff may list, authenticated
    users may retrieve; object access also allows guild members."""
    def has_permission(self, request, view):
        user = request.user
        action = view.action
        return (user.is_superuser
                or (user.is_staff and action == 'list')
                or (user.is_authenticated and action == 'retrieve'))
    def has_object_permission(self, request, view, obj):
        user = request.user
        if user.is_superuser or user.is_staff:
            return True
        return Member.objects.filter(user=user, guild=obj.guild).exists()
class ChannelPermissions(BasePermission):
    """Channels: read-only actions for staff; anything for superusers."""
    def has_permission(self, request, view):
        user = request.user
        read_only = view.action in ('list', 'retrieve')
        return user.is_superuser or (user.is_staff and read_only)
    def has_object_permission(self, request, view, obj):
        user = request.user
        return user.is_superuser or user.is_staff
| {
"content_hash": "14308f773dbb11331ea2effbae6164e5",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 83,
"avg_line_length": 39.013698630136986,
"alnum_prop": 0.636938202247191,
"repo_name": "Dwarf-Community/Dwarf",
"id": "546c78587729c20876c2fd7db2ff0a23f1b1e8a8",
"size": "2848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "permissions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "102693"
}
],
"symlink_target": ""
} |
"""
Node-related objects and collections
"""
import itertools
import operator
import traceback
from datetime import datetime
from netaddr import IPAddress
from netaddr import IPNetwork
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import subqueryload_all
from nailgun import consts
from nailgun.objects.serializers.node import NodeSerializer
from nailgun.db import db
from nailgun.db.sqlalchemy import models
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun.objects import Cluster
from nailgun.objects import NailgunCollection
from nailgun.objects import NailgunObject
from nailgun.objects import Notification
from nailgun.settings import settings
class Node(NailgunObject):
    """Node object

    Wraps the SQLAlchemy ``Node`` model; class methods below handle
    lookup, creation, group assignment and attribute bootstrapping.
    """
    #: SQLAlchemy model for Node
    model = models.Node
    #: Serializer for Node
    serializer = NodeSerializer
    #: Node JSON schema (draft-04) describing a serialized Node
    schema = {
        "$schema": "http://json-schema.org/draft-04/schema#",
        "title": "Node",
        "description": "Serialized Node object",
        "type": "object",
        "properties": {
            "id": {"type": "number"},
            "cluster_id": {"type": "number"},
            "name": {"type": "string"},
            "status": {
                "type": "string",
                "enum": list(consts.NODE_STATUSES)
            },
            "group_id": {"type": "number"},
            "meta": {"type": "object"},
            "mac": {"type": "string"},
            "fqdn": {"type": "string"},
            "manufacturer": {"type": "string"},
            "platform_name": {"type": "string"},
            "kernel_params": {"type": "string"},
            "progress": {"type": "number"},
            "os_platform": {"type": "string"},
            "pending_addition": {"type": "boolean"},
            "pending_deletion": {"type": "boolean"},
            "error_type": {
                "type": "string",
                "enum": list(consts.NODE_ERRORS)
            },
            "error_msg": {"type": "string"},
            "online": {"type": "boolean"},
            "roles": {"type": "array"},
            "pending_roles": {"type": "array"},
            "agent_checksum": {"type": "string"}
        }
    }
@classmethod
def get_by_mac_or_uid(cls, mac=None, node_uid=None):
    """Fetch a Node by MAC address or, failing that, by primary key.

    :param mac: MAC address string (matched lower-cased)
    :param node_uid: Node ID
    :returns: Node instance, or None when neither key is given or no match
    """
    if not mac and not node_uid:
        return None
    query = db().query(cls.model)
    if mac:
        return query.filter_by(mac=mac.lower()).first()
    return query.get(node_uid)
@classmethod
def get_by_meta(cls, meta):
    """Search for instance using mac, node id or interfaces.

    :param meta: dict with nodes metadata
    :returns: Node instance or None
    """
    node = cls.get_by_mac_or_uid(
        mac=meta.get('mac'), node_uid=meta.get('id'))
    if not node:
        # FIX: the original ``all([meta.get('meta'), meta['meta'].get(...)])``
        # built the whole list eagerly, so a missing 'meta' key raised
        # KeyError instead of simply skipping the interface search.
        interfaces = (meta.get('meta') or {}).get('interfaces')
        if interfaces:
            node = cls.search_by_interfaces(interfaces)
    return node
@classmethod
def search_by_interfaces(cls, interfaces):
    """Find a Node whose NIC MAC matches any of the given interfaces.

    :param interfaces: iterable of dicts, each carrying a "mac" key
    :returns: first matching Node instance or None
    """
    macs = [iface["mac"].lower() for iface in interfaces]
    query = db().query(cls.model).join(
        models.NodeNICInterface,
        cls.model.nic_interfaces
    )
    return query.filter(models.NodeNICInterface.mac.in_(macs)).first()
@classmethod
def should_have_public(cls, instance):
    """Determine whether this node has Public network.

    :param instance: Node DB instance
    :returns: True when node has Public network
    """
    if Cluster.should_assign_public_to_all_nodes(instance.cluster):
        return True
    roles_metadata = instance.cluster.release.roles_metadata
    all_roles = itertools.chain(instance.roles, instance.pending_roles)
    return any(
        roles_metadata.get(role, {}).get('public_ip_required')
        for role in all_roles)
@classmethod
def create(cls, data):
    """Create Node instance with specified parameters in DB.
    This includes:

    * generating its name by MAC (if name is not specified in data)
    * adding node to Cluster (if cluster_id is not None in data) \
    (see :func:`add_into_cluster`) with specified roles \
    (see :func:`update_roles` and :func:`update_pending_roles`)
    * creating interfaces for Node in DB (see :func:`update_interfaces`)
    * creating default Node attributes (see :func:`create_attributes`)
    * creating default volumes allocation for Node \
    (see :func:`update_volumes`)
    * creating Notification about newly discovered Node \
    (see :func:`create_discover_notification`)

    .. note:: ``data`` is consumed destructively (several keys are popped
       below); callers should not rely on its contents afterwards.

    :param data: dictionary of key-value pairs as object fields
    :returns: Node instance
    """
    # Default name derives from the last 5 characters of the MAC.
    if "name" not in data:
        data["name"] = "Untitled ({0})".format(
            data['mac'][-5:].lower()
        )
    data["timestamp"] = datetime.now()
    data.pop("id", None)
    #TODO(enchantner): fix this temporary hack in clients
    if "cluster_id" not in data and "cluster" in data:
        cluster_id = data.pop("cluster", None)
        data["cluster_id"] = cluster_id
    # Relationship-like fields are stripped before the base create() call
    # and applied via the dedicated helpers afterwards.
    roles = data.pop("roles", None)
    pending_roles = data.pop("pending_roles", None)
    primary_roles = data.pop("primary_roles", None)
    new_node_meta = data.pop("meta", {})
    new_node_cluster_id = data.pop("cluster_id", None)
    new_node = super(Node, cls).create(data)
    new_node.create_meta(new_node_meta)
    # Flush before creating dependent records -- presumably so the new row
    # gets its primary key assigned; confirm against NailgunObject.create.
    db().flush()
    # Add interfaces for node from 'meta'.
    if new_node.meta and new_node.meta.get('interfaces'):
        cls.update_interfaces(new_node)
    # adding node into cluster
    if new_node_cluster_id:
        cls.add_into_cluster(new_node, new_node_cluster_id)
    # updating roles
    if roles is not None:
        cls.update_roles(new_node, roles)
    if pending_roles is not None:
        cls.update_pending_roles(new_node, pending_roles)
    if primary_roles is not None:
        cls.update_primary_roles(new_node, primary_roles)
    # creating attributes
    cls.create_attributes(new_node)
    cls.update_volumes(new_node)
    cls.create_discover_notification(new_node)
    return new_node
@classmethod
def assign_group(cls, instance):
if instance.group_id is None and instance.ip:
admin_ngs = db().query(models.NetworkGroup).filter_by(
name="fuelweb_admin")
ip = IPAddress(instance.ip)
for ng in admin_ngs:
if ip in IPNetwork(ng.cidr):
instance.group_id = ng.group_id
break
if not instance.group_id:
instance.group_id = Cluster.get_default_group(instance.cluster).id
db().add(instance)
db().flush()
@classmethod
def create_attributes(cls, instance):
"""Create attributes for Node instance
:param instance: Node instance
:returns: NodeAttributes instance
"""
new_attributes = models.NodeAttributes()
instance.attributes = new_attributes
db().add(new_attributes)
db().add(instance)
db().flush()
return new_attributes
@classmethod
def hardware_info_locked(cls, instance):
"""Returns true if update of hardware information is not allowed.
It is not allowed during provision/deployment, after
successful provision/deployment and during node removal.
"""
return instance.status not in (
consts.NODE_STATUSES.discover,
consts.NODE_STATUSES.error,
)
@classmethod
def update_interfaces(cls, instance, update_by_agent=False):
"""Update interfaces for Node instance using Cluster
network manager (see :func:`get_network_manager`)
:param instance: Node instance
:returns: None
"""
try:
network_manager = Cluster.get_network_manager(instance.cluster)
network_manager.update_interfaces_info(instance, update_by_agent)
db().refresh(instance)
except errors.InvalidInterfacesInfo as exc:
logger.warning(
"Failed to update interfaces for node '%s' - invalid info "
"in meta: %s", instance.human_readable_name, exc.message
)
logger.warning(traceback.format_exc())
@classmethod
    def update_volumes(cls, instance):
        """Update volumes for Node instance.

        Adds pending "disks" changes for Cluster which Node belongs to.
        Failures to generate volumes info are reported through an error
        Notification instead of being raised to the caller.

        :param instance: Node instance
        :returns: None
        """
        attrs = instance.attributes
        if not attrs:
            attrs = cls.create_attributes(instance)
        try:
            # TODO(eli): update volumes method should be moved
            # into an extension
            # Should be done as a part of blueprint:
            # https://blueprints.launchpad.net/fuel/+spec
            # /volume-manager-refactoring
            from nailgun.extensions.volume_manager.extension \
                import VolumeManagerExtension
            VolumeManagerExtension.set_volumes(
                instance,
                instance.volume_manager.gen_volumes_info())
        except Exception as exc:
            # Best-effort: report the failure as a notification so the
            # UI surfaces it, but do not abort the caller's flow.
            msg = (
                u"Failed to generate volumes "
                u"info for node '{0}': '{1}'"
            ).format(
                instance.name or instance.mac or instance.id,
                str(exc) or "see logs for details"
            )
            logger.warning(traceback.format_exc())
            Notification.create({
                "topic": "error",
                "message": msg,
                "node_id": instance.id
            })
        if instance.cluster_id:
            Cluster.add_pending_changes(
                instance.cluster,
                "disks",
                node_id=instance.id
            )
        db().add(attrs)
        db().flush()
@classmethod
    def create_discover_notification(cls, instance):
        """Create notification about discovering new Node

        Formats a human readable "CPUs / RAM / HDD" summary from the
        node's hardware metadata; any piece that cannot be parsed is
        shown as "unknown".

        :param instance: Node instance
        :returns: None
        """
        try:
            # 1073741824 == 1024 ** 3: RAM is reported in binary
            # gigabytes, rounded to one decimal place.
            ram = str(round(float(
                instance.meta['memory']['total']) / 1073741824, 1)) + " GB RAM"
        except Exception:
            logger.warning(traceback.format_exc())
            ram = "unknown RAM"
        try:
            # we use multiplier of 1000 because disk vendors specify HDD size
            # in terms of decimal capacity. Sources:
            # http://knowledge.seagate.com/articles/en_US/FAQ/172191en
            # http://physics.nist.gov/cuu/Units/binary.html
            hd_size = round(
                float(
                    sum(
                        [d["size"] for d in instance.meta["disks"]]
                    ) / 1000000000
                ),
                1
            )
            # if HDD > 100 GB we show it's size in TB
            if hd_size > 100:
                hd_size = str(hd_size / 1000) + " TB HDD"
            else:
                hd_size = str(hd_size) + " GB HDD"
        except Exception:
            logger.warning(traceback.format_exc())
            hd_size = "unknown HDD"
        cores = str(instance.meta.get('cpu', {}).get('total', "unknown"))
        Notification.create({
            "topic": "discover",
            "message": u"New node is discovered: "
                       u"{0} CPUs / {1} / {2} ".format(cores, ram, hd_size),
            "node_id": instance.id
        })
@classmethod
    def update(cls, instance, data):
        """Update Node instance with specified parameters in DB.
        This includes:

        * adding node to Cluster (if cluster_id is not None in data) \
        (see :func:`add_into_cluster`)
        * updating roles for Node if it belongs to Cluster \
        (see :func:`update_roles` and :func:`update_pending_roles`)
        * removing node from Cluster (if cluster_id is None in data) \
        (see :func:`remove_from_cluster`)
        * updating interfaces for Node in DB (see :func:`update_interfaces`)
        * creating default Node attributes (see :func:`create_attributes`)
        * updating volumes allocation for Node using Cluster's Release \
        metadata (see :func:`update_volumes`)

        :param data: dictionary of key-value pairs as object fields
        :returns: Node instance
        """
        data.pop("id", None)
        data.pop("network_data", None)
        # Composite fields are popped so the base update() below only
        # receives plain column values.
        roles = data.pop("roles", None)
        pending_roles = data.pop("pending_roles", None)
        new_meta = data.pop("meta", None)
        update_by_agent = data.pop("is_agent", False)
        disks_changed = None
        # Compare old and new disks (order-insensitively, by disk name)
        # to decide later whether volumes must be regenerated.
        if new_meta and "disks" in new_meta and "disks" in instance.meta:
            key = operator.itemgetter("name")
            new_disks = sorted(new_meta["disks"], key=key)
            old_disks = sorted(instance.meta["disks"], key=key)
            disks_changed = (new_disks != old_disks)
        # TODO(enchantner): fix this temporary hack in clients
        if "cluster_id" not in data and "cluster" in data:
            cluster_id = data.pop("cluster", None)
            data["cluster_id"] = cluster_id
        if new_meta:
            instance.update_meta(new_meta)
            # The call to update_interfaces will execute a select query for
            # the current instance. This appears to overwrite the object in the
            # current session and we lose the meta changes.
            db().flush()
            if cls.hardware_info_locked(instance):
                logger.info("Interfaces are locked for update on node %s",
                            instance.human_readable_name)
            else:
                cls.update_interfaces(instance, update_by_agent)
        cluster_changed = False
        if "cluster_id" in data:
            new_cluster_id = data.pop("cluster_id")
            if instance.cluster_id:
                if new_cluster_id is None:
                    # removing node from cluster
                    cluster_changed = True
                    cls.remove_from_cluster(instance)
                elif new_cluster_id != instance.cluster_id:
                    # changing node cluster to another
                    # (is currently not allowed)
                    raise errors.CannotUpdate(
                        u"Changing cluster on the fly is not allowed"
                    )
            else:
                if new_cluster_id is not None:
                    # assigning node to cluster
                    cluster_changed = True
                    cls.add_into_cluster(instance, new_cluster_id)
        if "group_id" in data:
            new_group_id = data.pop("group_id")
            if instance.group_id != new_group_id:
                # Moving between groups invalidates network assignment,
                # so clear it and re-run the cluster assignment logic.
                nm = Cluster.get_network_manager(instance.cluster)
                nm.clear_assigned_networks(instance)
                nm.clear_bond_configuration(instance)
                instance.group_id = new_group_id
                cls.add_into_cluster(instance, instance.cluster_id)
        # calculating flags
        roles_changed = (
            roles is not None and set(roles) != set(instance.roles)
        )
        pending_roles_changed = (
            pending_roles is not None and
            set(pending_roles) != set(instance.pending_roles)
        )
        super(Node, cls).update(instance, data)
        if roles_changed:
            cls.update_roles(instance, roles)
        if pending_roles_changed:
            cls.update_pending_roles(instance, pending_roles)
        # Volumes are regenerated only when something relevant changed
        # and the node is not in the middle of provision/deployment.
        if any((
            roles_changed,
            pending_roles_changed,
            cluster_changed,
            disks_changed,
        )) and instance.status not in (
            consts.NODE_STATUSES.provisioning,
            consts.NODE_STATUSES.deploying
        ):
            cls.update_volumes(instance)
        return instance
@classmethod
    def reset_to_discover(cls, instance):
        """Flush database objects which is not consistent with actual node
        configuration in the event of resetting node to discover state

        :param instance: Node database object
        :returns: None
        """
        node_data = {
            "online": False,
            "status": consts.NODE_STATUSES.discover,
            "pending_addition": True,
            "pending_deletion": False,
        }
        cls.update_volumes(instance)
        cls.update(instance, node_data)
        # Deployed roles become pending again so the node redeploys.
        cls.move_roles_to_pending_roles(instance)
        # when node is reset to discover:
        # - cobbler system is deleted
        # - mac to ip mapping from dnsmasq.conf is deleted
        # imho we need to revert node to original state, as it was
        # added to cluster (without any additional state in database)
        netmanager = Cluster.get_network_manager()
        netmanager.clear_assigned_ips(instance)
        db().flush()
@classmethod
    def update_by_agent(cls, instance, data):
        """Update Node instance with some specific cases for agent.

        * don't update provisioning or error state back to discover
        * don't update volume information if disks arrays is empty
        * skip the update entirely when the reported IP is outside the
          admin network (corrupted data)

        :param data: dictionary of key-value pairs as object fields
        :returns: Node instance
        """
        # don't update provisioning and error back to discover
        if instance.status in ('provisioning', 'error'):
            if data.get('status', 'discover') == 'discover':
                logger.debug(
                    u"Node {0} has provisioning or error status - "
                    u"status not updated by agent".format(
                        instance.human_readable_name
                    )
                )
                data['status'] = instance.status
        meta = data.get('meta', {})
        # don't update volume information, if agent has sent an empty array
        if len(meta.get('disks', [])) == 0 and instance.meta.get('disks'):
            logger.warning(
                u'Node {0} has received an empty disks array - '
                u'volume information will not be updated'.format(
                    instance.human_readable_name
                )
            )
            meta['disks'] = instance.meta['disks']
        # don't update volume information, if it is locked by node status
        if 'disks' in meta and cls.hardware_info_locked(instance):
            logger.info("Volume information is locked for update on node %s",
                        instance.human_readable_name)
            meta['disks'] = instance.meta['disks']
        #(dshulyak) change this verification to NODE_STATUSES.deploying
        # after we will reuse ips from dhcp range
        netmanager = Cluster.get_network_manager()
        admin_ng = netmanager.get_admin_network_group(instance.id)
        if data.get('ip') and not netmanager.is_same_network(data['ip'],
                                                             admin_ng.cidr):
            logger.debug(
                'Corrupted network data %s, skipping update',
                instance.id)
            return instance
        return cls.update(instance, data)
@classmethod
    def update_roles(cls, instance, new_roles):
        """Update roles for Node instance.

        Logs an error if node doesn't belong to Cluster

        :param instance: Node instance
        :param new_roles: list of new role names
        :returns: None
        """
        # Roles only make sense within a cluster context.
        if not instance.cluster_id:
            logger.warning(
                u"Attempting to assign roles to node "
                u"'{0}' which isn't added to cluster".format(
                    instance.full_name))
            return
        logger.debug(
            u"Updating roles for node {0}: {1}".format(
                instance.full_name,
                new_roles))
        instance.roles = new_roles
        db().flush()
@classmethod
    def update_pending_roles(cls, instance, new_pending_roles):
        """Update pending_roles for Node instance.

        Logs an error if node doesn't belong to Cluster

        :param instance: Node instance
        :param new_pending_roles: list of new pending role names
        :returns: None
        """
        if not instance.cluster_id:
            logger.warning(
                u"Attempting to assign pending roles to node "
                u"'{0}' which isn't added to cluster".format(
                    instance.full_name))
            return
        logger.debug(
            u"Updating pending roles for node {0}: {1}".format(
                instance.full_name,
                new_pending_roles))
        # Deliberate comparison to [] (not a falsy check): only an
        # explicit empty list triggers clearing the pending changes.
        if new_pending_roles == []:
            #TODO(enchantner): research why the hell we need this
            Cluster.clear_pending_changes(
                instance.cluster,
                node_id=instance.id
            )
        instance.pending_roles = new_pending_roles
        db().flush()
@classmethod
def update_primary_roles(cls, instance, new_primary_roles):
"""Update primary_roles for Node instance.
Logs an error if node doesn't belong to Cluster
:param instance: Node instance
:param new_primary_roles: list of new pending role names
:returns: None
"""
if not instance.cluster_id:
logger.warning(
u"Attempting to assign pending roles to node "
u"'{0}' which isn't added to cluster".format(
instance.full_name))
return
assigned_roles = set(instance.roles + instance.pending_roles)
for role in new_primary_roles:
if role not in assigned_roles:
logger.warning(
u"Could not mark node {0} as primary for {1} role, "
u"because there's no assigned {1} role.".format(
instance.full_name, role)
)
return
logger.debug(
u"Updating primary roles for node {0}: {1}".format(
instance.full_name,
new_primary_roles))
instance.primary_roles = new_primary_roles
db().flush()
@classmethod
def add_into_cluster(cls, instance, cluster_id):
"""Adds Node to Cluster by its ID.
Also assigns networks by default for Node.
:param instance: Node instance
:param cluster_id: Cluster ID
:returns: None
"""
instance.cluster_id = cluster_id
db().flush()
cls.assign_group(instance)
network_manager = Cluster.get_network_manager(instance.cluster)
network_manager.assign_networks_by_default(instance)
cls.add_pending_change(instance, consts.CLUSTER_CHANGES.interfaces)
@classmethod
def add_pending_change(cls, instance, change):
"""Add pending change into Cluster.
:param instance: Node instance
:param change: string value of cluster change
:returns: None
"""
if instance.cluster:
Cluster.add_pending_changes(
instance.cluster, change, node_id=instance.id
)
@classmethod
def get_admin_physical_iface(cls, instance):
"""Returns node's physical iface.
In case if we have bonded admin iface, first
of the bonded ifaces will be returned
:param instance: Node instance
:returns: interface instance
"""
admin_iface = Cluster.get_network_manager(instance.cluster) \
.get_admin_interface(instance)
if admin_iface.type != consts.NETWORK_INTERFACE_TYPES.bond:
return admin_iface
iface = filter(lambda i: i.mac == instance.mac, admin_iface.slaves)
return iface[0] if iface else admin_iface.slaves[-1]
@classmethod
    def remove_from_cluster(cls, instance):
        """Remove Node from Cluster.

        Also drops networks assignment for Node and clears both
        roles and pending roles

        :param instance: Node instance
        :returns: None
        """
        # Cluster-scoped cleanup first (pending changes and networks).
        if instance.cluster:
            Cluster.clear_pending_changes(
                instance.cluster,
                node_id=instance.id
            )
            netmanager = Cluster.get_network_manager(
                instance.cluster
            )
            netmanager.clear_assigned_networks(instance)
            netmanager.clear_bond_configuration(instance)
        cls.update_roles(instance, [])
        cls.update_pending_roles(instance, [])
        cls.remove_replaced_params(instance)
        # Detach the node and reset cluster-dependent attributes.
        instance.cluster_id = None
        instance.group_id = None
        instance.kernel_params = None
        instance.primary_roles = []
        instance.reset_name_to_default()
        db().flush()
        db().refresh(instance)
@classmethod
def move_roles_to_pending_roles(cls, instance):
"""Move roles to pending_roles
"""
instance.pending_roles = instance.pending_roles + instance.roles
instance.roles = []
instance.primary_roles = []
db().flush()
@classmethod
    def make_slave_name(cls, instance):
        """Build the node's slave hostname, e.g. ``node-42``.

        :param instance: Node instance
        :returns: unicode name derived from the node id
        """
        return u"node-{node_id}".format(node_id=instance.id)
@classmethod
    def make_slave_fqdn(cls, instance):
        """Build the node FQDN from its slave name and the DNS domain.

        :param instance: Node instance
        :returns: unicode FQDN, e.g. ``node-42.<DNS_DOMAIN>``
        """
        return u"{instance_name}.{dns_domain}" \
            .format(instance_name=cls.make_slave_name(instance),
                    dns_domain=settings.DNS_DOMAIN)
@classmethod
    def get_kernel_params(cls, instance):
        """Return cluster kernel_params if they was not replaced by
        custom params.

        :param instance: Node instance
        :returns: the node's own kernel params, or the cluster default
        """
        return (instance.kernel_params or
                Cluster.get_default_kernel_params(instance.cluster))
@classmethod
    def remove_replaced_params(cls, instance):
        """Drop any custom (replaced) deployment/provisioning info."""
        instance.replaced_deployment_info = []
        instance.replaced_provisioning_info = {}
@classmethod
def all_roles(cls, instance):
roles = set(instance.roles + instance.pending_roles)
roles -= set(instance.primary_roles)
primary_roles = set([
'primary-{0}'.format(role) for role in instance.primary_roles])
return sorted(roles | primary_roles)
class NodeCollection(NailgunCollection):
    """Node collection

    Bulk helpers over sets of nodes: eager loading, FQDN maintenance,
    IP assignment and row locking.
    """

    #: Single Node object class
    single = Node

    @classmethod
    def eager_nodes_handlers(cls, iterable):
        """Eager load objects instances that is used in nodes handler.

        :param iterable: iterable (SQLAlchemy query)
        :returns: iterable (SQLAlchemy query)
        """
        options = (
            joinedload('cluster'),
            subqueryload_all('nic_interfaces.assigned_networks_list'),
            subqueryload_all('bond_interfaces.assigned_networks_list'),
            subqueryload_all('ip_addrs.network_data')
        )
        return cls.eager_base(iterable, options)

    @classmethod
    def update_slave_nodes_fqdn(cls, instances):
        """Regenerate and persist the slave FQDN for every given node."""
        for n in instances:
            n.fqdn = cls.single.make_slave_fqdn(n)
        db().flush()

    @classmethod
    def prepare_for_lt_6_1_deployment(cls, instances):
        """Prepare environment for deployment,
        assign management, public, storage ips
        """
        # Identical to prepare_for_deployment() without a neutron
        # segmentation type, so delegate instead of duplicating the
        # IP-assignment sequence.
        cls.prepare_for_deployment(instances, nst=None)

    @classmethod
    def prepare_for_deployment(cls, instances, nst=None):
        """Prepare environment for deployment,
        assign management, public, storage, private ips

        :param instances: nodes to process
        :param nst: neutron segmentation type; private IPs are assigned
            only for GRE segmentation
        """
        cls.update_slave_nodes_fqdn(instances)
        # TODO(enchantner): check network manager instance for each node
        netmanager = Cluster.get_network_manager()
        if instances:
            netmanager.assign_ips(instances, 'management')
            netmanager.assign_ips(instances, 'public')
            netmanager.assign_ips(instances, 'storage')
            if nst == consts.NEUTRON_SEGMENT_TYPES.gre:
                netmanager.assign_ips(instances, 'private')
            netmanager.assign_admin_ips(instances)

    @classmethod
    def prepare_for_provisioning(cls, instances):
        """Prepare environment for provisioning,
        update fqdns, assign admin IPs
        """
        cls.update_slave_nodes_fqdn(instances)
        netmanager = Cluster.get_network_manager()
        netmanager.assign_admin_ips(instances)

    @classmethod
    def lock_nodes(cls, instances):
        """Locking nodes instances, fetched before, but required to be locked

        :param instances: list of nodes
        :return: list of locked nodes
        """
        instances_ids = [instance.id for instance in instances]
        q = cls.filter_by_list(None, 'id', instances_ids, order_by='id')
        return cls.lock_for_update(q).all()

    @classmethod
    def get_by_group_id(cls, group_id):
        """Return a query of nodes belonging to the given node group."""
        return cls.filter_by(None, group_id=group_id)
| {
"content_hash": "5991911f528d5a07ce30aab06dc37524",
"timestamp": "",
"source": "github",
"line_count": 854,
"max_line_length": 79,
"avg_line_length": 35.058548009367684,
"alnum_prop": 0.5729458917835671,
"repo_name": "nebril/fuel-web",
"id": "fe13a0cd1fb743f6e2d37fe3f81b5a121e6435a1",
"size": "30575",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nailgun/nailgun/objects/node.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "61043"
},
{
"name": "HTML",
"bytes": "7268"
},
{
"name": "JavaScript",
"bytes": "724039"
},
{
"name": "Mako",
"bytes": "1449"
},
{
"name": "Puppet",
"bytes": "282"
},
{
"name": "Python",
"bytes": "3974663"
},
{
"name": "Ruby",
"bytes": "33991"
},
{
"name": "Shell",
"bytes": "28796"
}
],
"symlink_target": ""
} |
# Fall back to distutils when setuptools is not installed.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
import re
from codecs import open
version = ""
# Single-source the version: parse __version__ out of the package file
# instead of importing the package (which could require dependencies).
with open("koordinates/__init__.py", "r") as fd:
    version = re.search(
        r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE
    ).group(1)
# Reuse the README as the long description shown on PyPI.
with open("README.md", "r", "utf-8") as f:
    readme = f.read()
setup(
    name="koordinates",
    packages=[
        "koordinates",
    ],
    version=version,
    description="A Python client library for a number of Koordinates web APIs",
    long_description=readme,
    long_description_content_type="text/markdown",
    author="Koordinates Limited",
    author_email="support@koordinates.com",
    url="https://github.com/koordinates/python-client",
    keywords="koordinates api",
    license="BSD",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Scientific/Engineering :: GIS",
    ],
    python_requires=">=3.5",
    install_requires=[
        "python-dateutil>=2,<3",
        "pytz",
        "requests>=2.5,<3",
        "requests-toolbelt>=0.4,<1",
    ],
    tests_require=[
        "pytest>=3.3",
        "responses>=0.3",
        "coverage>=3.7,<4",
    ],
    zip_safe=False,
)
| {
"content_hash": "9692ac9f08076425124b2f52db8b8e44",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 79,
"avg_line_length": 29.310344827586206,
"alnum_prop": 0.5841176470588235,
"repo_name": "koordinates/python-client",
"id": "167b9d54f191aacac3b039e57d98b6492388b782",
"size": "1747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1400"
},
{
"name": "Python",
"bytes": "332997"
},
{
"name": "Shell",
"bytes": "150"
}
],
"symlink_target": ""
} |
from imaplib import IMAP4
import time
import re
import json
import daemon
# change these variable values
DEBUG = False
HOSTNAME = 'mail.server.dom'
USERNAME = 'account@server.dom'
PASSWORD = 'passsword'
MAILBOX = 'Inbox'
DATAFILE='/tmp/led.db'
NEWMAIL_OFFSET = 1 # my unread messages never goes to zero, yours might
MAIL_CHECK_FREQ = 30 # seconds to wait between mail checks
# do not change below this line
def checkmail():
    """Poll the IMAP mailbox once and publish the LED signal.

    Connects to HOSTNAME, counts UNSEEN messages in MAILBOX, writes a
    'new' or 'normal' signal via update_signal(), then sleeps for
    MAIL_CHECK_FREQ seconds.
    """
    try:
        server = IMAP4(HOSTNAME)
        server.login(USERNAME, PASSWORD)
    except:
        print 'connection failed!'
        return
    if DEBUG:
        print('Logging in as ' + USERNAME)
    # select() replies with a status line containing the message count;
    # pull the digits out of it.
    select_info = server.select(MAILBOX)
    total_email=re.findall(r'\d+(?:[.,]\d+)?', select_info[1][0])
    print('%d messages in %s' % (int(total_email[0]), MAILBOX))
    folder_status = server.status(MAILBOX, '(UNSEEN)')
    total_unseen = re.findall(r'\d+(?:[.,]\d+)?', folder_status[1][0])
    newmails = int(total_unseen[0])
    if DEBUG:
        print "You have", newmails, "new email(s)!"
    # NOTE(review): the IMAP connection is never logged out or closed
    # before returning; consider server.logout() here.
    if newmails >= NEWMAIL_OFFSET:
        update_signal('new', 11)
    else:
        update_signal('normal', 11)
    time.sleep(MAIL_CHECK_FREQ)
def update_signal(mesg='new', port=12):
    """Serialize the LED state to DATAFILE as JSON.

    :param mesg: signal name to publish (e.g. 'new' or 'normal')
    :param port: LED port number stored alongside the message
    """
    data = {'port': int(port), 'mesg': mesg}
    dataserial = json.dumps(data)
    try:
        # 'with' guarantees the handle is closed even when write()
        # fails; open() replaces the deprecated file() builtin.
        with open(DATAFILE, 'w') as dbfile:
            dbfile.write(dataserial)
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # still propagate; write errors stay best-effort.
        if DEBUG:
            print('error update datafile.')
def run():
    """Detach into a daemon and poll the mailbox forever."""
    with daemon.DaemonContext():
        while True:
            # checkmail() sleeps MAIL_CHECK_FREQ seconds per iteration.
            checkmail()
# Script entry point: daemonize and start the polling loop.
if __name__ == '__main__':
    run()
| {
"content_hash": "9cb7c218083d89e282a22e6f5ea7da76",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 73,
"avg_line_length": 23.375,
"alnum_prop": 0.6524064171122995,
"repo_name": "guspri/walang",
"id": "34eaa14c896da2c6d26146b70fa832eddeb5fa5a",
"size": "1519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "walang.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3060"
}
],
"symlink_target": ""
} |
from flask import current_app
def make_json(jsonstring, status_code=200, headers=None):
    """like jsonify, except accepts string, so we can do our own custom
    json serialization. should move this to continuumweb later

    :param jsonstring: pre-serialized JSON payload
    :param status_code: HTTP status code of the response
    :param headers: optional dict of extra response headers (the
        mutable default ``{}`` was replaced with None to avoid the
        shared-mutable-default pitfall)
    :returns: Flask response with mimetype application/json
    """
    if headers is None:
        headers = {}
    return current_app.response_class(response=jsonstring,
                                      status=status_code,
                                      headers=headers,
                                      mimetype='application/json')
| {
"content_hash": "efbd2fdd2accf64f490203f318876081",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 71,
"avg_line_length": 42.81818181818182,
"alnum_prop": 0.5668789808917197,
"repo_name": "sahat/bokeh",
"id": "719439b53ae60670eb625091c71120be95c6f0ce",
"size": "471",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bokeh/server/views/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "329134"
},
{
"name": "CoffeeScript",
"bytes": "2099237"
},
{
"name": "JavaScript",
"bytes": "2683660"
},
{
"name": "Python",
"bytes": "973217"
},
{
"name": "Scala",
"bytes": "27312"
},
{
"name": "Shell",
"bytes": "12135"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import os
import ujson
from django.conf import settings
from django.db import migrations, models
from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
def populate_new_fields(apps, schema_editor):
    # type: (StateApps, DatabaseSchemaEditor) -> None
    """Backfill emoji_code and reaction_type for existing reactions."""
    # Open the JSON file which contains the data to be used for migration.
    MIGRATION_DATA_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "management", "data")
    path_to_unified_reactions = os.path.join(MIGRATION_DATA_PATH, "unified_reactions.json")
    # 'with' closes the handle; the original leaked it through
    # ujson.load(open(...)).
    with open(path_to_unified_reactions) as unified_reactions_file:
        unified_reactions = ujson.load(unified_reactions_file)

    Reaction = apps.get_model('zerver', 'Reaction')
    for reaction in Reaction.objects.all():
        reaction.emoji_code = unified_reactions.get(reaction.emoji_name)
        if reaction.emoji_code is None:
            # If it's not present in the unified_reactions map, it's a realm emoji.
            reaction.emoji_code = reaction.emoji_name
            if reaction.emoji_name == 'zulip':
                # `:zulip:` emoji is a zulip special custom emoji.
                reaction.reaction_type = 'zulip_extra_emoji'
            else:
                reaction.reaction_type = 'realm_emoji'
        reaction.save()
class Migration(migrations.Migration):
    """Add Reaction.emoji_code / Reaction.reaction_type and backfill them."""

    dependencies = [
        ('zerver', '0096_add_password_required'),
    ]
    operations = [
        migrations.AddField(
            model_name='reaction',
            name='emoji_code',
            # Existing rows get the 'unset' stub which populate_new_fields
            # overwrites; preserve_default=False drops the stub afterwards.
            field=models.TextField(default='unset'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='reaction',
            name='reaction_type',
            field=models.CharField(choices=[('unicode_emoji', 'Unicode emoji'), ('realm_emoji', 'Realm emoji'), ('zulip_extra_emoji', 'Zulip extra emoji')], default='unicode_emoji', max_length=30),
        ),
        # Data backfill; reversing is a no-op because dropping the
        # columns discards the data anyway.
        migrations.RunPython(populate_new_fields,
                             reverse_code=migrations.RunPython.noop),
    ]
| {
"content_hash": "3e9693d75db1e3377676cf3ed2af0e7a",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 197,
"avg_line_length": 41.05882352941177,
"alnum_prop": 0.6446991404011462,
"repo_name": "vaidap/zulip",
"id": "ec8774170824300975461437b54cf7312777a402",
"size": "2167",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zerver/migrations/0097_reactions_emoji_code.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "416449"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "472724"
},
{
"name": "JavaScript",
"bytes": "2123247"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "84574"
},
{
"name": "Python",
"bytes": "3669105"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "44486"
}
],
"symlink_target": ""
} |
"""
Class for reading the configuration file
Uses the ConfigParser lib to return the values present in the config file
"""
# Authorship and licensing metadata for this module.
__authors__ = "Claudio Marques, David Palma, Luis Cordeiro"
__copyright__ = "Copyright (c) 2014 OneSource Consultoria Informatica, Lda"
__license__ = "Apache 2"
__contact__ = "www.onesource.pt"
__date__ = "01/09/2014"
__version__ = "1.0"
import ConfigParser
class ReadConfFile:
    """Reads options from a ConfigParser-style configuration file."""

    config = None

    def __init__(self, file="proxy.conf"):
        """Load the configuration file.

        :param file: path to the configuration file (parameter name
            shadows the ``file`` builtin but is kept for backward
            compatibility with keyword callers)
        """
        self.conf_file_name = file
        self.config = ConfigParser.SafeConfigParser()
        # 'with' closes the handle; the original passed a bare open()
        # result to readfp() and leaked the file descriptor.
        with open(file) as conf_fp:
            self.config.readfp(conf_fp)

    def read_option(self, group, name):
        """Return the value of *name* in section *group*.

        :param group: section name in the configuration file
        :param name: option name inside the section
        :returns: the option value as a string
        """
        return self.config.get(group, name)
| {
"content_hash": "95a235363bc4618430cfb1ac46f0e28c",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 75,
"avg_line_length": 22.263157894736842,
"alnum_prop": 0.6158392434988179,
"repo_name": "hocchudong/ZabbixCeilometer-Proxy",
"id": "acd5192bcdd89df3db1a139ba81535744d65ef74",
"size": "846",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "readFile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "65720"
},
{
"name": "Shell",
"bytes": "676"
}
],
"symlink_target": ""
} |
"""
Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
This file is part of Toolium.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import mock
import pytest
from toolium.config_files import ConfigFiles
from toolium.driver_wrapper import DriverWrapper
from toolium.driver_wrappers_pool import DriverWrappersPool
from toolium.visual_test import VisualTest
@pytest.fixture
def driver_wrapper():
    """Provide a freshly configured default driver wrapper.

    Empties the wrappers pool first so every test starts from a clean
    singleton state, then configures the wrapper from the local conf
    and output directories.
    """
    DriverWrappersPool._empty_pool()
    wrapper = DriverWrappersPool.get_default_wrapper()
    base_dir = os.path.dirname(os.path.realpath(__file__))
    files = ConfigFiles()
    files.set_config_directory(os.path.join(base_dir, 'conf'))
    files.set_output_directory(os.path.join(base_dir, 'output'))
    wrapper.configure(files)
    return wrapper
def test_singleton(driver_wrapper):
    """The default wrapper is a singleton: fetching it again yields the
    same object, so changes are visible through both references."""
    same_wrapper = DriverWrappersPool.get_default_wrapper()
    driver_type = 'opera'
    same_wrapper.config.set('Driver', 'type', driver_type)
    # A modification made through one reference is visible via the other.
    assert driver_wrapper.config.get('Driver', 'type') == driver_type
    assert same_wrapper.config.get('Driver', 'type') == driver_type
    assert driver_wrapper == same_wrapper
    assert DriverWrappersPool.driver_wrappers[0] == driver_wrapper
def test_multiple(driver_wrapper):
    """Instantiating DriverWrapper directly registers an additional,
    distinct wrapper after the default one in the pool."""
    extra_wrapper = DriverWrapper()
    assert extra_wrapper != driver_wrapper
    assert DriverWrappersPool.driver_wrappers[0] == driver_wrapper
    assert DriverWrappersPool.driver_wrappers[1] == extra_wrapper
def test_connect_default_driver_wrapper(driver_wrapper):
    """A not-yet-connected default wrapper must be connected and
    returned by connect_default_driver_wrapper."""
    driver_wrapper.connect = mock.MagicMock()
    returned = DriverWrappersPool.connect_default_driver_wrapper()
    assert returned == driver_wrapper
    driver_wrapper.connect.assert_called_once_with()
def test_connect_default_driver_wrapper_already_connected(driver_wrapper):
    """A wrapper that already has a driver must be returned untouched,
    without calling connect again."""
    driver_wrapper.connect = mock.MagicMock()
    driver_wrapper.driver = 'fake'
    returned = DriverWrappersPool.connect_default_driver_wrapper()
    assert returned == driver_wrapper
    driver_wrapper.connect.assert_not_called()
# Scopes accepted by DriverWrappersPool.close_drivers; each one is
# exercised through the parametrized test below.
close_drivers_scopes = (
    'function',
    'module',
    'session',
)
@pytest.mark.parametrize("scope", close_drivers_scopes)
def test_close_drivers_function(scope, driver_wrapper):
    """close_drivers must save webdriver logs only for 'function' scope
    and update the latest visual report only for 'session' scope."""
    DriverWrappersPool.save_all_webdriver_logs = mock.MagicMock()
    VisualTest.update_latest_report = mock.MagicMock()
    # Close drivers
    DriverWrappersPool.close_drivers(scope, 'test_name')
    # Check that save_all_webdriver_logs method has been called only in function scope
    # Check that update_latest_report method has been called only in session scope
    if scope == 'function':
        DriverWrappersPool.save_all_webdriver_logs.assert_called_once_with('test_name', True)
        VisualTest.update_latest_report.assert_not_called()
    elif scope == 'module':
        DriverWrappersPool.save_all_webdriver_logs.assert_not_called()
        VisualTest.update_latest_report.assert_not_called()
    elif scope == 'session':
        DriverWrappersPool.save_all_webdriver_logs.assert_not_called()
        VisualTest.update_latest_report.assert_called_once_with()
def test_find_parent_directory_relative():
    """A relative conf directory is resolved against the current working directory."""
    expected = os.path.join(os.getcwd(), 'conf')
    assert DriverWrappersPool._find_parent_directory('conf', 'properties.cfg') == expected
def test_find_parent_directory_file_not_found():
    """Even when the file does not exist, the resolved conf directory is returned."""
    expected = os.path.join(os.getcwd(), 'conf')
    assert DriverWrappersPool._find_parent_directory('conf', 'unknown') == expected
def test_find_parent_directory_absolute():
    """An absolute conf directory that contains the file is returned unchanged."""
    here = os.path.dirname(os.path.realpath(__file__))
    conf_dir = os.path.join(here, 'conf')
    assert DriverWrappersPool._find_parent_directory(conf_dir, 'properties.cfg') == conf_dir
def test_find_parent_directory_absolute_recursively():
    """The search walks up through parent directories until the file is found."""
    here = os.path.dirname(os.path.realpath(__file__))
    start_dir = os.path.join(here, 'unknown', 'conf')
    expected = os.path.join(here, 'conf')
    assert DriverWrappersPool._find_parent_directory(start_dir, 'properties.cfg') == expected
def test_initialize_config_files_new():
    """With no ConfigFiles instance given, initialization yields empty defaults."""
    init_config_files = DriverWrappersPool.initialize_config_files(None)

    # No filenames were configured and no environment was set
    assert init_config_files.config_properties_filenames is None
    assert init_config_files.output_log_filename is None
def test_initialize_config_files_new_environment():
    """TOOLIUM_CONFIG_ENVIRONMENT adds environment-specific properties files.

    The default chain ``properties.cfg`` is extended with the android
    variants, and the log file name gets the environment suffix.
    """
    os.environ['TOOLIUM_CONFIG_ENVIRONMENT'] = 'android'
    try:
        config_files = DriverWrappersPool.initialize_config_files(None)
    finally:
        # Always clean the environment variable so other tests are not
        # polluted, even if initialize_config_files raises.
        del os.environ['TOOLIUM_CONFIG_ENVIRONMENT']

    expected_properties_filenames = 'properties.cfg;android-properties.cfg;local-android-properties.cfg'
    assert config_files.config_properties_filenames == expected_properties_filenames
    assert config_files.output_log_filename == 'toolium_android.log'
def test_initialize_config_files_configured():
    """Explicitly configured filenames are preserved untouched."""
    config_files = ConfigFiles()
    config_files.set_config_properties_filenames('test.conf', 'local-test.conf')
    config_files.set_output_log_filename('test.log')

    config_files = DriverWrappersPool.initialize_config_files(config_files)

    # Without an environment, the configured names pass through unchanged
    assert config_files.config_properties_filenames == 'test.conf;local-test.conf'
    assert config_files.output_log_filename == 'test.log'
def test_initialize_config_files_configured_environment():
    """Configured filenames are extended with environment-specific variants.

    Each configured properties file spawns an ``<env>-`` and a
    ``local-<env>-`` variant, and the log name gets the ``_<env>`` suffix.
    """
    config_files = ConfigFiles()
    config_files.set_config_properties_filenames('test.conf', 'local-test.conf')
    config_files.set_output_log_filename('test.log')
    os.environ['TOOLIUM_CONFIG_ENVIRONMENT'] = 'android'
    try:
        config_files = DriverWrappersPool.initialize_config_files(config_files)
    finally:
        # Always clean the environment variable so other tests are not
        # polluted, even if initialize_config_files raises.
        del os.environ['TOOLIUM_CONFIG_ENVIRONMENT']

    expected_properties_filenames = 'test.conf;local-test.conf;android-test.conf;local-android-test.conf'
    assert config_files.config_properties_filenames == expected_properties_filenames
    assert config_files.output_log_filename == 'test_android.log'
def test_initialize_config_files_configured_environment_with_points():
    """Filenames containing extra dots keep them when the environment suffix
    is inserted (only the final extension is split)."""
    config_files = ConfigFiles()
    config_files.set_config_properties_filenames('test.new.conf', 'local-test.new.conf')
    config_files.set_output_log_filename('test.new.log')
    os.environ['TOOLIUM_CONFIG_ENVIRONMENT'] = 'ios'
    try:
        config_files = DriverWrappersPool.initialize_config_files(config_files)
    finally:
        # Always clean the environment variable so other tests are not
        # polluted, even if initialize_config_files raises.
        del os.environ['TOOLIUM_CONFIG_ENVIRONMENT']

    expected_properties_filenames = 'test.new.conf;local-test.new.conf;ios-test.new.conf;local-ios-test.new.conf'
    assert config_files.config_properties_filenames == expected_properties_filenames
    assert config_files.output_log_filename == 'test.new_ios.log'
| {
"content_hash": "1814465ebf75f3b71133f816ee4b8373",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 113,
"avg_line_length": 37.711711711711715,
"alnum_prop": 0.7411610129001434,
"repo_name": "Telefonica/toolium",
"id": "4383d7283969cafad7d048e0759cce1aafba6efd",
"size": "8398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toolium/test/test_driver_wrappers_pool.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1301"
},
{
"name": "HTML",
"bytes": "724"
},
{
"name": "JavaScript",
"bytes": "734"
},
{
"name": "Makefile",
"bytes": "2598"
},
{
"name": "Python",
"bytes": "560909"
}
],
"symlink_target": ""
} |
import unittest
import datetime
from voodoo.gen import CoordAddress
from weblab.core.new_server import simplify_response
class SimplifyResponseTestCase(unittest.TestCase):
    """Checks that simplify_response converts arbitrary Python values into
    JSON-friendly structures (strings, numbers, lists and dicts)."""

    def test_simplify_response_str(self):
        self._check("hello", "hello")

    def test_simplify_response_unicode(self):
        self._check(u"hello", u"hello")

    def test_simplify_response_number(self):
        self._check(5, 5)

    def test_simplify_response_simplelist(self):
        simplelist = [5, 6, "foo"]
        self._check(simplelist, simplelist)

    def test_simplify_response_simpletuple(self):
        # Tuples are converted to lists
        simpletuple = (5, 6, "foo")
        self._check(simpletuple, list(simpletuple))

    def test_simplify_response_simpledict(self):
        simpledict = { "a" : "b", 5 : "foo", u"bar" : 5 }
        self._check(simpledict, simpledict)

    def test_simplify_response_datetime_datetime(self):
        # datetimes are serialized as ISO 8601 strings
        dt = datetime.datetime(2009, 7, 19, 9, 39)
        self._check(dt, '2009-07-19T09:39:00')

    def test_simplify_response_datetime_date(self):
        dt = datetime.date(2009, 7, 19)
        self._check(dt, '2009-07-19')

    def test_simplify_response_datetime_time(self):
        dt = datetime.time(9, 39)
        self._check(dt, '09:39:00')

    def test_simplify_response_oldclass(self):
        # Old-style class instances become dicts of their attributes;
        # methods are not included
        class A:
            def __init__(self):
                self.attr1 = "foo"
                self.attr2 = "bar"

            def method(self):
                pass

        self._check(A(), {"attr1" : "foo", "attr2" : "bar"})

    def test_simplify_response_newclass(self):
        # New-style class instances are handled the same way
        class A(object):
            def __init__(self):
                super(A, self).__init__()
                self.attr1 = "foo"
                self.attr2 = "bar"

            def method(self):
                pass

        self._check(A(), {"attr1" : "foo", "attr2" : "bar"})

    def test_simplify_response_coordaddr(self):
        addr = CoordAddress('mach', 'inst', 'serv')
        self._check(addr, {'process': 'inst', 'component': 'serv', 'host': 'mach'})

    def test_simplify_response_maxdepth(self):
        # Self-referencing structures are cut at the given depth limit by
        # substituting None for deeper values
        class A(object):
            def __init__(self):
                super(A, self).__init__()
                self.attr1 = "foo"

            def method(self):
                pass

        a = A()
        a.a = a
        self._check(a, {
            "attr1" : "foo",
            "a" : {
                "attr1" : "foo",
                "a" : {
                    "attr1" : None,
                    "a" : None
                }
            }
        }, limit = 3)

    def _check(self, msg, expected, limit = None):
        """Simplify msg (honouring limit if given) and compare with expected."""
        if limit is not None:
            simplified = simplify_response(msg, limit = limit)
        else:
            simplified = simplify_response(msg)
        # assertEquals is a deprecated alias of assertEqual
        self.assertEqual(simplified, expected)
def suite():
    """Return the test suite for this module.

    Uses TestLoader.loadTestsFromTestCase instead of the deprecated
    unittest.makeSuite helper; both build the same TestSuite.
    """
    return unittest.TestLoader().loadTestsFromTestCase(SimplifyResponseTestCase)


if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "8b01c058ac8dd4f7ed82961c7ff9859c",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 83,
"avg_line_length": 28.927272727272726,
"alnum_prop": 0.5040854808296669,
"repo_name": "zstars/weblabdeusto",
"id": "f5699a3d2f71e01f4c609483a9b48aae4da01e68",
"size": "3576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/src/test/unit/weblab/comm/test_server.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ASP",
"bytes": "4785"
},
{
"name": "ActionScript",
"bytes": "8508"
},
{
"name": "ApacheConf",
"bytes": "122186"
},
{
"name": "Batchfile",
"bytes": "7753"
},
{
"name": "C",
"bytes": "19456"
},
{
"name": "C#",
"bytes": "315160"
},
{
"name": "C++",
"bytes": "9547"
},
{
"name": "CSS",
"bytes": "150709"
},
{
"name": "CoffeeScript",
"bytes": "30909"
},
{
"name": "Go",
"bytes": "7076"
},
{
"name": "HTML",
"bytes": "452001"
},
{
"name": "Java",
"bytes": "1234794"
},
{
"name": "JavaScript",
"bytes": "1656027"
},
{
"name": "Makefile",
"bytes": "1571"
},
{
"name": "Mako",
"bytes": "824"
},
{
"name": "PHP",
"bytes": "155137"
},
{
"name": "Python",
"bytes": "3435335"
},
{
"name": "Shell",
"bytes": "2596"
},
{
"name": "Smarty",
"bytes": "20160"
},
{
"name": "VHDL",
"bytes": "5874"
}
],
"symlink_target": ""
} |
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext_lazy as _
# Human-readable labels for every supported sort mode, in display order.
SORT_OPTIONS = SortedDict((
    ('priority', _('Priority')),
    ('date', _('Last Seen')),
    ('new', _('First Seen')),
    ('freq', _('Frequency')),
    ('tottime', _('Total Time Spent')),
    ('avgtime', _('Average Time Spent')),
    ('accel_15', _('Trending: %(minutes)d minutes' % {'minutes': 15})),
    ('accel_60', _('Trending: %(minutes)d minutes' % {'minutes': 60})),
))

# Raw SQL ORDER BY expressions for each sort key.  EXTRACT(EPOCH ...) is
# PostgreSQL syntax; the dialect-specific dicts below override it.
SORT_CLAUSES = {
    'priority': 'sentry_groupedmessage.score',
    'date': 'EXTRACT(EPOCH FROM sentry_groupedmessage.last_seen)',
    'new': 'EXTRACT(EPOCH FROM sentry_groupedmessage.first_seen)',
    'freq': 'sentry_groupedmessage.times_seen',
    'tottime': 'sentry_groupedmessage.time_spent_total',
    'avgtime': '(sentry_groupedmessage.time_spent_total / sentry_groupedmessage.time_spent_count)',
}

# SQLite has no EXTRACT(); sort on the datetime columns directly.
SQLITE_SORT_CLAUSES = SORT_CLAUSES.copy()
SQLITE_SORT_CLAUSES.update({
    'date': 'sentry_groupedmessage.last_seen',
    'new': 'sentry_groupedmessage.first_seen',
})

# MySQL uses UNIX_TIMESTAMP() for epoch conversion.
MYSQL_SORT_CLAUSES = SORT_CLAUSES.copy()
MYSQL_SORT_CLAUSES.update({
    'date': 'UNIX_TIMESTAMP(sentry_groupedmessage.last_seen)',
    'new': 'UNIX_TIMESTAMP(sentry_groupedmessage.first_seen)',
})

# Subset of sort modes available on the search results page.
SEARCH_SORT_OPTIONS = SortedDict((
    ('score', _('Score')),
    ('date', _('Last Seen')),
    ('new', _('First Seen')),
))
| {
"content_hash": "09dc8031f355763e0c7809a18a858191",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 99,
"avg_line_length": 38.44444444444444,
"alnum_prop": 0.6495664739884393,
"repo_name": "alex/sentry",
"id": "a4ccc382674a16a1076fea9461cf7e3cbf09a5a0",
"size": "1384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sentry/constants.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "224751"
},
{
"name": "JavaScript",
"bytes": "55298"
},
{
"name": "Python",
"bytes": "1311737"
},
{
"name": "Shell",
"bytes": "4106"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from distutils.version import LooseVersion
import warnings
import numpy as np
import pandas as pd
def _flatten_multi_geoms(geoms, colors=None):
    """
    Expand Multi* geometries into their component geometries.

    Returns Series-like geoms and colors where every Multi geometry has
    been replaced by its components, with the corresponding color repeated
    once per component so the 1:1 geometry/color matching is kept.

    Passing `colors` is optional; when omitted, a list of None values is
    returned as `component_colors`.  "Colors" are treated opaquely and so
    can actually contain any values.

    Returns
    -------
    components : list of geometry
    component_colors : list of whatever type `colors` contains
    """
    if colors is None:
        colors = [None] * len(geoms)

    # Fast path: nothing to flatten, hand back the inputs untouched.
    if not geoms.geom_type.str.startswith('Multi').any():
        return geoms, colors

    # precondition, so zip can't short-circuit
    assert len(geoms) == len(colors)

    components = []
    component_colors = []
    for geom, color in zip(geoms, colors):
        if not geom.type.startswith('Multi'):
            components.append(geom)
            component_colors.append(color)
        else:
            # One entry per component, all sharing the parent's color.
            for part in geom:
                components.append(part)
                component_colors.append(color)
    return components, component_colors
def plot_polygon_collection(ax, geoms, values=None, color=None,
                            cmap=None, vmin=None, vmax=None, **kwargs):
    """
    Plots a collection of Polygon and MultiPolygon geometries to `ax`

    Parameters
    ----------
    ax : matplotlib.axes.Axes
        where shapes will be plotted
    geoms : a sequence of `N` Polygons and/or MultiPolygons (can be mixed)
    values : a sequence of `N` values, optional
        Values will be mapped to colors using vmin/vmax/cmap. They should
        have 1:1 correspondence with the geometries (not their components).
        Otherwise follows `color` / `facecolor` kwargs.
    edgecolor : single color or sequence of `N` colors
        Color for the edge of the polygons
    facecolor : single color or sequence of `N` colors
        Color to fill the polygons. Cannot be used together with `values`.
    color : single color or sequence of `N` colors
        Sets both `edgecolor` and `facecolor`
    **kwargs
        Additional keyword arguments passed to the collection

    Returns
    -------
    collection : matplotlib.collections.Collection that was plotted
    """
    # descartes converts shapely polygons into matplotlib patches; it is an
    # optional dependency, so fail with a clear message if it is missing.
    try:
        from descartes.patch import PolygonPatch
    except ImportError:
        raise ImportError("The descartes package is required"
                          " for plotting polygons in geopandas.")
    from matplotlib.collections import PatchCollection
    # Split MultiPolygons into components; values are repeated per component.
    geoms, values = _flatten_multi_geoms(geoms, values)
    if None in values:
        values = None
    # PatchCollection does not accept some kwargs.
    if 'markersize' in kwargs:
        del kwargs['markersize']
    # color=None overwrites specified facecolor/edgecolor with default color
    if color is not None:
        kwargs['color'] = color
    collection = PatchCollection([PolygonPatch(poly) for poly in geoms],
                                 **kwargs)
    if values is not None:
        # Map values to colors via the colormap and the given limits.
        collection.set_array(np.asarray(values))
        collection.set_cmap(cmap)
        collection.set_clim(vmin, vmax)
    ax.add_collection(collection, autolim=True)
    ax.autoscale_view()
    return collection
def plot_linestring_collection(ax, geoms, values=None, color=None,
                               cmap=None, vmin=None, vmax=None, **kwargs):
    """
    Plots a collection of LineString and MultiLineString geometries to `ax`

    Parameters
    ----------
    ax : matplotlib.axes.Axes
        where shapes will be plotted
    geoms : a sequence of `N` LineStrings and/or MultiLineStrings (can be
        mixed)
    values : a sequence of `N` values, optional
        Values will be mapped to colors using vmin/vmax/cmap. They should
        have 1:1 correspondence with the geometries (not their components).
    color : single color or sequence of `N` colors
        Cannot be used together with `values`.

    Returns
    -------
    collection : matplotlib.collections.Collection that was plotted
    """
    from matplotlib.collections import LineCollection

    # Expand MultiLineStrings so each component gets its own segment.
    geoms, values = _flatten_multi_geoms(geoms, values)
    if None in values:
        values = None

    # LineCollection rejects the markersize kwarg; drop it if present.
    kwargs.pop('markersize', None)

    # color=None gives black instead of default color cycle
    if color is not None:
        kwargs['color'] = color

    segments = []
    for line in geoms:
        # Keep only x/y, discarding a possible z coordinate.
        segments.append(np.array(line)[:, :2])
    collection = LineCollection(segments, **kwargs)

    if values is not None:
        collection.set_array(np.asarray(values))
        collection.set_cmap(cmap)
        collection.set_clim(vmin, vmax)

    ax.add_collection(collection, autolim=True)
    ax.autoscale_view()
    return collection
def plot_point_collection(ax, geoms, values=None, color=None,
                          cmap=None, vmin=None, vmax=None,
                          marker='o', markersize=None, **kwargs):
    """
    Plots a collection of Point and MultiPoint geometries to `ax`

    Parameters
    ----------
    ax : matplotlib.axes.Axes
        where shapes will be plotted
    geoms : sequence of `N` Points or MultiPoints
    values : a sequence of `N` values, optional
        Values mapped to colors using vmin, vmax, and cmap.
        Cannot be specified together with `color`.
    markersize : scalar or array-like, optional
        Size of the markers. Note that under the hood ``scatter`` is
        used, so the specified value will be proportional to the
        area of the marker (size in points^2).

    Returns
    -------
    collection : matplotlib.collections.Collection that was plotted
    """
    if values is not None and color is not None:
        raise ValueError("Can only specify one of 'values' and 'color' kwargs")

    # Expand MultiPoints so every component point is drawn individually.
    geoms, values = _flatten_multi_geoms(geoms, values)
    if None in values:
        values = None

    xs = []
    ys = []
    for point in geoms:
        xs.append(point.x)
        ys.append(point.y)

    # matplotlib 1.4 does not support c=None, and < 2.0 does not support
    # s=None, so only forward these kwargs when actually provided.
    if values is not None:
        kwargs['c'] = values
    if markersize is not None:
        kwargs['s'] = markersize

    return ax.scatter(xs, ys, color=color, vmin=vmin, vmax=vmax, cmap=cmap,
                      marker=marker, **kwargs)
def plot_series(s, cmap=None, color=None, ax=None, figsize=None, **style_kwds):
    """
    Plot a GeoSeries.

    Generate a plot of a GeoSeries geometry with matplotlib.

    Parameters
    ----------
    s : Series
        The GeoSeries to be plotted. Currently Polygon,
        MultiPolygon, LineString, MultiLineString and Point
        geometries can be plotted.
    cmap : str (default None)
        The name of a colormap recognized by matplotlib. Any
        colormap will work, but categorical colormaps are
        generally recommended. Examples of useful discrete
        colormaps include:
            tab10, tab20, Accent, Dark2, Paired, Pastel1, Set1, Set2
    color : str (default None)
        If specified, all objects will be colored uniformly.
    ax : matplotlib.pyplot.Artist (default None)
        axes on which to draw the plot
    figsize : pair of floats (default None)
        Size of the resulting matplotlib.figure.Figure. If the argument
        ax is given explicitly, figsize is ignored.
    **style_kwds : dict
        Color options to be passed on to the actual plot function, such
        as ``edgecolor``, ``facecolor``, ``linewidth``, ``markersize``,
        ``alpha``.

    Returns
    -------
    ax : matplotlib axes instance
    """
    # Accept the deprecated 'colormap' and 'axes' aliases with a warning.
    if 'colormap' in style_kwds:
        warnings.warn("'colormap' is deprecated, please use 'cmap' instead "
                      "(for consistency with matplotlib)", FutureWarning)
        cmap = style_kwds.pop('colormap')
    if 'axes' in style_kwds:
        warnings.warn("'axes' is deprecated, please use 'ax' instead "
                      "(for consistency with pandas)", FutureWarning)
        ax = style_kwds.pop('axes')
    import matplotlib.pyplot as plt
    if ax is None:
        # Create a fresh figure; equal aspect keeps geometry undistorted.
        fig, ax = plt.subplots(figsize=figsize)
        ax.set_aspect('equal')
    if s.empty:
        warnings.warn("The GeoSeries you are attempting to plot is "
                      "empty. Nothing has been displayed.", UserWarning)
        return ax
    # if cmap is specified, create range of colors based on cmap
    values = None
    if cmap is not None:
        values = np.arange(len(s))
        if hasattr(cmap, 'N'):
            # Wrap indices around the colormap size for Colormap objects.
            values = values % cmap.N
        style_kwds['vmin'] = style_kwds.get('vmin', values.min())
        style_kwds['vmax'] = style_kwds.get('vmax', values.max())
    # Split the series by geometry type; each group is drawn with the
    # matching specialized plot function below.
    geom_types = s.geometry.type
    poly_idx = np.asarray((geom_types == 'Polygon')
                          | (geom_types == 'MultiPolygon'))
    line_idx = np.asarray((geom_types == 'LineString')
                          | (geom_types == 'MultiLineString'))
    point_idx = np.asarray((geom_types == 'Point')
                           | (geom_types == 'MultiPoint'))
    # plot all Polygons and all MultiPolygon components in the same collection
    polys = s.geometry[poly_idx]
    if not polys.empty:
        # color overrides both face and edgecolor. As we want people to be
        # able to use edgecolor as well, pass color to facecolor
        facecolor = style_kwds.pop('facecolor', None)
        if color is not None:
            facecolor = color
        values_ = values[poly_idx] if cmap else None
        plot_polygon_collection(ax, polys, values_, facecolor=facecolor,
                                cmap=cmap, **style_kwds)
    # plot all LineStrings and MultiLineString components in same collection
    lines = s.geometry[line_idx]
    if not lines.empty:
        values_ = values[line_idx] if cmap else None
        plot_linestring_collection(ax, lines, values_, color=color, cmap=cmap,
                                   **style_kwds)
    # plot all Points in the same collection
    points = s.geometry[point_idx]
    if not points.empty:
        values_ = values[point_idx] if cmap else None
        plot_point_collection(ax, points, values_, color=color, cmap=cmap,
                              **style_kwds)
    plt.draw()
    return ax
def plot_dataframe(df, column=None, cmap=None, color=None, ax=None,
                   categorical=False, legend=False, scheme=None, k=5,
                   vmin=None, vmax=None, markersize=None, figsize=None,
                   legend_kwds=None, **style_kwds):
    """
    Plot a GeoDataFrame.

    Generate a plot of a GeoDataFrame with matplotlib.  If a
    column is specified, the plot coloring will be based on values
    in that column.

    Parameters
    ----------
    df : GeoDataFrame
        The GeoDataFrame to be plotted. Currently Polygon,
        MultiPolygon, LineString, MultiLineString and Point
        geometries can be plotted.
    column : str, np.array, pd.Series (default None)
        The name of the dataframe column, np.array, or pd.Series to be plotted.
        If np.array or pd.Series are used then it must have same length as
        dataframe. Values are used to color the plot. Ignored if `color` is
        also set.
    cmap : str (default None)
        The name of a colormap recognized by matplotlib.
    color : str (default None)
        If specified, all objects will be colored uniformly.
    ax : matplotlib.pyplot.Artist (default None)
        axes on which to draw the plot
    categorical : bool (default False)
        If False, cmap will reflect numerical values of the
        column being plotted.  For non-numerical columns, this
        will be set to True.
    legend : bool (default False)
        Plot a legend. Ignored if no `column` is given, or if `color` is given.
    scheme : str (default None)
        Name of a choropleth classification scheme (requires PySAL).
        A pysal.esda.mapclassify.Map_Classifier object will be used
        under the hood. Supported schemes: 'Equal_interval', 'Quantiles',
        'Fisher_Jenks'
    k : int (default 5)
        Number of classes (ignored if scheme is None)
    vmin : None or float (default None)
        Minimum value of cmap. If None, the minimum data value
        in the column to be plotted is used.
    vmax : None or float (default None)
        Maximum value of cmap. If None, the maximum data value
        in the column to be plotted is used.
    markersize : str or float or sequence (default None)
        Only applies to point geometries within a frame.
        If a str, will use the values in the column of the frame specified
        by markersize to set the size of markers. Otherwise can be a value
        to apply to all points, or a sequence of the same length as the
        number of points.
    figsize : tuple of integers (default None)
        Size of the resulting matplotlib.figure.Figure. If the argument
        axes is given explicitly, figsize is ignored.
    legend_kwds : dict (default None)
        Keyword arguments to pass to ax.legend()
    **style_kwds : dict
        Color options to be passed on to the actual plot function, such
        as ``edgecolor``, ``facecolor``, ``linewidth``, ``markersize``,
        ``alpha``.

    Returns
    -------
    ax : matplotlib axes instance
    """
    # Accept the deprecated 'colormap' and 'axes' aliases with a warning.
    if 'colormap' in style_kwds:
        warnings.warn("'colormap' is deprecated, please use 'cmap' instead "
                      "(for consistency with matplotlib)", FutureWarning)
        cmap = style_kwds.pop('colormap')
    if 'axes' in style_kwds:
        warnings.warn("'axes' is deprecated, please use 'ax' instead "
                      "(for consistency with pandas)", FutureWarning)
        ax = style_kwds.pop('axes')
    # `color` wins over `column`: warn and ignore the column.
    if column is not None and color is not None:
        warnings.warn("Only specify one of 'column' or 'color'. Using "
                      "'color'.", UserWarning)
        column = None
    import matplotlib
    import matplotlib.pyplot as plt
    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)
        ax.set_aspect('equal')
    if df.empty:
        warnings.warn("The GeoDataFrame you are attempting to plot is "
                      "empty. Nothing has been displayed.", UserWarning)
        return ax
    # A string markersize names a dataframe column holding per-row sizes.
    if isinstance(markersize, str):
        markersize = df[markersize].values
    # Without a value column, this is just a geometry plot.
    if column is None:
        return plot_series(df.geometry, cmap=cmap, color=color, ax=ax,
                           figsize=figsize, markersize=markersize,
                           **style_kwds)
    # To accept pd.Series and np.arrays as column
    if isinstance(column, (np.ndarray, pd.Series)):
        if column.shape[0] != df.shape[0]:
            raise ValueError("The dataframe and given column have different "
                             "number of rows.")
        else:
            values = np.asarray(column)
    else:
        values = np.asarray(df[column])
    # Object dtype (e.g. strings) forces categorical coloring.
    if values.dtype is np.dtype('O'):
        categorical = True
    # Define `values` as a Series
    if categorical:
        if cmap is None:
            # Pick a categorical default colormap matching the installed
            # matplotlib version ('Vega10' was the pre-2.0.1 name of tab10).
            if LooseVersion(matplotlib.__version__) >= '2.0.1':
                cmap = 'tab10'
            elif LooseVersion(matplotlib.__version__) >= '2.0.0':
                # Erroneous name.
                cmap = 'Vega10'
            else:
                cmap = 'Set1'
        # Map each distinct value to a stable integer code.
        categories = list(set(values))
        categories.sort()
        valuemap = dict((k, v) for (v, k) in enumerate(categories))
        values = np.array([valuemap[k] for k in values])
    if scheme is not None:
        # Classify values into k bins using the PySAL scheme.
        binning = __pysal_choro(values, scheme, k=k)
        # set categorical to True for creating the legend
        categorical = True
        binedges = [values.min()] + binning.bins.tolist()
        categories = ['{0:.2f} - {1:.2f}'.format(binedges[i], binedges[i+1])
                      for i in range(len(binedges)-1)]
        values = np.array(binning.yb)
    mn = values.min() if vmin is None else vmin
    mx = values.max() if vmax is None else vmax
    # Split the frame by geometry type and draw each group with the
    # matching specialized plot function.
    geom_types = df.geometry.type
    poly_idx = np.asarray((geom_types == 'Polygon')
                          | (geom_types == 'MultiPolygon'))
    line_idx = np.asarray((geom_types == 'LineString')
                          | (geom_types == 'MultiLineString'))
    point_idx = np.asarray((geom_types == 'Point')
                           | (geom_types == 'MultiPoint'))
    # plot all Polygons and all MultiPolygon components in the same collection
    polys = df.geometry[poly_idx]
    if not polys.empty:
        plot_polygon_collection(ax, polys, values[poly_idx],
                                vmin=mn, vmax=mx, cmap=cmap, **style_kwds)
    # plot all LineStrings and MultiLineString components in same collection
    lines = df.geometry[line_idx]
    if not lines.empty:
        plot_linestring_collection(ax, lines, values[line_idx],
                                   vmin=mn, vmax=mx, cmap=cmap, **style_kwds)
    # plot all Points in the same collection
    points = df.geometry[point_idx]
    if not points.empty:
        if isinstance(markersize, np.ndarray):
            markersize = markersize[point_idx]
        plot_point_collection(ax, points, values[point_idx], vmin=mn, vmax=mx,
                              markersize=markersize, cmap=cmap,
                              **style_kwds)
    if legend and not color:
        from matplotlib.lines import Line2D
        from matplotlib.colors import Normalize
        from matplotlib import cm
        norm = Normalize(vmin=mn, vmax=mx)
        n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap)
        if categorical:
            # Build one proxy marker per category for the legend.
            patches = []
            for value, cat in enumerate(categories):
                patches.append(
                    Line2D([0], [0], linestyle="none", marker="o",
                           alpha=style_kwds.get('alpha', 1), markersize=10,
                           markerfacecolor=n_cmap.to_rgba(value)))
            if legend_kwds is None:
                legend_kwds = {}
            legend_kwds.setdefault('numpoints', 1)
            legend_kwds.setdefault('loc', 'best')
            ax.legend(patches, categories, **legend_kwds)
        else:
            # Continuous values get a colorbar instead of a legend.
            n_cmap.set_array([])
            ax.get_figure().colorbar(n_cmap, ax=ax)
    plt.draw()
    return ax
def __pysal_choro(values, scheme, k=5):
    """
    Wrapper for choropleth schemes from PySAL for use with plot_dataframe

    Parameters
    ----------
    values
        Series to be plotted
    scheme : str
        One of pysal.esda.mapclassify classification schemes
        Options are 'Equal_interval', 'Quantiles', 'Fisher_Jenks'
    k : int
        number of classes (2 <= k <=9)

    Returns
    -------
    binning
        Binning objects that holds the Series with values replaced with
        class identifier and the bins.
    """
    # Only the import belongs in the try block: previously the scheme
    # validation and binning also ran inside it, needlessly widening the
    # scope of the ImportError handler.
    try:
        from pysal.esda.mapclassify import (
            Quantiles, Equal_Interval, Fisher_Jenks)
    except ImportError:
        raise ImportError("PySAL is required to use the 'scheme' keyword")

    # Map normalized scheme names to their classifier constructors.
    schemes = {
        'equal_interval': Equal_Interval,
        'quantiles': Quantiles,
        'fisher_jenks': Fisher_Jenks,
    }
    scheme = scheme.lower()
    if scheme not in schemes:
        raise ValueError("Invalid scheme. Scheme must be in the"
                         " set: %r" % schemes.keys())
    return schemes[scheme](values, k)
| {
"content_hash": "a43093b148bfc22fe126c556beb59489",
"timestamp": "",
"source": "github",
"line_count": 550,
"max_line_length": 79,
"avg_line_length": 36.06,
"alnum_prop": 0.6131699692431806,
"repo_name": "ozak/geopandas",
"id": "02763e67f5bc58e219feb5e72b6730c9ff0bfa97",
"size": "19833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geopandas/plotting.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "32111"
},
{
"name": "Python",
"bytes": "361135"
}
],
"symlink_target": ""
} |
"""
csv.py - read/write/investigate CSV files
"""
import re
from functools import reduce
from _csv import Error, __version__, writer, reader, register_dialect, \
unregister_dialect, get_dialect, list_dialects, \
field_size_limit, \
QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, \
__doc__
from _csv import Dialect as _Dialect
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Public API of the csv module: quoting constants, dialect machinery,
# reader/writer factories and the dict-based convenience wrappers.
__all__ = [ "QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE",
            "Error", "Dialect", "__doc__", "excel", "excel_tab",
            "field_size_limit", "reader", "writer",
            "register_dialect", "get_dialect", "list_dialects", "Sniffer",
            "unregister_dialect", "__version__", "DictReader", "DictWriter" ]
class Dialect:
    """Describe an Excel dialect.

    This must be subclassed (see csv.excel). Valid attributes are:
    delimiter, quotechar, escapechar, doublequote, skipinitialspace,
    lineterminator, quoting.
    """
    _name = ""
    _valid = False
    # placeholders -- subclasses are expected to override these
    delimiter = None
    quotechar = None
    escapechar = None
    doublequote = None
    skipinitialspace = None
    lineterminator = None
    quoting = None

    def __init__(self):
        # Direct instantiation of the base class is not validated;
        # only subclasses get checked against the C-level _csv.Dialect.
        if self.__class__ != Dialect:
            self._valid = True
        self._validate()

    def _validate(self):
        # Delegate validation to the C implementation, which rejects
        # inconsistent attribute combinations.
        try:
            _Dialect(self)
        except TypeError, e:
            # We do this for compatibility with py2.3
            raise Error(str(e))
class excel(Dialect):
    """Describe the usual properties of Excel-generated CSV files."""
    delimiter = ','
    quotechar = '"'
    doublequote = True
    skipinitialspace = False
    lineterminator = '\r\n'
    quoting = QUOTE_MINIMAL
# Make the dialect available by name to reader()/writer().
register_dialect("excel", excel)
class excel_tab(excel):
    """Describe the usual properties of Excel-generated TAB-delimited files."""
    delimiter = '\t'
# Make the dialect available by name to reader()/writer().
register_dialect("excel-tab", excel_tab)
class DictReader:
    """Iterate over CSV rows as dicts mapping field names to cell values.

    If fieldnames is omitted, the first row of the file is consumed
    lazily (on first access) and used as the field names.
    """
    def __init__(self, f, fieldnames=None, restkey=None, restval=None,
                 dialect="excel", *args, **kwds):
        self._fieldnames = fieldnames    # list of keys for the dict
        self.restkey = restkey          # key to catch long rows
        self.restval = restval          # default value for short rows
        self.reader = reader(f, dialect, *args, **kwds)
        self.dialect = dialect
        self.line_num = 0

    def __iter__(self):
        return self

    @property
    def fieldnames(self):
        # Lazily read the header row the first time field names are needed.
        if self._fieldnames is None:
            try:
                self._fieldnames = self.reader.next()
            except StopIteration:
                pass
        self.line_num = self.reader.line_num
        return self._fieldnames

    @fieldnames.setter
    def fieldnames(self, value):
        self._fieldnames = value

    def next(self):
        """Return the next row as a dict keyed by the field names."""
        if self.line_num == 0:
            # Used only for its side effect.
            self.fieldnames
        row = self.reader.next()
        self.line_num = self.reader.line_num
        # unlike the basic reader, we prefer not to return blanks,
        # because we will typically wind up with a dict full of None
        # values
        while row == []:
            row = self.reader.next()
        d = dict(zip(self.fieldnames, row))
        lf = len(self.fieldnames)
        lr = len(row)
        if lf < lr:
            # Extra cells beyond the field names are collected under restkey.
            d[self.restkey] = row[lf:]
        elif lf > lr:
            # Missing cells are filled with restval.
            for key in self.fieldnames[lr:]:
                d[key] = self.restval
        return d
class DictWriter:
    """Write dicts to a CSV file, ordering the values by fieldnames.

    extrasaction controls what happens when a dict contains keys not in
    fieldnames: "raise" (default) raises ValueError, "ignore" drops them.
    """
    def __init__(self, f, fieldnames, restval="", extrasaction="raise",
                 dialect="excel", *args, **kwds):
        self.fieldnames = fieldnames    # list of keys for the dict
        self.restval = restval          # for writing short dicts
        if extrasaction.lower() not in ("raise", "ignore"):
            raise ValueError, \
                ("extrasaction (%s) must be 'raise' or 'ignore'" %
                 extrasaction)
        self.extrasaction = extrasaction
        self.writer = writer(f, dialect, *args, **kwds)

    def writeheader(self):
        # Emit the field names themselves as the first row.
        header = dict(zip(self.fieldnames, self.fieldnames))
        self.writerow(header)

    def _dict_to_list(self, rowdict):
        # Convert a row dict into a list ordered by fieldnames, filling
        # missing keys with restval and policing unexpected keys.
        if self.extrasaction == "raise":
            wrong_fields = [k for k in rowdict if k not in self.fieldnames]
            if wrong_fields:
                raise ValueError("dict contains fields not in fieldnames: " +
                                 ", ".join(wrong_fields))
        return [rowdict.get(key, self.restval) for key in self.fieldnames]

    def writerow(self, rowdict):
        return self.writer.writerow(self._dict_to_list(rowdict))

    def writerows(self, rowdicts):
        rows = []
        for rowdict in rowdicts:
            rows.append(self._dict_to_list(rowdict))
        return self.writer.writerows(rows)
# Guard Sniffer's type checking against builds that exclude complex()
try:
    complex
except NameError:
    # Fall back to float so isinstance-style checks below keep working.
    complex = float
class Sniffer:
'''
"Sniffs" the format of a CSV file (i.e. delimiter, quotechar)
Returns a Dialect object.
'''
    def __init__(self):
        """Initialize the delimiter preference order used when guessing."""
        # in case there is more than one possible delimiter
        self.preferred = [',', '\t', ';', ' ', ':']
    def sniff(self, sample, delimiters=None):
        """
        Returns a dialect (or None) corresponding to the sample
        """
        # First try to deduce quoting; this may also reveal the delimiter.
        quotechar, doublequote, delimiter, skipinitialspace = \
                   self._guess_quote_and_delimiter(sample, delimiters)
        if not delimiter:
            # No quoted fields found: fall back to frequency analysis.
            delimiter, skipinitialspace = self._guess_delimiter(sample,
                                                                delimiters)
        if not delimiter:
            raise Error, "Could not determine delimiter"
        # Build an ad-hoc Dialect subclass holding the guessed parameters.
        class dialect(Dialect):
            _name = "sniffed"
            lineterminator = '\r\n'
            quoting = QUOTE_MINIMAL
            # escapechar = ''
        dialect.doublequote = doublequote
        dialect.delimiter = delimiter
        # _csv.reader won't accept a quotechar of ''
        dialect.quotechar = quotechar or '"'
        dialect.skipinitialspace = skipinitialspace
        return dialect
def _guess_quote_and_delimiter(self, data, delimiters):
"""
Looks for text enclosed between two identical quotes
(the probable quotechar) which are preceded and followed
by the same character (the probable delimiter).
For example:
,'some text',
The quote with the most wins, same with the delimiter.
If there is no quotechar the delimiter can't be determined
this way.
"""
matches = []
for restr in ('(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)', # ".*?",
'(?P<delim>>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)', # ,".*?"
'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space)
regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
matches = regexp.findall(data)
if matches:
break
if not matches:
# (quotechar, doublequote, delimiter, skipinitialspace)
return ('', False, None, 0)
quotes = {}
delims = {}
spaces = 0
for m in matches:
n = regexp.groupindex['quote'] - 1
key = m[n]
if key:
quotes[key] = quotes.get(key, 0) + 1
try:
n = regexp.groupindex['delim'] - 1
key = m[n]
except KeyError:
continue
if key and (delimiters is None or key in delimiters):
delims[key] = delims.get(key, 0) + 1
try:
n = regexp.groupindex['space'] - 1
except KeyError:
continue
if m[n]:
spaces += 1
quotechar = reduce(lambda a, b, quotes = quotes:
(quotes[a] > quotes[b]) and a or b, quotes.keys())
if delims:
delim = reduce(lambda a, b, delims = delims:
(delims[a] > delims[b]) and a or b, delims.keys())
skipinitialspace = delims[delim] == spaces
if delim == '\n': # most likely a file with a single column
delim = ''
else:
# there is *no* delimiter, it's a single column of quoted data
delim = ''
skipinitialspace = 0
# if we see an extra quote between delimiters, we've got a
# double quoted format
dq_regexp = re.compile(r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \
{'delim':delim, 'quote':quotechar}, re.MULTILINE)
if dq_regexp.search(data):
doublequote = True
else:
doublequote = False
return (quotechar, doublequote, delim, skipinitialspace)
def _guess_delimiter(self, data, delimiters):
"""
The delimiter /should/ occur the same number of times on
each row. However, due to malformed data, it may not. We don't want
an all or nothing approach, so we allow for small variations in this
number.
1) build a table of the frequency of each character on every line.
2) build a table of freqencies of this frequency (meta-frequency?),
e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows,
7 times in 2 rows'
3) use the mode of the meta-frequency to determine the /expected/
frequency for that character
4) find out how often the character actually meets that goal
5) the character that best meets its goal is the delimiter
For performance reasons, the data is evaluated in chunks, so it can
try and evaluate the smallest portion of the data possible, evaluating
additional chunks as necessary.
"""
data = filter(None, data.split('\n'))
ascii = [chr(c) for c in range(127)] # 7-bit ASCII
# build frequency tables
chunkLength = min(10, len(data))
iteration = 0
charFrequency = {}
modes = {}
delims = {}
start, end = 0, min(chunkLength, len(data))
while start < len(data):
iteration += 1
for line in data[start:end]:
for char in ascii:
metaFrequency = charFrequency.get(char, {})
# must count even if frequency is 0
freq = line.count(char)
# value is the mode
metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
charFrequency[char] = metaFrequency
for char in charFrequency.keys():
items = charFrequency[char].items()
if len(items) == 1 and items[0][0] == 0:
continue
# get the mode of the frequencies
if len(items) > 1:
modes[char] = reduce(lambda a, b: a[1] > b[1] and a or b,
items)
# adjust the mode - subtract the sum of all
# other frequencies
items.remove(modes[char])
modes[char] = (modes[char][0], modes[char][1]
- reduce(lambda a, b: (0, a[1] + b[1]),
items)[1])
else:
modes[char] = items[0]
# build a list of possible delimiters
modeList = modes.items()
total = float(chunkLength * iteration)
# (rows of consistent data) / (number of rows) = 100%
consistency = 1.0
# minimum consistency threshold
threshold = 0.9
while len(delims) == 0 and consistency >= threshold:
for k, v in modeList:
if v[0] > 0 and v[1] > 0:
if ((v[1]/total) >= consistency and
(delimiters is None or k in delimiters)):
delims[k] = v
consistency -= 0.01
if len(delims) == 1:
delim = delims.keys()[0]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
# analyze another chunkLength lines
start = end
end += chunkLength
if not delims:
return ('', 0)
# if there's more than one, fall back to a 'preferred' list
if len(delims) > 1:
for d in self.preferred:
if d in delims.keys():
skipinitialspace = (data[0].count(d) ==
data[0].count("%c " % d))
return (d, skipinitialspace)
# nothing else indicates a preference, pick the character that
# dominates(?)
items = [(v,k) for (k,v) in delims.items()]
items.sort()
delim = items[-1][1]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
def has_header(self, sample):
# Creates a dictionary of types of data in each column. If any
# column is of a single type (say, integers), *except* for the first
# row, then the first row is presumed to be labels. If the type
# can't be determined, it is assumed to be a string in which case
# the length of the string is the determining factor: if all of the
# rows except for the first are the same length, it's a header.
# Finally, a 'vote' is taken at the end for each column, adding or
# subtracting from the likelihood of the first row being a header.
rdr = reader(StringIO(sample), self.sniff(sample))
header = rdr.next() # assume first row is header
columns = len(header)
columnTypes = {}
for i in range(columns): columnTypes[i] = None
checked = 0
for row in rdr:
# arbitrary number of rows to check, to keep it sane
if checked > 20:
break
checked += 1
if len(row) != columns:
continue # skip rows that have irregular number of columns
for col in columnTypes.keys():
for thisType in [int, long, float, complex]:
try:
thisType(row[col])
break
except (ValueError, OverflowError):
pass
else:
# fallback to length of string
thisType = len(row[col])
# treat longs as ints
if thisType == long:
thisType = int
if thisType != columnTypes[col]:
if columnTypes[col] is None: # add new column type
columnTypes[col] = thisType
else:
# type is inconsistent, remove column from
# consideration
del columnTypes[col]
# finally, compare results against first row and "vote"
# on whether it's a header
hasHeader = 0
for col, colType in columnTypes.items():
if type(colType) == type(0): # it's a length
if len(header[col]) != colType:
hasHeader += 1
else:
hasHeader -= 1
else: # attempt typecast
try:
colType(header[col])
except (ValueError, TypeError):
hasHeader += 1
else:
hasHeader -= 1
return hasHeader > 0
| {
"content_hash": "9ea3b181179f39614607c4d0a3f233c5",
"timestamp": "",
"source": "github",
"line_count": 450,
"max_line_length": 131,
"avg_line_length": 36.315555555555555,
"alnum_prop": 0.5206217109288949,
"repo_name": "xxd3vin/spp-sdk",
"id": "1df506230fd029aaa8f6415bfb2ab2fb84119de7",
"size": "16343",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "opt/Python27/Lib/csv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "759663"
},
{
"name": "C#",
"bytes": "9892"
},
{
"name": "C++",
"bytes": "56155"
},
{
"name": "CSS",
"bytes": "6226"
},
{
"name": "F#",
"bytes": "3065"
},
{
"name": "FORTRAN",
"bytes": "7795"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "JavaScript",
"bytes": "163687"
},
{
"name": "Makefile",
"bytes": "895"
},
{
"name": "Pascal",
"bytes": "8738"
},
{
"name": "Python",
"bytes": "22177886"
},
{
"name": "Shell",
"bytes": "15704"
},
{
"name": "Tcl",
"bytes": "2065501"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
import argparse
import json
import os
import netaddr
# Partition and object-name prefix stamped on every generated tmsh object.
PART = 'RPC'
PREFIX_NAME = 'RPC'
# tmsh template for the shared SNAT pool; %(snat_pool_addresses)s is filled
# in by main() with a space-separated address list.
SNAT_POOL = (
    '### CREATE SNATPOOL ###\n'
    'create ltm snatpool /' + PART + '/' + PREFIX_NAME + '_SNATPOOL { members replace-all-with {'
    ' %(snat_pool_addresses)s } }'
)
#Persistance Profile:
# Source-address and cookie persistence profiles.  ('PERSISTANCE' is a
# historical misspelling; main() references the name as-is.)
PERSISTANCE = [
    r'create ltm persistence source-addr /' + PART + '/' + PREFIX_NAME + '_PROF_PERSIST_IP {'
    r' app-service none defaults-from /Common/source_addr'
    r' match-across-services enabled timeout 3600 }',
    r'create ltm persistence cookie /' + PART + '/' + PREFIX_NAME + '_PROF_PERSIST_COOKIE {'
    r' app-service none cookie-name RPC-COOKIE defaults-from /Common/cookie }''\n'
]
# tmsh health-monitor definitions, expanded verbatim into the output script
# by main() (each entry is %-formatted against the parsed user args).
MONITORS = [
    r'create ltm monitor mysql /' + PART + '/' + PREFIX_NAME + '_MON_GALERA { count 1 database'
    r' information_schema debug no defaults-from mysql destination *:*'
    r' interval 3 recv big5_chinese_ci recv-column 2 recv-row 0 send "select'
    r' * from CHARACTER_SETS;" time-until-up 0 timeout 10 username monitoring }',
    r'create ltm monitor http /' + PART + '/' + PREFIX_NAME + '_MON_HTTP_KEYSTONE_ADMIN { defaults-from'
    r' http destination *:35357 recv "200 OK" send "HEAD /v3 HTTP/1.1\r\nHost:'
    r' rpc\r\n\r\n" }',
    r'create ltm monitor http /' + PART + '/' + PREFIX_NAME + '_MON_HTTP_NOVA_API_METADATA {'
    r' defaults-from http destination *:8775 recv "200 OK" send "HEAD /'
    r' HTTP/1.1\r\nHost: rpc\r\n\r\n" }',
    r'create ltm monitor http /' + PART + '/' + PREFIX_NAME + '_MON_HTTP_HORIZON { defaults-from http'
    r' destination *:80 recv "302 Found" send "HEAD /auth/login/ HTTP/1.1\r\nHost:'
    r' rpc\r\n\r\n" }',
    r'create ltm monitor http /' + PART + '/' + PREFIX_NAME + '_MON_HTTP_NOVA_SPICE_CONSOLE {'
    r' defaults-from http destination *:6082 recv "200 OK" send "HEAD /spice_auto.html'
    r' HTTP/1.1\r\nHost: rpc\r\n\r\n" }',
    r'create ltm monitor https /' + PART + '/' + PREFIX_NAME + '_MON_HTTPS_HORIZON_SSL { defaults-from'
    r' https destination *:443 recv "200 OK" send "HEAD /auth/login/ HTTP/1.1\r\nHost:'
    r' rpc\r\n\r\n" }',
    r'create ltm monitor https /' + PART + '/' + PREFIX_NAME + '_MON_HTTPS_NOVA_SPICE_CONSOLE {'
    r' defaults-from https destination *:6082 recv "200 OK" send "HEAD /'
    r' HTTP/1.1\r\nHost: rpc\r\n\r\n" }',
    r'create ltm monitor tcp /' + PART + '/' + PREFIX_NAME + '_MON_TCP_HEAT_API_CFN { defaults-from tcp'
    r' destination *:8000 }',
    r'create ltm monitor tcp /' + PART + '/' + PREFIX_NAME + '_MON_TCP_HEAT_API_CLOUDWATCH {'
    r' defaults-from tcp destination *:8003 }',
    r'create ltm monitor tcp /' + PART + '/' + PREFIX_NAME + '_MON_TCP_KIBANA { defaults-from tcp'
    r' destination *:80 }',
    r'create ltm monitor tcp /' + PART + '/' + PREFIX_NAME + '_MON_TCP_KIBANA_SSL { defaults-from tcp'
    r' destination *:8443 }',
    r'create ltm monitor tcp /' + PART + '/' + PREFIX_NAME + '_MON_TCP_ELASTICSEARCH { defaults-from'
    r' tcp destination *:9200 }',
    r'create ltm monitor http /' + PART + '/' + PREFIX_NAME + '_MON_HTTP_REPO {'
    r' defaults-from http destination *:8181 recv "200 OK" send "HEAD /'
    r' HTTP/1.1\r\nHost: rpc\r\n\r\n" }'
    '\n'
]
# tmsh template for a single pool-member node definition.
NODES = (
    'create ltm node /' + PART + '/%(node_name)s { address %(container_address)s }'
)
# Raises the idle timeout on each SNAT translation address (one %s per IP).
SNAT_IDLE = (
    'modify ltm snat-translation /' + PART + '/%s { ip-idle-timeout 3600 }'
)
# Per-member priority-group suffix used when a pool is priority-ordered.
PRIORITY_ENTRY = '{ priority-group %(priority_int)s }'
# Pool command assembled from these fragments in main(); 'priority' is only
# inserted for pools flagged with priority=True in POOL_PARTS.
POOL_NODE = {
    'beginning': 'create ltm pool /' + PART + '/%(pool_name)s {'
                 ' load-balancing-mode least-connections-node members replace-all-with'
                 ' { %(nodes)s }',
    'priority': 'min-active-members 1',
    'end': 'monitor %(mon_type)s }'
}
VIRTUAL_ENTRIES_PARTS = {
    'command': 'create ltm virtual /' + PART + '/%(vs_name)s',
}
# Inserted into a virtual-server command when the service needs IP persistence.
PERSIST_OPTION = 'persist replace-all-with { /' + PART + '/' + PREFIX_NAME + '_PROF_PERSIST_IP }'
# Final commands appended to the generated script: save and sync the config.
END_COMMANDS = [
    'save sys config',
    'run cm config-sync to-group SYNC-FAILOVER'
]
# Internal (private VIP) virtual server template.
VIRTUAL_ENTRIES = (
    'create ltm virtual /' + PART + '/%(vs_name)s {'
    ' destination %(internal_lb_vip_address)s:%(port)s'
    ' ip-protocol tcp mask 255.255.255.255'
    ' pool /' + PART + '/%(pool_name)s'
    r' profiles replace-all-with { /Common/fastL4 { } }'
    ' %(persist)s'
    ' source 0.0.0.0/0'
    ' source-address-translation { pool /' + PART + '/' + PREFIX_NAME + '_SNATPOOL type snat }'
    ' }'
)
# Public virtual server with SSL offload; %(ltm_profiles)s is filled with the
# client-ssl (and optional server-ssl / X-Forwarded-Proto) profiles in main().
PUB_SSL_VIRTUAL_ENTRIES = (
    'create ltm virtual /' + PART + '/%(vs_name)s {'
    ' destination %(ssl_public_ip)s:%(port)s ip-protocol tcp'
    ' pool /' + PART + '/%(pool_name)s'
    r' profiles replace-all-with { /Common/tcp { } %(ltm_profiles)s }'
    ' %(persist)s'
    ' source-address-translation { pool /' + PART + '/' + PREFIX_NAME + '_SNATPOOL type snat }'
    ' }'
)
# Public virtual server without SSL offload (services flagged ssl_impossible).
PUB_NONSSL_VIRTUAL_ENTRIES = (
    'create ltm virtual /' + PART + '/%(vs_name)s {'
    ' destination %(ssl_public_ip)s:%(port)s ip-protocol tcp'
    ' pool /' + PART + '/%(pool_name)s'
    r' profiles replace-all-with { /Common/fastL4 { } }'
    ' %(persist)s'
    ' source-address-translation { pool /' + PART + '/' + PREFIX_NAME + '_SNATPOOL type snat }'
    ' }'
)
# IP-forwarding virtual that discards traffic aimed at the host network
# (attached to the public VLAN together with the RPC_DISCARD_ALL iRule).
SEC_HOSTNET_VIRTUAL_ENTRIES = (
    'create ltm virtual /' + PART + '/' + PREFIX_NAME + '_LIMIT_ACCESS_TO_HOST_NET {'
    ' destination %(sec_host_net)s:0 ip-forward mask %(sec_host_netmask)s'
    r' profiles replace-all-with { /Common/fastL4 { } }'
    'rules { /' + PART + '/' + PREFIX_NAME + '_DISCARD_ALL }'
    ' translate-address disabled translate-port disabled vlans'
    ' replace-all-with { /Common/%(sec_public_vlan_name)s }'
    ' }'
)
# AFM (Advanced Firewall Module) objects: port/address lists, rule lists and
# their attachment to the GLOBAL-POLICY; only emitted when --afm is passed.
SEC_AFM_RULES = (
    '\n### CREATE AFM LIST AND RULES ###\n'
    #Port Lists
    'create security firewall port-list RPC_VIP_PORTS '
    '{ ports add { 80 { } 443 { } 3306 { } 3307 { } 5000 { } 6082 { } 8000 { } 8003 { } 8004 { } 8080 { } '
    '8181 { } 8443 { } 8774 { } 8775 { } 8776 { } 8888 { } 9191 { } 9200 { } 9292 { } 9696 { } 35357 { } } }\n'
    '\n'
    #Addr Lists
    'create security firewall address-list RPC_PUB_VIP_ALLOW_IPS { addresses add { 0.0.0.0/0 } }\n'
    'create security firewall address-list RPC_PRI_VIP_ALLOW_IPS { addresses add { 0.0.0.0/0 } }\n'
    'create security firewall address-list RPC-HOST-NET { addresses replace-all-with { %(sec_host_net)s { } } }\n'
    '\n'
    #Rule Lists
    'create security firewall rule-list RPC_PUB_VIP_RULELIST '
    '{ rules replace-all-with { RPC_PUB_VIP_ALLOW { action accept-decisively '
    'ip-protocol tcp source { address-lists replace-all-with { RPC_PUB_VIP_ALLOW_IPS } } '
    'destination { addresses replace-all-with { %(ssl_public_ip)s { } } '
    'port-lists replace-all-with { RPC_VIP_PORTS } } } } }\n'
    #
    'create security firewall rule-list RPC_PRI_VIP_RULELIST '
    '{ rules replace-all-with { RPC_PRI_VIP_ALLOW { action accept-decisively '
    'ip-protocol tcp source { address-lists replace-all-with { RPC_PRI_VIP_ALLOW_IPS } } '
    'destination { addresses replace-all-with { %(private_ip)s { } } '
    'port-lists replace-all-with { RPC_VIP_PORTS } } } } }\n'
    #
    'create security firewall rule-list RPC_SECURITY_RULES '
    'rules add { RPC_PROTECT_HOST { action drop '
    'source { vlans add { RPC_GATEWAY_NET } } '
    'destination { address-lists replace-all-with { RPC-HOST-NET } } place-before first } }\n'
    '\n'
    #Apply to Global Policy
    'modify security firewall policy GLOBAL-POLICY rules add { RPC_PUB_VIP_RULE { rule-list RPC_PUB_VIP_RULELIST place-after RACKNEST } }\n'
    'modify security firewall policy GLOBAL-POLICY rules add { RPC_PRI_VIP_RULE { rule-list RPC_PRI_VIP_RULELIST place-after RACKNEST } }\n'
    'modify security firewall policy GLOBAL-POLICY rules add { RPC_SECURITY { place-after ICMP-ALLOW rule-list RPC_SECURITY_RULES } }\n'
)
# IP-forwarding virtual that discards traffic aimed at the container network.
SEC_CONTAINER_VIRTUAL_ENTRIES = (
    'create ltm virtual /' + PART + '/' + PREFIX_NAME + '_LIMIT_ACCESS_TO_CONTAINER_NET {'
    ' connection-limit 1 destination %(sec_container_net)s:0 ip-forward mask'
    ' %(sec_container_netmask)s profiles replace-all-with'
    ' { /Common/fastL4 { } } rules { /' + PART + '/' + PREFIX_NAME + '_DISCARD_ALL'
    ' } translate-address disabled translate-port disabled'
    ' }'
)
# This is a dict of all groups and their respected values / requirements
# Keys used per entry:
#   port              -- front-end (virtual server) port
#   backend_port      -- pool-member port
#   mon_type          -- tmsh monitor object attached to the pool
#   group             -- inventory group whose hosts populate 'hosts'
#   hosts             -- filled at runtime by build_pool_parts()
#   priority          -- members get descending priority groups in the pool
#   make_public / ssl_impossible / backend_ssl / persist / x-forwarded-proto
#                     -- flags consumed by main() when building virtual servers
POOL_PARTS = {
    'galera': {
        'port': 3306,
        'backend_port': 3306,
        'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_GALERA',
        'priority': True,
        'group': 'galera',
        'hosts': []
    },
    'glance_api': {
        'port': 9292,
        'backend_port': 9292,
        'mon_type': '/' + PART + '/RPC-MON-EXT-ENDPOINT',
        'group': 'glance_api',
        'make_public': True,
        'x-forwarded-proto': True,
        'hosts': []
    },
    'glance_registry': {
        'port': 9191,
        'backend_port': 9191,
        'mon_type': '/' + PART + '/RPC-MON-EXT-ENDPOINT',
        'group': 'glance_registry',
        'hosts': []
    },
    'heat_api_cfn': {
        'port': 8000,
        'backend_port': 8000,
        'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_TCP_HEAT_API_CFN',
        'group': 'heat_api_cfn',
        'make_public': True,
        'x-forwarded-proto': True,
        'hosts': []
    },
    'heat_api_cloudwatch': {
        'port': 8003,
        'backend_port': 8003,
        'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_TCP_HEAT_API_CLOUDWATCH',
        'group': 'heat_api_cloudwatch',
        'make_public': True,
        'x-forwarded-proto': True,
        'hosts': []
    },
    'heat_api': {
        'port': 8004,
        'backend_port': 8004,
        'mon_type': '/' + PART + '/RPC-MON-EXT-ENDPOINT',
        'group': 'heat_api',
        'make_public': True,
        'x-forwarded-proto': True,
        'hosts': []
    },
    'keystone_admin': {
        'port': 35357,
        'backend_port': 35357,
        'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_HTTP_KEYSTONE_ADMIN',
        'group': 'keystone',
        'hosts': []
    },
    'keystone_service': {
        'port': 5000,
        'backend_port': 5000,
        'mon_type': '/' + PART + '/RPC-MON-EXT-ENDPOINT',
        'group': 'keystone',
        'make_public': True,
        'x-forwarded-proto': True,
        'hosts': []
    },
    'neutron_server': {
        'port': 9696,
        'backend_port': 9696,
        'mon_type': '/' + PART + '/RPC-MON-EXT-ENDPOINT',
        'group': 'neutron_server',
        'make_public': True,
        'x-forwarded-proto': True,
        'hosts': []
    },
    'nova_api_metadata': {
        'port': 8775,
        'backend_port': 8775,
        'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_HTTP_NOVA_API_METADATA',
        'group': 'nova_api_metadata',
        'hosts': []
    },
    'nova_api_os_compute': {
        'port': 8774,
        'backend_port': 8774,
        'mon_type': '/' + PART + '/RPC-MON-EXT-ENDPOINT',
        'group': 'nova_api_os_compute',
        'make_public': True,
        'x-forwarded-proto': True,
        'hosts': []
    },
    'nova_spice_console': {
        'port': 6082,
        'backend_port': 6082,
        'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_HTTP_NOVA_SPICE_CONSOLE',
        'group': 'nova_console',
        'hosts': [],
        'ssl_impossible': True,
        'make_public': True,
        'persist': True
    },
    'cinder_api': {
        'port': 8776,
        'backend_port': 8776,
        'mon_type': '/' + PART + '/RPC-MON-EXT-ENDPOINT',
        'group': 'cinder_api',
        'make_public': True,
        'x-forwarded-proto': True,
        'hosts': []
    },
    'horizon': {
        'port': 80,
        'backend_port': 80,
        'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_HTTP_HORIZON',
        'group': 'horizon',
        'hosts': [],
    },
    'horizon_ssl': {
        'port': 443,
        'backend_port': 443,
        'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_HTTPS_HORIZON_SSL',
        'group': 'horizon',
        'hosts': [],
        'make_public': True,
        'persist': True,
        'backend_ssl': True
    },
    'elasticsearch': {
        'port': 9200,
        'backend_port': 9200,
        'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_TCP_ELASTICSEARCH',
        'group': 'elasticsearch',
        'hosts': []
    },
    'kibana': {
        'port': 8888,
        'backend_port': 80,
        'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_TCP_KIBANA',
        'group': 'kibana',
        'priority': True,
        'hosts': []
    },
    'kibana_ssl': {
        'port': 8443,
        'backend_port': 8443,
        'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_TCP_KIBANA_SSL',
        'group': 'kibana',
        'priority': True,
        'hosts': [],
        'make_public': True,
        'persist': True,
        'backend_ssl': True
    },
    'swift': {
        'port': 8080,
        'backend_port': 8080,
        'mon_type': '/' + PART + '/RPC-MON-EXT-ENDPOINT',
        'group': 'swift_proxy',
        'make_public': True,
        'x-forwarded-proto': True,
        'hosts': []
    },
    'repo': {
        'port': 8181,
        'backend_port': 8181,
        'mon_type': '/' + PART + '/' + PREFIX_NAME + '_MON_HTTP_REPO',
        'group': 'pkg_repo',
        'priority': True,
        'hosts': []
    }
}
def recursive_host_get(inventory, group_name, host_dict=None):
    """Collect node entries for *group_name* (and its child groups).

    Walks the inventory group's children depth-first and appends one
    ``{'hostname': ..., 'container_address': ...}`` dict per host to
    ``host_dict['hosts']``, de-duplicated by hostname.

    :param inventory: ``dict`` dynamic-inventory structure with a
        ``_meta.hostvars`` section providing each host's container_address
    :param group_name: ``str`` name of the group to resolve
    :param host_dict: ``dict`` accumulator; mutated in place and returned
    """
    if host_dict is None:
        host_dict = {}
    # Everything below appends to host_dict['hosts']; guarantee the key
    # exists (the original raised KeyError when no host_dict was passed in).
    host_dict.setdefault('hosts', [])
    inventory_group = inventory.get(group_name)
    if not inventory_group:
        print('Inventory group "%s" not found, skipping.' % group_name)
        return host_dict
    for child in inventory_group.get('children') or []:
        recursive_host_get(
            inventory=inventory, group_name=child, host_dict=host_dict
        )
    known_hostnames = set(node['hostname'] for node in host_dict['hosts'])
    for host in inventory_group.get('hosts') or []:
        # The original membership test compared the hostname string against
        # whole node dicts, so it never matched and duplicates could slip
        # in via overlapping child groups; compare hostnames instead.
        if host not in known_hostnames:
            ca = inventory['_meta']['hostvars'][host]['container_address']
            host_dict['hosts'].append({
                'hostname': host,
                'container_address': ca
            })
            known_hostnames.add(host)
    return host_dict
def build_pool_parts(inventory):
    """Populate every POOL_PARTS entry's 'hosts' list from *inventory*.

    Mutates the module-level POOL_PARTS dict in place (each entry doubles
    as the host_dict accumulator) and returns it for convenience.
    """
    # .values() replaces the Python-2-only .iteritems(); the key was unused
    # and the result is only iterated, so behaviour is unchanged.
    for value in POOL_PARTS.values():
        recursive_host_get(
            inventory, group_name=value['group'], host_dict=value
        )
    return POOL_PARTS
def file_find(filename, user_file=None, pass_exception=False):
    """Return the path to a file.

    If no file is found the system will exit, unless *pass_exception* is
    True in which case False is returned instead.

    The file lookup will be done in the following locations, in order:
        <user_file>  (when given; checked FIRST)
        /etc/openstack_deploy/<filename>
        $HOME/openstack_deploy/<filename>
        $(pwd)/<filename>

    :param filename: ``str`` Name of the file to find
    :param user_file: ``str`` Additional location to look in FIRST for a file
    :param pass_exception: ``bool`` Return False instead of exiting when
        nothing is found
    """
    file_check = [
        os.path.join(
            '/etc', 'openstack_deploy', filename
        ),
        # expanduser('~') always returns a string, unlike
        # os.environ.get('HOME'), which is None when HOME is unset and
        # would crash os.path.join with a TypeError.
        os.path.join(
            os.path.expanduser('~'), 'openstack_deploy', filename
        ),
        os.path.join(
            os.getcwd(), filename
        )
    ]
    if user_file is not None:
        file_check.insert(0, os.path.expanduser(user_file))
    for candidate in file_check:
        if os.path.isfile(candidate):
            return candidate
    if pass_exception is False:
        raise SystemExit('No file found at: %s' % file_check)
    return False
def args():
    """Define the command-line interface and return parsed options as a dict."""
    parser = argparse.ArgumentParser(
        usage='%(prog)s',
        description='Rackspace Openstack, Inventory Generator',
        epilog='Inventory Generator Licensed "Apache 2.0"')
    add = parser.add_argument
    add('-f', '--file',
        help='Inventory file. Default: [ %(default)s ]',
        required=False, default='openstack_inventory.json')
    add('-s', '--snat-pool-address',
        help='LB Main SNAT pool address for [ RPC_SNATPOOL ], for'
             ' multiple snat pool addresses comma seperate the ip'
             ' addresses. By default this IP will be .15 from within your'
             ' containers_cidr as found within inventory.',
        required=False, default=None)
    add('--limit-source',
        help='Limit available connections to the source IP for all source'
             ' limited entries.',
        required=False, default=None)
    add('--ssl-public-ip',
        help='Public IP address for the F5 to use.',
        required=False, default=None)
    add('--ssl-domain-name',
        help='Name of the domain that will have an ssl cert.',
        required=False, default=None)
    add('--sec-host-network',
        help='Security host network in CIDR format.'
             ' EXAMPLE: "192.168.1.0/24"',
        required=False, default=None)
    add('--sec-container-network',
        help='Security container network in CIDR format.'
             ' EXAMPLE: "192.168.2.1/24',
        required=False, default=None)
    add('--sec-public-vlan-name',
        help='Security container network address and netmask.'
             ' EXAMPLE: "FW-LB"',
        required=False, default=None)
    add('--galera-monitor-user',
        help='Name of the user that will be available for the F5 to pull when'
             ' monitoring Galera.',
        required=False, default='openstack')
    add('--print',
        help='Print the script to screen, as well as write it out',
        required=False, default=False, action='store_true')
    add('-e', '--export',
        help='Export the generated F5 configuration script.'
             ' Default: [ %(default)s ]',
        required=False,
        default=os.path.join(
            os.path.expanduser('~/'), 'rpc_f5_config.sh'
        ))
    add('--afm',
        help='Pass this argument if the f5 environment is using the Advanced Firewall Module.'
             'Adding this flag will create the required rules to open up the API to ALL SOURCES.'
             'It will also create a rule to block communication from the Provider Network to the Host network.',
        required=False, default=False, action='store_true')
    add('-S', '--Superman',
        help='Yes, its Superman ... strange visitor from another planet,'
             'who came to Earth with powers and abilities far beyond those of mortal men! '
             'Superman ... who can change the course of mighty rivers, bend steel in his bare hands,'
             'and who, disguised as Clark Kent, mild-mannered reporter for a great metropolitan newspaper,'
             'fights a never-ending battle for truth, justice, and the American way!',
        required=False, default=False, action='store_true')
    return vars(parser.parse_args())
def main():
    """Run the main application.

    Reads the OpenStack dynamic-inventory JSON, expands the tmsh templates
    for monitors, nodes, pools and virtual servers, and writes the result
    as a shell-style F5 build script to the --export path (optionally also
    printing it).
    """
    # Parse user args
    user_args = args()
    # Get the contents of the system environment json
    environment_file = file_find(filename=user_args['file'])
    with open(environment_file, 'rb') as f:
        inventory_json = json.loads(f.read())
    # Accumulators for the different sections of the generated script.
    commands = []
    nodes = set()
    pools = []
    virts = []
    sslvirts = []
    pubvirts = []
    # NOTE(review): afmrules is extended into the script below but nothing
    # ever appends to it; the AFM section is emitted via SEC_AFM_RULES
    # instead -- looks vestigial, confirm before removing.
    afmrules = []
    # Boilerplate emitted before the per-service objects: discard iRule,
    # X-Forwarded-Proto HTTP profile and the external endpoint monitor.
    commands.extend([
        '### CREATE SECURITY iRULE ###',
        'run util bash',
        'tmsh create ltm rule /' + PART + '/' + PREFIX_NAME + '_DISCARD_ALL when CLIENT_ACCEPTED { discard }',
        'exit',
        '### CREATE HTTP PROFILE ###',
        'create ltm profile http /' + PART + '/' + PREFIX_NAME + '_X-FORWARDED-PROTO { header-insert "X-Forwarded-Proto: https" }\n',
        '### CREATE EXTERNAL MONITOR ###',
        ' --> Upload External monitor file to disk <--',
        ' run util bash',
        ' curl -k -o /config/monitors/RPC-MON-EXT-ENDPOINT.monitor https://raw.githubusercontent.com/dpham-rs/rpc-openstack/master/scripts/f5-monitor-liberty.sh',
        ' exit',
        ' create sys file external-monitor /' + PART + '/RPC-MON-EXT-ENDPOINT { source-path file:///config/monitors/RPC-MON-EXT-ENDPOINT.monitor }',
        ' save sys config',
        ' create ltm monitor external /' + PART + '/RPC-MON-EXT-ENDPOINT { interval 20 timeout 61 run /' + PART + '/RPC-MON-EXT-ENDPOINT }\n'
    ])
    # SSL key/cert pair and client/server SSL profiles, only when a domain
    # name was supplied.
    if user_args['ssl_domain_name']:
        commands.extend([
            '### UPLOAD SSL CERT KEY PAIR ###',
            'cd /RPC',
            'create sys crypto key /' + PART + '/%(ssl_domain_name)s.key'
            % user_args,
            'create sys crypto cert /' + PART + '/%(ssl_domain_name)s.crt key /' % user_args + PART + '/%(ssl_domain_name)s.key common-name %(ssl_domain_name)s lifetime 3650'
            % user_args,
            'cd /Common\n',
            '### CREATE SSL PROFILES ###',
            ('create ltm profile client-ssl'
             ' /' + PART + '/' + PREFIX_NAME + '_PROF_SSL_%(ssl_domain_name)s'
             ' { cert /' + PART + '/%(ssl_domain_name)s.crt key'
             ' /' + PART + '/%(ssl_domain_name)s.key defaults-from clientssl }')
            % user_args,
            # NOTE(review): the string below contains no %()s placeholders,
            # so the trailing '% user_args' is a no-op -- confirm intent.
            'create ltm profile server-ssl /' + PART + '/' + PREFIX_NAME + '_PROF_SSL_SERVER { defaults-from /Common/serverssl }\n'
            % user_args,
        ])
    # Easter egg: ASCII-art banner.
    if user_args['Superman']:
        print " ************************** "
        print " .*##*:*####***:::**###*:######*. "
        print " *##: .###* *######:,##* "
        print " *##: :####: *####*. :##: "
        print " *##,:########**********:, :##: "
        print " .#########################*, *#* "
        print " *#########################*##: "
        print " *##, ..,,::**#####: "
        print " ,##*,*****, *##* "
        print " *#########*########: "
        print " *##*:*******###* "
        print " .##*. ,##* "
        print " :##* *##, "
        print " *####: "
        print " :, "
        # Kal-El
        # SUPERMAN
        # JNA
    # Resolve each service's hosts from the inventory, then expand nodes,
    # pools and (internal/public) virtual servers for every service.
    pool_parts = build_pool_parts(inventory=inventory_json)
    lb_vip_address = inventory_json['all']['vars']['internal_lb_vip_address']
    for key, value in pool_parts.iteritems():
        value['group_name'] = key.upper()
        value['vs_name'] = '%s_VS_%s' % (
            PREFIX_NAME, value['group_name']
        )
        value['pool_name'] = '%s_POOL_%s' % (
            PREFIX_NAME, value['group_name']
        )
        node_data = []
        # Priority-group counter for pools flagged priority=True; counts
        # down by 5 per member so earlier members are preferred.
        priority = 100
        for node in value['hosts']:
            node['node_name'] = '%s_NODE_%s' % (PREFIX_NAME, node['hostname'])
            nodes.add(NODES % node)
            if value.get('persist'):
                persist = PERSIST_OPTION
            else:
                persist = str()
            # Substitution values for the virtual-server templates.
            virtual_dict = {
                'port': value['port'],
                'vs_name': value['vs_name'],
                'pool_name': value['pool_name'],
                'internal_lb_vip_address': lb_vip_address,
                'persist': persist,
                'ssl_domain_name': user_args['ssl_domain_name'],
                'ssl_public_ip': user_args['ssl_public_ip'],
            }
            ##########################################
            # Internal virtual server (de-duplicated across nodes).
            virt = '%s' % VIRTUAL_ENTRIES % virtual_dict
            if virt not in virts:
                virts.append(virt)
            # Public virtual servers, only when a public IP was supplied.
            if user_args['ssl_public_ip']:
                if not value.get('backend_ssl'):
                    # SSL terminates on the F5 (client-side profile only).
                    virtual_dict['ltm_profiles'] = (
                        '/' + PART + '/' + PREFIX_NAME + '_PROF_SSL_%(ssl_domain_name)s { context clientside }'
                    ) % user_args
                    if value.get ('x-forwarded-proto'):
                        virtual_dict['ltm_profiles'] = '/' + PART + '/' + PREFIX_NAME + '_X-FORWARDED-PROTO { }/' + PART + '/' + PREFIX_NAME + '_PROF_SSL_%(ssl_domain_name)s { context clientside }'% user_args
                else:
                    # Backend also speaks SSL: add a server-side profile too.
                    virtual_dict['ltm_profiles'] = '/' + PART + '/' + PREFIX_NAME + '_PROF_SSL_SERVER { context serverside } /' + PART + '/' + PREFIX_NAME + '_PROF_SSL_%(ssl_domain_name)s { context clientside }'% user_args
                if value.get('make_public'):
                    if value.get ('ssl_impossible'):
                        # Service cannot be SSL-offloaded: plain public VS.
                        virtual_dict['vs_name'] = '%s_VS_%s' % (
                            'RPC_PUB', value['group_name']
                        )
                        pubvirt = (
                            '%s\n'
                        ) % PUB_NONSSL_VIRTUAL_ENTRIES % virtual_dict
                        if pubvirt not in pubvirts:
                            pubvirts.append(pubvirt)
                    else:
                        virtual_dict['vs_name'] = '%s_VS_%s' % (
                            'RPC_PUB_SSL', value['group_name']
                        )
                        sslvirt = '%s' % PUB_SSL_VIRTUAL_ENTRIES % virtual_dict
                        if sslvirt not in sslvirts:
                            sslvirts.append(sslvirt)
            # Pool-member entry, with a priority group when requested.
            if value.get('priority') is True:
                node_data.append(
                    '%s:%s %s' % (
                        node['node_name'],
                        value['backend_port'],
                        PRIORITY_ENTRY % {'priority_int': priority}
                    )
                )
                priority -= 5
            else:
                node_data.append(
                    '%s:%s' % (
                        node['node_name'],
                        value['backend_port']
                    )
                )
            ##########################################
        value['nodes'] = ' '.join(node_data)
        pool_node = [POOL_NODE['beginning'] % value]
        if value.get('priority') is True:
            pool_node.append(POOL_NODE['priority'])
        pool_node.append(POOL_NODE['end'] % value)
        pools.append('%s' % ' '.join(pool_node))
    # define the SNAT pool address
    snat_pool_adds = user_args.get('snat_pool_address')
    if snat_pool_adds is None:
        # Default: the .15 address of the container CIDR from inventory.
        container_cidr = inventory_json['all']['vars']['container_cidr']
        network = netaddr.IPNetwork(container_cidr)
        snat_pool_adds = str(network[15])
    snat_pool_addresses = ' '.join(snat_pool_adds.split(','))
    snat_pool = '%s\n' % SNAT_POOL % {
        'snat_pool_addresses': snat_pool_addresses
    }
    snat_translations = []
    for snat_ip in snat_pool_adds.split(","):
        snat_translations.append( SNAT_IDLE % snat_ip)
    # Assemble the final script, section by section.
    script = [
        '#!/usr/bin/bash\n',
        r'### F5 Build Script -- Liberty ###',
        r'### CREATE RPC PARTITION ###',
        'create auth partition %s\n' % PART,
        r'### SET DISPLAY PORT NUMBERS ###',
        'modify cli global-settings service number\n',
        snat_pool
    ]
    script.extend(['%s' % i for i in snat_translations])
    script.extend(['\n### CREATE MONITORS ###'])
    script.extend(['%s' % i % user_args for i in MONITORS])
    script.extend(['%s' % i for i in commands])
    script.extend(['### CREATE PERSISTENCE PROFILES ###'])
    script.extend(['%s' % i % user_args for i in PERSISTANCE])
    script.extend(['### CREATE NODES ###'])
    script.extend(['%s' % i % user_args for i in sorted(nodes)])
    script.extend(['\n### CREATE POOLS ###'])
    script.extend(pools)
    script.extend(['\n### CREATE VIRTUAL SERVERS ###'])
    script.extend(virts)
    script.extend(['\n### CREATE PUBLIC SSL OFFLOADED VIRTUAL SERVERS ###'])
    script.extend(sslvirts)
    script.extend(['\n### CREATE PUBLIC SSL PASS-THROUGH VIRTUAL SERVERS ###'])
    script.extend(pubvirts)
    # Optional security forwarding virtuals for host/container networks.
    if user_args['sec_host_network']:
        hostnet = netaddr.IPNetwork(user_args['sec_host_network'])
        if not user_args['sec_public_vlan_name']:
            raise SystemExit('Please set the [ --sec-public-vlan-name ] value')
        script.append(
            SEC_HOSTNET_VIRTUAL_ENTRIES % {
                'sec_host_net': str(hostnet.ip),
                'sec_host_netmask': str(hostnet.netmask),
                'sec_public_vlan_name': user_args['sec_public_vlan_name']
            }
        )
    if user_args['sec_container_network']:
        containernet = netaddr.IPNetwork(user_args['sec_container_network'])
        script.append(
            SEC_CONTAINER_VIRTUAL_ENTRIES % {
                'sec_container_net': str(containernet.ip),
                'sec_container_netmask': str(containernet.netmask)
            }
        )
    script.extend(afmrules)
    # AFM firewall objects require both the public IP and the host network.
    if user_args['afm']:
        if not user_args['ssl_public_ip']:
            raise SystemExit('Please set the [ --ssl_public_ip ] value')
        if not user_args['sec_host_network']:
            raise SystemExit('Please set the [ --sec_host_network ] value')
        script.append(
            SEC_AFM_RULES % {
                'ssl_public_ip': user_args['ssl_public_ip'],
                'private_ip': lb_vip_address,
                'sec_host_net': user_args['sec_host_network']
            }
        )
    script.extend(['%s\n' % i for i in END_COMMANDS])
    if user_args['print']:
        for i in script:
            print(i)
    with open(user_args['export'], 'w+') as f:
        f.writelines("\n".join(script))
if __name__ == "__main__":
    # Script entry point: generate and write the F5 configuration script.
    main()
| {
"content_hash": "f0466a73191264d6fd590204ac51bf42",
"timestamp": "",
"source": "github",
"line_count": 805,
"max_line_length": 222,
"avg_line_length": 37.017391304347825,
"alnum_prop": 0.526259270445317,
"repo_name": "busterswt/rpc-openstack",
"id": "5436ac18d4ed7651f709d7ae94a03c159f79966c",
"size": "30585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/f5-config.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "2319"
},
{
"name": "Python",
"bytes": "126966"
},
{
"name": "Shell",
"bytes": "20131"
}
],
"symlink_target": ""
} |
# Auto-generated single-model test: build the ozone-dataset model using the
# Integration transform, Lag1Trend trend, week-of-year seasonality and no AR.
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Integration'] , ['Lag1Trend'] , ['Seasonal_WeekOfYear'] , ['NoAR'] );
"content_hash": "c44657f9fb22a6c7a74e2038c3b5e38a",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 92,
"avg_line_length": 41.25,
"alnum_prop": 0.7212121212121212,
"repo_name": "antoinecarme/pyaf",
"id": "9219a2df0197e4b8f4a0c24b0c6da774ea8f7f06",
"size": "165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_Lag1Trend_Seasonal_WeekOfYear_NoAR.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
import os

# Number of the Facebook group to scrape.
group_number = 1

# Chrome's local-storage file for facebook.com.  Adjust this if your Chrome
# profile lives in a non-standard location.
# os.path.expanduser('~') is used instead of os.environ['HOME'] so the script
# does not raise KeyError when HOME is unset (expanduser falls back to the
# password database on POSIX systems).
chrome_local_storage_db = os.path.join(
    os.path.expanduser('~'),
    '.config/google-chrome/Default/Local Storage/https_www.facebook.com_0.localstorage')
| {
"content_hash": "9da805fbf741221c06373f1fa9ecdcca",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 131,
"avg_line_length": 35.625,
"alnum_prop": 0.7649122807017544,
"repo_name": "amdunn/intern_map",
"id": "819c68e66cb9cd10fb517cf3df813d4c36c3934a",
"size": "285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/get_where/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1076"
},
{
"name": "Python",
"bytes": "5222"
}
],
"symlink_target": ""
} |
from xml.dom.minidom import parse
import xml.dom.minidom
import os
import json
import codecs

## This script parses every .tei.xml file emitted by GROBID and extracts, per
## paper, the author names, affiliations, title and publication year into a
## JSON feed, plus a flat newline-separated list of author names.

## TEI annotations data file
outputfile_path = "/home/jaydeep/599hw2/tei_json"
list_of_authors = "/home/jaydeep/599hw2/list_of_authors"
tei_file_dir = "/media/jaydeep/mySpace/spring2016/599/out/"
#tei_file_dir = "/home/jaydeep/src/grobid/out/"

feed = []
f1 = codecs.open(list_of_authors, "a", encoding='utf8')
with open(outputfile_path, 'w') as feedsjson:
    for filename in os.listdir(tei_file_dir):
        # Only process GROBID TEI output files.
        if not filename.endswith(".tei.xml"):
            continue
        DOMTree = xml.dom.minidom.parse(tei_file_dir + filename)
        collection = DOMTree.documentElement
        entry = {"authors": [], "publication_year": "", "Affiliations": [], "title": ""}
        # NOTE: assumes every file has an <analytic> element (IndexError
        # otherwise), matching the original behavior.
        analytic = collection.getElementsByTagName("analytic")[0]
        for author in analytic.getElementsByTagName("author"):
            # BUG FIX: reset names for every author.  Previously a missing
            # <forename>/<surname> either raised NameError (first author) or
            # silently reused the previous author's name.
            firstname = ''
            lastname = ''
            forenames = author.getElementsByTagName("forename")
            if forenames:
                firstname = forenames[0].childNodes[0].data
            surnames = author.getElementsByTagName("surname")
            if surnames:
                lastname = surnames[0].childNodes[0].data
            # Join all <orgName> entries with commas (no leading comma).
            orgs = author.getElementsByTagName("orgName")
            affiliation = ','.join(org.childNodes[0].data for org in orgs)
            name = firstname + " " + lastname
            entry["authors"].append(name)
            f1.write(name + "\n")
            entry["Affiliations"].append(affiliation)
        titles = analytic.getElementsByTagName("title")
        if titles:
            entry["title"] = titles[0].childNodes[0].data
        imprints = analytic.getElementsByTagName("imprint")
        if imprints:
            dates = imprints[0].getElementsByTagName("date")
            if dates:
                # <date when="..."> carries the publication year/date.
                entry["publication_year"] = dates[0].getAttribute("when")
        feed.append(entry)
    json.dump(feed, feedsjson)
f1.close()
| {
"content_hash": "7b6d2d8c1493d11420b4939c5136cdbf",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 100,
"avg_line_length": 31.681159420289855,
"alnum_prop": 0.6788655077767612,
"repo_name": "jdramani/599-2",
"id": "8e57c30a0c965995c21f6a8380214b2cba6bf356",
"size": "2186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "q5/tei_xml_parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "41614"
},
{
"name": "Java",
"bytes": "20541"
},
{
"name": "Python",
"bytes": "51495"
},
{
"name": "Shell",
"bytes": "342"
}
],
"symlink_target": ""
} |
from typing import List
from datetime import datetime
from github.Issue import Issue
from common import Common
# GitHub logins of the Python SDK owners; issues opened by them are filtered
# out in Python.collect_open_issues.
_PYTHON_OWNER = {'msyyc', 'Wzb123456789'}
# Repository whose open issues are collected.
_PYTHON_REPO = 'Azure/azure-sdk-for-python'
# Markdown report file written by the helper.
_FILE_OUT_NAME_PYTHON = 'sdk_issue_python.md'
class Python(Common):
    """Issue helper for the Python SDK repo.

    Extends Common with owner filtering and a "no reply > 7" bot advice rule.
    """

    def collect_open_issues(self) -> List[Issue]:
        """Return open issues, excluding those created by the language owners."""
        open_issues = super().collect_open_issues()
        # Skip issue created by owners
        filtered_issues = [i for i in open_issues if i.user.login not in self.language_owner]
        return filtered_issues

    def judge_status(self, issue: Issue) -> str:
        """Return bot advice for *issue*, adding a 'no reply > 7' hint.

        The hint is emitted only when the base class produced no advice and the
        issue is not labeled 'issue-addressed' or 'needs-author-feedback'.
        """
        bot_advice = super().judge_status(issue)
        # Prompt to add `issue-addressed` tag if customer has not replied > 7 days
        issue_labels = [label.name for label in issue.labels]
        if not bot_advice and 'issue-addressed' not in issue_labels and 'needs-author-feedback' not in issue_labels:
            comments = list(issue.get_comments())
            # BUG FIX: issues with zero comments previously raised IndexError
            # on comments[-1]; fall back to the issue's own update time.
            last_updated = comments[-1].updated_at if comments else issue.updated_at
            if (datetime.today() - last_updated).days > 7:
                return 'no reply > 7'
        return bot_advice
def python_process() -> None:
    """Collect, judge and report open issues for the Python SDK repository."""
    helper = Python(_PYTHON_OWNER, _PYTHON_REPO, _FILE_OUT_NAME_PYTHON)
    helper.run()
| {
"content_hash": "06db22a295966e3457e4e1f38f140085",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 116,
"avg_line_length": 36.6875,
"alnum_prop": 0.6669505962521295,
"repo_name": "Azure/azure-sdk-for-python",
"id": "779d5859cd66bb7a6666d2abf92f8ff27d62863b",
"size": "1174",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scripts/issue_helper/python.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.jvm.tasks.jvm_tool_task_mixin import JvmToolTaskMixin
from pants.base.exceptions import TaskError
from pants.init.subprocess import Subprocess
from pants.java import util
from pants.java.executor import SubprocessExecutor
from pants.java.jar.jar_dependency import JarDependency
from pants.java.nailgun_executor import NailgunExecutor, NailgunProcessGroup
from pants.task.task import Task, TaskBase
class NailgunTaskBase(JvmToolTaskMixin, TaskBase):
  """Base for tasks that run JVM tools, optionally via a persistent nailgun server.

  Each concrete subclass gets its own daemon identity/workdir (derived from
  ID_PREFIX plus the subclass name) so repeated JVM invocations can reuse a
  warm server instead of paying JVM startup cost every time.
  """

  # Prefix for the per-subclass daemon identity and executor workdir.
  ID_PREFIX = 'ng'
  @classmethod
  def register_options(cls, register):
    """Registers nailgun tuning options and the nailgun-server bootstrap jar."""
    super(NailgunTaskBase, cls).register_options(register)
    register('--use-nailgun', type=bool, default=True,
             help='Use nailgun to make repeated invocations of this task quicker.')
    register('--nailgun-timeout-seconds', advanced=True, default=10, type=float,
             help='Timeout (secs) for nailgun startup.')
    register('--nailgun-connect-attempts', advanced=True, default=5, type=int,
             help='Max attempts for nailgun connects.')
    cls.register_jvm_tool(register,
                          'nailgun-server',
                          classpath=[
                            JarDependency(org='com.martiansoftware',
                                          name='nailgun-server',
                                          rev='0.9.1'),
                          ])
  @classmethod
  def subsystem_dependencies(cls):
    """Adds the Subprocess factory needed to launch the server process."""
    return super(NailgunTaskBase, cls).subsystem_dependencies() + (Subprocess.Factory,)
  def __init__(self, *args, **kwargs):
    """
    :API: public
    """
    super(NailgunTaskBase, self).__init__(*args, **kwargs)
    # Identity like 'ng_MyTask' names this subclass's dedicated daemon; the
    # workdir lives under the global pants workdir.
    id_tuple = (self.ID_PREFIX, self.__class__.__name__)
    self._identity = '_'.join(id_tuple)
    self._executor_workdir = os.path.join(self.context.options.for_global_scope().pants_workdir,
                                          *id_tuple)
  def create_java_executor(self):
    """Create java executor that uses this task's ng daemon, if allowed.
    Call only in execute() or later. TODO: Enforce this.
    """
    # With --use-nailgun the classpath only carries the nailgun server jar;
    # tool classpaths are passed per-call through runjava.
    if self.get_options().use_nailgun:
      classpath = os.pathsep.join(self.tool_classpath('nailgun-server'))
      return NailgunExecutor(self._identity,
                             self._executor_workdir,
                             classpath,
                             self.dist,
                             connect_timeout=self.get_options().nailgun_timeout_seconds,
                             connect_attempts=self.get_options().nailgun_connect_attempts)
    else:
      return SubprocessExecutor(self.dist)
  def runjava(self, classpath, main, jvm_options=None, args=None, workunit_name=None,
              workunit_labels=None, workunit_log_config=None):
    """Runs the java main using the given classpath and args.
    If --no-use-nailgun is specified then the java main is run in a freshly spawned subprocess,
    otherwise a persistent nailgun server dedicated to this Task subclass is used to speed up
    amortized run times.
    :API: public
    """
    executor = self.create_java_executor()
    # Creating synthetic jar to work around system arg length limit is not necessary
    # when `NailgunExecutor` is used because args are passed through socket, therefore turning off
    # creating synthetic jar if nailgun is used.
    create_synthetic_jar = not self.get_options().use_nailgun
    try:
      return util.execute_java(classpath=classpath,
                               main=main,
                               jvm_options=jvm_options,
                               args=args,
                               executor=executor,
                               workunit_factory=self.context.new_workunit,
                               workunit_name=workunit_name,
                               workunit_labels=workunit_labels,
                               workunit_log_config=workunit_log_config,
                               create_synthetic_jar=create_synthetic_jar,
                               synthetic_jar_dir=self._executor_workdir)
    except executor.Error as e:
      # Normalize executor failures into the task-level error type.
      raise TaskError(e)
# TODO(John Sirois): This just prevents ripple - maybe inline
class NailgunTask(NailgunTaskBase, Task):
  """Concrete Task with nailgun support; presumably kept so subclasses extend
  one base instead of mixing NailgunTaskBase and Task themselves.

  :API: public
  """
  pass
class NailgunKillall(Task):
  """Kill running nailgun servers."""

  @classmethod
  def register_options(cls, register):
    """Registers the --everywhere flag that widens the kill scope."""
    super(NailgunKillall, cls).register_options(register)
    register(
        '--everywhere',
        type=bool,
        help='Kill all nailguns servers launched by pants for all workspaces on the system.')

  def execute(self):
    """Terminates nailgun server processes, optionally across all workspaces."""
    kill_everywhere = self.get_options().everywhere
    NailgunProcessGroup().killall(everywhere=kill_everywhere)
| {
"content_hash": "f813bf571a26acc25a927cf95dd6ff53",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 98,
"avg_line_length": 40.76271186440678,
"alnum_prop": 0.620997920997921,
"repo_name": "15Dkatz/pants",
"id": "8160b8489fc6b16df1607ba9e5cf87fb43d51036",
"size": "4957",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/jvm/tasks/nailgun_task.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "1805"
},
{
"name": "HTML",
"bytes": "79866"
},
{
"name": "Java",
"bytes": "481460"
},
{
"name": "JavaScript",
"bytes": "35417"
},
{
"name": "Python",
"bytes": "5884798"
},
{
"name": "Rust",
"bytes": "212512"
},
{
"name": "Scala",
"bytes": "76124"
},
{
"name": "Shell",
"bytes": "67399"
},
{
"name": "Thrift",
"bytes": "2795"
}
],
"symlink_target": ""
} |
"""Sweep module."""
def get_experiment(exp_name, c, h):
  """Returns the parameter exploration for `exp_name`."""
  # The experiment name and config are intentionally unused; the sweep is a
  # single-seed product.
  del exp_name, c
  seed_sweep = h.sweep("GANManager.seed", [0])
  return h.product([seed_sweep], name="_gin")
| {
"content_hash": "1f69896cb8be0dcfff4cf9cc0394b156",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 57,
"avg_line_length": 21.181818181818183,
"alnum_prop": 0.5536480686695279,
"repo_name": "google-research/se3ds",
"id": "7bde4072fb7ad56036ebca6033d161daa5600b8a",
"size": "828",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "configs/highres/sweep.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "9133325"
},
{
"name": "Python",
"bytes": "266036"
},
{
"name": "Shell",
"bytes": "385"
}
],
"symlink_target": ""
} |
import signal
import socket
import sys
import time
from zktraffic import __version__
from zktraffic.endpoints.stats_server import StatsServer
from zktraffic.base.process import ProcessOptions
from twitter.common import app, log
from twitter.common.http import HttpServer
from twitter.common.http.diagnostics import DiagnosticsEndpoints
def setup():
  """Registers every command-line option understood by the stats daemon."""
  # (flags, keyword arguments) pairs forwarded verbatim to app.add_option.
  option_specs = (
      (("--iface",),
       dict(dest="iface", metavar="IFACE", default="eth0",
            help="interface to capture packets from")),
      (("--http-port",),
       dict(dest="http_port", metavar="HTTPPORT", type=int, default=7070,
            help="listen port for http endpoints")),
      (("--http-address",),
       dict(dest="http_addr", metavar="HTTPADDR", type=str,
            default=socket.gethostname(),
            help="listen address for http endpoints")),
      (("--zookeeper-port",),
       dict(type=int, default=2181,
            help="ZK's client port (from which to sniff)")),
      (("--aggregation-depth",),
       dict(dest="aggregation_depth", type=int, default=0,
            help="aggregate paths up to a certain depth")),
      (("--max-results",),
       dict(dest="max_results", type=int, default=10,
            help="top number of results to be exported")),
      (("--refresh-time",),
       dict(dest="refresh_time", type=int, default=0,
            help="refresh time in the generated html")),
      (("--niceness",),
       dict(dest="niceness", type=int, default=0,
            help="set the niceness")),
      (("--set-cpu-affinity",),
       dict(dest="cpu_affinity", metavar="CPU#[,CPU#]", type=str, default=None,
            help="A comma-separated list of CPU cores to pin this process to")),
      (("--max-queued-requests",),
       dict(type=int, default=400000, help="max queued requests")),
      (("--max-queued-replies",),
       dict(type=int, default=400000, help="max queued replies")),
      (("--max-queued-events",),
       dict(type=int, default=400000, help="max queued events")),
      (("--version",),
       dict(default=False, action='store_true')),
  )
  for flags, kwargs in option_specs:
    app.add_option(*flags, **kwargs)
class Server(HttpServer):
  """Concrete HttpServer onto which main() mounts the diagnostics and stats
  endpoint routes."""
  pass
def main(_, opts):
  """Daemon entry point: build the sniffer, apply process options, serve HTTP.

  :param _: positional arguments from the app framework (unused).
  :param opts: parsed options registered in setup().
  """
  # --version short-circuits everything else.
  if opts.version:
    sys.stdout.write("%s\n" % __version__)
    sys.exit(0)
  stats = StatsServer(opts.iface,
                      opts.zookeeper_port,
                      opts.aggregation_depth,
                      opts.max_results,
                      opts.max_queued_requests,
                      opts.max_queued_replies,
                      opts.max_queued_events)
  log.info("Starting with opts: %s" % (opts))
  # Restore default SIGINT handling so Ctrl-C terminates the daemon.
  signal.signal(signal.SIGINT, signal.SIG_DFL)
  process = ProcessOptions()
  if opts.niceness >= 0:
    process.set_niceness(opts.niceness)
  if opts.cpu_affinity:
    process.set_cpu_affinity(opts.cpu_affinity)
  server = Server()
  server.mount_routes(DiagnosticsEndpoints())
  server.mount_routes(stats)
  server.run(opts.http_addr, opts.http_port)
  # Keep the main thread alive; presumably server.run returns after starting
  # a background serving thread -- TODO confirm against twitter.common.http.
  while True:
    time.sleep(10)
# Script entry point: register options, then hand control to the app framework.
if __name__ == '__main__':
  setup()
  app.main()
| {
"content_hash": "7b3556265dd061848d18e801bcd6df20",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 83,
"avg_line_length": 30.134453781512605,
"alnum_prop": 0.534021193530396,
"repo_name": "fengshao0907/zktraffic",
"id": "46e10255813ee96e1740d7603af5745f5dab6c8b",
"size": "4487",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zktraffic/cli/stats_daemon.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "202780"
}
],
"symlink_target": ""
} |
from typing import Any, Callable, Dict, Iterable, List, Optional, TypeVar, Union, cast
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
# Signature of the optional `cls` response-transform callback accepted by the
# generated operations.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
# Disable msrest client-side validation for request serialization (autorest
# generated-code convention).
_SERIALIZER.client_side_validation = False
def build_list_request(
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists all caches in the subscription."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    accept = _headers.pop('Accept', "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/caches")
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=_url,
        params=_params,
        headers=_headers,
        **kwargs
    )
def build_list_by_resource_group_request(
    resource_group_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists the caches in one resource group."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    accept = _headers.pop('Accept', "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches")  # pylint: disable=line-too-long
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=_url,
        params=_params,
        headers=_headers,
        **kwargs
    )
def build_delete_request_initial(
    resource_group_name: str,
    cache_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial DELETE request of the long-running cache delete."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    accept = _headers.pop('Accept', "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}")  # pylint: disable=line-too-long
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "cacheName": _SERIALIZER.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="DELETE",
        url=_url,
        params=_params,
        headers=_headers,
        **kwargs
    )
def build_get_request(
    resource_group_name: str,
    cache_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request that fetches a single cache resource."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    accept = _headers.pop('Accept', "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}")  # pylint: disable=line-too-long
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "cacheName": _SERIALIZER.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=_url,
        params=_params,
        headers=_headers,
        **kwargs
    )
def build_create_or_update_request_initial(
    resource_group_name: str,
    subscription_id: str,
    cache_name: str,
    *,
    json: Optional[_models.Cache] = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial PUT request of the long-running cache create-or-update.

    The cache definition is supplied either as `json` or raw `content`.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None))  # type: Optional[str]
    accept = _headers.pop('Accept', "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}")  # pylint: disable=line-too-long
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "cacheName": _SERIALIZER.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    if content_type is not None:
        _headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="PUT",
        url=_url,
        params=_params,
        headers=_headers,
        json=json,
        content=content,
        **kwargs
    )
def build_update_request(
    resource_group_name: str,
    subscription_id: str,
    cache_name: str,
    *,
    json: Optional[_models.Cache] = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the PATCH request that updates an existing cache.

    The updated cache definition is supplied either as `json` or raw `content`.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None))  # type: Optional[str]
    accept = _headers.pop('Accept', "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}")  # pylint: disable=line-too-long
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "cacheName": _SERIALIZER.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    if content_type is not None:
        _headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="PATCH",
        url=_url,
        params=_params,
        headers=_headers,
        json=json,
        content=content,
        **kwargs
    )
def build_debug_info_request_initial(
    resource_group_name: str,
    subscription_id: str,
    cache_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial POST request of the long-running debugInfo action."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    accept = _headers.pop('Accept', "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/debugInfo")  # pylint: disable=line-too-long
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "cacheName": _SERIALIZER.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="POST",
        url=_url,
        params=_params,
        headers=_headers,
        **kwargs
    )
def build_flush_request_initial(
    resource_group_name: str,
    subscription_id: str,
    cache_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial POST request of the long-running cache flush action."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    accept = _headers.pop('Accept', "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/flush")  # pylint: disable=line-too-long
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "cacheName": _SERIALIZER.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="POST",
        url=_url,
        params=_params,
        headers=_headers,
        **kwargs
    )
def build_start_request_initial(
    resource_group_name: str,
    subscription_id: str,
    cache_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial POST request of the long-running cache start action."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    accept = _headers.pop('Accept', "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/start")  # pylint: disable=line-too-long
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "cacheName": _SERIALIZER.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="POST",
        url=_url,
        params=_params,
        headers=_headers,
        **kwargs
    )
def build_stop_request_initial(
    resource_group_name: str,
    subscription_id: str,
    cache_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial POST request of the long-running cache stop action."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    accept = _headers.pop('Accept', "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/stop")  # pylint: disable=line-too-long
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "cacheName": _SERIALIZER.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="POST",
        url=_url,
        params=_params,
        headers=_headers,
        **kwargs
    )
def build_start_priming_job_request_initial(
    resource_group_name: str,
    subscription_id: str,
    cache_name: str,
    *,
    json: Optional[_models.PrimingJob] = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial POST request of the long-running startPrimingJob action.

    The priming job definition is supplied either as `json` or raw `content`.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None))  # type: Optional[str]
    accept = _headers.pop('Accept', "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/startPrimingJob")  # pylint: disable=line-too-long
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "cacheName": _SERIALIZER.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    if content_type is not None:
        _headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="POST",
        url=_url,
        params=_params,
        headers=_headers,
        json=json,
        content=content,
        **kwargs
    )
def build_stop_priming_job_request_initial(
    resource_group_name: str,
    cache_name: str,
    subscription_id: str,
    *,
    json: Optional[_models.PrimingJobIdParameter] = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial POST request of the long-running stopPrimingJob action.

    The priming job id is supplied either as `json` or raw `content`.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None))  # type: Optional[str]
    accept = _headers.pop('Accept', "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/stopPrimingJob")  # pylint: disable=line-too-long
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "cacheName": _SERIALIZER.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    if content_type is not None:
        _headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="POST",
        url=_url,
        params=_params,
        headers=_headers,
        json=json,
        content=content,
        **kwargs
    )
def build_pause_priming_job_request_initial(
    resource_group_name: str,
    cache_name: str,
    subscription_id: str,
    *,
    json: Optional[_models.PrimingJobIdParameter] = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Assemble the initial HTTP POST request for the pausePrimingJob LRO action.

    :param resource_group_name: Target resource group.
    :param cache_name: Name of the Cache (validated against ``^[-0-9a-zA-Z_]{1,80}$``).
    :param subscription_id: Azure subscription id.
    :keyword json: Optional priming-job id body to serialize as JSON.
    :keyword content: Optional raw body content.
    :return: A configured :class:`HttpRequest`.
    """
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop('api_version', params.pop('api-version', "2022-05-01"))  # type: str
    content_type = kwargs.pop('content_type', headers.pop('Content-Type', None))  # type: Optional[str]
    accept = headers.pop('Accept', "application/json")

    # Fill the ARM path template with serialized, pattern-validated path arguments.
    url = _format_url_section(
        kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/pausePrimingJob"),  # pylint: disable=line-too-long
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        cacheName=_SERIALIZER.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
    )

    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Content-Type is only sent when the caller supplied one (a body is optional).
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="POST",
        url=url,
        params=params,
        headers=headers,
        json=json,
        content=content,
        **kwargs
    )
def build_resume_priming_job_request_initial(
    resource_group_name: str,
    cache_name: str,
    subscription_id: str,
    *,
    json: Optional[_models.PrimingJobIdParameter] = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Assemble the initial HTTP POST request for the resumePrimingJob LRO action.

    :param resource_group_name: Target resource group.
    :param cache_name: Name of the Cache (validated against ``^[-0-9a-zA-Z_]{1,80}$``).
    :param subscription_id: Azure subscription id.
    :keyword json: Optional priming-job id body to serialize as JSON.
    :keyword content: Optional raw body content.
    :return: A configured :class:`HttpRequest`.
    """
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop('api_version', params.pop('api-version', "2022-05-01"))  # type: str
    content_type = kwargs.pop('content_type', headers.pop('Content-Type', None))  # type: Optional[str]
    accept = headers.pop('Accept', "application/json")

    # Fill the ARM path template with serialized, pattern-validated path arguments.
    url = _format_url_section(
        kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/resumePrimingJob"),  # pylint: disable=line-too-long
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        cacheName=_SERIALIZER.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
    )

    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Content-Type is only sent when the caller supplied one (a body is optional).
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="POST",
        url=url,
        params=params,
        headers=headers,
        json=json,
        content=content,
        **kwargs
    )
def build_upgrade_firmware_request_initial(
    resource_group_name: str,
    subscription_id: str,
    cache_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Assemble the initial HTTP POST request for the cache firmware upgrade LRO action.

    :param resource_group_name: Target resource group.
    :param subscription_id: Azure subscription id.
    :param cache_name: Name of the Cache (validated against ``^[-0-9a-zA-Z_]{1,80}$``).
    :return: A configured :class:`HttpRequest` (no body is sent for this action).
    """
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop('api_version', params.pop('api-version', "2022-05-01"))  # type: str
    accept = headers.pop('Accept', "application/json")

    # Fill the ARM path template with serialized, pattern-validated path arguments.
    url = _format_url_section(
        kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/upgrade"),  # pylint: disable=line-too-long
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        cacheName=_SERIALIZER.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
    )

    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="POST",
        url=url,
        params=params,
        headers=headers,
        **kwargs
    )
def build_space_allocation_request_initial(
    resource_group_name: str,
    subscription_id: str,
    cache_name: str,
    *,
    json: Optional[List[_models.StorageTargetSpaceAllocation]] = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Assemble the initial HTTP POST request for the spaceAllocation LRO action.

    :param resource_group_name: Target resource group.
    :param subscription_id: Azure subscription id.
    :param cache_name: Name of the Cache (validated against ``^[-0-9a-zA-Z_]{1,80}$``).
    :keyword json: Optional list of storage-target space allocations to serialize as JSON.
    :keyword content: Optional raw body content.
    :return: A configured :class:`HttpRequest`.
    """
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop('api_version', params.pop('api-version', "2022-05-01"))  # type: str
    content_type = kwargs.pop('content_type', headers.pop('Content-Type', None))  # type: Optional[str]
    accept = headers.pop('Accept', "application/json")

    # Fill the ARM path template with serialized, pattern-validated path arguments.
    url = _format_url_section(
        kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/spaceAllocation"),  # pylint: disable=line-too-long
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        cacheName=_SERIALIZER.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
    )

    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Content-Type is only sent when the caller supplied one (a body is optional).
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="POST",
        url=url,
        params=params,
        headers=headers,
        json=json,
        content=content,
        **kwargs
    )
class CachesOperations:  # pylint: disable=too-many-public-methods
    """Operations on StorageCache Cache resources.

    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.storagecache.StorageCacheManagementClient`'s
        :attr:`caches` attribute.
    """
    # Expose the generated models module so callers can reach model types
    # via ``client.caches.models`` without importing the package directly.
    models = _models
def __init__(self, *args, **kwargs):
    """Wire up the pipeline client, configuration and (de)serializers.

    Accepts the four collaborators either positionally (in declaration
    order) or as the keywords ``client``, ``config``, ``serializer`` and
    ``deserializer``.
    """
    positional = list(args)

    def _take(name):
        # Prefer the next positional argument; fall back to the keyword form.
        return positional.pop(0) if positional else kwargs.pop(name)

    self._client = _take("client")
    self._config = _take("config")
    self._serialize = _take("serializer")
    self._deserialize = _take("deserializer")
@distributed_trace
def list(
    self,
    **kwargs: Any
) -> Iterable[_models.CachesListResult]:
    """Returns all Caches the user has access to under a subscription.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either CachesListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storagecache.models.CachesListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[_models.CachesListResult]

    # Map auth/not-found/conflict status codes to typed exceptions; callers
    # may extend or override via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}) or {})

    def prepare_request(next_link=None):
        # First page: use this operation's own URL template.
        # Later pages: reuse the service-provided next_link as the template.
        if not next_link:
            request = build_list_request(
                subscription_id=self._config.subscription_id,
                api_version=api_version,
                template_url=self.list.metadata['url'],
                headers=_headers,
                params=_params,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore

        else:
            request = build_list_request(
                subscription_id=self._config.subscription_id,
                api_version=api_version,
                template_url=next_link,
                headers=_headers,
                params=_params,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore
            # Paging links are always followed with GET regardless of the
            # builder's method.
            request.method = "GET"
        return request

    def extract_data(pipeline_response):
        # Turn one page of response into (next_link, iterator-of-items).
        deserialized = self._deserialize("CachesListResult", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch one page and validate the HTTP status before paging continues.
        request = prepare_request(next_link)

        pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/caches"}  # type: ignore
@distributed_trace
def list_by_resource_group(
    self,
    resource_group_name: str,
    **kwargs: Any
) -> Iterable[_models.CachesListResult]:
    """Returns all Caches the user has access to under a resource group.

    :param resource_group_name: Target resource group.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either CachesListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storagecache.models.CachesListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[_models.CachesListResult]

    # Map auth/not-found/conflict status codes to typed exceptions; callers
    # may extend or override via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}) or {})

    def prepare_request(next_link=None):
        # First page: use this operation's own URL template.
        # Later pages: reuse the service-provided next_link as the template.
        if not next_link:
            request = build_list_by_resource_group_request(
                resource_group_name=resource_group_name,
                subscription_id=self._config.subscription_id,
                api_version=api_version,
                template_url=self.list_by_resource_group.metadata['url'],
                headers=_headers,
                params=_params,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore

        else:
            request = build_list_by_resource_group_request(
                resource_group_name=resource_group_name,
                subscription_id=self._config.subscription_id,
                api_version=api_version,
                template_url=next_link,
                headers=_headers,
                params=_params,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore
            # Paging links are always followed with GET regardless of the
            # builder's method.
            request.method = "GET"
        return request

    def extract_data(pipeline_response):
        # Turn one page of response into (next_link, iterator-of-items).
        deserialized = self._deserialize("CachesListResult", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch one page and validate the HTTP status before paging continues.
        request = prepare_request(next_link)

        pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list_by_resource_group.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches"}  # type: ignore
def _delete_initial(  # pylint: disable=inconsistent-return-statements
    self,
    resource_group_name: str,
    cache_name: str,
    **kwargs: Any
) -> None:
    """Send the initial DELETE request that starts the cache-deletion LRO.

    Accepts 200/202/204; any other status is mapped/raised. Returns None
    (or ``cls(...)`` if a response hook was supplied) — the poller in
    :meth:`begin_delete` consumes the raw pipeline response.
    """
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[None]

    request = build_delete_request_initial(
        resource_group_name=resource_group_name,
        cache_name=cache_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        template_url=self._delete_initial.metadata['url'],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request,
        stream=False,
        **kwargs
    )
    response = pipeline_response.http_response

    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_delete_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}"}  # type: ignore
@distributed_trace
def begin_delete(  # pylint: disable=inconsistent-return-statements
    self,
    resource_group_name: str,
    cache_name: str,
    **kwargs: Any
) -> LROPoller[None]:
    """Schedules a Cache for deletion.

    :param resource_group_name: Target resource group.
    :type resource_group_name: str
    :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
     from the [-0-9a-zA-Z_] char class.
    :type cache_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # With a continuation token the initial request was already issued in a
    # previous session; skip it and rebuild the poller from the token.
    if cont_token is None:
        # cls=lambda passes the raw pipeline response through so the poller
        # can inspect status/headers itself.
        raw_result = self._delete_initial(  # type: ignore
            resource_group_name=resource_group_name,
            cache_name=cache_name,
            api_version=api_version,
            cls=lambda x,y,z: x,
            headers=_headers,
            params=_params,
            **kwargs
        )
    # error_map was consumed by the initial call; drop it before the kwargs
    # are forwarded to the polling method.
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # DELETE has no body to deserialize; only honor a caller-supplied hook.
        if cls:
            return cls(pipeline_response, None, {})

    if polling is True:
        polling_method = cast(PollingMethod, ARMPolling(
            lro_delay,
            
            **kwargs
        ))  # type: PollingMethod
    elif polling is False: polling_method = cast(PollingMethod, NoPolling())
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}"}  # type: ignore
@distributed_trace
def get(
    self,
    resource_group_name: str,
    cache_name: str,
    **kwargs: Any
) -> _models.Cache:
    """Returns a Cache.

    :param resource_group_name: Target resource group.
    :type resource_group_name: str
    :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
     from the [-0-9a-zA-Z_] char class.
    :type cache_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Cache, or the result of cls(response)
    :rtype: ~azure.mgmt.storagecache.models.Cache
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Map auth/not-found/conflict status codes to typed exceptions; callers
    # may extend or override via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[_models.Cache]

    request = build_get_request(
        resource_group_name=resource_group_name,
        cache_name=cache_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        template_url=self.get.metadata['url'],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request,
        stream=False,
        **kwargs
    )
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('Cache', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

get.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}"}  # type: ignore
def _create_or_update_initial(
    self,
    resource_group_name: str,
    cache_name: str,
    cache: Optional[_models.Cache] = None,
    **kwargs: Any
) -> Optional[_models.Cache]:
    """Send the initial PUT request that starts the create-or-update LRO.

    Accepts 200/201 (body deserialized to Cache) and 202 (no body yet —
    returns None); :meth:`begin_create_or_update` polls for the final state.
    """
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}) or {})

    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json"))  # type: Optional[str]
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional[_models.Cache]]

    # Body is optional: serialize when given, otherwise send no JSON payload.
    if cache is not None:
        _json = self._serialize.body(cache, 'Cache')
    else:
        _json = None

    request = build_create_or_update_request_initial(
        resource_group_name=resource_group_name,
        subscription_id=self._config.subscription_id,
        cache_name=cache_name,
        api_version=api_version,
        content_type=content_type,
        json=_json,
        template_url=self._create_or_update_initial.metadata['url'],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request,
        stream=False,
        **kwargs
    )
    response = pipeline_response.http_response

    if response.status_code not in [200, 201, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('Cache', pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize('Cache', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

_create_or_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}"}  # type: ignore
@distributed_trace
def begin_create_or_update(
    self,
    resource_group_name: str,
    cache_name: str,
    cache: Optional[_models.Cache] = None,
    **kwargs: Any
) -> LROPoller[_models.Cache]:
    """Create or update a Cache.

    :param resource_group_name: Target resource group.
    :type resource_group_name: str
    :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
     from the [-0-9a-zA-Z_] char class.
    :type cache_name: str
    :param cache: Object containing the user-selectable properties of the new Cache. If read-only
     properties are included, they must match the existing values of those properties. Default value
     is None.
    :type cache: ~azure.mgmt.storagecache.models.Cache
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either Cache or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.storagecache.models.Cache]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json"))  # type: Optional[str]
    cls = kwargs.pop('cls', None)  # type: ClsType[_models.Cache]
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # With a continuation token the initial PUT was already issued in a
    # previous session; skip it and rebuild the poller from the token.
    if cont_token is None:
        # cls=lambda passes the raw pipeline response through so the poller
        # can inspect status/headers itself.
        raw_result = self._create_or_update_initial(  # type: ignore
            resource_group_name=resource_group_name,
            cache_name=cache_name,
            cache=cache,
            api_version=api_version,
            content_type=content_type,
            cls=lambda x,y,z: x,
            headers=_headers,
            params=_params,
            **kwargs
        )
    # error_map was consumed by the initial call; drop it before the kwargs
    # are forwarded to the polling method.
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Final LRO response body deserializes to a Cache model.
        deserialized = self._deserialize('Cache', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    if polling is True:
        polling_method = cast(PollingMethod, ARMPolling(
            lro_delay,
            
            **kwargs
        ))  # type: PollingMethod
    elif polling is False: polling_method = cast(PollingMethod, NoPolling())
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}"}  # type: ignore
@distributed_trace
def update(
    self,
    resource_group_name: str,
    cache_name: str,
    cache: Optional[_models.Cache] = None,
    **kwargs: Any
) -> _models.Cache:
    """Update a Cache instance.

    :param resource_group_name: Target resource group.
    :type resource_group_name: str
    :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
     from the [-0-9a-zA-Z_] char class.
    :type cache_name: str
    :param cache: Object containing the user-selectable properties of the Cache. If read-only
     properties are included, they must match the existing values of those properties. Default value
     is None.
    :type cache: ~azure.mgmt.storagecache.models.Cache
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Cache, or the result of cls(response)
    :rtype: ~azure.mgmt.storagecache.models.Cache
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Map auth/not-found/conflict status codes to typed exceptions; callers
    # may extend or override via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}) or {})

    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json"))  # type: Optional[str]
    cls = kwargs.pop('cls', None)  # type: ClsType[_models.Cache]

    # Body is optional: serialize when given, otherwise send no JSON payload.
    if cache is not None:
        _json = self._serialize.body(cache, 'Cache')
    else:
        _json = None

    request = build_update_request(
        resource_group_name=resource_group_name,
        subscription_id=self._config.subscription_id,
        cache_name=cache_name,
        api_version=api_version,
        content_type=content_type,
        json=_json,
        template_url=self.update.metadata['url'],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request,
        stream=False,
        **kwargs
    )
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('Cache', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

update.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}"}  # type: ignore
def _debug_info_initial(  # pylint: disable=inconsistent-return-statements
    self,
    resource_group_name: str,
    cache_name: str,
    **kwargs: Any
) -> None:
    """Send the initial POST request that starts the debugInfo LRO.

    Accepts 200/202/204; any other status is mapped/raised. Returns None
    (or ``cls(...)`` if a response hook was supplied).
    """
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[None]

    request = build_debug_info_request_initial(
        resource_group_name=resource_group_name,
        subscription_id=self._config.subscription_id,
        cache_name=cache_name,
        api_version=api_version,
        template_url=self._debug_info_initial.metadata['url'],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request,
        stream=False,
        **kwargs
    )
    response = pipeline_response.http_response

    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_debug_info_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/debugInfo"}  # type: ignore
@distributed_trace
def begin_debug_info(  # pylint: disable=inconsistent-return-statements
    self,
    resource_group_name: str,
    cache_name: str,
    **kwargs: Any
) -> LROPoller[None]:
    """Tells a Cache to write generate debug info for support to process.

    :param resource_group_name: Target resource group.
    :type resource_group_name: str
    :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
     from the [-0-9a-zA-Z_] char class.
    :type cache_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # With a continuation token the initial request was already issued in a
    # previous session; skip it and rebuild the poller from the token.
    if cont_token is None:
        # cls=lambda passes the raw pipeline response through so the poller
        # can inspect status/headers itself.
        raw_result = self._debug_info_initial(  # type: ignore
            resource_group_name=resource_group_name,
            cache_name=cache_name,
            api_version=api_version,
            cls=lambda x,y,z: x,
            headers=_headers,
            params=_params,
            **kwargs
        )
    # error_map was consumed by the initial call; drop it before the kwargs
    # are forwarded to the polling method.
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # This action has no result body; only honor a caller-supplied hook.
        if cls:
            return cls(pipeline_response, None, {})

    if polling is True:
        # Final state is retrieved through the Azure-AsyncOperation header,
        # per this operation's LRO options.
        polling_method = cast(PollingMethod, ARMPolling(
            lro_delay,
            lro_options={'final-state-via': 'azure-async-operation'},
            
            **kwargs
        ))  # type: PollingMethod
    elif polling is False: polling_method = cast(PollingMethod, NoPolling())
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_debug_info.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/debugInfo"}  # type: ignore
def _flush_initial(  # pylint: disable=inconsistent-return-statements
    self,
    resource_group_name: str,
    cache_name: str,
    **kwargs: Any
) -> None:
    """Send the initial POST request that starts the cache flush LRO.

    Accepts 200/202/204; any other status is mapped/raised. Returns None
    (or ``cls(...)`` if a response hook was supplied).
    """
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[None]

    request = build_flush_request_initial(
        resource_group_name=resource_group_name,
        subscription_id=self._config.subscription_id,
        cache_name=cache_name,
        api_version=api_version,
        template_url=self._flush_initial.metadata['url'],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request,
        stream=False,
        **kwargs
    )
    response = pipeline_response.http_response

    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_flush_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/flush"}  # type: ignore
@distributed_trace
def begin_flush( # pylint: disable=inconsistent-return-statements
    self,
    resource_group_name: str,
    cache_name: str,
    **kwargs: Any
) -> LROPoller[None]:
    """Tells a Cache to write all dirty data to the Storage Target(s). During the flush, clients will
    see errors returned until the flush is complete.

    :param resource_group_name: Target resource group.
    :type resource_group_name: str
    :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
     from the [-0-9a-zA-Z_] char class.
    :type cache_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: fire the initial request.  cls=lambda returns the raw
        # pipeline response so the poller can drive the LRO from it.
        raw_result = self._flush_initial(  # type: ignore
            resource_group_name=resource_group_name,
            cache_name=cache_name,
            api_version=api_version,
            cls=lambda x,y,z: x,
            headers=_headers,
            params=_params,
            **kwargs
        )
    # Drop any caller-supplied error_map so it is not forwarded to the polling method.
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Final step of the LRO: no response body, so only the optional user
        # callback (closed over from the outer scope) is invoked.
        if cls:
            return cls(pipeline_response, None, {})

    if polling is True:
        polling_method = cast(PollingMethod, ARMPolling(
            lro_delay,
            lro_options={'final-state-via': 'azure-async-operation'},
            **kwargs
        ))  # type: PollingMethod
    elif polling is False: polling_method = cast(PollingMethod, NoPolling())
    else: polling_method = polling
    if cont_token:
        # Resume a previously-saved poller instead of starting a new operation.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_flush.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/flush"}  # type: ignore
def _start_initial( # pylint: disable=inconsistent-return-statements
    self,
    resource_group_name: str,
    cache_name: str,
    **kwargs: Any
) -> None:
    """Issue the initial (un-polled) HTTP request for the start LRO.

    Called by ``begin_start``; returns ``None`` unless a ``cls`` response
    callback is supplied.  NOTE(review): auto-generated code — prefer
    regenerating over hand-editing.
    """
    # Map common failure status codes to typed azure-core errors; callers may
    # extend/override the mapping via an 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}) or {})
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # API version precedence: explicit kwarg, then 'api-version' query param, then default.
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    request = build_start_request_initial(
        resource_group_name=resource_group_name,
        subscription_id=self._config.subscription_id,
        cache_name=cache_name,
        api_version=api_version,
        template_url=self._start_initial.metadata['url'],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore
    # Send synchronously (stream=False) through the client pipeline.
    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request,
        stream=False,
        **kwargs
    )
    response = pipeline_response.http_response
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    if cls:
        return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/start"}  # type: ignore
@distributed_trace
def begin_start( # pylint: disable=inconsistent-return-statements
    self,
    resource_group_name: str,
    cache_name: str,
    **kwargs: Any
) -> LROPoller[None]:
    """Tells a Stopped state Cache to transition to Active state.

    :param resource_group_name: Target resource group.
    :type resource_group_name: str
    :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
     from the [-0-9a-zA-Z_] char class.
    :type cache_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: fire the initial request.  cls=lambda returns the raw
        # pipeline response so the poller can drive the LRO from it.
        raw_result = self._start_initial(  # type: ignore
            resource_group_name=resource_group_name,
            cache_name=cache_name,
            api_version=api_version,
            cls=lambda x,y,z: x,
            headers=_headers,
            params=_params,
            **kwargs
        )
    # Drop any caller-supplied error_map so it is not forwarded to the polling method.
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Final step of the LRO: no response body, so only the optional user
        # callback (closed over from the outer scope) is invoked.
        if cls:
            return cls(pipeline_response, None, {})

    if polling is True:
        polling_method = cast(PollingMethod, ARMPolling(
            lro_delay,
            lro_options={'final-state-via': 'azure-async-operation'},
            **kwargs
        ))  # type: PollingMethod
    elif polling is False: polling_method = cast(PollingMethod, NoPolling())
    else: polling_method = polling
    if cont_token:
        # Resume a previously-saved poller instead of starting a new operation.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/start"}  # type: ignore
def _stop_initial( # pylint: disable=inconsistent-return-statements
    self,
    resource_group_name: str,
    cache_name: str,
    **kwargs: Any
) -> None:
    """Issue the initial (un-polled) HTTP request for the stop LRO.

    Called by ``begin_stop``; returns ``None`` unless a ``cls`` response
    callback is supplied.  NOTE(review): auto-generated code — prefer
    regenerating over hand-editing.
    """
    # Map common failure status codes to typed azure-core errors; callers may
    # extend/override the mapping via an 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}) or {})
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # API version precedence: explicit kwarg, then 'api-version' query param, then default.
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    request = build_stop_request_initial(
        resource_group_name=resource_group_name,
        subscription_id=self._config.subscription_id,
        cache_name=cache_name,
        api_version=api_version,
        template_url=self._stop_initial.metadata['url'],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore
    # Send synchronously (stream=False) through the client pipeline.
    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request,
        stream=False,
        **kwargs
    )
    response = pipeline_response.http_response
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    if cls:
        return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/stop"}  # type: ignore
@distributed_trace
def begin_stop( # pylint: disable=inconsistent-return-statements
    self,
    resource_group_name: str,
    cache_name: str,
    **kwargs: Any
) -> LROPoller[None]:
    """Tells an Active Cache to transition to Stopped state.

    :param resource_group_name: Target resource group.
    :type resource_group_name: str
    :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
     from the [-0-9a-zA-Z_] char class.
    :type cache_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: fire the initial request.  cls=lambda returns the raw
        # pipeline response so the poller can drive the LRO from it.
        raw_result = self._stop_initial(  # type: ignore
            resource_group_name=resource_group_name,
            cache_name=cache_name,
            api_version=api_version,
            cls=lambda x,y,z: x,
            headers=_headers,
            params=_params,
            **kwargs
        )
    # Drop any caller-supplied error_map so it is not forwarded to the polling method.
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Final step of the LRO: no response body, so only the optional user
        # callback (closed over from the outer scope) is invoked.
        if cls:
            return cls(pipeline_response, None, {})

    if polling is True:
        polling_method = cast(PollingMethod, ARMPolling(
            lro_delay,
            lro_options={'final-state-via': 'azure-async-operation'},
            **kwargs
        ))  # type: PollingMethod
    elif polling is False: polling_method = cast(PollingMethod, NoPolling())
    else: polling_method = polling
    if cont_token:
        # Resume a previously-saved poller instead of starting a new operation.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/stop"}  # type: ignore
def _start_priming_job_initial( # pylint: disable=inconsistent-return-statements
    self,
    resource_group_name: str,
    cache_name: str,
    primingjob: Optional[_models.PrimingJob] = None,
    **kwargs: Any
) -> None:
    """Issue the initial (un-polled) HTTP request for the startPrimingJob LRO.

    Called by ``begin_start_priming_job``; returns ``None`` unless a ``cls``
    response callback is supplied.  NOTE(review): auto-generated code —
    prefer regenerating over hand-editing.
    """
    # Map common failure status codes to typed azure-core errors; callers may
    # extend/override the mapping via an 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}) or {})
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json"))  # type: Optional[str]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Serialize the optional request body; omitting the job sends an empty body.
    if primingjob is not None:
        _json = self._serialize.body(primingjob, 'PrimingJob')
    else:
        _json = None
    request = build_start_priming_job_request_initial(
        resource_group_name=resource_group_name,
        subscription_id=self._config.subscription_id,
        cache_name=cache_name,
        api_version=api_version,
        content_type=content_type,
        json=_json,
        template_url=self._start_priming_job_initial.metadata['url'],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore
    # Send synchronously (stream=False) through the client pipeline.
    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request,
        stream=False,
        **kwargs
    )
    response = pipeline_response.http_response
    if response.status_code not in [202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    # Expose the async-operation tracking headers to the cls callback.
    response_headers = {}
    response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
    response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
    if cls:
        return cls(pipeline_response, None, response_headers)
_start_priming_job_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/startPrimingJob"}  # type: ignore
@distributed_trace
def begin_start_priming_job( # pylint: disable=inconsistent-return-statements
    self,
    resource_group_name: str,
    cache_name: str,
    primingjob: Optional[_models.PrimingJob] = None,
    **kwargs: Any
) -> LROPoller[None]:
    """Create a priming job. This operation is only allowed when the cache is healthy.

    :param resource_group_name: Target resource group.
    :type resource_group_name: str
    :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
     from the [-0-9a-zA-Z_] char class.
    :type cache_name: str
    :param primingjob: Object containing the definition of a priming job. Default value is None.
    :type primingjob: ~azure.mgmt.storagecache.models.PrimingJob
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json"))  # type: Optional[str]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: fire the initial request.  cls=lambda returns the raw
        # pipeline response so the poller can drive the LRO from it.
        raw_result = self._start_priming_job_initial(  # type: ignore
            resource_group_name=resource_group_name,
            cache_name=cache_name,
            primingjob=primingjob,
            api_version=api_version,
            content_type=content_type,
            cls=lambda x,y,z: x,
            headers=_headers,
            params=_params,
            **kwargs
        )
    # Drop any caller-supplied error_map so it is not forwarded to the polling method.
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Final step of the LRO: no response body, so only the optional user
        # callback (closed over from the outer scope) is invoked.
        if cls:
            return cls(pipeline_response, None, {})

    if polling is True:
        polling_method = cast(PollingMethod, ARMPolling(
            lro_delay,
            lro_options={'final-state-via': 'azure-async-operation'},
            **kwargs
        ))  # type: PollingMethod
    elif polling is False: polling_method = cast(PollingMethod, NoPolling())
    else: polling_method = polling
    if cont_token:
        # Resume a previously-saved poller instead of starting a new operation.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start_priming_job.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/startPrimingJob"}  # type: ignore
def _stop_priming_job_initial( # pylint: disable=inconsistent-return-statements
    self,
    resource_group_name: str,
    cache_name: str,
    priming_job_id: Optional[_models.PrimingJobIdParameter] = None,
    **kwargs: Any
) -> None:
    """Issue the initial (un-polled) HTTP request for the stopPrimingJob LRO.

    Called by ``begin_stop_priming_job``; returns ``None`` unless a ``cls``
    response callback is supplied.  NOTE(review): auto-generated code —
    prefer regenerating over hand-editing.
    """
    # Map common failure status codes to typed azure-core errors; callers may
    # extend/override the mapping via an 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}) or {})
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json"))  # type: Optional[str]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Serialize the optional request body; omitting the id sends an empty body.
    if priming_job_id is not None:
        _json = self._serialize.body(priming_job_id, 'PrimingJobIdParameter')
    else:
        _json = None
    request = build_stop_priming_job_request_initial(
        resource_group_name=resource_group_name,
        cache_name=cache_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        content_type=content_type,
        json=_json,
        template_url=self._stop_priming_job_initial.metadata['url'],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore
    # Send synchronously (stream=False) through the client pipeline.
    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request,
        stream=False,
        **kwargs
    )
    response = pipeline_response.http_response
    if response.status_code not in [202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    # Tracking headers are only present on the accepted (202) path.
    response_headers = {}
    if response.status_code == 202:
        response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
        response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
    if cls:
        return cls(pipeline_response, None, response_headers)
_stop_priming_job_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/stopPrimingJob"}  # type: ignore
@distributed_trace
def begin_stop_priming_job( # pylint: disable=inconsistent-return-statements
    self,
    resource_group_name: str,
    cache_name: str,
    priming_job_id: Optional[_models.PrimingJobIdParameter] = None,
    **kwargs: Any
) -> LROPoller[None]:
    """Schedule a priming job for deletion.

    :param resource_group_name: Target resource group.
    :type resource_group_name: str
    :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
     from the [-0-9a-zA-Z_] char class.
    :type cache_name: str
    :param priming_job_id: Object containing the priming job ID. Default value is None.
    :type priming_job_id: ~azure.mgmt.storagecache.models.PrimingJobIdParameter
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json"))  # type: Optional[str]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: fire the initial request.  cls=lambda returns the raw
        # pipeline response so the poller can drive the LRO from it.
        raw_result = self._stop_priming_job_initial(  # type: ignore
            resource_group_name=resource_group_name,
            cache_name=cache_name,
            priming_job_id=priming_job_id,
            api_version=api_version,
            content_type=content_type,
            cls=lambda x,y,z: x,
            headers=_headers,
            params=_params,
            **kwargs
        )
    # Drop any caller-supplied error_map so it is not forwarded to the polling method.
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Final step of the LRO: no response body, so only the optional user
        # callback (closed over from the outer scope) is invoked.
        if cls:
            return cls(pipeline_response, None, {})

    if polling is True:
        polling_method = cast(PollingMethod, ARMPolling(
            lro_delay,
            lro_options={'final-state-via': 'azure-async-operation'},
            **kwargs
        ))  # type: PollingMethod
    elif polling is False: polling_method = cast(PollingMethod, NoPolling())
    else: polling_method = polling
    if cont_token:
        # Resume a previously-saved poller instead of starting a new operation.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop_priming_job.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/stopPrimingJob"}  # type: ignore
def _pause_priming_job_initial( # pylint: disable=inconsistent-return-statements
    self,
    resource_group_name: str,
    cache_name: str,
    priming_job_id: Optional[_models.PrimingJobIdParameter] = None,
    **kwargs: Any
) -> None:
    """Issue the initial (un-polled) HTTP request for the pausePrimingJob LRO.

    Called by ``begin_pause_priming_job``; returns ``None`` unless a ``cls``
    response callback is supplied.  NOTE(review): auto-generated code —
    prefer regenerating over hand-editing.
    """
    # Map common failure status codes to typed azure-core errors; callers may
    # extend/override the mapping via an 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}) or {})
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json"))  # type: Optional[str]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Serialize the optional request body; omitting the id sends an empty body.
    if priming_job_id is not None:
        _json = self._serialize.body(priming_job_id, 'PrimingJobIdParameter')
    else:
        _json = None
    request = build_pause_priming_job_request_initial(
        resource_group_name=resource_group_name,
        cache_name=cache_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        content_type=content_type,
        json=_json,
        template_url=self._pause_priming_job_initial.metadata['url'],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore
    # Send synchronously (stream=False) through the client pipeline.
    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request,
        stream=False,
        **kwargs
    )
    response = pipeline_response.http_response
    if response.status_code not in [202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    # Tracking headers are only present on the accepted (202) path.
    response_headers = {}
    if response.status_code == 202:
        response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
        response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
    if cls:
        return cls(pipeline_response, None, response_headers)
_pause_priming_job_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/pausePrimingJob"}  # type: ignore
@distributed_trace
def begin_pause_priming_job( # pylint: disable=inconsistent-return-statements
    self,
    resource_group_name: str,
    cache_name: str,
    priming_job_id: Optional[_models.PrimingJobIdParameter] = None,
    **kwargs: Any
) -> LROPoller[None]:
    """Schedule a priming job to be paused.

    :param resource_group_name: Target resource group.
    :type resource_group_name: str
    :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
     from the [-0-9a-zA-Z_] char class.
    :type cache_name: str
    :param priming_job_id: Object containing the priming job ID. Default value is None.
    :type priming_job_id: ~azure.mgmt.storagecache.models.PrimingJobIdParameter
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json"))  # type: Optional[str]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: fire the initial request.  cls=lambda returns the raw
        # pipeline response so the poller can drive the LRO from it.
        raw_result = self._pause_priming_job_initial(  # type: ignore
            resource_group_name=resource_group_name,
            cache_name=cache_name,
            priming_job_id=priming_job_id,
            api_version=api_version,
            content_type=content_type,
            cls=lambda x,y,z: x,
            headers=_headers,
            params=_params,
            **kwargs
        )
    # Drop any caller-supplied error_map so it is not forwarded to the polling method.
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Final step of the LRO: no response body, so only the optional user
        # callback (closed over from the outer scope) is invoked.
        if cls:
            return cls(pipeline_response, None, {})

    if polling is True:
        polling_method = cast(PollingMethod, ARMPolling(
            lro_delay,
            lro_options={'final-state-via': 'azure-async-operation'},
            **kwargs
        ))  # type: PollingMethod
    elif polling is False: polling_method = cast(PollingMethod, NoPolling())
    else: polling_method = polling
    if cont_token:
        # Resume a previously-saved poller instead of starting a new operation.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_pause_priming_job.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/pausePrimingJob"}  # type: ignore
def _resume_priming_job_initial( # pylint: disable=inconsistent-return-statements
    self,
    resource_group_name: str,
    cache_name: str,
    priming_job_id: Optional[_models.PrimingJobIdParameter] = None,
    **kwargs: Any
) -> None:
    """Issue the initial (un-polled) HTTP request for the resumePrimingJob LRO.

    Called by ``begin_resume_priming_job``; returns ``None`` unless a ``cls``
    response callback is supplied.  NOTE(review): auto-generated code —
    prefer regenerating over hand-editing.
    """
    # Map common failure status codes to typed azure-core errors; callers may
    # extend/override the mapping via an 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}) or {})
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json"))  # type: Optional[str]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Serialize the optional request body; omitting the id sends an empty body.
    if priming_job_id is not None:
        _json = self._serialize.body(priming_job_id, 'PrimingJobIdParameter')
    else:
        _json = None
    request = build_resume_priming_job_request_initial(
        resource_group_name=resource_group_name,
        cache_name=cache_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        content_type=content_type,
        json=_json,
        template_url=self._resume_priming_job_initial.metadata['url'],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore
    # Send synchronously (stream=False) through the client pipeline.
    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request,
        stream=False,
        **kwargs
    )
    response = pipeline_response.http_response
    if response.status_code not in [202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    # Tracking headers are only present on the accepted (202) path.
    response_headers = {}
    if response.status_code == 202:
        response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
        response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
    if cls:
        return cls(pipeline_response, None, response_headers)
_resume_priming_job_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/resumePrimingJob"}  # type: ignore
@distributed_trace
def begin_resume_priming_job( # pylint: disable=inconsistent-return-statements
    self,
    resource_group_name: str,
    cache_name: str,
    priming_job_id: Optional[_models.PrimingJobIdParameter] = None,
    **kwargs: Any
) -> LROPoller[None]:
    """Resumes a paused priming job.

    :param resource_group_name: Target resource group.
    :type resource_group_name: str
    :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
     from the [-0-9a-zA-Z_] char class.
    :type cache_name: str
    :param priming_job_id: Object containing the priming job ID. Default value is None.
    :type priming_job_id: ~azure.mgmt.storagecache.models.PrimingJobIdParameter
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json"))  # type: Optional[str]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: fire the initial request.  cls=lambda returns the raw
        # pipeline response so the poller can drive the LRO from it.
        raw_result = self._resume_priming_job_initial(  # type: ignore
            resource_group_name=resource_group_name,
            cache_name=cache_name,
            priming_job_id=priming_job_id,
            api_version=api_version,
            content_type=content_type,
            cls=lambda x,y,z: x,
            headers=_headers,
            params=_params,
            **kwargs
        )
    # Drop any caller-supplied error_map so it is not forwarded to the polling method.
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Final step of the LRO: no response body, so only the optional user
        # callback (closed over from the outer scope) is invoked.
        if cls:
            return cls(pipeline_response, None, {})

    if polling is True:
        polling_method = cast(PollingMethod, ARMPolling(
            lro_delay,
            lro_options={'final-state-via': 'azure-async-operation'},
            **kwargs
        ))  # type: PollingMethod
    elif polling is False: polling_method = cast(PollingMethod, NoPolling())
    else: polling_method = polling
    if cont_token:
        # Resume a previously-saved poller instead of starting a new operation.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_resume_priming_job.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/resumePrimingJob"}  # type: ignore
def _upgrade_firmware_initial(  # pylint: disable=inconsistent-return-statements
    self,
    resource_group_name: str,
    cache_name: str,
    **kwargs: Any
) -> None:
    """Send the initial, non-polling firmware-upgrade request.

    First leg of the long-running operation; callers use the public
    ``begin_upgrade_firmware`` wrapper, which adds LRO polling.
    """
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[None]

    request = build_upgrade_firmware_request_initial(
        resource_group_name=resource_group_name,
        subscription_id=self._config.subscription_id,
        cache_name=cache_name,
        api_version=api_version,
        template_url=self._upgrade_firmware_initial.metadata['url'],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request,
        stream=False,
        **kwargs
    )

    response = pipeline_response.http_response

    # 201/202/204 are all acceptable "accepted" responses for this LRO.
    if response.status_code not in [201, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_upgrade_firmware_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/upgrade"}  # type: ignore
@distributed_trace
def begin_upgrade_firmware(  # pylint: disable=inconsistent-return-statements
    self,
    resource_group_name: str,
    cache_name: str,
    **kwargs: Any
) -> LROPoller[None]:
    """Upgrade a Cache's firmware if a new version is available. Otherwise, this operation has no
    effect.

    :param resource_group_name: Target resource group.
    :type resource_group_name: str
    :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
     from the [-0-9a-zA-Z_] char class.
    :type cache_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial request when not resuming from a saved token.
    if cont_token is None:
        raw_result = self._upgrade_firmware_initial(  # type: ignore
            resource_group_name=resource_group_name,
            cache_name=cache_name,
            api_version=api_version,
            cls=lambda x,y,z: x,
            headers=_headers,
            params=_params,
            **kwargs
        )
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Final deserialization hook: this operation returns no body.
        if cls:
            return cls(pipeline_response, None, {})

    if polling is True:
        polling_method = cast(PollingMethod, ARMPolling(
            lro_delay,
            lro_options={'final-state-via': 'azure-async-operation'},
            **kwargs
        ))  # type: PollingMethod
    elif polling is False: polling_method = cast(PollingMethod, NoPolling())
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_upgrade_firmware.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/upgrade"}  # type: ignore
def _space_allocation_initial(  # pylint: disable=inconsistent-return-statements
    self,
    resource_group_name: str,
    cache_name: str,
    space_allocation: Optional[List[_models.StorageTargetSpaceAllocation]] = None,
    **kwargs: Any
) -> None:
    """Send the initial, non-polling space-allocation request.

    First leg of the long-running operation; callers use the public
    ``begin_space_allocation`` wrapper, which adds LRO polling.
    """
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}) or {})

    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json"))  # type: Optional[str]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]

    # The request body is optional; serialize it only when provided.
    if space_allocation is not None:
        _json = self._serialize.body(space_allocation, '[StorageTargetSpaceAllocation]')
    else:
        _json = None

    request = build_space_allocation_request_initial(
        resource_group_name=resource_group_name,
        subscription_id=self._config.subscription_id,
        cache_name=cache_name,
        api_version=api_version,
        content_type=content_type,
        json=_json,
        template_url=self._space_allocation_initial.metadata['url'],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request,
        stream=False,
        **kwargs
    )

    response = pipeline_response.http_response

    if response.status_code not in [202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # Surface the LRO tracking headers to the caller's cls hook.
    response_headers = {}
    response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
    response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))

    if cls:
        return cls(pipeline_response, None, response_headers)

_space_allocation_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/spaceAllocation"}  # type: ignore
@distributed_trace
def begin_space_allocation(  # pylint: disable=inconsistent-return-statements
    self,
    resource_group_name: str,
    cache_name: str,
    space_allocation: Optional[List[_models.StorageTargetSpaceAllocation]] = None,
    **kwargs: Any
) -> LROPoller[None]:
    """Update cache space allocation.

    :param resource_group_name: Target resource group.
    :type resource_group_name: str
    :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
     from the [-0-9a-zA-Z_] char class.
    :type cache_name: str
    :param space_allocation: List containing storage target cache space percentage allocations.
     Default value is None.
    :type space_allocation: list[~azure.mgmt.storagecache.models.StorageTargetSpaceAllocation]
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-05-01"))  # type: str
    content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json"))  # type: Optional[str]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial request when not resuming from a saved token.
    if cont_token is None:
        raw_result = self._space_allocation_initial(  # type: ignore
            resource_group_name=resource_group_name,
            cache_name=cache_name,
            space_allocation=space_allocation,
            api_version=api_version,
            content_type=content_type,
            cls=lambda x,y,z: x,
            headers=_headers,
            params=_params,
            **kwargs
        )
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Final deserialization hook: this operation returns no body.
        if cls:
            return cls(pipeline_response, None, {})

    if polling is True:
        polling_method = cast(PollingMethod, ARMPolling(
            lro_delay,
            lro_options={'final-state-via': 'azure-async-operation'},
            **kwargs
        ))  # type: PollingMethod
    elif polling is False: polling_method = cast(PollingMethod, NoPolling())
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_space_allocation.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/spaceAllocation"}  # type: ignore
| {
"content_hash": "8b04db5402c0ee1b88538f2812ba7598",
"timestamp": "",
"source": "github",
"line_count": 2564,
"max_line_length": 209,
"avg_line_length": 43.26443057722309,
"alnum_prop": 0.6213107365004958,
"repo_name": "Azure/azure-sdk-for-python",
"id": "10e01824c7a3d466f64b2bf9c0793a1d2b0b21fb",
"size": "111430",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/storage/azure-mgmt-storagecache/azure/mgmt/storagecache/operations/_caches_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from django.db import transaction
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.forms.models import modelform_factory
from functools import lru_cache
from restless.dj import DjangoResource
from .forms import DjangoFormMixin
from .permissions import ModelAuthorizationMixin
from .presentors import PresentorResourceMixin
class DjangoModelResource(ModelAuthorizationMixin, DjangoFormMixin, PresentorResourceMixin, DjangoResource):
    '''
    A restless DjangoResource providing CRUD endpoints for a Django model.

    Subclasses set ``model`` and may tune ``paginate_by``, ``form_class``,
    ``fields`` and ``exclude_fields``.
    '''
    model = None        # the Django model class this resource exposes
    paginate_by = 50    # default page size; overridable per request via ?paginate_by=
    #TODO filter_by

    #these modify autoform
    fields = None
    exclude_fields = None

    #TODO fields, exclude_fields to shape preparer

    def get_queryset(self):
        """Return the base queryset, filtered by the authorization backend."""
        queryset = self.model.objects.all()
        return self.authorization.process_queryset(queryset)

    def get_form_class(self):
        """Return ``form_class`` if set, else a ModelForm built from ``model``."""
        if self.form_class:
            return self.form_class
        #TODO authorization may want to modify our form
        return modelform_factory(model=self.model, fields=self.fields, exclude=self.exclude_fields)

    def url_for(self, obj):
        """Return the canonical URL for ``obj``."""
        #TODO i'm sure we can come up with a smarter default
        return obj.get_absolute_url()

    def get_paginator(self):
        """Build a paginator honoring the ``paginate_by`` query parameter."""
        queryset = self.get_queryset()
        return Paginator(queryset, self.request.GET.get('paginate_by', self.paginate_by))

    def get_page(self):
        """Return the page object requested via the ``page`` query parameter."""
        paginator = self.get_paginator()
        return paginator.page(self.request.GET.get('page', 1))

    def list(self):
        """GET collection: return one page, mapping paging errors to statuses."""
        try:
            return self.get_page()
        except PageNotAnInteger as exception:
            #TODO proper status code?
            return self.build_status_response(str(exception), status=400)
        except EmptyPage as exception:
            #TODO proper status code?
            return self.build_status_response(str(exception), status=410)

    def detail(self, pk):
        """GET one object, or a 404 status response when absent."""
        try:
            return self.get_queryset().get(pk=pk)
        except self.model.DoesNotExist as exception:
            return self.build_status_response(str(exception), status=404)

    @transaction.atomic
    def create(self):
        """POST: validate the form and save a new object."""
        form = self.make_form()
        if form.is_valid():
            obj = form.save()
            return obj
            #return HttpResponseRedirect(self.url_for(obj), status=303)
        else:
            return self.build_validation_error(form.errors)

    @transaction.atomic
    def update(self, pk):
        """PUT: update the object at ``pk``, creating it there if missing."""
        try:
            obj = self.get_queryset().get(pk=pk)
        except self.model.DoesNotExist:
            # Fix: a PUT to a missing pk previously saved a fresh object with
            # an auto-assigned pk; pin the new instance to the requested pk so
            # the resource is created at the URI the client asked for.
            obj = self.model(pk=pk)
        form = self.make_form(instance=obj)
        if form.is_valid():
            obj = form.save()
            return obj
            #return HttpResponseRedirect(self.url_for(obj), status=303)
        else:
            return self.build_validation_error(form.errors)

    @transaction.atomic
    def delete(self, pk):
        """DELETE one object; raises ``DoesNotExist`` for an unknown pk."""
        self.get_queryset().get(pk=pk).delete()
        return None
        #return self.build_status_response(None, status=204)#or 410?

    @transaction.atomic
    def delete_list(self):
        """DELETE every object whose pk appears in the ``pk`` query params."""
        pks = self.request.GET.getlist('pk')
        return self.get_queryset().filter(pk__in=pks).delete()
        #return HttpResponseRedirect('./', status=303)

    #TODO
    '''
    @transaction.atomic
    def update_list(self):
        pass
    '''
| {
"content_hash": "addde163a8c8f9b918b09266419a88e7",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 108,
"avg_line_length": 31.858490566037737,
"alnum_prop": 0.6431744151613858,
"repo_name": "zbyte64/python-restmore",
"id": "1be479ccac3619d8e20b9fc55a3e2de12fb13b5f",
"size": "3377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "restmore/crud.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "74"
},
{
"name": "Python",
"bytes": "28706"
}
],
"symlink_target": ""
} |
import re
import sys
# Default file names, overridden by the two command-line arguments below.
templFilename = "pyCombBLAS.i.templ"
outFilename = "pyCombBLAS.i"
def writeFile(filename, outfile):
    """Copy the marked region(s) of *filename* to the writable *outfile*.

    Lines between a line containing ``INTERFACE_INCLUDE_BEGIN`` and one
    containing ``INTERFACE_INCLUDE_END`` are written verbatim; the marker
    lines themselves and everything outside the region are skipped.
    """
    print("Including from file " + filename)
    shouldPrint = False
    # 'with' guarantees the header file is closed (the original leaked it),
    # and iterating the file object replaces the manual readline loop.
    with open(filename, 'r') as input_file:
        for line in input_file:
            m = re.match(".*INTERFACE_INCLUDE_([A-Z]*).*", line)
            if m is None:  # 'is None', not '== None' (PEP 8)
                if shouldPrint:
                    outfile.write(line)
            else:
                s = m.group(1)
                if s == "BEGIN":
                    shouldPrint = True
                if s == "END":
                    shouldPrint = False
# Command-line entry point: copy the template to the output file, replacing
# each INCLUDE "file" directive with the marked region of the named file.
if len(sys.argv) == 3:
    templFilename = sys.argv[1]
    outFilename = sys.argv[2]
else:
    print("SWIG interface file maker.\nTakes an interface file template that is complete except for missing class bodies and fills them in from the C++ header files. This means any changes to the header files don't have to be manually copied into the interface file.\n")
    print("Usage:")
    print("python makei.py templatefile interfacefile\n")
    sys.exit(1)

# 'with' closes both files even on error; the original never closed them.
with open(templFilename, 'r') as templ, open(outFilename, 'w') as out:
    for line in templ:
        m = re.match(".*INCLUDE \"([^\"]*)\".*", line)
        if m is None:  # ordinary template line: copy through unchanged
            out.write(line)
        else:
            writeFile(m.group(1), out)
| {
"content_hash": "593c8481d304d1096eaf315097b81d1d",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 267,
"avg_line_length": 24.6,
"alnum_prop": 0.6585365853658537,
"repo_name": "harperj/KDTSpecializer",
"id": "8e2dd0269f32851782a362c7247f718336189dcd",
"size": "1248",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kdt/pyCombBLAS/makei.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "975429"
},
{
"name": "C++",
"bytes": "1536020"
},
{
"name": "Matlab",
"bytes": "298905"
},
{
"name": "Objective-C",
"bytes": "2244"
},
{
"name": "Perl",
"bytes": "1442"
},
{
"name": "Python",
"bytes": "612733"
},
{
"name": "Shell",
"bytes": "58704"
}
],
"symlink_target": ""
} |
import copy
import time
from unittest import mock
import uuid
import ddt
from lxml import etree
from oslo_utils import units
import paramiko
import six
from cinder import exception
from cinder import ssh_utils
from cinder.tests.unit import test
from cinder.tests.unit.volume.drivers.netapp.dataontap.client import (
fakes as fake_client)
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_base
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp import utils as netapp_utils
# Canned connection parameters used to instantiate the cmode client in setUp.
CONNECTION_INFO = {'hostname': 'hostname',
                   'transport_type': 'https',
                   'port': 443,
                   'username': 'admin',
                   'password': 'passw0rd',
                   'vserver': 'fake_vserver',
                   'api_trace_pattern': 'fake_regex'}
@ddt.ddt
class NetAppCmodeClientTestCase(test.TestCase):
def setUp(self):
    """Build a cmode client with mocked SSH/connection plumbing per test."""
    super(NetAppCmodeClientTestCase, self).setUp()

    self.mock_object(client_cmode.Client, '_init_ssh_client')
    # store the original reference so we can call it later in
    # test__get_cluster_nodes_info
    self.original_get_cluster_nodes_info = (
        client_cmode.Client._get_cluster_nodes_info)
    self.mock_object(client_cmode.Client, '_get_cluster_nodes_info',
                     return_value=fake.HYBRID_SYSTEM_NODES_INFO)
    self.mock_object(client_cmode.Client, 'get_ontap_version',
                     return_value='9.6')
    with mock.patch.object(client_cmode.Client,
                           'get_ontapi_version',
                           return_value=(1, 20)):
        self.client = client_cmode.Client(**CONNECTION_INFO)
    self.client.ssh_client = mock.MagicMock()
    self.client.connection = mock.MagicMock()
    self.connection = self.client.connection

    self.vserver = CONNECTION_INFO['vserver']
    self.fake_volume = six.text_type(uuid.uuid4())
    self.fake_lun = six.text_type(uuid.uuid4())
    self.mock_send_request = self.mock_object(
        self.client.connection, 'send_request')
def _mock_api_error(self, code='fake'):
    # Helper: a mock whose invocation raises NaApiError with the given code.
    return mock.Mock(side_effect=netapp_api.NaApiError(code=code))
def test_has_records(self):
    """_has_records is True for a response that contains records."""
    result = self.client._has_records(netapp_api.NaElement(
        fake_client.QOS_POLICY_GROUP_GET_ITER_RESPONSE))

    self.assertTrue(result)
def test_has_records_not_found(self):
    """_has_records is False for a no-records response."""
    result = self.client._has_records(
        netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE))

    self.assertFalse(result)
@ddt.data((fake_client.AGGR_GET_ITER_RESPONSE, 2),
          (fake_client.NO_RECORDS_RESPONSE, 0))
@ddt.unpack
def test_get_record_count(self, response, expected):
    """_get_record_count reads num-records from the API response."""
    api_response = netapp_api.NaElement(response)

    result = self.client._get_record_count(api_response)

    self.assertEqual(expected, result)
def test_get_records_count_invalid(self):
    """A response lacking num-records raises NetAppDriverException."""
    api_response = netapp_api.NaElement(
        fake_client.INVALID_GET_ITER_RESPONSE_NO_RECORDS)

    self.assertRaises(netapp_utils.NetAppDriverException,
                      self.client._get_record_count,
                      api_response)
@ddt.data(True, False)
def test_send_iter_request(self, enable_tunneling):
    """send_iter_request pages through three responses via next-tag."""
    api_responses = [
        netapp_api.NaElement(
            fake_client.STORAGE_DISK_GET_ITER_RESPONSE_PAGE_1),
        netapp_api.NaElement(
            fake_client.STORAGE_DISK_GET_ITER_RESPONSE_PAGE_2),
        netapp_api.NaElement(
            fake_client.STORAGE_DISK_GET_ITER_RESPONSE_PAGE_3),
    ]
    mock_send_request = self.mock_object(
        self.client.connection, 'send_request',
        side_effect=copy.deepcopy(api_responses))

    storage_disk_get_iter_args = {
        'desired-attributes': {
            'storage-disk-info': {
                'disk-name': None,
            }
        }
    }
    result = self.client.send_iter_request(
        'storage-disk-get-iter', api_args=storage_disk_get_iter_args,
        enable_tunneling=enable_tunneling, max_page_length=10)

    num_records = result.get_child_content('num-records')
    self.assertEqual('28', num_records)
    next_tag = result.get_child_content('next-tag')
    self.assertEqual('', next_tag)

    # Each page request repeats the args with the tag from the prior page.
    args1 = copy.deepcopy(storage_disk_get_iter_args)
    args1['max-records'] = 10
    args2 = copy.deepcopy(storage_disk_get_iter_args)
    args2['max-records'] = 10
    args2['tag'] = 'next_tag_1'
    args3 = copy.deepcopy(storage_disk_get_iter_args)
    args3['max-records'] = 10
    args3['tag'] = 'next_tag_2'

    mock_send_request.assert_has_calls([
        mock.call('storage-disk-get-iter', args1,
                  enable_tunneling=enable_tunneling),
        mock.call('storage-disk-get-iter', args2,
                  enable_tunneling=enable_tunneling),
        mock.call('storage-disk-get-iter', args3,
                  enable_tunneling=enable_tunneling),
    ])
def test_send_iter_request_single_page(self):
    """A single-page response is returned after exactly one request."""
    api_response = netapp_api.NaElement(
        fake_client.STORAGE_DISK_GET_ITER_RESPONSE)
    mock_send_request = self.mock_object(self.client.connection,
                                         'send_request',
                                         return_value=api_response)

    storage_disk_get_iter_args = {
        'desired-attributes': {
            'storage-disk-info': {
                'disk-name': None,
            }
        }
    }
    result = self.client.send_iter_request(
        'storage-disk-get-iter', api_args=storage_disk_get_iter_args,
        max_page_length=10)

    num_records = result.get_child_content('num-records')
    self.assertEqual('4', num_records)

    args = copy.deepcopy(storage_disk_get_iter_args)
    args['max-records'] = 10

    mock_send_request.assert_has_calls([
        mock.call('storage-disk-get-iter', args, enable_tunneling=True),
    ])
def test_send_iter_request_not_found(self):
    """A no-records response yields num-records '0' and a default page size."""
    api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE)
    mock_send_request = self.mock_object(self.client.connection,
                                         'send_request',
                                         return_value=api_response)

    result = self.client.send_iter_request('storage-disk-get-iter')

    num_records = result.get_child_content('num-records')
    self.assertEqual('0', num_records)

    args = {'max-records': client_cmode.DEFAULT_MAX_PAGE_LENGTH}

    mock_send_request.assert_has_calls([
        mock.call('storage-disk-get-iter', args, enable_tunneling=True),
    ])
@ddt.data(fake_client.INVALID_GET_ITER_RESPONSE_NO_ATTRIBUTES,
          fake_client.INVALID_GET_ITER_RESPONSE_NO_RECORDS)
def test_send_iter_request_invalid(self, fake_response):
    """Malformed iterator responses raise NetAppDriverException."""
    api_response = netapp_api.NaElement(fake_response)
    self.mock_object(self.client.connection,
                     'send_request',
                     return_value=api_response)

    self.assertRaises(netapp_utils.NetAppDriverException,
                      self.client.send_iter_request,
                      'storage-disk-get-iter')
@ddt.data((fake.AFF_SYSTEM_NODE_GET_ITER_RESPONSE,
           fake.AFF_SYSTEM_NODES_INFO),
          (fake.FAS_SYSTEM_NODE_GET_ITER_RESPONSE,
           fake.FAS_SYSTEM_NODES_INFO),
          (fake_client.NO_RECORDS_RESPONSE, []),
          (fake.HYBRID_SYSTEM_NODE_GET_ITER_RESPONSE,
           fake.HYBRID_SYSTEM_NODES_INFO))
@ddt.unpack
def test__get_cluster_nodes_info(self, response, expected):
    """_get_cluster_nodes_info parses node responses for each system type."""
    # Restore the real implementation (setUp replaced it with a mock).
    client_cmode.Client._get_cluster_nodes_info = (
        self.original_get_cluster_nodes_info)
    nodes_response = netapp_api.NaElement(response)
    self.mock_object(client_cmode.Client, 'send_iter_request',
                     return_value=nodes_response)

    result = self.client._get_cluster_nodes_info()
    self.assertEqual(expected, result)
def test_list_vservers(self):
    """list_vservers queries data-type vservers by default."""
    api_response = netapp_api.NaElement(
        fake_client.VSERVER_DATA_LIST_RESPONSE)
    self.mock_object(self.client,
                     'send_iter_request',
                     return_value=api_response)

    result = self.client.list_vservers()

    vserver_get_iter_args = {
        'query': {
            'vserver-info': {
                'vserver-type': 'data'
            }
        },
        'desired-attributes': {
            'vserver-info': {
                'vserver-name': None
            }
        }
    }
    self.client.send_iter_request.assert_has_calls([
        mock.call('vserver-get-iter', vserver_get_iter_args,
                  enable_tunneling=False)])
    self.assertListEqual([fake_client.VSERVER_NAME], result)
def test_list_vservers_node_type(self):
    """list_vservers honors an explicit vserver_type of 'node'."""
    api_response = netapp_api.NaElement(
        fake_client.VSERVER_DATA_LIST_RESPONSE)
    self.mock_object(self.client,
                     'send_iter_request',
                     return_value=api_response)

    result = self.client.list_vservers(vserver_type='node')

    vserver_get_iter_args = {
        'query': {
            'vserver-info': {
                'vserver-type': 'node'
            }
        },
        'desired-attributes': {
            'vserver-info': {
                'vserver-name': None
            }
        }
    }
    self.client.send_iter_request.assert_has_calls([
        mock.call('vserver-get-iter', vserver_get_iter_args,
                  enable_tunneling=False)])
    self.assertListEqual([fake_client.VSERVER_NAME], result)
def test_list_vservers_not_found(self):
    """list_vservers returns an empty list when none match."""
    api_response = netapp_api.NaElement(
        fake_client.NO_RECORDS_RESPONSE)
    self.mock_object(self.client.connection,
                     'send_request',
                     return_value=api_response)

    result = self.client.list_vservers(vserver_type='data')

    self.assertListEqual([], result)
@ddt.data((1, 21), (1, 100), (2, 0))
def test_get_ems_log_destination_vserver(self, ontapi_version):
    """ONTAPI >= 1.21 looks up an admin vserver for EMS logging."""
    self.mock_object(self.client,
                     'get_ontapi_version',
                     return_value=ontapi_version)
    mock_list_vservers = self.mock_object(
        self.client,
        'list_vservers',
        return_value=[fake_client.ADMIN_VSERVER_NAME])

    result = self.client._get_ems_log_destination_vserver()

    mock_list_vservers.assert_called_once_with(vserver_type='admin')
    self.assertEqual(fake_client.ADMIN_VSERVER_NAME, result)
def test_get_ems_log_destination_vserver_legacy(self):
    """ONTAPI < 1.21 falls back to a node vserver for EMS logging."""
    self.mock_object(self.client,
                     'get_ontapi_version',
                     return_value=(1, 15))
    mock_list_vservers = self.mock_object(
        self.client,
        'list_vservers',
        return_value=[fake_client.NODE_VSERVER_NAME])

    result = self.client._get_ems_log_destination_vserver()

    mock_list_vservers.assert_called_once_with(vserver_type='node')
    self.assertEqual(fake_client.NODE_VSERVER_NAME, result)
def test_get_ems_log_destination_no_cluster_creds(self):
    """Without admin vservers the lookup falls back to a data vserver."""
    self.mock_object(self.client,
                     'get_ontapi_version',
                     return_value=(1, 21))
    mock_list_vservers = self.mock_object(
        self.client,
        'list_vservers',
        side_effect=[[], [fake_client.VSERVER_NAME]])

    result = self.client._get_ems_log_destination_vserver()

    mock_list_vservers.assert_has_calls([
        mock.call(vserver_type='admin'),
        mock.call(vserver_type='data')])
    self.assertEqual(fake_client.VSERVER_NAME, result)
def test_get_ems_log_destination_vserver_not_found(self):
    """NotFound is raised when admin, data and node lookups all fail."""
    self.mock_object(self.client,
                     'get_ontapi_version',
                     return_value=(1, 21))
    mock_list_vservers = self.mock_object(
        self.client,
        'list_vservers',
        return_value=[])

    self.assertRaises(exception.NotFound,
                      self.client._get_ems_log_destination_vserver)

    mock_list_vservers.assert_has_calls([
        mock.call(vserver_type='admin'),
        mock.call(vserver_type='data'),
        mock.call(vserver_type='node')])
def test_get_iscsi_target_details_no_targets(self):
    """An empty attributes-list yields an empty target list."""
    response = netapp_api.NaElement(
        etree.XML("""<results status="passed">
                        <num-records>1</num-records>
                        <attributes-list></attributes-list>
                      </results>"""))

    self.connection.invoke_successfully.return_value = response
    target_list = self.client.get_iscsi_target_details()

    self.assertEqual([], target_list)
def test_get_iscsi_target_details(self):
    """Target address/port/enabled/tpgroup fields are parsed from the XML."""
    expected_target = {
        "address": "127.0.0.1",
        "port": "1337",
        "interface-enabled": "true",
        "tpgroup-tag": "7777",
    }
    response = netapp_api.NaElement(
        etree.XML("""<results status="passed">
                        <num-records>1</num-records>
                        <attributes-list>
                          <iscsi-interface-list-entry-info>
                            <ip-address>%(address)s</ip-address>
                            <ip-port>%(port)s</ip-port>
                            <is-interface-enabled>%(interface-enabled)s</is-interface-enabled>
                            <tpgroup-tag>%(tpgroup-tag)s</tpgroup-tag>
                          </iscsi-interface-list-entry-info>
                        </attributes-list>
                      </results>""" % expected_target))

    self.connection.invoke_successfully.return_value = response
    target_list = self.client.get_iscsi_target_details()

    self.assertEqual([expected_target], target_list)
def test_get_iscsi_service_details_with_no_iscsi_service(self):
    """No iSCSI service configured yields a None IQN."""
    response = netapp_api.NaElement(
        etree.XML("""<results status="passed">
                        <num-records>0</num-records>
                      </results>"""))

    self.connection.invoke_successfully.return_value = response
    iqn = self.client.get_iscsi_service_details()

    self.assertIsNone(iqn)
def test_get_iscsi_service_details(self):
    """The node-name element is returned as the service IQN."""
    expected_iqn = 'iqn.1998-01.org.openstack.iscsi:name1'
    response = netapp_api.NaElement(
        etree.XML("""<results status="passed">
                        <num-records>1</num-records>
                        <attributes-list>
                          <iscsi-service-info>
                            <node-name>%s</node-name>
                          </iscsi-service-info>
                        </attributes-list>
                      </results>""" % expected_iqn))

    self.connection.invoke_successfully.return_value = response
    iqn = self.client.get_iscsi_service_details()

    self.assertEqual(expected_iqn, iqn)
def test_get_lun_list(self):
    """A single-page response returns one lun-info entry per record."""
    response = netapp_api.NaElement(
        etree.XML("""<results status="passed">
                        <num-records>2</num-records>
                        <attributes-list>
                          <lun-info>
                          </lun-info>
                          <lun-info>
                          </lun-info>
                        </attributes-list>
                      </results>"""))

    self.connection.invoke_successfully.return_value = response
    luns = self.client.get_lun_list()

    self.assertEqual(2, len(luns))
def test_get_lun_list_with_multiple_pages(self):
    """Entries are accumulated across pages linked by next-tag."""
    response = netapp_api.NaElement(
        etree.XML("""<results status="passed">
                        <num-records>2</num-records>
                        <attributes-list>
                          <lun-info> </lun-info>
                          <lun-info> </lun-info>
                        </attributes-list>
                        <next-tag>fake-next</next-tag>
                      </results>"""))
    response_2 = netapp_api.NaElement(
        etree.XML("""<results status="passed">
                        <num-records>2</num-records>
                        <attributes-list>
                          <lun-info> </lun-info>
                          <lun-info> </lun-info>
                        </attributes-list>
                      </results>"""))

    self.connection.invoke_successfully.side_effect = [response,
                                                       response_2]
    luns = self.client.get_lun_list()

    self.assertEqual(4, len(luns))
def test_get_lun_map_no_luns_mapped(self):
    """An unmapped LUN path yields an empty map list."""
    path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
    response = netapp_api.NaElement(
        etree.XML("""<results status="passed">
                        <num-records>0</num-records>
                        <attributes-list></attributes-list>
                      </results>"""))

    self.connection.invoke_successfully.return_value = response
    lun_map = self.client.get_lun_map(path)

    self.assertEqual([], lun_map)
def test_get_lun_map(self):
    """igroup, lun-id and vserver are parsed from a lun-map-info entry."""
    path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
    expected_lun_map = {
        "initiator-group": "igroup",
        "lun-id": "1337",
        "vserver": "vserver",
    }
    response = netapp_api.NaElement(
        etree.XML("""<results status="passed">
                        <num-records>1</num-records>
                        <attributes-list>
                          <lun-map-info>
                            <lun-id>%(lun-id)s</lun-id>
                            <initiator-group>%(initiator-group)s</initiator-group>
                            <vserver>%(vserver)s</vserver>
                          </lun-map-info>
                        </attributes-list>
                      </results>""" % expected_lun_map))

    self.connection.invoke_successfully.return_value = response
    lun_map = self.client.get_lun_map(path)

    self.assertEqual([expected_lun_map], lun_map)
def test_get_lun_map_multiple_pages(self):
    """Map entries are accumulated across pages linked by next-tag."""
    path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
    expected_lun_map = {
        "initiator-group": "igroup",
        "lun-id": "1337",
        "vserver": "vserver",
    }
    response = netapp_api.NaElement(
        etree.XML("""<results status="passed">
                        <num-records>1</num-records>
                        <attributes-list>
                          <lun-map-info>
                            <lun-id>%(lun-id)s</lun-id>
                            <initiator-group>%(initiator-group)s</initiator-group>
                            <vserver>%(vserver)s</vserver>
                          </lun-map-info>
                        </attributes-list>
                        <next-tag>blah</next-tag>
                      </results>""" % expected_lun_map))
    response_2 = netapp_api.NaElement(
        etree.XML("""<results status="passed">
                        <num-records>1</num-records>
                        <attributes-list>
                          <lun-map-info>
                            <lun-id>%(lun-id)s</lun-id>
                            <initiator-group>%(initiator-group)s</initiator-group>
                            <vserver>%(vserver)s</vserver>
                          </lun-map-info>
                        </attributes-list>
                      </results>""" % expected_lun_map))

    self.connection.invoke_successfully.side_effect = [response,
                                                       response_2]
    lun_map = self.client.get_lun_map(path)

    self.assertEqual([expected_lun_map, expected_lun_map], lun_map)
def test_get_igroup_by_initiator_none_found(self):
    """An initiator with no igroups yields an empty list."""
    initiator = 'initiator'
    response = netapp_api.NaElement(
        etree.XML("""<results status="passed">
                        <num-records>0</num-records>
                        <attributes-list></attributes-list>
                      </results>"""))

    self.connection.invoke_successfully.return_value = response
    igroup = self.client.get_igroup_by_initiators([initiator])

    self.assertEqual([], igroup)
    def test_get_igroup_by_initiators(self):
        """A single-initiator query returns the matching igroup attributes."""
        initiators = ['11:22:33:44:55:66:77:88']
        expected_igroup = {
            'initiator-group-os-type': 'default',
            'initiator-group-type': 'fcp',
            'initiator-group-name': 'openstack-igroup1',
        }
        response = netapp_api.NaElement(
            etree.XML("""<results status="passed">
    <attributes-list>
      <initiator-group-info>
        <initiator-group-alua-enabled>true</initiator-group-alua-enabled>
        <initiator-group-name>%(initiator-group-name)s</initiator-group-name>
        <initiator-group-os-type>default</initiator-group-os-type>
        <initiator-group-throttle-borrow>false</initiator-group-throttle-borrow>
        <initiator-group-throttle-reserve>0</initiator-group-throttle-reserve>
        <initiator-group-type>%(initiator-group-type)s</initiator-group-type>
        <initiator-group-use-partner>true</initiator-group-use-partner>
        <initiator-group-uuid>f8aa707a-57fa-11e4-ad08-123478563412
        </initiator-group-uuid>
        <initiator-group-vsa-enabled>false</initiator-group-vsa-enabled>
        <initiators>
          <initiator-info>
            <initiator-name>11:22:33:44:55:66:77:88</initiator-name>
          </initiator-info>
        </initiators>
        <vserver>cinder-iscsi</vserver>
      </initiator-group-info>
    </attributes-list>
    <num-records>1</num-records>
  </results>""" % expected_igroup))
        self.connection.invoke_successfully.return_value = response

        igroups = self.client.get_igroup_by_initiators(initiators)

        # make these lists of dicts comparable using hashable dictionaries
        igroups = set(
            [netapp_utils.hashabledict(igroup) for igroup in igroups])
        expected = set([netapp_utils.hashabledict(expected_igroup)])
        self.assertSetEqual(igroups, expected)
    def test_get_igroup_by_initiators_multiple(self):
        """An igroup containing both queried initiators is returned once."""
        initiators = ['11:22:33:44:55:66:77:88', '88:77:66:55:44:33:22:11']
        expected_igroup = {
            'initiator-group-os-type': 'default',
            'initiator-group-type': 'fcp',
            'initiator-group-name': 'openstack-igroup1',
        }
        # One igroup record whose <initiators> holds both initiator names.
        response = netapp_api.NaElement(
            etree.XML("""<results status="passed">
    <attributes-list>
      <initiator-group-info>
        <initiator-group-alua-enabled>true</initiator-group-alua-enabled>
        <initiator-group-name>%(initiator-group-name)s</initiator-group-name>
        <initiator-group-os-type>default</initiator-group-os-type>
        <initiator-group-throttle-borrow>false</initiator-group-throttle-borrow>
        <initiator-group-throttle-reserve>0</initiator-group-throttle-reserve>
        <initiator-group-type>%(initiator-group-type)s</initiator-group-type>
        <initiator-group-use-partner>true</initiator-group-use-partner>
        <initiator-group-uuid>f8aa707a-57fa-11e4-ad08-123478563412
        </initiator-group-uuid>
        <initiator-group-vsa-enabled>false</initiator-group-vsa-enabled>
        <initiators>
          <initiator-info>
            <initiator-name>11:22:33:44:55:66:77:88</initiator-name>
          </initiator-info>
          <initiator-info>
            <initiator-name>88:77:66:55:44:33:22:11</initiator-name>
          </initiator-info>
        </initiators>
        <vserver>cinder-iscsi</vserver>
      </initiator-group-info>
    </attributes-list>
    <num-records>1</num-records>
  </results>""" % expected_igroup))
        self.connection.invoke_successfully.return_value = response

        igroups = self.client.get_igroup_by_initiators(initiators)

        # make these lists of dicts comparable using hashable dictionaries
        igroups = set(
            [netapp_utils.hashabledict(igroup) for igroup in igroups])
        expected = set([netapp_utils.hashabledict(expected_igroup)])
        self.assertSetEqual(igroups, expected)
    def test_get_igroup_by_initiators_multiple_pages(self):
        """Paged igroup-get-iter responses are merged into one result set."""
        initiator = '11:22:33:44:55:66:77:88'
        expected_igroup1 = {
            'initiator-group-os-type': 'default',
            'initiator-group-type': 'fcp',
            'initiator-group-name': 'openstack-igroup1',
        }
        expected_igroup2 = {
            'initiator-group-os-type': 'default',
            'initiator-group-type': 'fcp',
            'initiator-group-name': 'openstack-igroup2',
        }
        # First page carries a <next-tag>, forcing a second API call.
        response_1 = netapp_api.NaElement(
            etree.XML("""<results status="passed">
    <attributes-list>
      <initiator-group-info>
        <initiator-group-alua-enabled>true</initiator-group-alua-enabled>
        <initiator-group-name>%(initiator-group-name)s</initiator-group-name>
        <initiator-group-os-type>default</initiator-group-os-type>
        <initiator-group-throttle-borrow>false</initiator-group-throttle-borrow>
        <initiator-group-throttle-reserve>0</initiator-group-throttle-reserve>
        <initiator-group-type>%(initiator-group-type)s</initiator-group-type>
        <initiator-group-use-partner>true</initiator-group-use-partner>
        <initiator-group-uuid>f8aa707a-57fa-11e4-ad08-123478563412
        </initiator-group-uuid>
        <initiator-group-vsa-enabled>false</initiator-group-vsa-enabled>
        <initiators>
          <initiator-info>
            <initiator-name>11:22:33:44:55:66:77:88</initiator-name>
          </initiator-info>
        </initiators>
        <vserver>cinder-iscsi</vserver>
      </initiator-group-info>
    </attributes-list>
    <next-tag>12345</next-tag>
    <num-records>1</num-records>
  </results>""" % expected_igroup1))
        # Final page (no <next-tag>) returns the second igroup.
        response_2 = netapp_api.NaElement(
            etree.XML("""<results status="passed">
    <attributes-list>
      <initiator-group-info>
        <initiator-group-alua-enabled>true</initiator-group-alua-enabled>
        <initiator-group-name>%(initiator-group-name)s</initiator-group-name>
        <initiator-group-os-type>default</initiator-group-os-type>
        <initiator-group-throttle-borrow>false</initiator-group-throttle-borrow>
        <initiator-group-throttle-reserve>0</initiator-group-throttle-reserve>
        <initiator-group-type>%(initiator-group-type)s</initiator-group-type>
        <initiator-group-use-partner>true</initiator-group-use-partner>
        <initiator-group-uuid>f8aa707a-57fa-11e4-ad08-123478563412
        </initiator-group-uuid>
        <initiator-group-vsa-enabled>false</initiator-group-vsa-enabled>
        <initiators>
          <initiator-info>
            <initiator-name>11:22:33:44:55:66:77:88</initiator-name>
          </initiator-info>
        </initiators>
        <vserver>cinder-iscsi</vserver>
      </initiator-group-info>
    </attributes-list>
    <num-records>1</num-records>
  </results>""" % expected_igroup2))
        self.connection.invoke_successfully.side_effect = [response_1,
                                                           response_2]

        igroups = self.client.get_igroup_by_initiators([initiator])

        # make these lists of dicts comparable using hashable dictionaries
        igroups = set(
            [netapp_utils.hashabledict(igroup) for igroup in igroups])
        expected = set([netapp_utils.hashabledict(expected_igroup1),
                        netapp_utils.hashabledict(expected_igroup2)])
        self.assertSetEqual(igroups, expected)
@ddt.data(True, False)
def test__validate_qos_policy_group_none_adaptive(self, is_adaptive):
self.client.features.add_feature('ADAPTIVE_QOS', supported=True)
self.client._validate_qos_policy_group(
is_adaptive=is_adaptive, spec=None)
def test__validate_qos_policy_group_none_adaptive_no_support(self):
self.client.features.add_feature('ADAPTIVE_QOS', supported=False)
self.assertRaises(
netapp_utils.NetAppDriverException,
self.client._validate_qos_policy_group,
is_adaptive=True,
spec=None)
@ddt.data(True, False)
def test__validate_qos_policy_group_no_qos_min_support(self, is_adaptive):
spec = {'min_throughput': '10'}
self.assertRaises(
netapp_utils.NetAppDriverException,
self.client._validate_qos_policy_group,
is_adaptive=is_adaptive,
spec=spec,
qos_min_support=False)
def test__validate_qos_policy_group_no_block_size_support(self):
self.client.features.add_feature(
'ADAPTIVE_QOS_BLOCK_SIZE', supported=False)
spec = {'block_size': '4K'}
self.assertRaises(
netapp_utils.NetAppDriverException,
self.client._validate_qos_policy_group,
is_adaptive=True,
spec=spec)
def test__validate_qos_policy_group_no_expected_iops_allocation_support(
self):
self.client.features.add_feature(
'ADAPTIVE_QOS_EXPECTED_IOPS_ALLOCATION', supported=False)
spec = {'expected_iops_allocation': 'used-space'}
self.assertRaises(
netapp_utils.NetAppDriverException,
self.client._validate_qos_policy_group,
is_adaptive=True,
spec=spec)
def test__validate_qos_policy_group_adaptive_qos_spec(self):
self.client.features.add_feature('ADAPTIVE_QOS', supported=True)
self.client.features.add_feature(
'ADAPTIVE_QOS_BLOCK_SIZE', supported=True)
self.client.features.add_feature(
'ADAPTIVE_QOS_EXPECTED_IOPS_ALLOCATION', supported=True)
spec = {
'expected_iops': '128IOPS/GB',
'peak_iops': '512IOPS/GB',
'expected_iops_allocation': 'used-space',
'peak_iops_allocation': 'used-space',
'absolute_min_iops': '64IOPS',
'block_size': '4K',
}
self.client._validate_qos_policy_group(is_adaptive=True, spec=spec)
def test_clone_lun(self):
self.client.clone_lun(
'volume', 'fakeLUN', 'newFakeLUN',
qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME)
self.assertEqual(1, self.connection.invoke_successfully.call_count)
@ddt.data({'supports_is_backup': True, 'is_snapshot': True},
{'supports_is_backup': True, 'is_snapshot': False},
{'supports_is_backup': False, 'is_snapshot': True},
{'supports_is_backup': False, 'is_snapshot': False})
@ddt.unpack
def test_clone_lun_is_snapshot(self, supports_is_backup, is_snapshot):
self.client.features.add_feature('BACKUP_CLONE_PARAM',
supported=supports_is_backup)
self.client.clone_lun(
'volume', 'fakeLUN', 'newFakeLUN', is_snapshot=is_snapshot)
clone_create_args = {
'volume': 'volume',
'source-path': 'fakeLUN',
'destination-path': 'newFakeLUN',
'space-reserve': 'true',
}
if is_snapshot and supports_is_backup:
clone_create_args['is-backup'] = 'true'
self.connection.invoke_successfully.assert_called_once_with(
netapp_api.NaElement.create_node_with_children(
'clone-create', **clone_create_args), True)
@ddt.data(0, 1)
def test_clone_lun_is_sub_clone(self, block_count):
self.client.clone_lun(
'volume', 'fakeLUN', 'newFakeLUN', block_count=block_count)
clone_create_args = {
'volume': 'volume',
'source-path': 'fakeLUN',
'destination-path': 'newFakeLUN',
}
is_sub_clone = block_count > 0
if not is_sub_clone:
clone_create_args['space-reserve'] = 'true'
# build the expected request
expected_clone_create_request = \
netapp_api.NaElement.create_node_with_children(
'clone-create', **clone_create_args)
# add expected fields in the request if it's a sub-clone
if is_sub_clone:
block_ranges = netapp_api.NaElement("block-ranges")
block_range = \
netapp_api.NaElement.create_node_with_children(
'block-range',
**{'source-block-number': '0',
'destination-block-number': '0',
'block-count': '1'})
block_ranges.add_child_elem(block_range)
expected_clone_create_request.add_child_elem(block_ranges)
self.connection.invoke_successfully.assert_called_once_with(
expected_clone_create_request, True)
def test_clone_lun_multiple_zapi_calls(self):
"""Test for when lun clone requires more than one zapi call."""
# Max clone size per call = 2^18 blocks * 512 bytes/block = 128 MB
# Force 2 calls
bc = 2 ** 18 * 2
self.client.clone_lun('volume', 'fakeLUN', 'newFakeLUN',
block_count=bc)
self.assertEqual(2, self.connection.invoke_successfully.call_count)
    def test_get_lun_by_args(self):
        """An unfiltered query returns the LUN records from the response."""
        response = netapp_api.NaElement(
            etree.XML("""<results status="passed">
                <num-records>2</num-records>
                <attributes-list>
                  <lun-info>
                  </lun-info>
                </attributes-list>
              </results>"""))
        self.connection.invoke_successfully.return_value = response

        lun = self.client.get_lun_by_args()

        self.assertEqual(1, len(lun))
    def test_get_lun_by_args_no_lun_found(self):
        """An empty attributes-list yields an empty result."""
        response = netapp_api.NaElement(
            etree.XML("""<results status="passed">
                <num-records>2</num-records>
                <attributes-list>
                </attributes-list>
              </results>"""))
        self.connection.invoke_successfully.return_value = response

        lun = self.client.get_lun_by_args()

        self.assertEqual(0, len(lun))
    def test_get_lun_by_args_with_args_specified(self):
        """Keyword filters are forwarded into the request's query element."""
        path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
        response = netapp_api.NaElement(
            etree.XML("""<results status="passed">
                <num-records>2</num-records>
                <attributes-list>
                  <lun-info>
                  </lun-info>
                </attributes-list>
              </results>"""))
        self.connection.invoke_successfully.return_value = response

        lun = self.client.get_lun_by_args(path=path)

        # Inspect the request NaElement that was actually sent.
        __, _args, __ = self.connection.invoke_successfully.mock_calls[0]
        actual_request = _args[0]
        query = actual_request.get_child_by_name('query')
        lun_info_args = query.get_child_by_name('lun-info').get_children()

        # Assert request is made with correct arguments
        self.assertEqual('path', lun_info_args[0].get_name())
        self.assertEqual(path, lun_info_args[0].get_content())

        self.assertEqual(1, len(lun))
def test_file_assign_qos(self):
api_args = {
'volume': fake.FLEXVOL,
'qos-policy-group-name': fake.QOS_POLICY_GROUP_NAME,
'file': fake.NFS_FILE_PATH,
'vserver': self.vserver
}
self.client.file_assign_qos(fake.FLEXVOL, fake.QOS_POLICY_GROUP_NAME,
False, fake.NFS_FILE_PATH)
self.mock_send_request.assert_has_calls([
mock.call('file-assign-qos', api_args, False)])
def test_set_lun_qos_policy_group(self):
api_args = {
'path': fake.LUN_PATH,
'qos-policy-group': fake.QOS_POLICY_GROUP_NAME,
}
self.client.set_lun_qos_policy_group(
fake.LUN_PATH, fake.QOS_POLICY_GROUP_NAME)
self.mock_send_request.assert_has_calls([
mock.call('lun-set-qos-policy-group', api_args)])
def test_provision_qos_policy_group_no_qos_policy_group_info(self):
mock_qos_policy_group_create = self.mock_object(
self.client, 'qos_policy_group_create')
self.client.provision_qos_policy_group(qos_policy_group_info=None,
qos_min_support=True)
mock_qos_policy_group_create.assert_not_called()
def test_provision_qos_policy_group_no_legacy_no_spec(self):
mock_qos_policy_group_exists = self.mock_object(
self.client, 'qos_policy_group_exists')
mock_qos_policy_group_create = self.mock_object(
self.client, 'qos_policy_group_create')
mock_qos_policy_group_modify = self.mock_object(
self.client, 'qos_policy_group_modify')
self.client.provision_qos_policy_group(qos_policy_group_info={},
qos_min_support=False)
mock_qos_policy_group_exists.assert_not_called()
mock_qos_policy_group_create.assert_not_called()
mock_qos_policy_group_modify.assert_not_called()
def test_provision_qos_policy_group_legacy_qos_policy_group_info(self):
mock_qos_policy_group_create = self.mock_object(
self.client, 'qos_policy_group_create')
self.client.provision_qos_policy_group(
qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO_LEGACY,
qos_min_support=True)
mock_qos_policy_group_create.assert_not_called()
def test_provision_qos_policy_group_with_qos_spec_create_with_min(self):
self.mock_object(self.client,
'qos_policy_group_exists',
return_value=False)
mock_qos_policy_group_create = self.mock_object(
self.client, 'qos_policy_group_create')
mock_qos_policy_group_modify = self.mock_object(
self.client, 'qos_policy_group_modify')
self.client.provision_qos_policy_group(fake.QOS_POLICY_GROUP_INFO,
True)
mock_qos_policy_group_create.assert_called_once_with({
'policy_name': fake.QOS_POLICY_GROUP_NAME,
'min_throughput': fake.MIN_IOPS,
'max_throughput': fake.MAX_IOPS,
})
mock_qos_policy_group_modify.assert_not_called()
def test_provision_qos_policy_group_with_qos_spec_create_with_aqos(self):
self.client.features.add_feature('ADAPTIVE_QOS', supported=True)
self.client.features.add_feature(
'ADAPTIVE_QOS_BLOCK_SIZE', supported=True)
self.client.features.add_feature(
'ADAPTIVE_QOS_EXPECTED_IOPS_ALLOCATION', supported=True)
self.mock_object(self.client,
'qos_policy_group_exists',
return_value=False)
mock_qos_policy_group_create = self.mock_object(
self.client, 'qos_policy_group_create')
mock_qos_policy_group_modify = self.mock_object(
self.client, 'qos_policy_group_modify')
mock_qos_adaptive_policy_group_create = self.mock_object(
self.client, 'qos_adaptive_policy_group_create')
mock_qos_adaptive_policy_group_modify = self.mock_object(
self.client, 'qos_adaptive_policy_group_modify')
self.client.provision_qos_policy_group(
fake.ADAPTIVE_QOS_POLICY_GROUP_INFO, False)
mock_qos_adaptive_policy_group_create.assert_called_once_with(
fake.ADAPTIVE_QOS_SPEC)
mock_qos_adaptive_policy_group_modify.assert_not_called()
mock_qos_policy_group_create.assert_not_called()
mock_qos_policy_group_modify.assert_not_called()
def test_provision_qos_policy_group_with_qos_spec_create_unsupported(self):
mock_qos_policy_group_exists = self.mock_object(
self.client, 'qos_policy_group_exists')
mock_qos_policy_group_create = self.mock_object(
self.client, 'qos_policy_group_create')
mock_qos_policy_group_modify = self.mock_object(
self.client, 'qos_policy_group_modify')
self.assertRaises(
netapp_utils.NetAppDriverException,
self.client.provision_qos_policy_group,
fake.QOS_POLICY_GROUP_INFO,
False)
mock_qos_policy_group_exists.assert_not_called()
mock_qos_policy_group_create.assert_not_called()
mock_qos_policy_group_modify.assert_not_called()
def test_provision_qos_policy_group_with_invalid_qos_spec(self):
self.mock_object(self.client, '_validate_qos_policy_group',
side_effect=netapp_utils.NetAppDriverException)
mock_policy_group_spec_is_adaptive = self.mock_object(
netapp_utils, 'is_qos_policy_group_spec_adaptive')
mock_qos_policy_group_exists = self.mock_object(
self.client, 'qos_policy_group_exists')
mock_qos_policy_group_create = self.mock_object(
self.client, 'qos_policy_group_create')
mock_qos_policy_group_modify = self.mock_object(
self.client, 'qos_policy_group_modify')
self.assertRaises(
netapp_utils.NetAppDriverException,
self.client.provision_qos_policy_group,
fake.QOS_POLICY_GROUP_INFO,
False)
mock_policy_group_spec_is_adaptive.assert_called_once_with(
fake.QOS_POLICY_GROUP_INFO)
mock_qos_policy_group_exists.assert_not_called()
mock_qos_policy_group_create.assert_not_called()
mock_qos_policy_group_modify.assert_not_called()
def test_provision_qos_policy_group_with_qos_spec_create(self):
self.mock_object(self.client,
'qos_policy_group_exists',
return_value=False)
mock_qos_policy_group_create = self.mock_object(
self.client, 'qos_policy_group_create')
mock_qos_policy_group_modify = self.mock_object(
self.client, 'qos_policy_group_modify')
self.client.provision_qos_policy_group(fake.QOS_POLICY_GROUP_INFO_MAX,
True)
mock_qos_policy_group_create.assert_has_calls([
mock.call({
'policy_name': fake.QOS_POLICY_GROUP_NAME,
'max_throughput': fake.MAX_THROUGHPUT,
})])
mock_qos_policy_group_modify.assert_not_called()
def test_provision_qos_policy_group_with_qos_spec_modify_with_min(self):
self.mock_object(self.client,
'qos_policy_group_exists',
return_value=True)
mock_qos_policy_group_create = self.mock_object(
self.client, 'qos_policy_group_create')
mock_qos_policy_group_modify = self.mock_object(
self.client, 'qos_policy_group_modify')
self.client.provision_qos_policy_group(fake.QOS_POLICY_GROUP_INFO,
True)
mock_qos_policy_group_create.assert_not_called()
mock_qos_policy_group_modify.assert_has_calls([
mock.call({
'policy_name': fake.QOS_POLICY_GROUP_NAME,
'min_throughput': fake.MIN_IOPS,
'max_throughput': fake.MAX_IOPS,
})])
def test_provision_qos_policy_group_with_qos_spec_modify(self):
self.mock_object(self.client,
'qos_policy_group_exists',
return_value=True)
mock_qos_policy_group_create = self.mock_object(
self.client, 'qos_policy_group_create')
mock_qos_policy_group_modify = self.mock_object(
self.client, 'qos_policy_group_modify')
self.client.provision_qos_policy_group(fake.QOS_POLICY_GROUP_INFO_MAX,
True)
mock_qos_policy_group_create.assert_not_called()
mock_qos_policy_group_modify.assert_has_calls([
mock.call({
'policy_name': fake.QOS_POLICY_GROUP_NAME,
'max_throughput': fake.MAX_THROUGHPUT,
})])
def test_provision_qos_policy_group_with_qos_spec_modify_with_aqos(self):
self.client.features.add_feature('ADAPTIVE_QOS', supported=True)
self.client.features.add_feature(
'ADAPTIVE_QOS_BLOCK_SIZE', supported=True)
self.client.features.add_feature(
'ADAPTIVE_QOS_EXPECTED_IOPS_ALLOCATION', supported=True)
self.mock_object(self.client,
'qos_policy_group_exists',
return_value=True)
mock_qos_policy_group_create = self.mock_object(
self.client, 'qos_policy_group_create')
mock_qos_policy_group_modify = self.mock_object(
self.client, 'qos_policy_group_modify')
mock_qos_adaptive_policy_group_create = self.mock_object(
self.client, 'qos_adaptive_policy_group_create')
mock_qos_adaptive_policy_group_modify = self.mock_object(
self.client, 'qos_adaptive_policy_group_modify')
self.client.provision_qos_policy_group(
fake.ADAPTIVE_QOS_POLICY_GROUP_INFO, False)
mock_qos_adaptive_policy_group_modify.assert_called_once_with(
fake.ADAPTIVE_QOS_SPEC)
mock_qos_adaptive_policy_group_create.assert_not_called()
mock_qos_policy_group_create.assert_not_called()
mock_qos_policy_group_modify.assert_not_called()
def test_qos_policy_group_exists(self):
self.mock_send_request.return_value = netapp_api.NaElement(
fake_client.QOS_POLICY_GROUP_GET_ITER_RESPONSE)
result = self.client.qos_policy_group_exists(
fake.QOS_POLICY_GROUP_NAME)
api_args = {
'query': {
'qos-policy-group-info': {
'policy-group': fake.QOS_POLICY_GROUP_NAME,
},
},
'desired-attributes': {
'qos-policy-group-info': {
'policy-group': None,
},
},
}
self.mock_send_request.assert_has_calls([
mock.call('qos-policy-group-get-iter', api_args, False)])
self.assertTrue(result)
def test_qos_policy_group_exists_not_found(self):
self.mock_send_request.return_value = netapp_api.NaElement(
fake_client.NO_RECORDS_RESPONSE)
result = self.client.qos_policy_group_exists(
fake.QOS_POLICY_GROUP_NAME)
self.assertFalse(result)
def test_qos_policy_group_create(self):
api_args = {
'policy-group': fake.QOS_POLICY_GROUP_NAME,
'min-throughput': '0',
'max-throughput': fake.MAX_THROUGHPUT,
'vserver': self.vserver,
}
self.client.qos_policy_group_create({
'policy_name': fake.QOS_POLICY_GROUP_NAME,
'min_throughput': '0',
'max_throughput': fake.MAX_THROUGHPUT,
})
self.mock_send_request.assert_has_calls([
mock.call('qos-policy-group-create', api_args, False)])
def test_qos_adaptive_policy_group_create(self):
api_args = {
'policy-group': fake.QOS_POLICY_GROUP_NAME,
'expected-iops': '%sIOPS/GB' % fake.EXPECTED_IOPS_PER_GB,
'peak-iops': '%sIOPS/GB' % fake.PEAK_IOPS_PER_GB,
'expected-iops-allocation': fake.EXPECTED_IOPS_ALLOCATION,
'peak-iops-allocation': fake.PEAK_IOPS_ALLOCATION,
'block-size': fake.BLOCK_SIZE,
'vserver': self.vserver,
}
self.client.qos_adaptive_policy_group_create({
'policy_name': fake.QOS_POLICY_GROUP_NAME,
'expected_iops': '%sIOPS/GB' % fake.EXPECTED_IOPS_PER_GB,
'peak_iops': '%sIOPS/GB' % fake.PEAK_IOPS_PER_GB,
'expected_iops_allocation': fake.EXPECTED_IOPS_ALLOCATION,
'peak_iops_allocation': fake.PEAK_IOPS_ALLOCATION,
'block_size': fake.BLOCK_SIZE,
})
self.mock_send_request.assert_has_calls([
mock.call('qos-adaptive-policy-group-create', api_args, False)])
def test_qos_policy_group_modify(self):
api_args = {
'policy-group': fake.QOS_POLICY_GROUP_NAME,
'min-throughput': '0',
'max-throughput': fake.MAX_THROUGHPUT,
}
self.client.qos_policy_group_modify({
'policy_name': fake.QOS_POLICY_GROUP_NAME,
'min_throughput': '0',
'max_throughput': fake.MAX_THROUGHPUT,
})
self.mock_send_request.assert_has_calls([
mock.call('qos-policy-group-modify', api_args, False)])
def test_qos_adaptive_policy_group_modify(self):
api_args = {
'policy-group': fake.QOS_POLICY_GROUP_NAME,
'expected-iops': '%sIOPS/GB' % fake.EXPECTED_IOPS_PER_GB,
'peak-iops': '%sIOPS/GB' % fake.PEAK_IOPS_PER_GB,
'expected-iops-allocation': fake.EXPECTED_IOPS_ALLOCATION,
'peak-iops-allocation': fake.PEAK_IOPS_ALLOCATION,
'block-size': fake.BLOCK_SIZE,
}
self.client.qos_adaptive_policy_group_modify({
'policy_name': fake.QOS_POLICY_GROUP_NAME,
'expected_iops': '%sIOPS/GB' % fake.EXPECTED_IOPS_PER_GB,
'peak_iops': '%sIOPS/GB' % fake.PEAK_IOPS_PER_GB,
'expected_iops_allocation': fake.EXPECTED_IOPS_ALLOCATION,
'peak_iops_allocation': fake.PEAK_IOPS_ALLOCATION,
'block_size': fake.BLOCK_SIZE,
})
self.mock_send_request.assert_has_calls([
mock.call('qos-adaptive-policy-group-modify', api_args, False)])
def test_qos_policy_group_rename(self):
new_name = 'new-' + fake.QOS_POLICY_GROUP_NAME
api_args = {
'policy-group-name': fake.QOS_POLICY_GROUP_NAME,
'new-name': new_name,
}
self.client.qos_policy_group_rename(
fake.QOS_POLICY_GROUP_NAME, new_name)
self.mock_send_request.assert_has_calls([
mock.call('qos-policy-group-rename', api_args, False)])
def test_mark_qos_policy_group_for_deletion_no_qos_policy_group_info(self):
mock_rename = self.mock_object(self.client, 'qos_policy_group_rename')
mock_remove = self.mock_object(self.client,
'remove_unused_qos_policy_groups')
self.client.mark_qos_policy_group_for_deletion(
qos_policy_group_info=None)
self.assertEqual(0, mock_rename.call_count)
self.assertEqual(0, mock_remove.call_count)
def test_mark_qos_policy_group_for_deletion_legacy_qos_policy(self):
mock_rename = self.mock_object(self.client, 'qos_policy_group_rename')
mock_remove = self.mock_object(self.client,
'remove_unused_qos_policy_groups')
self.client.mark_qos_policy_group_for_deletion(
qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO_LEGACY)
self.assertEqual(0, mock_rename.call_count)
self.assertEqual(1, mock_remove.call_count)
@ddt.data(True, False)
def test_mark_qos_policy_group_for_deletion_w_qos_spec(self,
is_adaptive):
mock_rename = self.mock_object(self.client, 'qos_policy_group_rename')
mock_remove = self.mock_object(self.client,
'remove_unused_qos_policy_groups')
mock_log = self.mock_object(client_cmode.LOG, 'warning')
new_name = 'deleted_cinder_%s' % fake.QOS_POLICY_GROUP_NAME
self.client.mark_qos_policy_group_for_deletion(
qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO_MAX,
is_adaptive=is_adaptive)
mock_rename.assert_has_calls([
mock.call(fake.QOS_POLICY_GROUP_NAME, new_name, is_adaptive)])
self.assertEqual(0, mock_log.call_count)
self.assertEqual(1, mock_remove.call_count)
@ddt.data(True, False)
def test_mark_qos_policy_group_for_deletion_exception_path(self,
is_adaptive):
mock_rename = self.mock_object(self.client, 'qos_policy_group_rename')
mock_rename.side_effect = netapp_api.NaApiError
mock_remove = self.mock_object(self.client,
'remove_unused_qos_policy_groups')
mock_log = self.mock_object(client_cmode.LOG, 'warning')
new_name = 'deleted_cinder_%s' % fake.QOS_POLICY_GROUP_NAME
self.client.mark_qos_policy_group_for_deletion(
qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO_MAX,
is_adaptive=is_adaptive)
mock_rename.assert_has_calls([
mock.call(fake.QOS_POLICY_GROUP_NAME, new_name, is_adaptive)])
self.assertEqual(1, mock_log.call_count)
self.assertEqual(1, mock_remove.call_count)
def test_remove_unused_qos_policy_groups(self):
mock_log = self.mock_object(client_cmode.LOG, 'debug')
api_args = {
'query': {
'qos-policy-group-info': {
'policy-group': 'deleted_cinder_*',
'vserver': self.vserver,
}
},
'max-records': 3500,
'continue-on-failure': 'true',
'return-success-list': 'false',
'return-failure-list': 'false',
}
self.client.remove_unused_qos_policy_groups()
self.mock_send_request.assert_has_calls([
mock.call('qos-policy-group-delete-iter', api_args, False)])
self.assertEqual(0, mock_log.call_count)
def test_remove_unused_qos_policy_groups_api_error(self):
self.client.features.add_feature('ADAPTIVE_QOS', supported=True)
mock_log = self.mock_object(client_cmode.LOG, 'debug')
qos_query = {
'qos-policy-group-info': {
'policy-group': 'deleted_cinder_*',
'vserver': self.vserver,
}
}
adaptive_qos_query = {
'qos-adaptive-policy-group-info': {
'policy-group': 'deleted_cinder_*',
'vserver': self.vserver,
}
}
qos_api_args = {
'query': qos_query,
'max-records': 3500,
'continue-on-failure': 'true',
'return-success-list': 'false',
'return-failure-list': 'false',
}
adaptive_qos_api_args = {
'query': adaptive_qos_query,
'max-records': 3500,
'continue-on-failure': 'true',
'return-success-list': 'false',
'return-failure-list': 'false',
}
self.mock_send_request.side_effect = netapp_api.NaApiError
self.client.remove_unused_qos_policy_groups()
self.mock_send_request.assert_has_calls([
mock.call('qos-policy-group-delete-iter',
qos_api_args, False),
mock.call('qos-adaptive-policy-group-delete-iter',
adaptive_qos_api_args, False),
])
self.assertEqual(2, mock_log.call_count)
    @mock.patch('cinder.volume.volume_utils.resolve_hostname',
                return_value='192.168.1.101')
    def test_get_if_info_by_ip_not_found(self, mock_resolve_hostname):
        """No matching network interface raises NotFound."""
        fake_ip = '192.168.1.101'
        response = netapp_api.NaElement(
            etree.XML("""<results status="passed">
                <num-records>0</num-records>
                <attributes-list>
                </attributes-list>
              </results>"""))
        self.connection.invoke_successfully.return_value = response

        self.assertRaises(exception.NotFound, self.client.get_if_info_by_ip,
                          fake_ip)
    @mock.patch('cinder.volume.volume_utils.resolve_hostname',
                return_value='192.168.1.101')
    def test_get_if_info_by_ip(self, mock_resolve_hostname):
        """A matching interface record is returned for the resolved IP."""
        fake_ip = '192.168.1.101'
        response = netapp_api.NaElement(
            etree.XML("""<results status="passed">
                <num-records>1</num-records>
                <attributes-list>
                  <net-interface-info>
                    <vserver>fake_vserver</vserver>
                  </net-interface-info>
                </attributes-list>
              </results>"""))
        self.connection.invoke_successfully.return_value = response

        results = self.client.get_if_info_by_ip(fake_ip)

        self.assertEqual(1, len(results))
    def test_get_vol_by_junc_vserver_not_found(self):
        """No volume at the junction path raises NotFound."""
        fake_vserver = 'fake_vserver'
        fake_junc = 'fake_junction_path'
        response = netapp_api.NaElement(
            etree.XML("""<results status="passed">
                <num-records>0</num-records>
                <attributes-list>
                </attributes-list>
              </results>"""))
        self.connection.invoke_successfully.return_value = response

        self.assertRaises(exception.NotFound,
                          self.client.get_vol_by_junc_vserver,
                          fake_vserver, fake_junc)
    def test_get_vol_by_junc_vserver(self):
        """The flexvol name is extracted from the unique volume record."""
        fake_vserver = 'fake_vserver'
        fake_junc = 'fake_junction_path'
        expected_flex_vol = 'fake_flex_vol'
        volume_attr_str = ("""
            <volume-attributes>
              <volume-id-attributes>
                <name>%(flex_vol)s</name>
              </volume-id-attributes>
            </volume-attributes>
            """ % {'flex_vol': expected_flex_vol})
        volume_attr = netapp_api.NaElement(etree.XML(volume_attr_str))
        response = netapp_api.NaElement(
            etree.XML("""<results status="passed">
                <num-records>1</num-records>
                <attributes-list>%(vol)s</attributes-list>
              </results>""" % {'vol': volume_attr_str}))
        self.connection.invoke_successfully.return_value = response
        # get_unique_volume is stubbed to return the parsed volume element.
        mock_get_unique_vol = self.mock_object(
            self.client, 'get_unique_volume', return_value=volume_attr)

        actual_flex_vol = self.client.get_vol_by_junc_vserver(fake_vserver,
                                                              fake_junc)

        self.assertEqual(expected_flex_vol, actual_flex_vol)
        mock_get_unique_vol.assert_called_once_with(response)
    def test_clone_file(self):
        """clone-create carries volume, paths and the source snapshot name;
        destination-exists is omitted by default."""
        expected_flex_vol = "fake_flex_vol"
        expected_src_path = "fake_src_path"
        expected_dest_path = "fake_dest_path"
        self.connection.get_api_version.return_value = (1, 20)

        self.client.clone_file(expected_flex_vol, expected_src_path,
                               expected_dest_path, self.vserver,
                               source_snapshot=fake.CG_SNAPSHOT_ID)

        # Inspect the request NaElement that was actually sent.
        __, _args, __ = self.connection.invoke_successfully.mock_calls[0]
        actual_request = _args[0]
        actual_flex_vol = actual_request.get_child_by_name('volume') \
            .get_content()
        actual_src_path = actual_request \
            .get_child_by_name('source-path').get_content()
        actual_dest_path = actual_request.get_child_by_name(
            'destination-path').get_content()

        self.assertEqual(expected_flex_vol, actual_flex_vol)
        self.assertEqual(expected_src_path, actual_src_path)
        self.assertEqual(expected_dest_path, actual_dest_path)
        req_snapshot_child = actual_request.get_child_by_name('snapshot-name')
        self.assertEqual(fake.CG_SNAPSHOT_ID, req_snapshot_child.get_content())
        self.assertIsNone(actual_request.get_child_by_name(
            'destination-exists'))
    def test_clone_file_when_destination_exists(self):
        """On ONTAPI >= 1.20, dest_exists=True adds destination-exists."""
        expected_flex_vol = "fake_flex_vol"
        expected_src_path = "fake_src_path"
        expected_dest_path = "fake_dest_path"
        self.connection.get_api_version.return_value = (1, 20)

        self.client.clone_file(expected_flex_vol, expected_src_path,
                               expected_dest_path, self.vserver,
                               dest_exists=True)

        # Inspect the request NaElement that was actually sent.
        __, _args, __ = self.connection.invoke_successfully.mock_calls[0]
        actual_request = _args[0]
        actual_flex_vol = actual_request.get_child_by_name('volume') \
            .get_content()
        actual_src_path = actual_request \
            .get_child_by_name('source-path').get_content()
        actual_dest_path = actual_request.get_child_by_name(
            'destination-path').get_content()

        self.assertEqual(expected_flex_vol, actual_flex_vol)
        self.assertEqual(expected_src_path, actual_src_path)
        self.assertEqual(expected_dest_path, actual_dest_path)
        self.assertEqual('true',
                         actual_request.get_child_by_name(
                             'destination-exists').get_content())
    def test_clone_file_when_destination_exists_and_version_less_than_1_20(
            self):
        """ONTAPI < 1.20: destination-exists is unsupported and omitted."""
        expected_flex_vol = "fake_flex_vol"
        expected_src_path = "fake_src_path"
        expected_dest_path = "fake_dest_path"
        self.connection.get_api_version.return_value = (1, 19)
        self.client.clone_file(expected_flex_vol, expected_src_path,
                               expected_dest_path, self.vserver,
                               dest_exists=True)
        # Inspect the first API call made through the connection mock.
        __, _args, __ = self.connection.invoke_successfully.mock_calls[0]
        actual_request = _args[0]
        actual_flex_vol = actual_request.get_child_by_name('volume') \
            .get_content()
        actual_src_path = actual_request \
            .get_child_by_name('source-path').get_content()
        actual_dest_path = actual_request.get_child_by_name(
            'destination-path').get_content()
        self.assertEqual(expected_flex_vol, actual_flex_vol)
        self.assertEqual(expected_src_path, actual_src_path)
        self.assertEqual(expected_dest_path, actual_dest_path)
        # Despite dest_exists=True, the old API version drops the element.
        self.assertIsNone(actual_request.get_child_by_name(
            'destination-exists'))
    @ddt.data({'supports_is_backup': True, 'is_snapshot': True},
              {'supports_is_backup': True, 'is_snapshot': False},
              {'supports_is_backup': False, 'is_snapshot': True},
              {'supports_is_backup': False, 'is_snapshot': False})
    @ddt.unpack
    def test_clone_file_is_snapshot(self, supports_is_backup, is_snapshot):
        """is-backup is sent only when requested AND the feature exists."""
        self.connection.get_api_version.return_value = (1, 20)
        self.client.features.add_feature('BACKUP_CLONE_PARAM',
                                         supported=supports_is_backup)
        self.client.clone_file(
            'volume', 'fake_source', 'fake_destination', 'fake_vserver',
            is_snapshot=is_snapshot)
        clone_create_args = {
            'volume': 'volume',
            'source-path': 'fake_source',
            'destination-path': 'fake_destination',
        }
        if is_snapshot and supports_is_backup:
            clone_create_args['is-backup'] = 'true'
        self.connection.invoke_successfully.assert_called_once_with(
            netapp_api.NaElement.create_node_with_children(
                'clone-create', **clone_create_args), True)
    def test_get_file_usage(self):
        """get_file_usage returns the unique-bytes value as a string."""
        expected_bytes = "2048"
        fake_vserver = 'fake_vserver'
        fake_path = 'fake_path'
        response = netapp_api.NaElement(
            etree.XML("""<results status="passed">
                                 <unique-bytes>%(unique-bytes)s</unique-bytes>
                              </results>""" % {'unique-bytes': expected_bytes}))
        self.connection.invoke_successfully.return_value = response
        actual_bytes = self.client.get_file_usage(fake_vserver, fake_path)
        self.assertEqual(expected_bytes, actual_bytes)
    def test_check_cluster_api(self):
        """With USER_CAPABILITY_LIST, the capability-based check is used."""
        self.client.features.USER_CAPABILITY_LIST = True
        mock_check_cluster_api_legacy = self.mock_object(
            self.client, '_check_cluster_api_legacy')
        mock_check_cluster_api = self.mock_object(
            self.client, '_check_cluster_api', return_value=True)
        result = self.client.check_cluster_api('object', 'operation', 'api')
        self.assertTrue(result)
        # The legacy probe must not run when the feature is available.
        self.assertFalse(mock_check_cluster_api_legacy.called)
        mock_check_cluster_api.assert_called_once_with(
            'object', 'operation', 'api')
    def test_check_cluster_api_legacy(self):
        """Without USER_CAPABILITY_LIST, the legacy probe is used instead."""
        self.client.features.USER_CAPABILITY_LIST = False
        mock_check_cluster_api_legacy = self.mock_object(
            self.client, '_check_cluster_api_legacy', return_value=True)
        mock_check_cluster_api = self.mock_object(
            self.client, '_check_cluster_api')
        result = self.client.check_cluster_api('object', 'operation', 'api')
        self.assertTrue(result)
        self.assertFalse(mock_check_cluster_api.called)
        # Legacy path only needs the API name, not object/operation.
        mock_check_cluster_api_legacy.assert_called_once_with('api')
    def test__check_cluster_api(self):
        """Capability query is built correctly and grants the named API."""
        api_response = netapp_api.NaElement(
            fake_client.SYSTEM_USER_CAPABILITY_GET_ITER_RESPONSE)
        self.mock_send_request.return_value = api_response
        result = self.client._check_cluster_api('object', 'operation', 'api')
        system_user_capability_get_iter_args = {
            'query': {
                'capability-info': {
                    'object-name': 'object',
                    'operation-list': {
                        'operation-info': {
                            'name': 'operation',
                        },
                    },
                },
            },
            'desired-attributes': {
                'capability-info': {
                    'operation-list': {
                        'operation-info': {
                            'api-name': None,
                        },
                    },
                },
            },
        }
        # Final False arg disables tunneling for this cluster-scope call.
        self.mock_send_request.assert_called_once_with(
            'system-user-capability-get-iter',
            system_user_capability_get_iter_args,
            False)
        self.assertTrue(result)
    @ddt.data(fake_client.SYSTEM_USER_CAPABILITY_GET_ITER_RESPONSE,
              fake_client.NO_RECORDS_RESPONSE)
    def test__check_cluster_api_not_found(self, response):
        """An API name absent from the capability list yields False."""
        api_response = netapp_api.NaElement(response)
        self.mock_send_request.return_value = api_response
        result = self.client._check_cluster_api('object', 'operation', 'api4')
        self.assertFalse(result)
    @ddt.data('volume-get-iter', 'volume-get', 'aggr-options-list-info')
    def test__check_cluster_api_legacy(self, api):
        """Legacy probe treats any successful invocation as API present."""
        api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE)
        self.mock_send_request.return_value = api_response
        result = self.client._check_cluster_api_legacy(api)
        self.assertTrue(result)
        self.mock_send_request.assert_called_once_with(api,
                                                       enable_tunneling=False)
    @ddt.data(netapp_api.EAPIPRIVILEGE, netapp_api.EAPINOTFOUND)
    def test__check_cluster_api_legacy_insufficient_privileges(self, code):
        """Privilege or not-found errors mean the API is unavailable."""
        self.mock_send_request.side_effect = netapp_api.NaApiError(code=code)
        result = self.client._check_cluster_api_legacy('volume-get-iter')
        self.assertFalse(result)
        self.mock_send_request.assert_called_once_with('volume-get-iter',
                                                       enable_tunneling=False)
    def test__check_cluster_api_legacy_api_error(self):
        """Any other API error still indicates the API exists."""
        self.mock_send_request.side_effect = netapp_api.NaApiError()
        result = self.client._check_cluster_api_legacy('volume-get-iter')
        self.assertTrue(result)
        self.mock_send_request.assert_called_once_with('volume-get-iter',
                                                       enable_tunneling=False)
    def test__check_cluster_api_legacy_invalid_api(self):
        """APIs outside the known legacy probe set raise ValueError."""
        self.assertRaises(ValueError,
                          self.client._check_cluster_api_legacy,
                          'fake_api')
    def test_get_operational_lif_addresses(self):
        """Only addresses of operationally 'up' LIFs are requested/returned."""
        expected_result = ['1.2.3.4', '99.98.97.96']
        api_response = netapp_api.NaElement(
            fake_client.GET_OPERATIONAL_LIF_ADDRESSES_RESPONSE)
        self.mock_object(self.client,
                         'send_iter_request',
                         return_value=api_response)
        address_list = self.client.get_operational_lif_addresses()
        net_interface_get_iter_args = {
            'query': {
                'net-interface-info': {
                    'operational-status': 'up'
                }
            },
            'desired-attributes': {
                'net-interface-info': {
                    'address': None,
                }
            }
        }
        self.client.send_iter_request.assert_called_once_with(
            'net-interface-get-iter', net_interface_get_iter_args)
        self.assertEqual(expected_result, address_list)
    @ddt.data({'junction_path': '/fake/vol'},
              {'name': 'fake_volume'},
              {'junction_path': '/fake/vol', 'name': 'fake_volume'})
    def test_get_volume_state(self, kwargs):
        """Volume-state query is built from whichever identifiers are given."""
        api_response = netapp_api.NaElement(
            fake_client.VOLUME_GET_ITER_STATE_RESPONSE)
        mock_send_iter_request = self.mock_object(
            self.client, 'send_iter_request', return_value=api_response)
        volume_response = netapp_api.NaElement(
            fake_client.VOLUME_GET_ITER_STATE_ATTR)
        mock_get_unique_vol = self.mock_object(
            self.client, 'get_unique_volume', return_value=volume_response)
        state = self.client.get_volume_state(**kwargs)
        # Rebuild the expected query from the same kwargs the client got.
        volume_id_attributes = {}
        if 'junction_path' in kwargs:
            volume_id_attributes['junction-path'] = kwargs['junction_path']
        if 'name' in kwargs:
            volume_id_attributes['name'] = kwargs['name']
        volume_get_iter_args = {
            'query': {
                'volume-attributes': {
                    'volume-id-attributes': volume_id_attributes,
                }
            },
            'desired-attributes': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'style-extended': None,
                    },
                    'volume-state-attributes': {
                        'state': None
                    }
                }
            },
        }
        mock_send_iter_request.assert_called_once_with(
            'volume-get-iter', volume_get_iter_args)
        mock_get_unique_vol.assert_called_once_with(api_response)
        self.assertEqual(fake_client.VOLUME_STATE_ONLINE, state)
    @ddt.data({'flexvol_path': '/fake/vol'},
              {'flexvol_name': 'fake_volume'},
              {'flexvol_path': '/fake/vol', 'flexvol_name': 'fake_volume'})
    def test_get_flexvol_capacity(self, kwargs):
        """Capacity query is built from path and/or name and returns sizes."""
        api_response = netapp_api.NaElement(
            fake_client.VOLUME_GET_ITER_CAPACITY_RESPONSE)
        mock_send_iter_request = self.mock_object(
            self.client, 'send_iter_request', return_value=api_response)
        volume_response = netapp_api.NaElement(
            fake_client.VOLUME_GET_ITER_CAPACITY_ATTR)
        mock_get_unique_vol = self.mock_object(
            self.client, 'get_unique_volume', return_value=volume_response)
        capacity = self.client.get_flexvol_capacity(**kwargs)
        # Rebuild the expected query from the same kwargs the client got.
        volume_id_attributes = {}
        if 'flexvol_path' in kwargs:
            volume_id_attributes['junction-path'] = kwargs['flexvol_path']
        if 'flexvol_name' in kwargs:
            volume_id_attributes['name'] = kwargs['flexvol_name']
        volume_get_iter_args = {
            'query': {
                'volume-attributes': {
                    'volume-id-attributes': volume_id_attributes,
                }
            },
            'desired-attributes': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'style-extended': None,
                    },
                    'volume-space-attributes': {
                        'size-available': None,
                        'size-total': None,
                    }
                }
            },
        }
        mock_send_iter_request.assert_called_once_with(
            'volume-get-iter', volume_get_iter_args)
        mock_get_unique_vol.assert_called_once_with(api_response)
        self.assertEqual(fake_client.VOLUME_SIZE_TOTAL, capacity['size-total'])
        self.assertEqual(fake_client.VOLUME_SIZE_AVAILABLE,
                         capacity['size-available'])
    def test_get_flexvol_capacity_not_found(self):
        """A missing volume surfaces as NetAppDriverException."""
        self.mock_send_request.return_value = netapp_api.NaElement(
            fake_client.NO_RECORDS_RESPONSE)
        self.assertRaises(netapp_utils.NetAppDriverException,
                          self.client.get_flexvol_capacity,
                          flexvol_path='fake_path')
    def test_list_flexvols(self):
        """Listing filters to healthy, online, non-root rw flexvols."""
        api_response = netapp_api.NaElement(
            fake_client.VOLUME_GET_ITER_LIST_RESPONSE)
        self.mock_object(self.client,
                         'send_iter_request',
                         return_value=api_response)
        result = self.client.list_flexvols()
        volume_get_iter_args = {
            'query': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'type': 'rw',
                        'style': 'flex',
                    },
                    'volume-state-attributes': {
                        'is-vserver-root': 'false',
                        'is-inconsistent': 'false',
                        'is-invalid': 'false',
                        'state': 'online',
                    },
                },
            },
            'desired-attributes': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'name': None,
                    },
                },
            },
        }
        self.client.send_iter_request.assert_called_once_with(
            'volume-get-iter', volume_get_iter_args)
        self.assertEqual(list(fake_client.VOLUME_NAMES), result)
    def test_list_flexvols_not_found(self):
        """An empty iterator response yields an empty list."""
        api_response = netapp_api.NaElement(
            fake_client.NO_RECORDS_RESPONSE)
        self.mock_object(self.client,
                         'send_iter_request',
                         return_value=api_response)
        result = self.client.list_flexvols()
        self.assertEqual([], result)
    @ddt.data(False, True)
    def test_get_flexvol(self, is_flexgroup):
        """SSC volume lookup builds the full query/desired-attributes pair.

        Runs once with flexvol fixtures and once with flexgroup fixtures;
        only the canned responses and the expected result differ.
        """
        if is_flexgroup:
            api_response = netapp_api.NaElement(
                fake_client.VOLUME_GET_ITER_SSC_RESPONSE_FLEXGROUP)
            volume_response = netapp_api.NaElement(
                fake_client.VOLUME_GET_ITER_SSC_RESPONSE_ATTR_FLEXGROUP)
        else:
            api_response = netapp_api.NaElement(
                fake_client.VOLUME_GET_ITER_SSC_RESPONSE)
            volume_response = netapp_api.NaElement(
                fake_client.VOLUME_GET_ITER_SSC_RESPONSE_ATTR)
        self.mock_object(self.client,
                         'send_iter_request',
                         return_value=api_response)
        mock_get_unique_vol = self.mock_object(
            self.client, 'get_unique_volume', return_value=volume_response)
        result = self.client.get_flexvol(
            flexvol_name=fake_client.VOLUME_NAMES[0],
            flexvol_path='/%s' % fake_client.VOLUME_NAMES[0])
        volume_get_iter_args = {
            'query': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'name': fake_client.VOLUME_NAMES[0],
                        'junction-path': '/' + fake_client.VOLUME_NAMES[0],
                        'type': 'rw',
                        'style': 'flex',
                    },
                    'volume-state-attributes': {
                        'is-vserver-root': 'false',
                        'is-inconsistent': 'false',
                        'is-invalid': 'false',
                        'state': 'online',
                    },
                },
            },
            'desired-attributes': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'name': None,
                        'owning-vserver-name': None,
                        'junction-path': None,
                        'type': None,
                        'aggr-list': {
                            'aggr-name': None,
                        },
                        'containing-aggregate-name': None,
                        'style-extended': None,
                    },
                    'volume-mirror-attributes': {
                        'is-data-protection-mirror': None,
                        'is-replica-volume': None,
                    },
                    'volume-space-attributes': {
                        'is-space-guarantee-enabled': None,
                        'space-guarantee': None,
                        'percentage-snapshot-reserve': None,
                        'size': None,
                    },
                    'volume-qos-attributes': {
                        'policy-group-name': None,
                    },
                    'volume-snapshot-attributes': {
                        'snapshot-policy': None,
                    },
                    'volume-language-attributes': {
                        'language-code': None,
                    }
                },
            },
        }
        self.client.send_iter_request.assert_called_once_with(
            'volume-get-iter', volume_get_iter_args)
        mock_get_unique_vol.assert_called_once_with(api_response)
        if is_flexgroup:
            self.assertEqual(fake_client.VOLUME_INFO_SSC_FLEXGROUP, result)
        else:
            self.assertEqual(fake_client.VOLUME_INFO_SSC, result)
def test_create_flexvol(self):
self.mock_object(self.client.connection, 'send_request')
self.client.create_flexvol(
fake_client.VOLUME_NAME, fake_client.VOLUME_AGGREGATE_NAME, 100)
volume_create_args = {
'containing-aggr-name': fake_client.VOLUME_AGGREGATE_NAME,
'size': '100g',
'volume': fake_client.VOLUME_NAME,
'volume-type': 'rw',
'junction-path': '/%s' % fake_client.VOLUME_NAME,
}
self.client.connection.send_request.assert_called_once_with(
'volume-create', volume_create_args)
    @ddt.data('dp', 'rw', None)
    def test_create_volume_with_extra_specs(self, volume_type):
        """Extra specs flow through; dp volumes skip snapshot/junction args."""
        self.mock_object(self.client, 'enable_flexvol_dedupe')
        self.mock_object(self.client, 'enable_flexvol_compression')
        self.mock_object(self.client.connection, 'send_request')
        self.client.create_flexvol(
            fake_client.VOLUME_NAME, fake_client.VOLUME_AGGREGATE_NAME, 100,
            space_guarantee_type='volume', language='en-US',
            snapshot_policy='default', dedupe_enabled=True,
            compression_enabled=True, snapshot_reserve=15,
            volume_type=volume_type)
        volume_create_args = {
            'containing-aggr-name': fake_client.VOLUME_AGGREGATE_NAME,
            'size': '100g',
            'volume': fake_client.VOLUME_NAME,
            'space-reserve': 'volume',
            'language-code': 'en-US',
            'volume-type': volume_type,
            'percentage-snapshot-reserve': '15',
        }
        # Non-dp volumes additionally get a snapshot policy and a junction.
        if volume_type != 'dp':
            volume_create_args['snapshot-policy'] = 'default'
            volume_create_args['junction-path'] = ('/%s' %
                                                   fake_client.VOLUME_NAME)
        self.client.connection.send_request.assert_called_with(
            'volume-create', volume_create_args)
        self.client.enable_flexvol_dedupe.assert_called_once_with(
            fake_client.VOLUME_NAME)
        self.client.enable_flexvol_compression.assert_called_once_with(
            fake_client.VOLUME_NAME)
def test_create_volume_async(self):
self.mock_object(self.client.connection, 'send_request')
self.client.create_volume_async(
fake_client.VOLUME_NAME, [fake_client.VOLUME_AGGREGATE_NAME], 100,
volume_type='dp')
volume_create_args = {
'aggr-list': [{'aggr-name': fake_client.VOLUME_AGGREGATE_NAME}],
'size': 100 * units.Gi,
'volume-name': fake_client.VOLUME_NAME,
'volume-type': 'dp'
}
self.client.connection.send_request.assert_called_once_with(
'volume-create-async', volume_create_args)
    @ddt.data('dp', 'rw', None)
    def test_create_volume_async_with_extra_specs(self, volume_type):
        """Async creation with extra specs; dp skips snapshot/junction args."""
        self.mock_object(self.client.connection, 'send_request')
        self.client.create_volume_async(
            fake_client.VOLUME_NAME, [fake_client.VOLUME_AGGREGATE_NAME], 100,
            space_guarantee_type='volume', language='en-US',
            snapshot_policy='default', snapshot_reserve=15,
            volume_type=volume_type)
        volume_create_args = {
            'aggr-list': [{'aggr-name': fake_client.VOLUME_AGGREGATE_NAME}],
            'size': 100 * units.Gi,
            'volume-name': fake_client.VOLUME_NAME,
            'space-reserve': 'volume',
            'language-code': 'en-US',
            'volume-type': volume_type,
            'percentage-snapshot-reserve': '15',
        }
        # Non-dp volumes additionally get a snapshot policy and a junction.
        if volume_type != 'dp':
            volume_create_args['snapshot-policy'] = 'default'
            volume_create_args['junction-path'] = ('/%s' %
                                                   fake_client.VOLUME_NAME)
        self.client.connection.send_request.assert_called_with(
            'volume-create-async', volume_create_args)
    def test_flexvol_exists(self):
        """Existence check queries volume-get-iter by name only."""
        api_response = netapp_api.NaElement(
            fake_client.VOLUME_GET_NAME_RESPONSE)
        self.mock_object(self.client,
                         'send_iter_request',
                         return_value=api_response)
        result = self.client.flexvol_exists(fake_client.VOLUME_NAME)
        volume_get_iter_args = {
            'query': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'name': fake_client.VOLUME_NAME
                    }
                }
            },
            'desired-attributes': {
                'volume-attributes': {
                    'volume-id-attributes': {
                        'name': None
                    }
                }
            }
        }
        self.client.send_iter_request.assert_has_calls([
            mock.call('volume-get-iter', volume_get_iter_args)])
        self.assertTrue(result)
    def test_flexvol_exists_not_found(self):
        """An empty response means the flexvol does not exist."""
        api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE)
        self.mock_object(self.client.connection,
                         'send_request',
                         return_value=api_response)
        self.assertFalse(self.client.flexvol_exists(fake_client.VOLUME_NAME))
def test_rename_flexvol(self):
self.mock_object(self.client.connection, 'send_request')
self.client.rename_flexvol(fake_client.VOLUME_NAME, 'new_name')
volume_rename_api_args = {
'volume': fake_client.VOLUME_NAME,
'new-volume-name': 'new_name',
}
self.client.connection.send_request.assert_called_once_with(
'volume-rename', volume_rename_api_args)
    def test_mount_flexvol_default_junction_path(self):
        """Omitting the junction path defaults it to '/<volume name>'."""
        self.mock_object(self.client.connection, 'send_request')
        self.client.mount_flexvol(fake_client.VOLUME_NAME)
        volume_mount_args = {
            'volume-name': fake_client.VOLUME_NAME,
            'junction-path': '/%s' % fake_client.VOLUME_NAME,
        }
        self.client.connection.send_request.assert_has_calls([
            mock.call('volume-mount', volume_mount_args)])
    def test_mount_flexvol(self):
        """An explicit junction path is passed through unchanged."""
        self.mock_object(self.client.connection, 'send_request')
        fake_path = '/fake_path'
        self.client.mount_flexvol(fake_client.VOLUME_NAME,
                                  junction_path=fake_path)
        volume_mount_args = {
            'volume-name': fake_client.VOLUME_NAME,
            'junction-path': fake_path,
        }
        self.client.connection.send_request.assert_has_calls([
            mock.call('volume-mount', volume_mount_args)])
def test_enable_volume_dedupe_async(self):
self.mock_object(self.client.connection, 'send_request')
self.client.enable_volume_dedupe_async(fake_client.VOLUME_NAME)
sis_enable_args = {'volume-name': fake_client.VOLUME_NAME}
self.client.connection.send_request.assert_called_once_with(
'sis-enable-async', sis_enable_args)
def test_disable_volume_dedupe_async(self):
self.mock_object(self.client.connection, 'send_request')
self.client.disable_volume_dedupe_async(fake_client.VOLUME_NAME)
sis_enable_args = {'volume-name': fake_client.VOLUME_NAME}
self.client.connection.send_request.assert_called_once_with(
'sis-disable-async', sis_enable_args)
    def test_enable_volume_compression_async(self):
        """Async compression enable sets enable-compression='true'."""
        self.mock_object(self.client.connection, 'send_request')
        self.client.enable_volume_compression_async(fake_client.VOLUME_NAME)
        sis_set_config_args = {
            'volume-name': fake_client.VOLUME_NAME,
            'enable-compression': 'true'
        }
        self.client.connection.send_request.assert_called_once_with(
            'sis-set-config-async', sis_set_config_args)
    def test_disable_volume_compression_async(self):
        """Async compression disable sets enable-compression='false'."""
        self.mock_object(self.client.connection, 'send_request')
        self.client.disable_volume_compression_async(fake_client.VOLUME_NAME)
        sis_set_config_args = {
            'volume-name': fake_client.VOLUME_NAME,
            'enable-compression': 'false'
        }
        self.client.connection.send_request.assert_called_once_with(
            'sis-set-config-async', sis_set_config_args)
    def test_enable_flexvol_dedupe(self):
        """Synchronous dedupe enable uses the '/vol/<name>' path form."""
        self.mock_object(self.client.connection, 'send_request')
        self.client.enable_flexvol_dedupe(fake_client.VOLUME_NAME)
        sis_enable_args = {'path': '/vol/%s' % fake_client.VOLUME_NAME}
        self.client.connection.send_request.assert_called_once_with(
            'sis-enable', sis_enable_args)
    def test_disable_flexvol_dedupe(self):
        """Synchronous dedupe disable uses the '/vol/<name>' path form."""
        self.mock_object(self.client.connection, 'send_request')
        self.client.disable_flexvol_dedupe(fake_client.VOLUME_NAME)
        sis_disable_args = {'path': '/vol/%s' % fake_client.VOLUME_NAME}
        self.client.connection.send_request.assert_called_once_with(
            'sis-disable', sis_disable_args)
    def test_enable_flexvol_compression(self):
        """Synchronous compression enable sets enable-compression='true'."""
        self.mock_object(self.client.connection, 'send_request')
        self.client.enable_flexvol_compression(fake_client.VOLUME_NAME)
        sis_set_config_args = {
            'path': '/vol/%s' % fake_client.VOLUME_NAME,
            'enable-compression': 'true'
        }
        self.client.connection.send_request.assert_called_once_with(
            'sis-set-config', sis_set_config_args)
    def test_disable_flexvol_compression(self):
        """Synchronous compression disable sets enable-compression='false'."""
        self.mock_object(self.client.connection, 'send_request')
        self.client.disable_flexvol_compression(fake_client.VOLUME_NAME)
        sis_set_config_args = {
            'path': '/vol/%s' % fake_client.VOLUME_NAME,
            'enable-compression': 'false'
        }
        self.client.connection.send_request.assert_called_once_with(
            'sis-set-config', sis_set_config_args)
    def test_get_flexvol_dedupe_info(self):
        """Dedupe info query requests state/compression/logical-data attrs."""
        api_response = netapp_api.NaElement(
            fake_client.SIS_GET_ITER_SSC_RESPONSE)
        self.mock_object(self.client,
                         'send_iter_request',
                         return_value=api_response)
        result = self.client.get_flexvol_dedupe_info(
            fake_client.VOLUME_NAMES[0])
        sis_get_iter_args = {
            'query': {
                'sis-status-info': {
                    'path': '/vol/%s' % fake_client.VOLUME_NAMES[0],
                },
            },
            'desired-attributes': {
                'sis-status-info': {
                    'state': None,
                    'is-compression-enabled': None,
                    'logical-data-size': None,
                    'logical-data-limit': None,
                },
            },
        }
        self.client.send_iter_request.assert_called_once_with(
            'sis-get-iter', sis_get_iter_args)
        self.assertEqual(fake_client.VOLUME_DEDUPE_INFO_SSC, result)
    def test_get_flexvol_dedupe_info_no_logical_data_values(self):
        """Missing logical-data fields fall back to the no-data defaults."""
        api_response = netapp_api.NaElement(
            fake_client.SIS_GET_ITER_SSC_NO_LOGICAL_DATA_RESPONSE)
        self.mock_object(self.client,
                         'send_iter_request',
                         return_value=api_response)
        result = self.client.get_flexvol_dedupe_info(
            fake_client.VOLUME_NAMES[0])
        self.assertEqual(fake_client.VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA,
                         result)
    def test_get_flexvol_dedupe_info_not_found(self):
        """An empty response falls back to the no-data defaults."""
        api_response = netapp_api.NaElement(
            fake_client.NO_RECORDS_RESPONSE)
        self.mock_object(self.client,
                         'send_iter_request',
                         return_value=api_response)
        result = self.client.get_flexvol_dedupe_info(
            fake_client.VOLUME_NAMES[0])
        self.assertEqual(fake_client.VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA,
                         result)
    def test_get_flexvol_dedupe_info_api_error(self):
        """A generic API error is swallowed; defaults are returned."""
        self.mock_object(self.client,
                         'send_iter_request',
                         side_effect=self._mock_api_error())
        result = self.client.get_flexvol_dedupe_info(
            fake_client.VOLUME_NAMES[0])
        self.assertEqual(fake_client.VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA,
                         result)
    def test_get_flexvol_dedupe_info_api_insufficient_privileges(self):
        """EAPIPRIVILEGE is swallowed; defaults are returned."""
        api_error = netapp_api.NaApiError(code=netapp_api.EAPIPRIVILEGE)
        self.mock_object(self.client,
                         'send_iter_request',
                         side_effect=api_error)
        result = self.client.get_flexvol_dedupe_info(
            fake_client.VOLUME_NAMES[0])
        self.assertEqual(fake_client.VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA,
                         result)
    def test_get_flexvol_dedupe_used_percent(self):
        """Used percent is derived from dedupe info + clone split status."""
        self.client.features.add_feature('CLONE_SPLIT_STATUS')
        mock_get_flexvol_dedupe_info = self.mock_object(
            self.client, 'get_flexvol_dedupe_info',
            return_value=fake_client.VOLUME_DEDUPE_INFO_SSC)
        mock_get_clone_split_info = self.mock_object(
            self.client, 'get_clone_split_info',
            return_value=fake_client.VOLUME_CLONE_SPLIT_STATUS)
        result = self.client.get_flexvol_dedupe_used_percent(
            fake_client.VOLUME_NAMES[0])
        # 75.0 is what the canned fixtures work out to.
        self.assertEqual(75.0, result)
        mock_get_flexvol_dedupe_info.assert_called_once_with(
            fake_client.VOLUME_NAMES[0])
        mock_get_clone_split_info.assert_called_once_with(
            fake_client.VOLUME_NAMES[0])
    def test_get_flexvol_dedupe_used_percent_not_supported(self):
        """Without CLONE_SPLIT_STATUS, 0.0 is returned with no API calls."""
        self.client.features.add_feature('CLONE_SPLIT_STATUS', supported=False)
        mock_get_flexvol_dedupe_info = self.mock_object(
            self.client, 'get_flexvol_dedupe_info',
            return_value=fake_client.VOLUME_DEDUPE_INFO_SSC)
        mock_get_clone_split_info = self.mock_object(
            self.client, 'get_clone_split_info',
            return_value=fake_client.VOLUME_CLONE_SPLIT_STATUS)
        result = self.client.get_flexvol_dedupe_used_percent(
            fake_client.VOLUME_NAMES[0])
        self.assertEqual(0.0, result)
        self.assertFalse(mock_get_flexvol_dedupe_info.called)
        self.assertFalse(mock_get_clone_split_info.called)
    def test_get_clone_split_info(self):
        """clone-split-status is queried by volume name and parsed."""
        api_response = netapp_api.NaElement(
            fake_client.CLONE_SPLIT_STATUS_RESPONSE)
        self.mock_object(self.client.connection,
                         'send_request',
                         return_value=api_response)
        result = self.client.get_clone_split_info(fake_client.VOLUME_NAMES[0])
        self.assertEqual(fake_client.VOLUME_CLONE_SPLIT_STATUS, result)
        self.client.connection.send_request.assert_called_once_with(
            'clone-split-status', {'volume-name': fake_client.VOLUME_NAMES[0]})
    def test_get_clone_split_info_api_error(self):
        """API errors are swallowed and zeroed defaults are returned."""
        self.mock_object(self.client.connection,
                         'send_request',
                         side_effect=self._mock_api_error())
        result = self.client.get_clone_split_info(fake_client.VOLUME_NAMES[0])
        expected = {'unsplit-size': 0, 'unsplit-clone-count': 0}
        self.assertEqual(expected, result)
    def test_get_clone_split_info_no_data(self):
        """A data-less response also yields the zeroed defaults."""
        api_response = netapp_api.NaElement(
            fake_client.CLONE_SPLIT_STATUS_NO_DATA_RESPONSE)
        self.mock_object(self.client.connection,
                         'send_request',
                         return_value=api_response)
        result = self.client.get_clone_split_info(fake_client.VOLUME_NAMES[0])
        expected = {'unsplit-size': 0, 'unsplit-clone-count': 0}
        self.assertEqual(expected, result)
    def test_is_flexvol_mirrored(self):
        """A snapmirrored DP relationship marks the flexvol as mirrored."""
        api_response = netapp_api.NaElement(
            fake_client.SNAPMIRROR_GET_ITER_RESPONSE)
        self.mock_object(self.client,
                         'send_iter_request',
                         return_value=api_response)
        result = self.client.is_flexvol_mirrored(
            fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME)
        snapmirror_get_iter_args = {
            'query': {
                'snapmirror-info': {
                    'source-vserver': fake_client.VOLUME_VSERVER_NAME,
                    'source-volume': fake_client.VOLUME_NAMES[0],
                    'mirror-state': 'snapmirrored',
                    'relationship-type': 'data_protection',
                },
            },
            'desired-attributes': {
                'snapmirror-info': None,
            },
        }
        self.client.send_iter_request.assert_called_once_with(
            'snapmirror-get-iter', snapmirror_get_iter_args)
        self.assertTrue(result)
    def test_is_flexvol_mirrored_not_mirrored(self):
        """No matching snapmirror relationship means not mirrored."""
        api_response = netapp_api.NaElement(
            fake_client.NO_RECORDS_RESPONSE)
        self.mock_object(self.client.connection,
                         'send_request',
                         return_value=api_response)
        result = self.client.is_flexvol_mirrored(
            fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME)
        self.assertFalse(result)
    def test_is_flexvol_mirrored_api_error(self):
        """API errors degrade to 'not mirrored' rather than raising."""
        self.mock_object(self.client.connection,
                         'send_request',
                         side_effect=self._mock_api_error())
        result = self.client.is_flexvol_mirrored(
            fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME)
        self.assertFalse(result)
    def test_is_flexvol_encrypted(self):
        """Encryption check queries encrypt='true' for the named volume."""
        api_response = netapp_api.NaElement(
            fake_client.VOLUME_GET_ITER_ENCRYPTION_SSC_RESPONSE)
        self.client.features.add_feature('FLEXVOL_ENCRYPTION')
        self.mock_object(self.client,
                         'send_iter_request',
                         return_value=api_response)
        result = self.client.is_flexvol_encrypted(
            fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME)
        # NOTE(review): the call above uses VOLUME_NAMES[0] while the
        # expected query uses VOLUME_NAME — presumably the fixtures make
        # these the same value; verify against fake_client.
        volume_get_iter_args = {
            'query': {
                'volume-attributes': {
                    'encrypt': 'true',
                    'volume-id-attributes': {
                        'name': fake_client.VOLUME_NAME,
                        'owning-vserver-name': fake_client.VOLUME_VSERVER_NAME,
                    }
                }
            },
            'desired-attributes': {
                'volume-attributes': {
                    'encrypt': None,
                }
            }
        }
        self.client.send_iter_request.assert_called_once_with(
            'volume-get-iter', volume_get_iter_args)
        self.assertTrue(result)
    def test_is_flexvol_encrypted_unsupported_version(self):
        """Without FLEXVOL_ENCRYPTION support, always report False."""
        self.client.features.add_feature('FLEXVOL_ENCRYPTION', supported=False)
        result = self.client.is_flexvol_encrypted(
            fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME)
        self.assertFalse(result)
    def test_is_flexvol_encrypted_no_records_found(self):
        """No matching records means the volume is not encrypted."""
        api_response = netapp_api.NaElement(
            fake_client.NO_RECORDS_RESPONSE)
        self.mock_object(self.client.connection,
                         'send_request',
                         return_value=api_response)
        result = self.client.is_flexvol_encrypted(
            fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME)
        self.assertFalse(result)
    def test_is_flexvol_encrypted_api_error(self):
        """API errors degrade to 'not encrypted' rather than raising."""
        self.mock_object(self.client.connection,
                         'send_request',
                         side_effect=self._mock_api_error())
        result = self.client.is_flexvol_encrypted(
            fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME)
        self.assertFalse(result)
    def test_get_aggregates(self):
        """Bare _get_aggregates issues an unfiltered aggr-get-iter."""
        api_response = netapp_api.NaElement(
            fake_client.AGGR_GET_ITER_RESPONSE)
        self.mock_object(self.client.connection,
                         'send_request',
                         return_value=api_response)
        result = self.client._get_aggregates()
        self.client.connection.send_request.assert_has_calls([
            mock.call('aggr-get-iter', {}, enable_tunneling=False)])
        # Compare serialized XML since NaElement lacks value equality.
        self.assertListEqual(
            [aggr.to_string() for aggr in api_response.get_child_by_name(
                'attributes-list').get_children()],
            [aggr.to_string() for aggr in result])
    def test_get_aggregates_with_filters(self):
        """Aggregate names and desired attributes are forwarded as filters."""
        api_response = netapp_api.NaElement(
            fake_client.AGGR_GET_SPACE_RESPONSE)
        self.mock_object(self.client.connection,
                         'send_request',
                         return_value=api_response)
        desired_attributes = {
            'aggr-attributes': {
                'aggregate-name': None,
                'aggr-space-attributes': {
                    'size-total': None,
                    'size-available': None,
                }
            }
        }
        result = self.client._get_aggregates(
            aggregate_names=fake_client.VOLUME_AGGREGATE_NAMES,
            desired_attributes=desired_attributes)
        aggr_get_iter_args = {
            'query': {
                'aggr-attributes': {
                    # Multiple names are OR'ed with the ZAPI '|' syntax.
                    'aggregate-name': '|'.join(
                        fake_client.VOLUME_AGGREGATE_NAMES),
                }
            },
            'desired-attributes': desired_attributes
        }
        self.client.connection.send_request.assert_has_calls([
            mock.call('aggr-get-iter', aggr_get_iter_args,
                      enable_tunneling=False)])
        self.assertListEqual(
            [aggr.to_string() for aggr in api_response.get_child_by_name(
                'attributes-list').get_children()],
            [aggr.to_string() for aggr in result])
    def test_get_aggregates_not_found(self):
        """No records yields an empty list."""
        api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE)
        self.mock_object(self.client.connection,
                         'send_request',
                         return_value=api_response)
        result = self.client._get_aggregates()
        self.client.connection.send_request.assert_has_calls([
            mock.call('aggr-get-iter', {}, enable_tunneling=False)])
        self.assertListEqual([], result)
    def test_get_node_for_aggregate(self):
        """Home node is read from the aggregate's ownership attributes."""
        api_response = netapp_api.NaElement(
            fake_client.AGGR_GET_NODE_RESPONSE).get_child_by_name(
            'attributes-list').get_children()
        self.mock_object(self.client,
                         '_get_aggregates',
                         return_value=api_response)
        result = self.client.get_node_for_aggregate(
            fake_client.VOLUME_AGGREGATE_NAME)
        desired_attributes = {
            'aggr-attributes': {
                'aggregate-name': None,
                'aggr-ownership-attributes': {
                    'home-name': None,
                },
            },
        }
        self.client._get_aggregates.assert_has_calls([
            mock.call(
                aggregate_names=[fake_client.VOLUME_AGGREGATE_NAME],
                desired_attributes=desired_attributes)])
        self.assertEqual(fake_client.NODE_NAME, result)
    def test_get_node_for_aggregate_none_requested(self):
        """A falsy aggregate name short-circuits to None."""
        result = self.client.get_node_for_aggregate(None)
        self.assertIsNone(result)
    def test_get_node_for_aggregate_api_not_found(self):
        """EAPINOTFOUND is treated as 'no node', returning None."""
        api_error = self._mock_api_error(netapp_api.EAPINOTFOUND)
        self.mock_object(self.client.connection,
                         'send_request',
                         side_effect=api_error)
        result = self.client.get_node_for_aggregate(
            fake_client.VOLUME_AGGREGATE_NAME)
        self.assertIsNone(result)
def test_get_node_for_aggregate_api_error(self):
self.mock_object(self.client.connection,
'send_request',
self._mock_api_error())
self.assertRaises(netapp_api.NaApiError,
self.client.get_node_for_aggregate,
fake_client.VOLUME_AGGREGATE_NAME)
    def test_get_node_for_aggregate_not_found(self):
        """An empty aggr-get-iter response yields None."""
        api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE)
        self.mock_object(self.client.connection,
                         'send_request',
                         return_value=api_response)
        result = self.client.get_node_for_aggregate(
            fake_client.VOLUME_AGGREGATE_NAME)
        self.assertIsNone(result)
    def test_get_aggregate_none_specified(self):
        """An empty aggregate name short-circuits to an empty dict."""
        result = self.client.get_aggregate('')
        self.assertEqual({}, result)
def test_get_aggregate(self):
api_response = netapp_api.NaElement(
fake_client.AGGR_GET_ITER_SSC_RESPONSE).get_child_by_name(
'attributes-list').get_children()
self.mock_object(self.client,
'_get_aggregates',
return_value=api_response)
result = self.client.get_aggregate(fake_client.VOLUME_AGGREGATE_NAME)
desired_attributes = {
'aggr-attributes': {
'aggregate-name': None,
'aggr-raid-attributes': {
'raid-type': None,
'is-hybrid': None,
},
'aggr-ownership-attributes': {
'home-name': None,
},
},
}
self.client._get_aggregates.assert_has_calls([
mock.call(
aggregate_names=[fake_client.VOLUME_AGGREGATE_NAME],
desired_attributes=desired_attributes)])
expected = {
'name': fake_client.VOLUME_AGGREGATE_NAME,
'raid-type': 'raid_dp',
'is-hybrid': True,
'node-name': fake_client.NODE_NAME,
}
self.assertEqual(expected, result)
def test_get_aggregate_not_found(self):
api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE)
self.mock_object(self.client.connection,
'send_request',
return_value=api_response)
result = self.client.get_aggregate(fake_client.VOLUME_AGGREGATE_NAME)
self.assertEqual({}, result)
    def test_get_aggregate_api_error(self):
        """get_aggregate swallows API errors and returns an empty dict."""
        self.mock_object(self.client.connection,
                         'send_request',
                         side_effect=self._mock_api_error())
        result = self.client.get_aggregate(fake_client.VOLUME_AGGREGATE_NAME)
        self.assertEqual({}, result)
    def test_get_aggregate_api_not_found(self):
        """An EAPINOTFOUND error yields an empty dict from get_aggregate."""
        api_error = netapp_api.NaApiError(code=netapp_api.EAPINOTFOUND)
        self.mock_object(self.client.connection,
                         'send_iter_request',
                         side_effect=api_error)
        result = self.client.get_aggregate(fake_client.VOLUME_AGGREGATE_NAME)
        self.assertEqual({}, result)
    @ddt.data({'types': {'FCAL'}, 'expected': ['FCAL']},
              {'types': {'SATA', 'SSD'}, 'expected': ['SATA', 'SSD']},)
    @ddt.unpack
    def test_get_aggregate_disk_types(self, types, expected):
        """The disk-type set from the helper is returned as a list."""
        mock_get_aggregate_disk_types = self.mock_object(
            self.client, '_get_aggregate_disk_types', return_value=types)
        result = self.client.get_aggregate_disk_types(
            fake_client.VOLUME_AGGREGATE_NAME)
        # Order is not guaranteed when converting a set to a list.
        self.assertCountEqual(expected, result)
        mock_get_aggregate_disk_types.assert_called_once_with(
            fake_client.VOLUME_AGGREGATE_NAME)
    def test_get_aggregate_disk_types_not_found(self):
        """An empty disk-type set maps to a None result."""
        mock_get_aggregate_disk_types = self.mock_object(
            self.client, '_get_aggregate_disk_types', return_value=set())
        result = self.client.get_aggregate_disk_types(
            fake_client.VOLUME_AGGREGATE_NAME)
        self.assertIsNone(result)
        mock_get_aggregate_disk_types.assert_called_once_with(
            fake_client.VOLUME_AGGREGATE_NAME)
    def test_get_aggregate_disk_types_api_not_found(self):
        """An EAPINOTFOUND error yields None from get_aggregate_disk_types."""
        api_error = netapp_api.NaApiError(code=netapp_api.EAPINOTFOUND)
        self.mock_object(self.client,
                         'send_iter_request',
                         side_effect=api_error)
        result = self.client.get_aggregate_disk_types(
            fake_client.VOLUME_AGGREGATE_NAME)
        self.assertIsNone(result)
    def test_get_aggregate_disk_types_shared(self):
        """With ADP enabled, shared and non-shared disk types are merged."""
        self.client.features.add_feature('ADVANCED_DISK_PARTITIONING')
        # First call returns non-shared types, second call shared ones.
        mock_get_aggregate_disk_types = self.mock_object(
            self.client, '_get_aggregate_disk_types',
            side_effect=[set(['SSD']), set(['SATA'])])
        result = self.client.get_aggregate_disk_types(
            fake_client.VOLUME_AGGREGATE_NAME)
        self.assertIsInstance(result, list)
        self.assertCountEqual(['SATA', 'SSD'], result)
        mock_get_aggregate_disk_types.assert_has_calls([
            mock.call(fake_client.VOLUME_AGGREGATE_NAME),
            mock.call(fake_client.VOLUME_AGGREGATE_NAME, shared=True),
        ])
    def test__get_aggregate_disk_types(self):
        """The helper issues storage-disk-get-iter with the right query."""
        api_response = netapp_api.NaElement(
            fake_client.STORAGE_DISK_GET_ITER_RESPONSE)
        self.mock_object(self.client,
                         'send_iter_request',
                         return_value=api_response)
        result = self.client._get_aggregate_disk_types(
            fake_client.VOLUME_AGGREGATE_NAME)
        # Non-shared disks are matched via disk-aggregate-info.
        storage_disk_get_iter_args = {
            'query': {
                'storage-disk-info': {
                    'disk-raid-info': {
                        'disk-aggregate-info': {
                            'aggregate-name':
                            fake_client.VOLUME_AGGREGATE_NAME,
                        },
                    },
                },
            },
            'desired-attributes': {
                'storage-disk-info': {
                    'disk-raid-info': {
                        'effective-disk-type': None,
                    },
                },
            },
        }
        self.client.send_iter_request.assert_called_once_with(
            'storage-disk-get-iter', storage_disk_get_iter_args,
            enable_tunneling=False)
        expected = set(fake_client.AGGREGATE_DISK_TYPES)
        self.assertEqual(expected, result)
    def test__get_aggregate_disk_types_shared(self):
        """With shared=True the query matches via the shared-aggregate list."""
        api_response = netapp_api.NaElement(
            fake_client.STORAGE_DISK_GET_ITER_RESPONSE)
        self.mock_object(self.client,
                         'send_iter_request',
                         return_value=api_response)
        result = self.client._get_aggregate_disk_types(
            fake_client.VOLUME_AGGREGATE_NAME, shared=True)
        # Shared (partitioned) disks are matched via disk-shared-info.
        storage_disk_get_iter_args = {
            'query': {
                'storage-disk-info': {
                    'disk-raid-info': {
                        'disk-shared-info': {
                            'aggregate-list': {
                                'shared-aggregate-info': {
                                    'aggregate-name':
                                    fake_client.VOLUME_AGGREGATE_NAME,
                                },
                            },
                        },
                    },
                },
            },
            'desired-attributes': {
                'storage-disk-info': {
                    'disk-raid-info': {
                        'effective-disk-type': None,
                    },
                },
            },
        }
        self.client.send_iter_request.assert_called_once_with(
            'storage-disk-get-iter', storage_disk_get_iter_args,
            enable_tunneling=False)
        expected = set(fake_client.AGGREGATE_DISK_TYPES)
        self.assertEqual(expected, result)
    def test__get_aggregate_disk_types_not_found(self):
        """No matching disks results in an empty set."""
        api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE)
        self.mock_object(self.client,
                         'send_iter_request',
                         return_value=api_response)
        result = self.client._get_aggregate_disk_types(
            fake_client.VOLUME_AGGREGATE_NAME)
        self.assertEqual(set(), result)
    def test__get_aggregate_disk_types_api_error(self):
        """An API error is swallowed and an empty set is returned."""
        self.mock_object(self.client,
                         'send_iter_request',
                         side_effect=self._mock_api_error())
        result = self.client._get_aggregate_disk_types(
            fake_client.VOLUME_AGGREGATE_NAME)
        self.assertEqual(set([]), result)
    def test_get_aggregate_capacities(self):
        """Capacities are collected per aggregate and keyed by name."""
        aggr1_capacities = {
            'percent-used': 50,
            'size-available': 100.0,
            'size-total': 200.0,
        }
        aggr2_capacities = {
            'percent-used': 75,
            'size-available': 125.0,
            'size-total': 500.0,
        }
        mock_get_aggregate_capacity = self.mock_object(
            self.client, 'get_aggregate_capacity',
            side_effect=[aggr1_capacities, aggr2_capacities])
        result = self.client.get_aggregate_capacities(['aggr1', 'aggr2'])
        expected = {
            'aggr1': aggr1_capacities,
            'aggr2': aggr2_capacities,
        }
        self.assertEqual(expected, result)
        mock_get_aggregate_capacity.assert_has_calls([
            mock.call('aggr1'),
            mock.call('aggr2'),
        ])
    def test_get_aggregate_capacities_not_found(self):
        """Aggregates with no capacity info still appear with empty dicts."""
        mock_get_aggregate_capacity = self.mock_object(
            self.client, 'get_aggregate_capacity', side_effect=[{}, {}])
        result = self.client.get_aggregate_capacities(['aggr1', 'aggr2'])
        expected = {
            'aggr1': {},
            'aggr2': {},
        }
        self.assertEqual(expected, result)
        mock_get_aggregate_capacity.assert_has_calls([
            mock.call('aggr1'),
            mock.call('aggr2'),
        ])
def test_get_aggregate_capacities_not_list(self):
result = self.client.get_aggregate_capacities('aggr1')
self.assertEqual({}, result)
    def test_get_aggregate_capacity(self):
        """Space attributes are requested and parsed into float capacities."""
        api_response = netapp_api.NaElement(
            fake_client.AGGR_GET_ITER_CAPACITY_RESPONSE).get_child_by_name(
            'attributes-list').get_children()
        self.mock_object(self.client,
                         '_get_aggregates',
                         return_value=api_response)
        result = self.client.get_aggregate_capacity(
            fake_client.VOLUME_AGGREGATE_NAME)
        desired_attributes = {
            'aggr-attributes': {
                'aggr-space-attributes': {
                    'percent-used-capacity': None,
                    'size-available': None,
                    'size-total': None,
                },
            },
        }
        self.client._get_aggregates.assert_has_calls([
            mock.call(
                aggregate_names=[fake_client.VOLUME_AGGREGATE_NAME],
                desired_attributes=desired_attributes)])
        # All returned capacity figures are floats.
        expected = {
            'percent-used': float(fake_client.AGGR_USED_PERCENT),
            'size-available': float(fake_client.AGGR_SIZE_AVAILABLE),
            'size-total': float(fake_client.AGGR_SIZE_TOTAL),
        }
        self.assertEqual(expected, result)
    def test_get_aggregate_capacity_not_found(self):
        """No records means an empty capacity dict."""
        api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE)
        self.mock_object(self.client.connection,
                         'send_request',
                         return_value=api_response)
        result = self.client.get_aggregate_capacity(
            fake_client.VOLUME_AGGREGATE_NAME)
        self.assertEqual({}, result)
    def test_get_aggregate_capacity_api_error(self):
        """An API error is swallowed and an empty capacity dict returned."""
        self.mock_object(self.client.connection,
                         'send_request',
                         side_effect=self._mock_api_error())
        result = self.client.get_aggregate_capacity(
            fake_client.VOLUME_AGGREGATE_NAME)
        self.assertEqual({}, result)
    def test_get_aggregate_capacity_api_not_found(self):
        """An EAPINOTFOUND error yields an empty capacity dict."""
        api_error = netapp_api.NaApiError(code=netapp_api.EAPINOTFOUND)
        self.mock_object(
            self.client.connection, 'send_request', side_effect=api_error)
        result = self.client.get_aggregate_capacity(
            fake_client.VOLUME_AGGREGATE_NAME)
        self.assertEqual({}, result)
    def test_get_performance_instance_uuids(self):
        """Instance UUIDs are queried per node with a wildcard uuid filter."""
        self.mock_send_request.return_value = netapp_api.NaElement(
            fake_client.PERF_OBJECT_INSTANCE_LIST_INFO_ITER_RESPONSE)
        result = self.client.get_performance_instance_uuids(
            'system', fake_client.NODE_NAME)
        expected = [fake_client.NODE_NAME + ':kernel:system']
        self.assertEqual(expected, result)
        perf_object_instance_list_info_iter_args = {
            'objectname': 'system',
            'query': {
                'instance-info': {
                    'uuid': fake_client.NODE_NAME + ':*',
                }
            }
        }
        self.mock_send_request.assert_called_once_with(
            'perf-object-instance-list-info-iter',
            perf_object_instance_list_info_iter_args, enable_tunneling=False)
    def test_get_performance_counters(self):
        """Counters are fetched per instance UUID and annotated per node."""
        self.mock_send_request.return_value = netapp_api.NaElement(
            fake_client.PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_CMODE)
        instance_uuids = [
            fake_client.NODE_NAMES[0] + ':kernel:system',
            fake_client.NODE_NAMES[1] + ':kernel:system',
        ]
        counter_names = ['avg_processor_busy']
        result = self.client.get_performance_counters('system',
                                                      instance_uuids,
                                                      counter_names)
        # One entry per instance, carrying node name and timestamp.
        expected = [
            {
                'avg_processor_busy': '5674745133134',
                'instance-name': 'system',
                'instance-uuid': instance_uuids[0],
                'node-name': fake_client.NODE_NAMES[0],
                'timestamp': '1453412013',
            }, {
                'avg_processor_busy': '4077649009234',
                'instance-name': 'system',
                'instance-uuid': instance_uuids[1],
                'node-name': fake_client.NODE_NAMES[1],
                'timestamp': '1453412013'
            },
        ]
        self.assertEqual(expected, result)
        perf_object_get_instances_args = {
            'objectname': 'system',
            'instance-uuids': [
                {'instance-uuid': instance_uuid}
                for instance_uuid in instance_uuids
            ],
            'counters': [
                {'counter': counter} for counter in counter_names
            ],
        }
        self.mock_send_request.assert_called_once_with(
            'perf-object-get-instances', perf_object_get_instances_args,
            enable_tunneling=False)
def test_check_iscsi_initiator_exists_when_no_initiator_exists(self):
self.connection.invoke_successfully = mock.Mock(
side_effect=netapp_api.NaApiError)
initiator = fake_client.INITIATOR_IQN
initiator_exists = self.client.check_iscsi_initiator_exists(initiator)
self.assertFalse(initiator_exists)
def test_check_iscsi_initiator_exists_when_initiator_exists(self):
self.connection.invoke_successfully = mock.Mock()
initiator = fake_client.INITIATOR_IQN
initiator_exists = self.client.check_iscsi_initiator_exists(initiator)
self.assertTrue(initiator_exists)
    def test_set_iscsi_chap_authentication_no_previous_initiator(self):
        """A new initiator triggers 'iscsi security create' over SSH."""
        self.connection.invoke_successfully = mock.Mock()
        self.mock_object(self.client, 'check_iscsi_initiator_exists',
                         return_value=False)
        ssh = mock.Mock(paramiko.SSHClient)
        sshpool = mock.Mock(ssh_utils.SSHPool)
        self.client.ssh_client.ssh_pool = sshpool
        self.mock_object(self.client.ssh_client, 'execute_command_with_prompt')
        # sshpool.item() returns the same mock, so these configure the
        # context manager the production code enters.
        sshpool.item().__enter__ = mock.Mock(return_value=ssh)
        sshpool.item().__exit__ = mock.Mock(return_value=False)
        self.client.set_iscsi_chap_authentication(fake_client.INITIATOR_IQN,
                                                  fake_client.USER_NAME,
                                                  fake_client.PASSWORD)
        command = ('iscsi security create -vserver fake_vserver '
                   '-initiator-name iqn.2015-06.com.netapp:fake_iqn '
                   '-auth-type CHAP -user-name fake_user')
        self.client.ssh_client.execute_command_with_prompt.assert_has_calls(
            [mock.call(ssh, command, 'Password:', fake_client.PASSWORD)]
        )
    def test_set_iscsi_chap_authentication_with_preexisting_initiator(self):
        """An existing initiator triggers 'iscsi security modify' over SSH."""
        self.connection.invoke_successfully = mock.Mock()
        self.mock_object(self.client, 'check_iscsi_initiator_exists',
                         return_value=True)
        ssh = mock.Mock(paramiko.SSHClient)
        sshpool = mock.Mock(ssh_utils.SSHPool)
        self.client.ssh_client.ssh_pool = sshpool
        self.mock_object(self.client.ssh_client, 'execute_command_with_prompt')
        sshpool.item().__enter__ = mock.Mock(return_value=ssh)
        sshpool.item().__exit__ = mock.Mock(return_value=False)
        self.client.set_iscsi_chap_authentication(fake_client.INITIATOR_IQN,
                                                  fake_client.USER_NAME,
                                                  fake_client.PASSWORD)
        command = ('iscsi security modify -vserver fake_vserver '
                   '-initiator-name iqn.2015-06.com.netapp:fake_iqn '
                   '-auth-type CHAP -user-name fake_user')
        self.client.ssh_client.execute_command_with_prompt.assert_has_calls(
            [mock.call(ssh, command, 'Password:', fake_client.PASSWORD)]
        )
    def test_set_iscsi_chap_authentication_with_ssh_exception(self):
        """An SSH connection failure surfaces as VolumeBackendAPIException."""
        self.connection.invoke_successfully = mock.Mock()
        self.mock_object(self.client, 'check_iscsi_initiator_exists',
                         return_value=True)
        ssh = mock.Mock(paramiko.SSHClient)
        sshpool = mock.Mock(ssh_utils.SSHPool)
        self.client.ssh_client.ssh_pool = sshpool
        sshpool.item().__enter__ = mock.Mock(return_value=ssh)
        # Entering the SSH context raises, simulating a broken connection.
        sshpool.item().__enter__.side_effect = paramiko.SSHException(
            'Connection Failure')
        sshpool.item().__exit__ = mock.Mock(return_value=False)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.client.set_iscsi_chap_authentication,
                          fake_client.INITIATOR_IQN,
                          fake_client.USER_NAME,
                          fake_client.PASSWORD)
    def test_get_snapshot_if_snapshot_present_not_busy(self):
        """A present, non-busy snapshot is parsed with busy=False."""
        expected_vol_name = fake.SNAPSHOT['volume_id']
        expected_snapshot_name = fake.SNAPSHOT['name']
        response = netapp_api.NaElement(
            fake_client.SNAPSHOT_INFO_FOR_PRESENT_NOT_BUSY_SNAPSHOT_CMODE)
        self.mock_send_request.return_value = response
        snapshot = self.client.get_snapshot(expected_vol_name,
                                            expected_snapshot_name)
        self.assertEqual(expected_vol_name, snapshot['volume'])
        self.assertEqual(expected_snapshot_name, snapshot['name'])
        self.assertEqual(set([]), snapshot['owners'])
        self.assertFalse(snapshot['busy'])
    def test_get_snapshot_if_snapshot_present_busy(self):
        """A present, busy snapshot is parsed with busy=True."""
        expected_vol_name = fake.SNAPSHOT['volume_id']
        expected_snapshot_name = fake.SNAPSHOT['name']
        response = netapp_api.NaElement(
            fake_client.SNAPSHOT_INFO_FOR_PRESENT_BUSY_SNAPSHOT_CMODE)
        self.mock_send_request.return_value = response
        snapshot = self.client.get_snapshot(expected_vol_name,
                                            expected_snapshot_name)
        self.assertEqual(expected_vol_name, snapshot['volume'])
        self.assertEqual(expected_snapshot_name, snapshot['name'])
        self.assertEqual(set([]), snapshot['owners'])
        self.assertTrue(snapshot['busy'])
    def test_get_snapshot_if_snapshot_not_present(self):
        """A missing snapshot raises SnapshotNotFound."""
        expected_vol_name = fake.SNAPSHOT['volume_id']
        expected_snapshot_name = fake.SNAPSHOT['name']
        response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE)
        self.mock_send_request.return_value = response
        self.assertRaises(exception.SnapshotNotFound, self.client.get_snapshot,
                          expected_vol_name, expected_snapshot_name)
    def test_create_cluster_peer(self):
        """cluster-peer-create is called with addresses and credentials."""
        self.mock_object(self.client.connection, 'send_request')
        self.client.create_cluster_peer(['fake_address_1', 'fake_address_2'],
                                        'fake_user', 'fake_password',
                                        'fake_passphrase')
        cluster_peer_create_args = {
            'peer-addresses': [
                {'remote-inet-address': 'fake_address_1'},
                {'remote-inet-address': 'fake_address_2'},
            ],
            'user-name': 'fake_user',
            'password': 'fake_password',
            'passphrase': 'fake_passphrase',
        }
        self.client.connection.send_request.assert_has_calls([
            mock.call('cluster-peer-create', cluster_peer_create_args)])
    def test_get_cluster_peers(self):
        """An unfiltered query returns parsed cluster-peer records."""
        api_response = netapp_api.NaElement(
            fake_client.CLUSTER_PEER_GET_ITER_RESPONSE)
        self.mock_object(self.client,
                         'send_iter_request',
                         return_value=api_response)
        result = self.client.get_cluster_peers()
        # No filter supplied, so the iterator args are empty.
        cluster_peer_get_iter_args = {}
        self.client.send_iter_request.assert_has_calls([
            mock.call('cluster-peer-get-iter', cluster_peer_get_iter_args)])
        expected = [{
            'active-addresses': [
                fake_client.CLUSTER_ADDRESS_1,
                fake_client.CLUSTER_ADDRESS_2
            ],
            'availability': 'available',
            'cluster-name': fake_client.CLUSTER_NAME,
            'cluster-uuid': 'fake_uuid',
            'peer-addresses': [fake_client.CLUSTER_ADDRESS_1],
            'remote-cluster-name': fake_client.REMOTE_CLUSTER_NAME,
            'serial-number': 'fake_serial_number',
            'timeout': '60',
        }]
        self.assertEqual(expected, result)
    def test_get_cluster_peers_single(self):
        """Specifying a remote cluster name adds a query filter."""
        api_response = netapp_api.NaElement(
            fake_client.CLUSTER_PEER_GET_ITER_RESPONSE)
        self.mock_object(self.client,
                         'send_iter_request',
                         return_value=api_response)
        self.client.get_cluster_peers(
            remote_cluster_name=fake_client.CLUSTER_NAME)
        cluster_peer_get_iter_args = {
            'query': {
                'cluster-peer-info': {
                    'remote-cluster-name': fake_client.CLUSTER_NAME,
                }
            },
        }
        self.client.send_iter_request.assert_has_calls([
            mock.call('cluster-peer-get-iter', cluster_peer_get_iter_args)])
    def test_get_cluster_peers_not_found(self):
        """No matching peers results in an empty list."""
        api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE)
        self.mock_object(self.client,
                         'send_iter_request',
                         return_value=api_response)
        result = self.client.get_cluster_peers(
            remote_cluster_name=fake_client.CLUSTER_NAME)
        self.assertEqual([], result)
        self.assertTrue(self.client.send_iter_request.called)
def test_delete_cluster_peer(self):
self.mock_object(self.client.connection, 'send_request')
self.client.delete_cluster_peer(fake_client.CLUSTER_NAME)
cluster_peer_delete_args = {'cluster-name': fake_client.CLUSTER_NAME}
self.client.connection.send_request.assert_has_calls([
mock.call('cluster-peer-delete', cluster_peer_delete_args)])
    def test_get_cluster_peer_policy(self):
        """With the feature enabled, the policy response is parsed."""
        self.client.features.add_feature('CLUSTER_PEER_POLICY')
        api_response = netapp_api.NaElement(
            fake_client.CLUSTER_PEER_POLICY_GET_RESPONSE)
        self.mock_object(self.client.connection,
                         'send_request',
                         return_value=api_response)
        result = self.client.get_cluster_peer_policy()
        expected = {
            'is-unauthenticated-access-permitted': False,
            'passphrase-minimum-length': 8,
        }
        self.assertEqual(expected, result)
        self.assertTrue(self.client.connection.send_request.called)
def test_get_cluster_peer_policy_not_supported(self):
result = self.client.get_cluster_peer_policy()
self.assertEqual({}, result)
    def test_set_cluster_peer_policy_not_supported(self):
        """Without the CLUSTER_PEER_POLICY feature no API request is made."""
        self.mock_object(self.client.connection, 'send_request')
        self.client.set_cluster_peer_policy()
        self.assertFalse(self.client.connection.send_request.called)
    def test_set_cluster_peer_policy_no_arguments(self):
        """With the feature enabled but no arguments, no request is made."""
        self.client.features.add_feature('CLUSTER_PEER_POLICY')
        self.mock_object(self.client.connection, 'send_request')
        self.client.set_cluster_peer_policy()
        self.assertFalse(self.client.connection.send_request.called)
    def test_set_cluster_peer_policy(self):
        """Policy arguments are converted to API strings and sent."""
        self.client.features.add_feature('CLUSTER_PEER_POLICY')
        self.mock_object(self.client.connection, 'send_request')
        self.client.set_cluster_peer_policy(
            is_unauthenticated_access_permitted=True,
            passphrase_minimum_length=12)
        # Booleans and ints are serialized to strings for the ZAPI call.
        cluster_peer_policy_modify_args = {
            'is-unauthenticated-access-permitted': 'true',
            'passphrase-minlength': '12',
        }
        self.client.connection.send_request.assert_has_calls([
            mock.call('cluster-peer-policy-modify',
                      cluster_peer_policy_modify_args)])
    def test_create_vserver_peer(self):
        """vserver-peer-create is issued with the snapmirror application."""
        self.mock_object(self.client.connection, 'send_request')
        self.client.create_vserver_peer('fake_vserver', 'fake_vserver_peer')
        vserver_peer_create_args = {
            'vserver': 'fake_vserver',
            'peer-vserver': 'fake_vserver_peer',
            'applications': [
                {'vserver-peer-application': 'snapmirror'},
            ],
        }
        self.client.connection.send_request.assert_has_calls([
            mock.call('vserver-peer-create', vserver_peer_create_args,
                      enable_tunneling=False)])
    def test_delete_vserver_peer(self):
        """vserver-peer-delete is issued with both vserver names."""
        self.mock_object(self.client.connection, 'send_request')
        self.client.delete_vserver_peer('fake_vserver', 'fake_vserver_peer')
        vserver_peer_delete_args = {
            'vserver': 'fake_vserver',
            'peer-vserver': 'fake_vserver_peer',
        }
        self.client.connection.send_request.assert_has_calls([
            mock.call('vserver-peer-delete', vserver_peer_delete_args)])
    def test_accept_vserver_peer(self):
        """vserver-peer-accept is issued with both vserver names."""
        self.mock_object(self.client.connection, 'send_request')
        self.client.accept_vserver_peer('fake_vserver', 'fake_vserver_peer')
        vserver_peer_accept_args = {
            'vserver': 'fake_vserver',
            'peer-vserver': 'fake_vserver_peer',
        }
        self.client.connection.send_request.assert_has_calls([
            mock.call('vserver-peer-accept', vserver_peer_accept_args)])
    def test_get_file_sizes_by_dir(self):
        """Files in a volume directory are listed with name and float size."""
        api_response = netapp_api.NaElement(
            fake_client.FILE_SIZES_BY_DIR_GET_ITER_RESPONSE)
        self.mock_object(self.client,
                         'send_iter_request',
                         return_value=api_response)
        result = self.client.get_file_sizes_by_dir(fake.NETAPP_VOLUME)
        get_get_file_sizes_by_dir_get_iter_args = {
            'path': '/vol/%s' % fake.NETAPP_VOLUME,
            'query': {
                'file-info': {
                    'file-type': 'file',
                }
            },
            'desired-attributes': {
                'file-info': {
                    'name': None,
                    'file-size': None
                }
            },
        }
        self.client.send_iter_request.assert_has_calls([
            mock.call('file-list-directory-iter',
                      get_get_file_sizes_by_dir_get_iter_args,
                      max_page_length=100)])
        expected = [{
            'name': fake.VOLUME_NAME,
            'file-size': float(1024)
        }]
        self.assertEqual(expected, result)
    def test_get_file_sizes_by_dir_not_found(self):
        """An empty directory listing results in an empty list."""
        api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE)
        self.mock_object(self.client,
                         'send_iter_request',
                         return_value=api_response)
        result = self.client.get_file_sizes_by_dir(fake.NETAPP_VOLUME)
        self.assertEqual([], result)
        self.assertTrue(self.client.send_iter_request.called)
    def test_get_lun_sizes_by_volume(self):
        """LUNs in a volume are listed with path and float size."""
        api_response = netapp_api.NaElement(
            fake_client.LUN_SIZES_BY_VOLUME_GET_ITER_RESPONSE)
        self.mock_object(self.client,
                         'send_iter_request',
                         return_value=api_response)
        result = self.client.get_lun_sizes_by_volume(fake.NETAPP_VOLUME)
        get_lun_sizes_by_volume_get_iter_args = {
            'query': {
                'lun-info': {
                    'volume': fake.NETAPP_VOLUME,
                }
            },
            'desired-attributes': {
                'lun-info': {
                    'path': None,
                    'size': None
                }
            },
        }
        self.client.send_iter_request.assert_has_calls([
            mock.call('lun-get-iter', get_lun_sizes_by_volume_get_iter_args,
                      max_page_length=100)])
        expected = [{
            'path': fake.VOLUME_PATH,
            'size': float(1024)
        }]
        self.assertEqual(expected, result)
    def test_get_lun_sizes_by_volume_not_found(self):
        """A volume without LUNs results in an empty list."""
        api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE)
        self.mock_object(self.client,
                         'send_iter_request',
                         return_value=api_response)
        result = self.client.get_lun_sizes_by_volume(fake.NETAPP_VOLUME)
        self.assertEqual([], result)
        self.assertTrue(self.client.send_iter_request.called)
    def test_get_vserver_peers(self):
        """Peers are queried by vserver pair and parsed into dicts."""
        api_response = netapp_api.NaElement(
            fake_client.VSERVER_PEER_GET_ITER_RESPONSE)
        self.mock_object(self.client,
                         'send_iter_request',
                         return_value=api_response)
        result = self.client.get_vserver_peers(
            vserver_name=fake_client.VSERVER_NAME,
            peer_vserver_name=fake_client.VSERVER_NAME_2)
        vserver_peer_get_iter_args = {
            'query': {
                'vserver-peer-info': {
                    'vserver': fake_client.VSERVER_NAME,
                    'peer-vserver': fake_client.VSERVER_NAME_2,
                }
            }
        }
        self.client.send_iter_request.assert_has_calls([
            mock.call('vserver-peer-get-iter', vserver_peer_get_iter_args,
                      enable_tunneling=False)])
        expected = [{
            'vserver': 'fake_vserver',
            'peer-vserver': 'fake_vserver_2',
            'peer-state': 'peered',
            'peer-cluster': 'fake_cluster',
            'applications': ['snapmirror'],
        }]
        self.assertEqual(expected, result)
    def test_get_vserver_peers_not_found(self):
        """No matching vserver peers results in an empty list."""
        api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE)
        self.mock_object(self.client,
                         'send_iter_request',
                         return_value=api_response)
        result = self.client.get_vserver_peers(
            vserver_name=fake_client.VSERVER_NAME,
            peer_vserver_name=fake_client.VSERVER_NAME_2)
        self.assertEqual([], result)
        self.assertTrue(self.client.send_iter_request.called)
def test_ensure_snapmirror_v2(self):
self.assertIsNone(self.client._ensure_snapmirror_v2())
    def test_ensure_snapmirror_v2_not_supported(self):
        """Without SNAPMIRROR_V2 support the check raises a driver error."""
        self.client.features.add_feature('SNAPMIRROR_V2', supported=False)
        self.assertRaises(netapp_utils.NetAppDriverException,
                          self.client._ensure_snapmirror_v2)
    @ddt.data({'schedule': 'fake_schedule', 'policy': 'fake_policy'},
              {'schedule': None, 'policy': None})
    @ddt.unpack
    def test_create_snapmirror(self, schedule, policy):
        """Optional schedule/policy appear in the args only when set."""
        self.mock_object(self.client.connection, 'send_request')
        self.client.create_snapmirror(
            fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
            fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME,
            schedule=schedule, policy=policy)
        snapmirror_create_args = {
            'source-vserver': fake_client.SM_SOURCE_VSERVER,
            'source-volume': fake_client.SM_SOURCE_VOLUME,
            'destination-vserver': fake_client.SM_DEST_VSERVER,
            'destination-volume': fake_client.SM_DEST_VOLUME,
            'relationship-type': 'data_protection',
        }
        if schedule:
            snapmirror_create_args['schedule'] = schedule
        if policy:
            snapmirror_create_args['policy'] = policy
        self.client.connection.send_request.assert_has_calls([
            mock.call('snapmirror-create', snapmirror_create_args)])
    def test_create_snapmirror_already_exists(self):
        """An ERELATION_EXISTS error is tolerated (idempotent create)."""
        api_error = netapp_api.NaApiError(code=netapp_api.ERELATION_EXISTS)
        self.mock_object(
            self.client.connection, 'send_request', side_effect=api_error)
        self.client.create_snapmirror(
            fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
            fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME)
        snapmirror_create_args = {
            'source-vserver': fake_client.SM_SOURCE_VSERVER,
            'source-volume': fake_client.SM_SOURCE_VOLUME,
            'destination-vserver': fake_client.SM_DEST_VSERVER,
            'destination-volume': fake_client.SM_DEST_VOLUME,
            'relationship-type': 'data_protection',
        }
        self.client.connection.send_request.assert_has_calls([
            mock.call('snapmirror-create', snapmirror_create_args)])
    def test_create_snapmirror_error(self):
        """Any other API error code propagates from create_snapmirror."""
        api_error = netapp_api.NaApiError(code=0)
        self.mock_object(
            self.client.connection, 'send_request', side_effect=api_error)
        self.assertRaises(netapp_api.NaApiError,
                          self.client.create_snapmirror,
                          fake_client.SM_SOURCE_VSERVER,
                          fake_client.SM_SOURCE_VOLUME,
                          fake_client.SM_DEST_VSERVER,
                          fake_client.SM_DEST_VOLUME)
        self.assertTrue(self.client.connection.send_request.called)
    @ddt.data(
        {
            'source_snapshot': 'fake_snapshot',
            'transfer_priority': 'fake_priority'
        },
        {
            'source_snapshot': None,
            'transfer_priority': None
        }
    )
    @ddt.unpack
    def test_initialize_snapmirror(self, source_snapshot, transfer_priority):
        """Optional snapshot/priority appear in the args only when set."""
        api_response = netapp_api.NaElement(
            fake_client.SNAPMIRROR_INITIALIZE_RESULT)
        self.mock_object(self.client.connection,
                         'send_request',
                         return_value=api_response)
        result = self.client.initialize_snapmirror(
            fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
            fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME,
            source_snapshot=source_snapshot,
            transfer_priority=transfer_priority)
        snapmirror_initialize_args = {
            'source-vserver': fake_client.SM_SOURCE_VSERVER,
            'source-volume': fake_client.SM_SOURCE_VOLUME,
            'destination-vserver': fake_client.SM_DEST_VSERVER,
            'destination-volume': fake_client.SM_DEST_VOLUME,
        }
        if source_snapshot:
            snapmirror_initialize_args['source-snapshot'] = source_snapshot
        if transfer_priority:
            snapmirror_initialize_args['transfer-priority'] = transfer_priority
        self.client.connection.send_request.assert_has_calls([
            mock.call('snapmirror-initialize', snapmirror_initialize_args)])
        # Result dict mirrors the fields of the initialize response.
        expected = {
            'operation-id': None,
            'status': 'succeeded',
            'jobid': None,
            'error-code': None,
            'error-message': None
        }
        self.assertEqual(expected, result)
    @ddt.data(True, False)
    def test_release_snapmirror(self, relationship_info_only):
        """The relationship-info-only flag is serialized to 'true'/'false'."""
        self.mock_object(self.client.connection, 'send_request')
        self.client.release_snapmirror(
            fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
            fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME,
            relationship_info_only=relationship_info_only)
        snapmirror_release_args = {
            'query': {
                'snapmirror-destination-info': {
                    'source-vserver': fake_client.SM_SOURCE_VSERVER,
                    'source-volume': fake_client.SM_SOURCE_VOLUME,
                    'destination-vserver': fake_client.SM_DEST_VSERVER,
                    'destination-volume': fake_client.SM_DEST_VOLUME,
                    'relationship-info-only': ('true' if relationship_info_only
                                               else 'false'),
                }
            }
        }
        self.client.connection.send_request.assert_has_calls([
            mock.call('snapmirror-release-iter', snapmirror_release_args)])
    def test_quiesce_snapmirror(self):
        """snapmirror-quiesce is issued with the full endpoint tuple."""
        self.mock_object(self.client.connection, 'send_request')
        self.client.quiesce_snapmirror(
            fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
            fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME)
        snapmirror_quiesce_args = {
            'source-vserver': fake_client.SM_SOURCE_VSERVER,
            'source-volume': fake_client.SM_SOURCE_VOLUME,
            'destination-vserver': fake_client.SM_DEST_VSERVER,
            'destination-volume': fake_client.SM_DEST_VOLUME,
        }
        self.client.connection.send_request.assert_has_calls([
            mock.call('snapmirror-quiesce', snapmirror_quiesce_args)])
    @ddt.data(True, False)
    def test_abort_snapmirror(self, clear_checkpoint):
        """The clear-checkpoint flag is serialized to 'true'/'false'."""
        self.mock_object(self.client.connection, 'send_request')
        self.client.abort_snapmirror(
            fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
            fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME,
            clear_checkpoint=clear_checkpoint)
        snapmirror_abort_args = {
            'source-vserver': fake_client.SM_SOURCE_VSERVER,
            'source-volume': fake_client.SM_SOURCE_VOLUME,
            'destination-vserver': fake_client.SM_DEST_VSERVER,
            'destination-volume': fake_client.SM_DEST_VOLUME,
            'clear-checkpoint': 'true' if clear_checkpoint else 'false',
        }
        self.client.connection.send_request.assert_has_calls([
            mock.call('snapmirror-abort', snapmirror_abort_args)])
    def test_abort_snapmirror_no_transfer_in_progress(self):
        """An ENOTRANSFER_IN_PROGRESS error is tolerated by abort."""
        api_error = netapp_api.NaApiError(
            code=netapp_api.ENOTRANSFER_IN_PROGRESS)
        self.mock_object(
            self.client.connection, 'send_request', side_effect=api_error)
        self.client.abort_snapmirror(
            fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
            fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME)
        snapmirror_abort_args = {
            'source-vserver': fake_client.SM_SOURCE_VSERVER,
            'source-volume': fake_client.SM_SOURCE_VOLUME,
            'destination-vserver': fake_client.SM_DEST_VSERVER,
            'destination-volume': fake_client.SM_DEST_VOLUME,
            'clear-checkpoint': 'false',
        }
        self.client.connection.send_request.assert_has_calls([
            mock.call('snapmirror-abort', snapmirror_abort_args)])
    def test_abort_snapmirror_error(self):
        """Any other API error code propagates from abort_snapmirror."""
        api_error = netapp_api.NaApiError(code=0)
        self.mock_object(
            self.client.connection, 'send_request', side_effect=api_error)
        self.assertRaises(netapp_api.NaApiError,
                          self.client.abort_snapmirror,
                          fake_client.SM_SOURCE_VSERVER,
                          fake_client.SM_SOURCE_VOLUME,
                          fake_client.SM_DEST_VSERVER,
                          fake_client.SM_DEST_VOLUME)
    def test_break_snapmirror(self):
        """snapmirror-break is issued with the full endpoint tuple."""
        self.mock_object(self.client.connection, 'send_request')
        self.client.break_snapmirror(
            fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
            fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME)
        snapmirror_break_args = {
            'source-vserver': fake_client.SM_SOURCE_VSERVER,
            'source-volume': fake_client.SM_SOURCE_VOLUME,
            'destination-vserver': fake_client.SM_DEST_VSERVER,
            'destination-volume': fake_client.SM_DEST_VOLUME,
        }
        self.client.connection.send_request.assert_has_calls([
            mock.call('snapmirror-break', snapmirror_break_args)])
    @ddt.data(
        {
            'schedule': 'fake_schedule',
            'policy': 'fake_policy',
            'tries': 5,
            'max_transfer_rate': 1024,
        },
        {
            'schedule': None,
            'policy': None,
            'tries': None,
            'max_transfer_rate': None,
        }
    )
    @ddt.unpack
    def test_modify_snapmirror(self, schedule, policy, tries,
                               max_transfer_rate):
        """Each optional argument appears in the args only when set."""
        self.mock_object(self.client.connection, 'send_request')
        self.client.modify_snapmirror(
            fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
            fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME,
            schedule=schedule, policy=policy, tries=tries,
            max_transfer_rate=max_transfer_rate)
        snapmirror_modify_args = {
            'source-vserver': fake_client.SM_SOURCE_VSERVER,
            'source-volume': fake_client.SM_SOURCE_VOLUME,
            'destination-vserver': fake_client.SM_DEST_VSERVER,
            'destination-volume': fake_client.SM_DEST_VOLUME,
        }
        if schedule:
            snapmirror_modify_args['schedule'] = schedule
        if policy:
            snapmirror_modify_args['policy'] = policy
        if tries:
            snapmirror_modify_args['tries'] = tries
        if max_transfer_rate:
            snapmirror_modify_args['max-transfer-rate'] = max_transfer_rate
        self.client.connection.send_request.assert_has_calls([
            mock.call('snapmirror-modify', snapmirror_modify_args)])
    def test_delete_snapmirror(self):
        """Deletion uses snapmirror-destroy-iter with a snapmirror query."""
        self.mock_object(self.client.connection, 'send_request')
        self.client.delete_snapmirror(
            fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
            fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME)
        snapmirror_delete_args = {
            'query': {
                'snapmirror-info': {
                    'source-vserver': fake_client.SM_SOURCE_VSERVER,
                    'source-volume': fake_client.SM_SOURCE_VOLUME,
                    'destination-vserver': fake_client.SM_DEST_VSERVER,
                    'destination-volume': fake_client.SM_DEST_VOLUME,
                }
            }
        }
        self.client.connection.send_request.assert_has_calls([
            mock.call('snapmirror-destroy-iter', snapmirror_delete_args)])
    def test_update_snapmirror(self):
        """snapmirror-update is issued with the full endpoint tuple."""
        self.mock_object(self.client.connection, 'send_request')
        self.client.update_snapmirror(
            fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
            fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME)
        snapmirror_update_args = {
            'source-vserver': fake_client.SM_SOURCE_VSERVER,
            'source-volume': fake_client.SM_SOURCE_VOLUME,
            'destination-vserver': fake_client.SM_DEST_VSERVER,
            'destination-volume': fake_client.SM_DEST_VOLUME,
        }
        self.client.connection.send_request.assert_has_calls([
            mock.call('snapmirror-update', snapmirror_update_args)])
def test_update_snapmirror_already_transferring(self):
api_error = netapp_api.NaApiError(
code=netapp_api.ETRANSFER_IN_PROGRESS)
self.mock_object(
self.client.connection, 'send_request', side_effect=api_error)
self.client.update_snapmirror(
fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME)
snapmirror_update_args = {
'source-vserver': fake_client.SM_SOURCE_VSERVER,
'source-volume': fake_client.SM_SOURCE_VOLUME,
'destination-vserver': fake_client.SM_DEST_VSERVER,
'destination-volume': fake_client.SM_DEST_VOLUME,
}
self.client.connection.send_request.assert_has_calls([
mock.call('snapmirror-update', snapmirror_update_args)])
def test_update_snapmirror_already_transferring_two(self):
api_error = netapp_api.NaApiError(code=netapp_api.EANOTHER_OP_ACTIVE)
self.mock_object(
self.client.connection, 'send_request', side_effect=api_error)
self.client.update_snapmirror(
fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME)
snapmirror_update_args = {
'source-vserver': fake_client.SM_SOURCE_VSERVER,
'source-volume': fake_client.SM_SOURCE_VOLUME,
'destination-vserver': fake_client.SM_DEST_VSERVER,
'destination-volume': fake_client.SM_DEST_VOLUME,
}
self.client.connection.send_request.assert_has_calls([
mock.call('snapmirror-update', snapmirror_update_args)])
def test_update_snapmirror_error(self):
api_error = netapp_api.NaApiError(code=0)
self.mock_object(
self.client.connection, 'send_request', side_effect=api_error)
self.assertRaises(netapp_api.NaApiError,
self.client.update_snapmirror,
fake_client.SM_SOURCE_VSERVER,
fake_client.SM_SOURCE_VOLUME,
fake_client.SM_DEST_VSERVER,
fake_client.SM_DEST_VOLUME)
def test_resume_snapmirror(self):
self.mock_object(self.client.connection, 'send_request')
self.client.resume_snapmirror(
fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME)
snapmirror_resume_args = {
'source-vserver': fake_client.SM_SOURCE_VSERVER,
'source-volume': fake_client.SM_SOURCE_VOLUME,
'destination-vserver': fake_client.SM_DEST_VSERVER,
'destination-volume': fake_client.SM_DEST_VOLUME,
}
self.client.connection.send_request.assert_has_calls([
mock.call('snapmirror-resume', snapmirror_resume_args)])
def test_resume_snapmirror_not_quiesed(self):
api_error = netapp_api.NaApiError(
code=netapp_api.ERELATION_NOT_QUIESCED)
self.mock_object(
self.client.connection, 'send_request', side_effect=api_error)
self.client.resume_snapmirror(
fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME)
snapmirror_resume_args = {
'source-vserver': fake_client.SM_SOURCE_VSERVER,
'source-volume': fake_client.SM_SOURCE_VOLUME,
'destination-vserver': fake_client.SM_DEST_VSERVER,
'destination-volume': fake_client.SM_DEST_VOLUME,
}
self.client.connection.send_request.assert_has_calls([
mock.call('snapmirror-resume', snapmirror_resume_args)])
def test_resume_snapmirror_error(self):
api_error = netapp_api.NaApiError(code=0)
self.mock_object(
self.client.connection, 'send_request', side_effect=api_error)
self.assertRaises(netapp_api.NaApiError,
self.client.resume_snapmirror,
fake_client.SM_SOURCE_VSERVER,
fake_client.SM_SOURCE_VOLUME,
fake_client.SM_DEST_VSERVER,
fake_client.SM_DEST_VOLUME)
def test_resync_snapmirror(self):
self.mock_object(self.client.connection, 'send_request')
self.client.resync_snapmirror(
fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME)
snapmirror_resync_args = {
'source-vserver': fake_client.SM_SOURCE_VSERVER,
'source-volume': fake_client.SM_SOURCE_VOLUME,
'destination-vserver': fake_client.SM_DEST_VSERVER,
'destination-volume': fake_client.SM_DEST_VOLUME,
}
self.client.connection.send_request.assert_has_calls([
mock.call('snapmirror-resync', snapmirror_resync_args)])
    def test__get_snapmirrors(self):
        """_get_snapmirrors builds query + desired-attributes for the iter API."""
        api_response = netapp_api.NaElement(
            fake_client.SNAPMIRROR_GET_ITER_RESPONSE)
        self.mock_object(self.client,
                         'send_iter_request',
                         return_value=api_response)
        desired_attributes = {
            'snapmirror-info': {
                'source-vserver': None,
                'source-volume': None,
                'destination-vserver': None,
                'destination-volume': None,
                'is-healthy': None,
            }
        }
        result = self.client._get_snapmirrors(
            fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
            fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME,
            desired_attributes=desired_attributes)
        snapmirror_get_iter_args = {
            'query': {
                'snapmirror-info': {
                    'source-vserver': fake_client.SM_SOURCE_VSERVER,
                    'source-volume': fake_client.SM_SOURCE_VOLUME,
                    'destination-vserver': fake_client.SM_DEST_VSERVER,
                    'destination-volume': fake_client.SM_DEST_VOLUME,
                },
            },
            'desired-attributes': {
                'snapmirror-info': {
                    'source-vserver': None,
                    'source-volume': None,
                    'destination-vserver': None,
                    'destination-volume': None,
                    'is-healthy': None,
                },
            },
        }
        self.client.send_iter_request.assert_has_calls([
            mock.call('snapmirror-get-iter', snapmirror_get_iter_args)])
        # The fake response contains exactly one snapmirror record.
        self.assertEqual(1, len(result))
    def test__get_snapmirrors_not_found(self):
        """An empty iter response yields an empty list (and an empty query)."""
        api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE)
        self.mock_object(self.client,
                         'send_iter_request',
                         return_value=api_response)
        # No filter arguments: the API call carries an empty args dict.
        result = self.client._get_snapmirrors()
        self.client.send_iter_request.assert_has_calls([
            mock.call('snapmirror-get-iter', {})])
        self.assertEqual([], result)
    def test_get_snapmirrors(self):
        """get_snapmirrors converts records into plain dicts of the asked keys."""
        api_response = netapp_api.NaElement(
            fake_client.SNAPMIRROR_GET_ITER_FILTERED_RESPONSE)
        self.mock_object(self.client,
                         'send_iter_request',
                         return_value=api_response)
        # Public API takes a flat list; it is expanded into the None-valued
        # desired-attributes mapping checked below.
        desired_attributes = ['source-vserver', 'source-volume',
                              'destination-vserver', 'destination-volume',
                              'is-healthy', 'mirror-state', 'schedule']
        result = self.client.get_snapmirrors(
            fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
            fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME,
            desired_attributes=desired_attributes)
        snapmirror_get_iter_args = {
            'query': {
                'snapmirror-info': {
                    'source-vserver': fake_client.SM_SOURCE_VSERVER,
                    'source-volume': fake_client.SM_SOURCE_VOLUME,
                    'destination-vserver': fake_client.SM_DEST_VSERVER,
                    'destination-volume': fake_client.SM_DEST_VOLUME,
                },
            },
            'desired-attributes': {
                'snapmirror-info': {
                    'source-vserver': None,
                    'source-volume': None,
                    'destination-vserver': None,
                    'destination-volume': None,
                    'is-healthy': None,
                    'mirror-state': None,
                    'schedule': None,
                },
            },
        }
        expected = [{
            'source-vserver': fake_client.SM_SOURCE_VSERVER,
            'source-volume': fake_client.SM_SOURCE_VOLUME,
            'destination-vserver': fake_client.SM_DEST_VSERVER,
            'destination-volume': fake_client.SM_DEST_VOLUME,
            'is-healthy': 'true',
            'mirror-state': 'snapmirrored',
            'schedule': 'daily',
        }]
        self.client.send_iter_request.assert_has_calls([
            mock.call('snapmirror-get-iter', snapmirror_get_iter_args)])
        self.assertEqual(expected, result)
    def test_get_provisioning_options_from_flexvol(self):
        """Provisioning options are assembled from flexvol + dedupe SSC info."""
        self.mock_object(self.client, 'get_flexvol',
                         return_value=fake_client.VOLUME_INFO_SSC)
        self.mock_object(self.client, 'get_flexvol_dedupe_info',
                         return_value=fake_client.VOLUME_DEDUPE_INFO_SSC)
        # Expected values mirror the fake SSC fixtures above.
        expected_prov_opts = {
            'aggregate': 'fake_aggr1',
            'compression_enabled': False,
            'dedupe_enabled': True,
            'language': 'c.utf_8',
            'size': 1,
            'snapshot_policy': 'default',
            'snapshot_reserve': '5',
            'space_guarantee_type': 'none',
            'volume_type': 'rw',
            'is_flexgroup': False,
        }
        actual_prov_opts = self.client.get_provisioning_options_from_flexvol(
            fake_client.VOLUME_NAME)
        self.assertEqual(expected_prov_opts, actual_prov_opts)
    def test_wait_for_busy_snapshot(self):
        """A non-busy snapshot returns after a single get_snapshot call."""
        # Need to mock sleep as it is called by @utils.retry
        self.mock_object(time, 'sleep')
        mock_get_snapshot = self.mock_object(
            self.client, 'get_snapshot',
            return_value=fake.SNAPSHOT
        )
        self.client.wait_for_busy_snapshot(fake.FLEXVOL, fake.SNAPSHOT_NAME)
        mock_get_snapshot.assert_called_once_with(fake.FLEXVOL,
                                                  fake.SNAPSHOT_NAME)
    def test_wait_for_busy_snapshot_raise_exception(self):
        """A snapshot that stays busy exhausts retries and raises."""
        # Need to mock sleep as it is called by @utils.retry
        self.mock_object(time, 'sleep')
        BUSY_SNAPSHOT = dict(fake.SNAPSHOT)
        BUSY_SNAPSHOT['busy'] = True
        mock_get_snapshot = self.mock_object(
            self.client, 'get_snapshot',
            return_value=BUSY_SNAPSHOT
        )
        self.assertRaises(exception.SnapshotIsBusy,
                          self.client.wait_for_busy_snapshot,
                          fake.FLEXVOL, fake.SNAPSHOT_NAME)
        # Three calls: the retry decorator polls before giving up.
        calls = [
            mock.call(fake.FLEXVOL, fake.SNAPSHOT_NAME),
            mock.call(fake.FLEXVOL, fake.SNAPSHOT_NAME),
            mock.call(fake.FLEXVOL, fake.SNAPSHOT_NAME),
        ]
        mock_get_snapshot.assert_has_calls(calls)
    @ddt.data({
        'mock_return':
            fake_client.SNAPSHOT_INFO_FOR_PRESENT_NOT_BUSY_SNAPSHOT_CMODE,
        'expected': [{
            'name': fake.SNAPSHOT_NAME,
            'instance_id': 'abcd-ef01-2345-6789',
            'volume_name': fake.SNAPSHOT['volume_id'],
        }]
    }, {
        'mock_return': fake_client.NO_RECORDS_RESPONSE,
        'expected': [],
    })
    @ddt.unpack
    def test_get_snapshots_marked_for_deletion(self, mock_return, expected):
        """Snapshots prefixed with DELETED_PREFIX and not busy are listed."""
        api_response = netapp_api.NaElement(mock_return)
        self.mock_object(self.client.connection,
                         'send_request',
                         return_value=api_response)
        result = self.client.get_snapshots_marked_for_deletion()
        # Query filters by deleted-name wildcard, vserver, and busy=false.
        api_args = {
            'query': {
                'snapshot-info': {
                    'name': client_base.DELETED_PREFIX + '*',
                    'vserver': self.vserver,
                    'busy': 'false'
                },
            },
            'desired-attributes': {
                'snapshot-info': {
                    'name': None,
                    'volume': None,
                    'snapshot-instance-uuid': None,
                }
            },
        }
        self.client.connection.send_request.assert_called_once_with(
            'snapshot-get-iter', api_args)
        self.assertListEqual(expected, result)
    @ddt.data(True, False)
    def test_is_qos_min_supported(self, supported):
        """The feature flag named by qos_min_feature_name decides support."""
        self.client.features.add_feature('test', supported=supported)
        mock_name = self.mock_object(netapp_utils,
                                     'qos_min_feature_name',
                                     return_value='test')
        result = self.client.is_qos_min_supported(True, 'node')
        mock_name.assert_called_once_with(True, 'node')
        self.assertEqual(result, supported)
    def test_is_qos_min_supported_invalid_node(self):
        """An unknown feature name means QoS min is reported unsupported."""
        mock_name = self.mock_object(netapp_utils,
                                     'qos_min_feature_name',
                                     return_value='invalid_feature')
        result = self.client.is_qos_min_supported(True, 'node')
        mock_name.assert_called_once_with(True, 'node')
        self.assertFalse(result)
    def test_is_qos_min_supported_none_node(self):
        """A None node short-circuits to unsupported."""
        result = self.client.is_qos_min_supported(True, None)
        self.assertFalse(result)
    def test_get_unique_volume(self):
        """get_unique_volume extracts the single volume element of the reply."""
        api_response = netapp_api.NaElement(
            fake_client.VOLUME_GET_ITER_STYLE_RESPONSE)
        volume_elem = netapp_api.NaElement(fake_client.VOLUME_FLEXGROUP_STYLE)
        volume_id_attr = self.client.get_unique_volume(api_response)
        # Compare XML with whitespace stripped; element order is preserved.
        xml_exp = str(volume_elem).replace(" ", "").replace("\n", "")
        xml_res = str(volume_id_attr).replace(" ", "").replace("\n", "")
        self.assertEqual(xml_exp, xml_res)
    def test_get_unique_volume_raise_exception(self):
        """Multiple matching volumes make get_unique_volume raise."""
        api_response = netapp_api.NaElement(
            fake_client.VOLUME_GET_ITER_SAME_STYLE_RESPONSE)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.client.get_unique_volume,
                          api_response)
    def test_get_cluster_name(self):
        """cluster-identity-get is queried without tunneling for the name."""
        api_response = netapp_api.NaElement(
            fake_client.GET_CLUSTER_NAME_RESPONSE)
        mock_send_request = self.mock_object(
            self.client.connection, 'send_request', return_value=api_response)
        api_args = {
            'desired-attributes': {
                'cluster-identity-info': {
                    'cluster-name': None,
                }
            }
        }
        result = self.client.get_cluster_name()
        mock_send_request.assert_called_once_with('cluster-identity-get',
                                                  api_args,
                                                  enable_tunneling=False)
        self.assertEqual(fake_client.CLUSTER_NAME, result)
    @ddt.data((fake_client.LUN_NAME, fake_client.DEST_VOLUME_NAME, None,
               fake_client.VOLUME_NAME),
              (fake_client.LUN_NAME, None, fake_client.DEST_LUN_NAME,
               fake_client.DEST_VOLUME_NAME))
    @ddt.unpack
    def test_start_lun_move(self, src_lun_name, src_ontap_vol, dest_lun_name,
                            dest_ontap_vol):
        """Path pair defaults to the dest volume/name when source is omitted."""
        api_response = netapp_api.NaElement(
            fake_client.START_LUN_MOVE_RESPONSE)
        mock_send_request = self.mock_object(
            self.client.connection, 'send_request', return_value=api_response)
        result = self.client.start_lun_move(src_lun_name,
                                            dest_ontap_vol,
                                            src_ontap_volume=src_ontap_vol,
                                            dest_lun_name=dest_lun_name)
        api_args = {
            'paths': [{
                'lun-path-pair': {
                    'destination-path': '/vol/%s/%s' % (dest_ontap_vol,
                                                        src_lun_name if
                                                        dest_lun_name is None
                                                        else dest_lun_name),
                    'source-path': '/vol/%s/%s' % (dest_ontap_vol
                                                   if src_ontap_vol is None
                                                   else src_ontap_vol,
                                                   src_lun_name)
                }
            }]
        }
        mock_send_request.assert_called_once_with('lun-move-start', api_args)
        self.assertEqual(fake.JOB_UUID, result)
    def test_get_lun_move_status(self):
        """lun-move-get-iter is queried by job uuid; status dict is returned."""
        api_response = netapp_api.NaElement(
            fake_client.GET_LUN_MOVE_STATUS_RESPONSE)
        mock_send_request = self.mock_object(
            self.client.connection, 'send_request', return_value=api_response)
        result = self.client.get_lun_move_status(fake.JOB_UUID)
        api_args = {
            'query': {
                'lun-move-info': {
                    'job-uuid': fake.JOB_UUID
                }
            }
        }
        mock_send_request.assert_called_once_with('lun-move-get-iter',
                                                  api_args)
        expected = {
            'job-status': 'complete',
            'last-failure-reason': None
        }
        self.assertEqual(expected, result)
    @ddt.data((fake_client.LUN_NAME, None, fake_client.VSERVER_NAME,
               fake_client.DEST_LUN_NAME, fake_client.DEST_VOLUME_NAME,
               fake_client.DEST_VSERVER_NAME),
              (fake_client.LUN_NAME, fake_client.VOLUME_NAME, None,
               fake_client.DEST_LUN_NAME, fake_client.DEST_VOLUME_NAME,
               fake_client.DEST_VSERVER_NAME),
              (fake_client.LUN_NAME, fake_client.VOLUME_NAME,
               fake_client.VSERVER_NAME, None, fake_client.DEST_VOLUME_NAME,
               fake_client.DEST_VSERVER_NAME))
    @ddt.unpack
    def test_start_lun_copy(self, src_lun_name, src_ontap_vol, src_vserver,
                            dest_lun_name, dest_ontap_vol, dest_vserver):
        """Source vserver/volume/name fall back to destination when omitted."""
        api_response = netapp_api.NaElement(
            fake_client.START_LUN_COPY_RESPONSE)
        mock_send_request = self.mock_object(
            self.client.connection, 'send_request', return_value=api_response)
        result = self.client.start_lun_copy(src_lun_name,
                                            dest_ontap_vol,
                                            dest_vserver,
                                            src_ontap_volume=src_ontap_vol,
                                            src_vserver=src_vserver,
                                            dest_lun_name=dest_lun_name)
        api_args = {
            'source-vserver': (dest_vserver if not src_vserver
                               else src_vserver),
            'destination-vserver': dest_vserver,
            'paths': [{
                'lun-path-pair': {
                    'destination-path': '/vol/%s/%s' % (dest_ontap_vol,
                                                        src_lun_name if
                                                        dest_lun_name is None
                                                        else dest_lun_name),
                    'source-path': '/vol/%s/%s' % (dest_ontap_vol
                                                   if src_ontap_vol is None
                                                   else src_ontap_vol,
                                                   src_lun_name)
                }
            }]
        }
        mock_send_request.assert_called_once_with('lun-copy-start', api_args,
                                                  enable_tunneling=False)
        self.assertEqual(fake.JOB_UUID, result)
    def test_get_lun_copy_status(self):
        """lun-copy-get-iter is queried by job uuid without tunneling."""
        api_response = netapp_api.NaElement(
            fake_client.GET_LUN_COPY_STATUS_RESPONSE)
        mock_send_request = self.mock_object(
            self.client.connection, 'send_request', return_value=api_response)
        result = self.client.get_lun_copy_status(fake.JOB_UUID)
        api_args = {
            'query': {
                'lun-copy-info': {
                    'job-uuid': fake.JOB_UUID
                }
            }
        }
        mock_send_request.assert_called_once_with('lun-copy-get-iter',
                                                  api_args,
                                                  enable_tunneling=False)
        expected = {
            'job-status': 'complete',
            'last-failure-reason': None
        }
        self.assertEqual(expected, result)
    @ddt.data((fake_client.FILE_NAME, None, fake_client.DEST_VOLUME_NAME,
               fake_client.DEST_VOLUME_NAME),
              (fake_client.FILE_NAME, fake_client.VOLUME_NAME, None,
               fake_client.DEST_VOLUME_NAME))
    @ddt.unpack
    def test_start_file_copy(self, src_file_name, src_ontap_vol,
                             dest_file_name, dest_ontap_vol):
        """Source/destination paths default to the dest volume/name when None."""
        api_response = netapp_api.NaElement(
            fake_client.START_FILE_COPY_RESPONSE)
        mock_send_request = self.mock_object(
            self.client.connection, 'send_request', return_value=api_response)
        result = self.client.start_file_copy(src_file_name,
                                             dest_ontap_vol,
                                             src_ontap_volume=src_ontap_vol,
                                             dest_file_name=dest_file_name)
        api_args = {
            'source-paths': [{
                'sfod-operation-path': '%s/%s' % (dest_ontap_vol if
                                                  src_ontap_vol is None else
                                                  src_ontap_vol,
                                                  src_file_name)
            }],
            'destination-paths': [{
                'sfod-operation-path': '%s/%s' % (dest_ontap_vol,
                                                  src_file_name if
                                                  dest_file_name is None else
                                                  dest_file_name)
            }],
        }
        mock_send_request.assert_called_once_with('file-copy-start', api_args,
                                                  enable_tunneling=False)
        self.assertEqual(fake.JOB_UUID, result)
    def test_get_file_copy_status(self):
        """file-copy-get-iter is queried by job uuid without tunneling."""
        api_response = netapp_api.NaElement(
            fake_client.GET_FILE_COPY_STATUS_RESPONSE)
        mock_send_request = self.mock_object(
            self.client.connection, 'send_request', return_value=api_response)
        result = self.client.get_file_copy_status(fake.JOB_UUID)
        api_args = {
            'query': {
                'file-copy-info': {
                    'job-uuid': fake.JOB_UUID
                }
            }
        }
        mock_send_request.assert_called_once_with('file-copy-get-iter',
                                                  api_args,
                                                  enable_tunneling=False)
        expected = {
            'job-status': 'complete',
            'last-failure-reason': None
        }
        self.assertEqual(expected, result)
    def test_destroy_file_copy(self):
        """file-copy-destroy removes the job record; returns None on success."""
        api_response = netapp_api.NaElement(
            fake_client.DESTROY_FILE_COPY_RESPONSE)
        mock_send_request = self.mock_object(
            self.client.connection, 'send_request', return_value=api_response)
        result = self.client.destroy_file_copy(fake.JOB_UUID)
        api_args = {
            'job-uuid': fake.JOB_UUID,
            'file-index': 0
        }
        mock_send_request.assert_called_once_with('file-copy-destroy',
                                                  api_args,
                                                  enable_tunneling=False)
        self.assertIsNone(result)
    def test_destroy_file_copy_error(self):
        """A backend NaApiError is wrapped into NetAppDriverException."""
        mock_send_request = self.mock_object(self.client.connection,
                                             'send_request',
                                             side_effect=netapp_api.NaApiError)
        self.assertRaises(netapp_utils.NetAppDriverException,
                          self.client.destroy_file_copy,
                          fake.JOB_UUID)
        api_args = {
            'job-uuid': fake.JOB_UUID,
            'file-index': 0
        }
        mock_send_request.assert_called_once_with('file-copy-destroy',
                                                  api_args,
                                                  enable_tunneling=False)
    def test_cancel_lun_copy(self):
        """lun-copy-cancel is invoked by job uuid; returns None on success."""
        api_response = netapp_api.NaElement(
            fake_client.CANCEL_LUN_COPY_RESPONSE)
        mock_send_request = self.mock_object(
            self.client.connection, 'send_request', return_value=api_response)
        result = self.client.cancel_lun_copy(fake.JOB_UUID)
        api_args = {
            'job-uuid': fake.JOB_UUID
        }
        mock_send_request.assert_called_once_with('lun-copy-cancel',
                                                  api_args,
                                                  enable_tunneling=False)
        self.assertIsNone(result)
    def test_cancel_lun_copy_error(self):
        """A backend NaApiError during cancel is wrapped into a driver error."""
        mock_send_request = self.mock_object(self.client.connection,
                                             'send_request',
                                             side_effect=netapp_api.NaApiError)
        self.assertRaises(netapp_utils.NetAppDriverException,
                          self.client.cancel_lun_copy,
                          fake.JOB_UUID)
        api_args = {
            'job-uuid': fake.JOB_UUID
        }
        mock_send_request.assert_called_once_with('lun-copy-cancel',
                                                  api_args,
                                                  enable_tunneling=False)
    def test_rename_file(self):
        """file-rename-file receives the original and new full paths."""
        self.mock_object(self.client.connection, 'send_request')
        orig_file_name = '/vol/fake_vol/volume-%s' % self.fake_volume
        new_file_name = '/vol/fake_vol/new-volume-%s' % self.fake_volume
        self.client.rename_file(orig_file_name, new_file_name)
        api_args = {
            'from-path': orig_file_name,
            'to-path': new_file_name,
        }
        self.client.connection.send_request.assert_called_once_with(
            'file-rename-file', api_args)
    def test_check_api_permissions(self):
        """All SSC APIs reachable: every mapped API is probed, no warnings."""
        mock_log = self.mock_object(client_cmode.LOG, 'warning')
        self.mock_object(self.client, 'check_cluster_api', return_value=True)
        self.client.check_api_permissions()
        self.client.check_cluster_api.assert_has_calls(
            [mock.call(*key) for key in client_cmode.SSC_API_MAP.keys()])
        self.assertEqual(0, mock_log.call_count)
    def test_check_api_permissions_failed_ssc_apis(self):
        """Missing non-essential SSC APIs only log a warning."""
        def check_cluster_api(object_name, operation_name, api):
            # Only volume-get-iter is available in this scenario.
            if api != 'volume-get-iter':
                return False
            return True
        self.mock_object(self.client, 'check_cluster_api',
                         side_effect=check_cluster_api)
        mock_log = self.mock_object(client_cmode.LOG, 'warning')
        self.client.check_api_permissions()
        self.assertEqual(1, mock_log.call_count)
    def test_check_api_permissions_failed_volume_api(self):
        """A missing volume-get-iter API is fatal and raises, no warning."""
        def check_cluster_api(object_name, operation_name, api):
            # volume-get-iter is the one API that must be present.
            if api == 'volume-get-iter':
                return False
            return True
        self.mock_object(self.client, 'check_cluster_api',
                         side_effect=check_cluster_api)
        mock_log = self.mock_object(client_cmode.LOG, 'warning')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.client.check_api_permissions)
        self.assertEqual(0, mock_log.call_count)
| {
"content_hash": "4f1f38afc58e6f0eb97721b4bd0942ac",
"timestamp": "",
"source": "github",
"line_count": 4523,
"max_line_length": 80,
"avg_line_length": 39.30665487508291,
"alnum_prop": 0.5576373576924808,
"repo_name": "mahak/cinder",
"id": "e5ed24de6fbfafd663d22f08b19737d0077ff3e1",
"size": "178585",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "259"
},
{
"name": "Mako",
"bytes": "976"
},
{
"name": "Python",
"bytes": "25078356"
},
{
"name": "Shell",
"bytes": "6456"
},
{
"name": "Smarty",
"bytes": "67595"
}
],
"symlink_target": ""
} |
from faint import *
#start
# A custom function that changes tool settings
def example_function():
    tool_line()  # select the line tool
    set_linewidth(10.0)  # line width in pixels
    set_fg(0,0,255)  # foreground color: RGB blue
# Connecting the function to a key
bind(example_function)
##[press key]
| {
"content_hash": "9c12202c7f9d5fcad22701b0c81902df",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 46,
"avg_line_length": 22,
"alnum_prop": 0.6735537190082644,
"repo_name": "lukas-ke/faint-graphics-editor",
"id": "c190fd99c399c9612f759664d62a505b6b2bc117",
"size": "242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "help/example_py/scripting_intro_bind.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "49581"
},
{
"name": "C++",
"bytes": "3170874"
},
{
"name": "Emacs Lisp",
"bytes": "13474"
},
{
"name": "HTML",
"bytes": "26096"
},
{
"name": "NSIS",
"bytes": "2088"
},
{
"name": "Python",
"bytes": "537915"
}
],
"symlink_target": ""
} |
import pytest
from pysparkling.context import H2OContext
from tests.integration.external_only.external_backend_test_utils import *
def testSSL(spark):
    """Enabling internal secure connections puts H2O nodes in encrypted mode.

    Verified by downloading the cluster logs and grepping for the
    encrypted-mode startup message.
    """
    conf = createH2OConf()
    conf.setInternalSecureConnectionsEnabled()
    context = H2OContext.getOrCreate(conf)
    path = context.downloadH2OLogs("build", "LOG")
    with open(path, 'r') as f:
        originalLines = f.readlines()
        lines = list(filter(lambda line: "H2O node running in encrypted mode using" in line, originalLines))
        assert len(lines) >= 1
    context.stop()
def testAuth(spark):
    """Hash-based login is passed through to H2O as the -hash_login flag.

    Writes a throwaway login file, starts the cluster with matching
    credentials, then checks the downloaded logs for the flag.
    """
    with open('build/login.conf', 'w') as f:
        f.write('user:pass')
    conf = createH2OConf()
    conf.setHashLoginEnabled()
    conf.setLoginConf("build/login.conf")
    conf.setUserName("user")
    conf.setPassword("pass")
    context = H2OContext.getOrCreate(conf)
    path = context.downloadH2OLogs("build", "LOG")
    with open(path, 'r') as f:
        originalLines = f.readlines()
        lines = list(filter(lambda line: "-hash_login" in line, originalLines))
        assert len(lines) >= 1
    context.stop()
def testAuthFailsWhenUsernamePasswordNotSpecified(spark):
    """Hash login without client credentials must fail to start a cluster.

    After the failed attempt no YARN application may be left behind; with
    credentials set, the same configuration starts successfully.
    """
    with open('build/login.conf', 'w') as f:
        f.write('user:pass')
    conf = createH2OConf()
    conf.setHashLoginEnabled()
    conf.setCloudName("test-cluster")
    conf.setClusterInfoFile("build/notify_file.txt")
    conf.setLoginConf("build/login.conf")
    with pytest.raises(Exception):
        H2OContext.getOrCreate(conf)
    # No app should be running
    assert noYarnApps()
    conf.setUserName("user")
    conf.setPassword("pass")
    context = H2OContext.getOrCreate(conf)
    context.stop()
def createPamLoginFile():
    """Write a JAAS config for PAM login (used by the PAM auth tests)."""
    with open('build/login.conf', 'w') as f:
        f.write('pamloginmodule {\n')
        f.write('  de.codedo.jaas.PamLoginModule required\n')
        f.write('  service = common-auth;\n')
        f.write('};\n')
def testPamAuthWithCorrectCredentials(spark):
    """PAM login with valid OS credentials starts and stops a cluster."""
    createPamLoginFile()
    conf = createH2OConf()
    conf.setPamLoginEnabled()
    conf.setCloudName("test-cluster")
    conf.setClusterInfoFile("build/notify_file.txt")
    conf.setLoginConf("build/login.conf")
    conf.setUserName("jenkins")
    conf.setPassword("jenkins")
    context = H2OContext.getOrCreate(conf)
    context.stop()
def testPamAuthWithWrongCredentials(spark):
    """PAM login with a bad password fails; the stale YARN app is cleaned up."""
    createPamLoginFile()
    conf = createH2OConf()
    conf.setPamLoginEnabled()
    conf.setCloudName("test-cluster")
    conf.setClusterInfoFile("build/notify_file.txt")
    conf.setLoginConf("build/login.conf")
    conf.setUserName("jenkins")
    conf.setPassword("wrong_password")
    with pytest.raises(Exception):
        H2OContext.getOrCreate(conf)
    # The failed launch leaves exactly one YARN app behind; kill it.
    assert specificNumberOfYarnApps(1)
    killAllYarnApps()
| {
"content_hash": "44f4313f0dfcf6009954d2118e01ad19",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 108,
"avg_line_length": 29.574468085106382,
"alnum_prop": 0.6827338129496403,
"repo_name": "h2oai/sparkling-water",
"id": "e154593ff5f5d1faddef58c12e1721b0b4633f14",
"size": "3099",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/tests/integration/external_only/test_security.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8719"
},
{
"name": "CSS",
"bytes": "4539"
},
{
"name": "Groovy",
"bytes": "122809"
},
{
"name": "HCL",
"bytes": "44156"
},
{
"name": "Java",
"bytes": "35009"
},
{
"name": "Python",
"bytes": "442716"
},
{
"name": "R",
"bytes": "63088"
},
{
"name": "Scala",
"bytes": "1720448"
},
{
"name": "Shell",
"bytes": "29726"
},
{
"name": "TeX",
"bytes": "132310"
}
],
"symlink_target": ""
} |
import sys
import os
import re
import MySQLdb
import cgi
import urllib
import cgitb; cgitb.enable();
sys.stderr = sys.stdout
import json
import operator
def main():
    """CGI endpoint: look up UniProt entries for an enzyme name and taxon id.

    Python 2 script. Queries the UniProt tab-format REST API and prints a
    JSON array of (accession, protein name) pairs sorted by protein name.
    """
    fs = cgi.FieldStorage()
    enzymeName=fs['enzymeName'].value
    taxId=fs['taxId'].value
    # %3a is an url-encoded colon: query "... AND taxonomy:<taxId>".
    query="http://www.uniprot.org/uniprot/?query="+str(enzymeName)+" AND taxonomy%3a"+taxId+"&format=tab&columns=id,entry name,reviewed,protein names,genes,organism,length"
    query=query.replace(" ","+")
    filehandle = urllib.urlopen(query)
    lineCounter=0
    diccionarioProteinas={}
    for line in filehandle.readlines():
        # Skip the tab-separated header row (lineCounter == 0).
        if lineCounter==0:
            pass
        else:
            arrayFields=line.split("\t")
            entry=arrayFields[0]
            proteinName=arrayFields[3]
            diccionarioProteinas[entry]=proteinName
        lineCounter+=1
    filehandle.close()
    # First check whether any entries were found; if not, return an empty
    # JSON array so the caller can detect the miss.
    if len(diccionarioProteinas)==0:
        emptyArray=[]
        #We return the json encoded emptyArray
        arrayJson=json.dumps(emptyArray)
        print "Content-Type: application/json\n"
        print arrayJson
        sys.exit()
    else:
        # Sort (entry, name) pairs by protein name before emitting JSON.
        proteinDictSorted = sorted(diccionarioProteinas.iteritems(), key=operator.itemgetter(1))
        arrayJson=json.dumps(proteinDictSorted)
        print "Content-Type: application/json\n"
        print arrayJson
        sys.exit()
main()
"content_hash": "fc2357907f4d1e962e21b5740294f399",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 172,
"avg_line_length": 29.020833333333332,
"alnum_prop": 0.6812634601579325,
"repo_name": "acanada/tebacten",
"id": "e3e0f32951e800885bd97f8075ba6e7c285d4a2a",
"size": "1435",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webOLD/scripts/returnUniprotIds.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "303067"
},
{
"name": "JavaScript",
"bytes": "943469"
},
{
"name": "PHP",
"bytes": "566072"
},
{
"name": "Python",
"bytes": "31261"
}
],
"symlink_target": ""
} |
# Prefer setuptools; fall back to the stdlib distutils when unavailable.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
# Package metadata for python-consul, passed to setup() below.
config = {
    'description': 'Consul',
    'author': 'Ben Belchak',
    'url': 'https://github.com/bbelchak/python-consul',
    'author_email': 'ben@belchak.com',
    'version': '0.1',
    'install_requires': ['nose', 'requests', 'simplejson'],
    'packages': ['consul'],
    'scripts': [],
    'name': 'python-consul',
}
setup(**config)
"content_hash": "c8f2a277c2756378bdcc1f3dd9d1416a",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 59,
"avg_line_length": 24.22222222222222,
"alnum_prop": 0.6123853211009175,
"repo_name": "bbelchak/python-consul",
"id": "94a9cf131e30b5d1d40fdf54beb9ddd09f83f07e",
"size": "436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2375"
}
],
"symlink_target": ""
} |
import pytest
from supriya.tools import servertools
@pytest.fixture(scope='function')
def server(request):
    """Boot a fresh scsynth server per test and quit it on teardown."""
    def server_teardown():
        # Closure reads the 'server' local assigned below, after boot.
        server.quit()
    server = servertools.Server().boot()
    request.addfinalizer(server_teardown)
    return server
def test_BufferGroup_allocate_01(server):
    """Allocate and free two buffer groups, checking server-side state.

    Walks the full lifecycle: unallocated -> allocated (contiguous buffer
    ids, frame/channel counts applied) -> freed (state reset).
    """
    # Phase 1: a new group is not allocated and its buffers are empty.
    buffer_group_one = servertools.BufferGroup(buffer_count=4)
    assert not buffer_group_one.is_allocated
    assert buffer_group_one.buffer_id is None
    assert buffer_group_one.server is None
    assert len(buffer_group_one) == 4
    for buffer_ in buffer_group_one:
        assert not buffer_.is_allocated
        assert buffer_.buffer_group is buffer_group_one
        assert buffer_.buffer_id is None
        assert buffer_.frame_count == 0
        assert buffer_.channel_count == 0
    # Phase 2: allocation assigns contiguous ids starting at 0.
    buffer_group_one.allocate(frame_count=512)
    server.sync()
    assert buffer_group_one.is_allocated
    assert buffer_group_one.buffer_id is 0
    assert buffer_group_one.server is server
    assert len(buffer_group_one) == 4
    for i, buffer_ in enumerate(buffer_group_one):
        assert buffer_.is_allocated
        assert buffer_.buffer_group is buffer_group_one
        assert buffer_.buffer_id == buffer_group_one.buffer_id + i
        assert buffer_.frame_count == 512
        assert buffer_.channel_count == 1
    # Phase 3: a second group stays unallocated until allocate() is called.
    buffer_group_two = servertools.BufferGroup(buffer_count=4)
    server.sync()
    assert not buffer_group_two.is_allocated
    assert buffer_group_two.buffer_id is None
    assert buffer_group_two.server is None
    assert len(buffer_group_two) == 4
    for buffer_ in buffer_group_two:
        assert not buffer_.is_allocated
        assert buffer_.buffer_group is buffer_group_two
        assert buffer_.buffer_id is None
        assert buffer_.frame_count == 0
        assert buffer_.channel_count == 0
    # Phase 4: second allocation continues after the first group's ids.
    buffer_group_two.allocate(frame_count=1024, channel_count=2)
    server.sync()
    assert buffer_group_two.is_allocated
    assert buffer_group_two.buffer_id is 4
    assert buffer_group_two.server is server
    assert len(buffer_group_two) == 4
    for i, buffer_ in enumerate(buffer_group_two):
        assert buffer_.is_allocated
        assert buffer_.buffer_group is buffer_group_two
        assert buffer_.buffer_id is buffer_group_two.buffer_id + i
        assert buffer_.frame_count == 1024
        assert buffer_.channel_count == 2
    # Phase 5: freeing either group resets it to the unallocated state.
    buffer_group_one.free()
    server.sync()
    assert not buffer_group_one.is_allocated
    assert buffer_group_one.buffer_id is None
    assert buffer_group_one.server is None
    assert len(buffer_group_one) == 4
    for buffer_ in buffer_group_one:
        assert not buffer_.is_allocated
        assert buffer_.buffer_group is buffer_group_one
        assert buffer_.buffer_id is None
        assert buffer_.frame_count == 0
        assert buffer_.channel_count == 0
    buffer_group_two.free()
    server.sync()
    assert not buffer_group_two.is_allocated
    assert buffer_group_two.buffer_id is None
    assert buffer_group_two.server is None
    assert len(buffer_group_two) == 4
    for buffer_ in buffer_group_two:
        assert not buffer_.is_allocated
        assert buffer_.buffer_group is buffer_group_two
        assert buffer_.buffer_id is None
        assert buffer_.frame_count == 0
        assert buffer_.channel_count == 0
| {
"content_hash": "8528bb5d74158634aded406c5c39c42b",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 66,
"avg_line_length": 34.371134020618555,
"alnum_prop": 0.6781643671265747,
"repo_name": "andrewyoung1991/supriya",
"id": "c27319b1b4307c02e2178a081f607308a5a40703",
"size": "3360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "supriya/tools/servertools/test/test_BufferGroup_allocate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6712"
},
{
"name": "CSS",
"bytes": "446"
},
{
"name": "HTML",
"bytes": "1083"
},
{
"name": "JavaScript",
"bytes": "6163"
},
{
"name": "Makefile",
"bytes": "6775"
},
{
"name": "Python",
"bytes": "2693776"
}
],
"symlink_target": ""
} |
from zeit.cms.i18n import MessageFactory as _
import StringIO
import grokcore.component as grok
import hashlib
import lxml.objectify
import persistent
import urlparse
import z3c.traverser.interfaces
import zeit.cms.content.dav
import zeit.cms.content.interfaces
import zeit.cms.interfaces
import zeit.cms.repository.repository
import zeit.cms.type
import zeit.content.image.interfaces
import zeit.content.image.variant
import zope.app.container.contained
import zope.interface
import zope.location.interfaces
import zope.security.proxy
import sys
class ImageGroupBase(object):
    """Shared behaviour of repository and workingcopy image groups.

    Stores the master image name and the variant configuration as DAV
    properties, and renders variant images from URL keys such as
    ``zon-large``, ``zon-large__200x200`` or (with spoof protection)
    ``zon-large__200x200__<signature>``.
    """

    zope.interface.implements(zeit.content.image.interfaces.IImageGroup)

    zeit.cms.content.dav.mapProperties(
        zeit.content.image.interfaces.IImageGroup,
        zeit.content.image.interfaces.IMAGE_NAMESPACE,
        ('master_image',))

    # Stored WRITEABLE_LIVE so variant settings can be changed without a
    # checkout cycle.
    _variants = zeit.cms.content.dav.DAVProperty(
        zeit.content.image.interfaces.IImageGroup['variants'],
        zeit.content.image.interfaces.IMAGE_NAMESPACE,
        'variants',
        writeable=zeit.cms.content.interfaces.WRITEABLE_LIVE)

    @property
    def variants(self):
        # Normalize "never set" (None) to an empty mapping for callers.
        if self._variants is None:
            return {}
        return self._variants

    @variants.setter
    def variants(self, value):
        self._variants = value

    @property
    def _variant_secret(self):
        """Secret for Spoof Protection (``variant-secret`` product config)."""
        config = zope.app.appsetup.product.getProductConfiguration(
            'zeit.content.image')
        return config.get('variant-secret')

    def create_variant_image(self, key, source=None):
        """Retrieve Variant and create an image according to options in URL.

        See ImageGroup.__getitem__ for allowed URLs.

        Raises KeyError for wrongly signed keys, unknown variant names, or
        when no master image can be resolved.
        """
        key = self._verify_signature(key)
        size = self.get_variant_size(key)
        variant = self.get_variant_by_key(key)
        if source is None:
            source = zeit.content.image.interfaces.IMasterImage(self, None)
            if source is None:
                raise KeyError(key)
        repository = zeit.content.image.interfaces.IRepositoryImageGroup(self)
        # Prefer a materialized (on-disk) image over computing one.
        if variant.name in repository:
            return repository[variant.name]
        # BBB Legacy ImageGroups still should return their materialized
        # variants (for CP editor).
        if variant.legacy_name:
            for name in repository:
                if variant.legacy_name in name:
                    return repository[name]
        image = zeit.content.image.interfaces.ITransform(
            source).create_variant_image(variant, size=size)
        image.__name__ = key
        image.__parent__ = self
        return image

    def get_variant_size(self, key):
        """Keys look like `square__20x20`. Retrieve size as [20, 20] or None"""
        try:
            return [int(x) for x in key.split('__')[1].split('x')]
        except (IndexError, ValueError):
            # No (or malformed) size part: treat as "no explicit size".
            return None

    def get_variant_by_key(self, key):
        """Retrieve Variant by using as much information as given in key."""
        if '__' in key:
            variant = self.get_variant_by_size(key)
        else:
            variant = self.get_variant_by_name(key)
        if variant is None:
            raise KeyError(key)
        return variant

    def get_variant_by_name(self, name):
        """Select the biggest Variant among those with the given name.

        The biggest Variant is the one that has no max-limit given or the
        biggest max-limit, if all Variants have a max-limit set.
        """
        # sys.maxint satisfies any max-limit, i.e. selects the biggest.
        variant = self.get_variant_by_size('{name}__{max}x{max}'.format(
            name=name, max=sys.maxint))
        if variant is not None:
            return variant
        # BBB New ImageGroups must respond to the legacy names (for XSLT).
        for mapping in zeit.content.image.variant.LEGACY_VARIANT_SOURCE(self):
            if mapping['old'] in name:
                variant = self.get_variant_by_name(mapping['new'])
                variant.legacy_name = mapping['old']
                return variant
        return None

    def get_variant_by_size(self, key):
        """Select the Variant that has a matching name and matching size.

        The size does not need to be an exact fit. This method will try to find
        a Variant whose max-size is as small as possible, but bigger or equal
        than the size given in the key.
        """
        name = key.split('__')[0]
        candidates = self.get_all_variants_with_name(name)
        size = self.get_variant_size(key)
        if not size:
            return None
        # candidates are sorted ascending, so the first fit is the smallest.
        for variant in candidates:
            if size[0] <= variant.max_width and size[1] <= variant.max_height:
                return variant
        return None

    def get_all_variants_with_name(self, name):
        """Return all Variants with a matching name, ordered by size."""
        variants = zeit.content.image.interfaces.IVariants(self)
        result = [v for v in variants.values() if name == v.name]
        result.sort(key=lambda x: (x.max_width, x.max_height))
        return result

    def variant_url(self, name, width=None, height=None, thumbnail=False):
        """Helper method to create URLs to Variant images.

        Appends a signature when a ``variant-secret`` is configured, so the
        server can reject spoofed size requests.
        """
        path = urlparse.urlparse(self.uniqueId).path
        if path.endswith('/'):
            path = path[:-1]
        if thumbnail:
            name = '%s/%s' % (Thumbnails.NAME, name)
        if width is None or height is None:
            url = '{path}/{name}'.format(path=path, name=name)
        else:
            url = '{path}/{name}__{width}x{height}'.format(
                path=path, name=name, width=width, height=height)
        if self._variant_secret:
            url += '__{signature}'.format(signature=compute_signature(
                name, width, height, self._variant_secret))
        return url

    def _verify_signature(self, key):
        """Verification for Spoof Protection.

        Returns ``key`` with the signature part stripped; raises KeyError for
        malformed or wrongly signed keys. Keys pass through unchanged when no
        secret is configured.
        """
        if not self._variant_secret:
            return key
        try:
            parts = key.split('__')
            if len(parts) == 2:
                name, signature = parts
                width = height = None
                stripped = name
            elif len(parts) == 3:
                name, size, signature = parts
                width, height = size.split('x')
                stripped = '{name}__{size}'.format(name=name, size=size)
            if verify_signature(
                    name, width, height, self._variant_secret, signature):
                return stripped
        except:
            # NOTE(review): deliberately broad -- any parse error (including
            # unbound locals when parts has an unexpected length) must end up
            # as KeyError below. A bare except also swallows KeyboardInterrupt.
            pass
        raise KeyError(key)
def compute_signature(name, width, height, secret):
    """Return the SHA1 hex digest over ``name:width:height:secret``.

    ``width``/``height`` may be None; all parts are stringified before
    hashing. The payload is encoded explicitly because hashlib requires
    bytes under Python 3; for the ASCII names/sizes/secrets used here the
    digest is identical under Python 2.
    """
    payload = ':'.join([str(x) for x in [name, width, height, secret]])
    return hashlib.sha1(payload.encode('utf-8')).hexdigest()
def verify_signature(name, width, height, secret, signature):
    """Return True if ``signature`` matches the computed one for the data."""
    expected = compute_signature(name, width, height, secret)
    return expected == signature
class ImageGroup(ImageGroupBase,
                 zeit.cms.repository.repository.Container):
    """Image group stored in the repository.

    Resolves both materialized child images and virtual (computed) variants.
    """

    zope.interface.implements(
        zeit.content.image.interfaces.IRepositoryImageGroup)

    def __getitem__(self, key):
        """Return the child image addressed by ``key``.

        URLs that may render images:

        Image present on disk:
        * /imagegroup/imagegroup-540x304.jpg
        * /imagegroup/zon-large
        * /imagegroup/zon-large__200x200

        Virtual image:
        * /imagegroup/zon-large
        * /imagegroup/zon-large__200x200

        JSON API:
        * /imagegroup/variants/zon-large

        BBB compatibility:
        * Asking a new image group (without on-disk variants) for an old name
          (e.g. imagegroup-540x304.jpg, XSLT does this): maps old to new name
          via legacy-variant-source settings.
        * Asking an old image group for a new name: uses default focus point
          to generate the new variant.
          XXX Should we map to old on-disk variants instead?
        * Asking an old image group for an old name with the new syntax
          (CP editor does this): returns on-disk image.
        """
        try:
            result = super(ImageGroup, self).__getitem__(key)
        except KeyError:
            # Not materialized on disk: compute the variant on the fly.
            result = self.create_variant_image(key)
        if key == self.master_image:
            zope.interface.alsoProvides(
                result, zeit.content.image.interfaces.IMasterImage)
        return result
class ImageGroupType(zeit.cms.type.TypeDeclaration):
    """CMS type declaration registering image groups (folder-like objects)."""

    interface = zeit.content.image.interfaces.IImageGroup
    interface_type = zeit.content.image.interfaces.IImageType
    type = 'image-group'
    title = _('Image Group')
    addform = 'zeit.content.image.imagegroup.Add'

    def content(self, resource):
        group = ImageGroup()
        group.uniqueId = resource.id
        return group

    def resource_body(self, content):
        # Image groups are DAV collections, so their resource body is empty.
        return StringIO.StringIO()

    def resource_content_type(self, content):
        return 'httpd/unix-directory'
class LocalImageGroup(ImageGroupBase,
                      persistent.Persistent,
                      zope.app.container.contained.Contained):
    """Workingcopy pendant of an image group.

    Child lookups go through the repository group for materialized images;
    unmaterialized variant keys are rendered from the local configuration.
    """

    zope.interface.implements(zeit.content.image.interfaces.ILocalImageGroup)

    def __getitem__(self, key):
        repository = zeit.content.image.interfaces.IRepositoryImageGroup(self)
        if key not in repository:
            # Not materialized: compute from our own (local) variant settings.
            return self.create_variant_image(key)
        return repository[key]

    # XXX Inheriting from UserDict.DictMixin would be much more sensible,
    # but that breaks browser/copyright.txt for reasons unknown. :-(

    def get(self, key, default=None):
        try:
            result = self[key]
        except KeyError:
            result = default
        return result

    def __contains__(self, key):
        return self.get(key) is not None

    def __setitem__(self, key, value):
        target = zeit.content.image.interfaces.IRepositoryImageGroup(self)
        target[key] = value
@grok.adapter(zeit.content.image.interfaces.IImageGroup)
@grok.implementer(zeit.content.image.interfaces.ILocalImageGroup)
def local_image_group_factory(context):
    """Create the workingcopy counterpart for a repository image group."""
    local = LocalImageGroup()
    local.uniqueId = context.uniqueId
    local.__name__ = context.__name__
    # Copy all DAV properties over; unwrap the security proxy first so the
    # wholesale read is permitted.
    unwrapped = zope.security.proxy.removeSecurityProxy(context)
    zeit.connector.interfaces.IWebDAVWriteProperties(local).update(
        zeit.connector.interfaces.IWebDAVReadProperties(unwrapped))
    return local
@grok.adapter(zeit.content.image.interfaces.ILocalImageGroup)
@grok.implementer(zeit.content.image.interfaces.IRepositoryImageGroup)
def find_repository_group(context):
    """Resolve the repository image group a workingcopy group refers to."""
    return zeit.cms.interfaces.ICMSContent(context.uniqueId)
class LocalSublocations(grok.Adapter):
    """Workingcopy image groups expose no sublocations (children are
    virtual), so containment event dispatch has nothing to recurse into."""

    grok.context(zeit.content.image.interfaces.ILocalImageGroup)
    grok.implements(zope.location.interfaces.ISublocations)

    def sublocations(self):
        return []
@grok.adapter(zeit.content.image.interfaces.IImageGroup, name='image')
@grok.implementer(zeit.cms.content.interfaces.IXMLReference)
def XMLReference(context):
    """Serialize an image group as an ``<image>`` reference node."""
    node = lxml.objectify.E.image()
    node.set('base-id', context.uniqueId)
    image_type = ''
    for child_name in context:
        if '.' not in child_name:
            continue
        base, extension = child_name.rsplit('.', 1)
        if base.endswith('x140'):
            # The x140 variant is the deciding one for the reference type.
            image_type = extension
            break
        if not image_type:
            # Otherwise remember the first extension seen.
            image_type = extension
    node.set('type', image_type)
    # The image reference can be seen like an element in a feed. Let the
    # magic update the xml node.
    updater = zeit.cms.content.interfaces.IXMLReferenceUpdater(context)
    updater.update(node)
    return node
@grok.adapter(zeit.content.image.interfaces.IImageGroup)
@grok.implementer(zeit.content.image.interfaces.IMasterImage)
def find_master_image(context):
    """Return the group's configured master image.

    Implicitly returns None when no master image is configured or the
    configured name is not present in the group.
    """
    if context.master_image:
        return context.get(context.master_image)
class ThumbnailTraverser(object):
    """Publishes the ``thumbnails`` namespace below image groups."""

    zope.interface.implements(z3c.traverser.interfaces.IPluggableTraverser)

    def __init__(self, context, request):
        self.context = context
        self.request = request

    def publishTraverse(self, request, name):
        if name == Thumbnails.NAME:
            return zeit.content.image.interfaces.IThumbnails(self.context)
        raise zope.publisher.interfaces.NotFound(
            self.context, name, request)
class Thumbnails(grok.Adapter):
    """Delivers preview variants below ``<group>/thumbnails/``.

    Variants are rendered from a cached, pre-shrunk copy of the master image
    (``thumbnail-source-*``) so previews don't resize the full-size master on
    every request.
    """

    grok.context(zeit.content.image.interfaces.IRepositoryImageGroup)
    grok.implements(zeit.content.image.interfaces.IThumbnails)

    NAME = 'thumbnails'
    # Name prefix of the materialized, downscaled source image.
    SOURCE_IMAGE_PREFIX = 'thumbnail-source'
    THUMBNAIL_WIDTH = 1000

    def __getitem__(self, key):
        # Without a master image there is nothing to render thumbnails from.
        if self.master_image is None:
            raise KeyError(key)
        return self.context.create_variant_image(
            key, source=self.source_image)

    @property
    def source_image_name(self):
        return '%s-%s' % (self.SOURCE_IMAGE_PREFIX, self.master_image.__name__)

    @property
    def source_image(self):
        """The image thumbnails are computed from.

        Prefers the materialized downscaled copy; falls back to the master
        image when it is small enough already or cannot be written (locks).
        """
        if self.source_image_name in self.context:
            return self.context[self.source_image_name]
        if self.master_image.getImageSize()[0] <= self.THUMBNAIL_WIDTH:
            return self.master_image
        lockable = zope.app.locking.interfaces.ILockable(self.context, None)
        # XXX 1. mod_dav does not allow LOCK of a member in a locked collection
        # even though the WebDAV spec reads as if that should be possible.
        # 2. zeit.connector has some kind of bug where it loses the property
        # cache of the collection upon that error, so it thinks the collection
        # is empty from then on out (only refresh-cache helps).
        if lockable is not None and not lockable.locked():
            return self._create_source_image()
        else:
            return self.master_image

    def _create_source_image(self):
        # Materialize the downscaled master inside the group (write-through,
        # then re-read so callers get the stored object).
        image = zeit.content.image.interfaces.ITransform(
            self.master_image).resize(width=self.THUMBNAIL_WIDTH)
        self.context[self.source_image_name] = image
        return self.context[self.source_image_name]

    @property
    def master_image(self):
        # May be None when the group has no resolvable master image.
        return zeit.content.image.interfaces.IMasterImage(self.context, None)
@grok.subscribe(
    zeit.content.image.interfaces.IImage,
    zope.lifecycleevent.IObjectAddedEvent)
def create_thumbnail_source_on_add(context, event):
    """Eagerly materialize the thumbnail source image when the master image
    of a repository image group is added."""
    group = context.__parent__
    if not zeit.content.image.interfaces.IRepositoryImageGroup.providedBy(
            group):
        return
    if group.master_image != context.__name__:
        return
    thumbnails = zeit.content.image.interfaces.IThumbnails(group)
    if thumbnails.master_image:
        # Accessing the property creates the source image as a side effect.
        thumbnails.source_image
| {
"content_hash": "60174fcc27c6bb94b2e520ec6457b524",
"timestamp": "",
"source": "github",
"line_count": 422,
"max_line_length": 79,
"avg_line_length": 34.57582938388626,
"alnum_prop": 0.641971078061819,
"repo_name": "cutoffthetop/zeit.content.image",
"id": "aa0193dbfc165d93aa0baadb451947fe10ce174c",
"size": "14591",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/zeit/content/image/imagegroup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2649"
},
{
"name": "JavaScript",
"bytes": "25912"
},
{
"name": "Python",
"bytes": "124609"
}
],
"symlink_target": ""
} |
from copy import copy, deepcopy
from warnings import catch_warnings, simplefilter
import pytest
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_scalar
from pandas import (Series, DataFrame, Panel,
date_range, MultiIndex)
import pandas.io.formats.printing as printing
from pandas.compat import range, zip, PY3
from pandas.util.testing import (assert_raises_regex,
assert_series_equal,
assert_panel_equal,
assert_frame_equal)
import pandas.util.testing as tm
# ----------------------------------------------------------------------
# Generic types test cases
class Generic(object):
    """Shared NDFrame behaviour tests.

    Mixin: concrete subclasses (defined elsewhere) provide ``_typ`` (the
    pandas class under test) and ``_comparator`` (the equality assertion).

    Fix applied: the length checks in ``test_sample`` previously read
    ``assert len(o.sample(n=4) == 4)`` -- ``len`` of a boolean comparison
    object, which is truthy for any non-empty result, so the asserts were
    vacuous. The parentheses now enclose only the ``sample`` call.
    """

    @property
    def _ndim(self):
        return self._typ._AXIS_LEN

    def _axes(self):
        """ return the axes for my object typ """
        return self._typ._AXIS_ORDERS

    def _construct(self, shape, value=None, dtype=None, **kwargs):
        """ construct an object for the given shape
            if value is specified use that if its a scalar
            if value is an array, repeat it as needed """

        if isinstance(shape, int):
            shape = tuple([shape] * self._ndim)
        if value is not None:
            if is_scalar(value):
                if value == 'empty':
                    arr = None

                    # remove the info axis
                    kwargs.pop(self._typ._info_axis_name, None)
                else:
                    arr = np.empty(shape, dtype=dtype)
                    arr.fill(value)
            else:
                fshape = np.prod(shape)
                arr = value.ravel()
                new_shape = fshape / arr.shape[0]
                if fshape % arr.shape[0] != 0:
                    raise Exception("invalid value passed in _construct")

                arr = np.repeat(arr, new_shape).reshape(shape)
        else:
            arr = np.random.randn(*shape)
        return self._typ(arr, dtype=dtype, **kwargs)

    def _compare(self, result, expected):
        self._comparator(result, expected)

    def test_rename(self):

        # single axis
        idx = list('ABCD')
        # relabeling values passed into self.rename
        args = [
            str.lower,
            {x: x.lower() for x in idx},
            Series({x: x.lower() for x in idx}),
        ]

        for axis in self._axes():
            kwargs = {axis: idx}
            obj = self._construct(4, **kwargs)

            for arg in args:
                # rename a single axis
                result = obj.rename(**{axis: arg})
                expected = obj.copy()
                setattr(expected, axis, list('abcd'))
                self._compare(result, expected)

        # multiple axes at once

    def test_get_numeric_data(self):

        n = 4
        kwargs = {}
        for i in range(self._ndim):
            kwargs[self._typ._AXIS_NAMES[i]] = list(range(n))

        # get the numeric data
        o = self._construct(n, **kwargs)
        result = o._get_numeric_data()
        self._compare(result, o)

        # non-inclusion
        result = o._get_bool_data()
        expected = self._construct(n, value='empty', **kwargs)
        self._compare(result, expected)

        # get the bool data
        arr = np.array([True, True, False, True])
        o = self._construct(n, value=arr, **kwargs)
        result = o._get_numeric_data()
        self._compare(result, o)

        # _get_numeric_data is includes _get_bool_data, so can't test for
        # non-inclusion

    def test_get_default(self):

        # GH 7725
        d0 = "a", "b", "c", "d"
        d1 = np.arange(4, dtype='int64')
        others = "e", 10

        for data, index in ((d0, d1), (d1, d0)):
            s = Series(data, index=index)
            for i, d in zip(index, data):
                assert s.get(i) == d
                assert s.get(i, d) == d
                assert s.get(i, "z") == d

                for other in others:
                    assert s.get(other, "z") == "z"
                    assert s.get(other, other) == other

    def test_nonzero(self):

        # GH 4633
        # look at the boolean/nonzero behavior for objects
        obj = self._construct(shape=4)
        pytest.raises(ValueError, lambda: bool(obj == 0))
        pytest.raises(ValueError, lambda: bool(obj == 1))
        pytest.raises(ValueError, lambda: bool(obj))

        obj = self._construct(shape=4, value=1)
        pytest.raises(ValueError, lambda: bool(obj == 0))
        pytest.raises(ValueError, lambda: bool(obj == 1))
        pytest.raises(ValueError, lambda: bool(obj))

        obj = self._construct(shape=4, value=np.nan)
        pytest.raises(ValueError, lambda: bool(obj == 0))
        pytest.raises(ValueError, lambda: bool(obj == 1))
        pytest.raises(ValueError, lambda: bool(obj))

        # empty
        obj = self._construct(shape=0)
        pytest.raises(ValueError, lambda: bool(obj))

        # invalid behaviors
        obj1 = self._construct(shape=4, value=1)
        obj2 = self._construct(shape=4, value=1)

        def f():
            if obj1:
                printing.pprint_thing("this works and shouldn't")

        pytest.raises(ValueError, f)
        pytest.raises(ValueError, lambda: obj1 and obj2)
        pytest.raises(ValueError, lambda: obj1 or obj2)
        pytest.raises(ValueError, lambda: not obj1)

    def test_downcast(self):
        # test close downcasting

        o = self._construct(shape=4, value=9, dtype=np.int64)
        result = o.copy()
        result._data = o._data.downcast(dtypes='infer')
        self._compare(result, o)

        o = self._construct(shape=4, value=9.)
        expected = o.astype(np.int64)
        result = o.copy()
        result._data = o._data.downcast(dtypes='infer')
        self._compare(result, expected)

        o = self._construct(shape=4, value=9.5)
        result = o.copy()
        result._data = o._data.downcast(dtypes='infer')
        self._compare(result, o)

        # are close
        o = self._construct(shape=4, value=9.000000000005)
        result = o.copy()
        result._data = o._data.downcast(dtypes='infer')
        expected = o.astype(np.int64)
        self._compare(result, expected)

    def test_constructor_compound_dtypes(self):
        # see gh-5191
        # Compound dtypes should raise NotImplementedError.

        def f(dtype):
            return self._construct(shape=3, value=1, dtype=dtype)

        pytest.raises(NotImplementedError, f, [("A", "datetime64[h]"),
                                               ("B", "str"),
                                               ("C", "int32")])

        # these work (though results may be unexpected)
        f('int64')
        f('float64')
        f('M8[ns]')

    def check_metadata(self, x, y=None):
        # With y=None this asserts that metadata was NOT propagated.
        for m in x._metadata:
            v = getattr(x, m, None)
            if y is None:
                assert v is None
            else:
                assert v == getattr(y, m, None)

    def test_metadata_propagation(self):
        # check that the metadata matches up on the resulting ops

        o = self._construct(shape=3)
        o.name = 'foo'
        o2 = self._construct(shape=3)
        o2.name = 'bar'

        # TODO
        # Once panel can do non-trivial combine operations
        # (currently there is an a raise in the Panel arith_ops to prevent
        # this, though it actually does work)
        # can remove all of these try: except: blocks on the actual operations

        # ----------
        # preserving
        # ----------

        # simple ops with scalars
        for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
            result = getattr(o, op)(1)
            self.check_metadata(o, result)

        # ops with like
        for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
            try:
                result = getattr(o, op)(o)
                self.check_metadata(o, result)
            except (ValueError, AttributeError):
                pass

        # simple boolean
        for op in ['__eq__', '__le__', '__ge__']:
            v1 = getattr(o, op)(o)
            self.check_metadata(o, v1)

            try:
                self.check_metadata(o, v1 & v1)
            except (ValueError):
                pass

            try:
                self.check_metadata(o, v1 | v1)
            except (ValueError):
                pass

        # combine_first
        try:
            result = o.combine_first(o2)
            self.check_metadata(o, result)
        except (AttributeError):
            pass

        # ---------------------------
        # non-preserving (by default)
        # ---------------------------

        # add non-like
        try:
            result = o + o2
            self.check_metadata(result)
        except (ValueError, AttributeError):
            pass

        # simple boolean
        for op in ['__eq__', '__le__', '__ge__']:

            # this is a name matching op
            v1 = getattr(o, op)(o)

            v2 = getattr(o, op)(o2)
            self.check_metadata(v2)

            try:
                self.check_metadata(v1 & v2)
            except (ValueError):
                pass

            try:
                self.check_metadata(v1 | v2)
            except (ValueError):
                pass

    def test_head_tail(self):
        # GH5370

        o = self._construct(shape=10)

        # check all index types
        for index in [tm.makeFloatIndex, tm.makeIntIndex, tm.makeStringIndex,
                      tm.makeUnicodeIndex, tm.makeDateIndex,
                      tm.makePeriodIndex]:
            axis = o._get_axis_name(0)
            setattr(o, axis, index(len(getattr(o, axis))))

            # Panel + dims
            try:
                o.head()
            except (NotImplementedError):
                pytest.skip('not implemented on {0}'.format(
                    o.__class__.__name__))

            self._compare(o.head(), o.iloc[:5])
            self._compare(o.tail(), o.iloc[-5:])

            # 0-len
            self._compare(o.head(0), o.iloc[0:0])
            self._compare(o.tail(0), o.iloc[0:0])

            # bounded
            self._compare(o.head(len(o) + 1), o)
            self._compare(o.tail(len(o) + 1), o)

            # neg index
            self._compare(o.head(-3), o.head(7))
            self._compare(o.tail(-3), o.tail(7))

    def test_sample(self):
        # Fixes issue: 2419

        o = self._construct(shape=10)

        ###
        # Check behavior of random_state argument
        ###

        # Check for stability when receives seed or random state -- run 10
        # times.
        for test in range(10):
            seed = np.random.randint(0, 100)
            self._compare(
                o.sample(n=4, random_state=seed), o.sample(n=4,
                                                           random_state=seed))
            self._compare(
                o.sample(frac=0.7, random_state=seed), o.sample(
                    frac=0.7, random_state=seed))

            self._compare(
                o.sample(n=4, random_state=np.random.RandomState(test)),
                o.sample(n=4, random_state=np.random.RandomState(test)))

            self._compare(
                o.sample(frac=0.7, random_state=np.random.RandomState(test)),
                o.sample(frac=0.7, random_state=np.random.RandomState(test)))

            os1, os2 = [], []
            for _ in range(2):
                np.random.seed(test)
                os1.append(o.sample(n=4))
                os2.append(o.sample(frac=0.7))
            self._compare(*os1)
            self._compare(*os2)

        # Check for error when random_state argument invalid.
        with pytest.raises(ValueError):
            o.sample(random_state='astring!')

        ###
        # Check behavior of `frac` and `N`
        ###

        # Giving both frac and N throws error
        with pytest.raises(ValueError):
            o.sample(n=3, frac=0.3)

        # Check that raises right error for negative lengths
        with pytest.raises(ValueError):
            o.sample(n=-3)
        with pytest.raises(ValueError):
            o.sample(frac=-0.3)

        # Make sure float values of `n` give error
        with pytest.raises(ValueError):
            o.sample(n=3.2)

        # Check lengths are right (len must wrap the sample call, not the
        # comparison -- `len(sample == 4)` was always truthy).
        assert len(o.sample(n=4)) == 4
        assert len(o.sample(frac=0.34)) == 3
        assert len(o.sample(frac=0.36)) == 4

        ###
        # Check weights
        ###

        # Weight length must be right
        with pytest.raises(ValueError):
            o.sample(n=3, weights=[0, 1])

        with pytest.raises(ValueError):
            bad_weights = [0.5] * 11
            o.sample(n=3, weights=bad_weights)

        with pytest.raises(ValueError):
            bad_weight_series = Series([0, 0, 0.2])
            o.sample(n=4, weights=bad_weight_series)

        # Check won't accept negative weights
        with pytest.raises(ValueError):
            bad_weights = [-0.1] * 10
            o.sample(n=3, weights=bad_weights)

        # Check inf and -inf throw errors:
        with pytest.raises(ValueError):
            weights_with_inf = [0.1] * 10
            weights_with_inf[0] = np.inf
            o.sample(n=3, weights=weights_with_inf)

        with pytest.raises(ValueError):
            weights_with_ninf = [0.1] * 10
            weights_with_ninf[0] = -np.inf
            o.sample(n=3, weights=weights_with_ninf)

        # All zeros raises errors
        zero_weights = [0] * 10
        with pytest.raises(ValueError):
            o.sample(n=3, weights=zero_weights)

        # All missing weights
        nan_weights = [np.nan] * 10
        with pytest.raises(ValueError):
            o.sample(n=3, weights=nan_weights)

        # Check np.nan are replaced by zeros.
        weights_with_nan = [np.nan] * 10
        weights_with_nan[5] = 0.5
        self._compare(
            o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6])

        # Check None are also replaced by zeros.
        weights_with_None = [None] * 10
        weights_with_None[5] = 0.5
        self._compare(
            o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6])

    def test_size_compat(self):
        # GH8846
        # size property should be defined

        o = self._construct(shape=10)
        assert o.size == np.prod(o.shape)
        assert o.size == 10 ** len(o.axes)

    def test_split_compat(self):
        # xref GH8846
        o = self._construct(shape=10)
        assert len(np.array_split(o, 5)) == 5
        assert len(np.array_split(o, 2)) == 2

    def test_unexpected_keyword(self):  # GH8597
        # misspelled/unknown keywords must raise TypeError, not be ignored
        df = DataFrame(np.random.randn(5, 2), columns=['jim', 'joe'])
        ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])
        ts = df['joe'].copy()
        ts[2] = np.nan

        with assert_raises_regex(TypeError, 'unexpected keyword'):
            df.drop('joe', axis=1, in_place=True)

        with assert_raises_regex(TypeError, 'unexpected keyword'):
            df.reindex([1, 0], inplace=True)

        with assert_raises_regex(TypeError, 'unexpected keyword'):
            ca.fillna(0, inplace=True)

        with assert_raises_regex(TypeError, 'unexpected keyword'):
            ts.fillna(0, in_place=True)

    # See gh-12301
    def test_stat_unexpected_keyword(self):
        obj = self._construct(5)
        starwars = 'Star Wars'
        errmsg = 'unexpected keyword'

        with assert_raises_regex(TypeError, errmsg):
            obj.max(epic=starwars)  # stat_function
        with assert_raises_regex(TypeError, errmsg):
            obj.var(epic=starwars)  # stat_function_ddof
        with assert_raises_regex(TypeError, errmsg):
            obj.sum(epic=starwars)  # cum_function
        with assert_raises_regex(TypeError, errmsg):
            obj.any(epic=starwars)  # logical_function

    def test_api_compat(self):

        # GH 12021
        # compat for __name__, __qualname__

        obj = self._construct(5)
        for func in ['sum', 'cumsum', 'any', 'var']:
            f = getattr(obj, func)
            assert f.__name__ == func
            if PY3:
                assert f.__qualname__.endswith(func)

    def test_stat_non_defaults_args(self):
        obj = self._construct(5)
        out = np.array([0])
        errmsg = "the 'out' parameter is not supported"

        with assert_raises_regex(ValueError, errmsg):
            obj.max(out=out)  # stat_function
        with assert_raises_regex(ValueError, errmsg):
            obj.var(out=out)  # stat_function_ddof
        with assert_raises_regex(ValueError, errmsg):
            obj.sum(out=out)  # cum_function
        with assert_raises_regex(ValueError, errmsg):
            obj.any(out=out)  # logical_function

    def test_truncate_out_of_bounds(self):
        # GH11382

        # small
        shape = [int(2e3)] + ([1] * (self._ndim - 1))
        small = self._construct(shape, dtype='int8', value=1)
        self._compare(small.truncate(), small)
        self._compare(small.truncate(before=0, after=3e3), small)
        self._compare(small.truncate(before=-1, after=2e3), small)

        # big
        shape = [int(2e6)] + ([1] * (self._ndim - 1))
        big = self._construct(shape, dtype='int8', value=1)
        self._compare(big.truncate(), big)
        self._compare(big.truncate(before=0, after=3e6), big)
        self._compare(big.truncate(before=-1, after=2e6), big)

    def test_validate_bool_args(self):
        df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
        invalid_values = [1, "True", [1, 2, 3], 5.0]

        for value in invalid_values:
            with pytest.raises(ValueError):
                super(DataFrame, df).rename_axis(mapper={'a': 'x', 'b': 'y'},
                                                 axis=1, inplace=value)

            with pytest.raises(ValueError):
                super(DataFrame, df).drop('a', axis=1, inplace=value)

            with pytest.raises(ValueError):
                super(DataFrame, df).sort_index(inplace=value)

            with pytest.raises(ValueError):
                super(DataFrame, df)._consolidate(inplace=value)

            with pytest.raises(ValueError):
                super(DataFrame, df).fillna(value=0, inplace=value)

            with pytest.raises(ValueError):
                super(DataFrame, df).replace(to_replace=1, value=7,
                                             inplace=value)

            with pytest.raises(ValueError):
                super(DataFrame, df).interpolate(inplace=value)

            with pytest.raises(ValueError):
                super(DataFrame, df)._where(cond=df.a > 2, inplace=value)

            with pytest.raises(ValueError):
                super(DataFrame, df).mask(cond=df.a > 2, inplace=value)

    def test_copy_and_deepcopy(self):
        # GH 15444
        for shape in [0, 1, 2]:
            obj = self._construct(shape)
            for func in [copy,
                         deepcopy,
                         lambda x: x.copy(deep=False),
                         lambda x: x.copy(deep=True)]:
                obj_copy = func(obj)
                assert obj_copy is not obj
                self._compare(obj_copy, obj)

    @pytest.mark.parametrize("periods,fill_method,limit,exp", [
        (1, "ffill", None, [np.nan, np.nan, np.nan, 1, 1, 1.5, 0, 0]),
        (1, "ffill", 1, [np.nan, np.nan, np.nan, 1, 1, 1.5, 0, np.nan]),
        (1, "bfill", None, [np.nan, 0, 0, 1, 1, 1.5, np.nan, np.nan]),
        (1, "bfill", 1, [np.nan, np.nan, 0, 1, 1, 1.5, np.nan, np.nan]),
        (-1, "ffill", None, [np.nan, np.nan, -.5, -.5, -.6, 0, 0, np.nan]),
        (-1, "ffill", 1, [np.nan, np.nan, -.5, -.5, -.6, 0, np.nan, np.nan]),
        (-1, "bfill", None, [0, 0, -.5, -.5, -.6, np.nan, np.nan, np.nan]),
        (-1, "bfill", 1, [np.nan, 0, -.5, -.5, -.6, np.nan, np.nan, np.nan])
    ])
    def test_pct_change(self, periods, fill_method, limit, exp):
        vals = [np.nan, np.nan, 1, 2, 4, 10, np.nan, np.nan]
        obj = self._typ(vals)
        func = getattr(obj, 'pct_change')
        res = func(periods=periods, fill_method=fill_method, limit=limit)
        if type(obj) is DataFrame:
            tm.assert_frame_equal(res, DataFrame(exp))
        else:
            tm.assert_series_equal(res, Series(exp))
class TestNDFrame(object):
# tests that don't fit elsewhere
def test_sample(sel):
# Fixes issue: 2419
# additional specific object based tests
# A few dataframe test with degenerate weights.
easy_weight_list = [0] * 10
easy_weight_list[5] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10,
'easyweights': easy_weight_list})
sample1 = df.sample(n=1, weights='easyweights')
assert_frame_equal(sample1, df.iloc[5:6])
# Ensure proper error if string given as weight for Series, panel, or
# DataFrame with axis = 1.
s = Series(range(10))
with pytest.raises(ValueError):
s.sample(n=3, weights='weight_column')
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
panel = Panel(items=[0, 1, 2], major_axis=[2, 3, 4],
minor_axis=[3, 4, 5])
with pytest.raises(ValueError):
panel.sample(n=1, weights='weight_column')
with pytest.raises(ValueError):
df.sample(n=1, weights='weight_column', axis=1)
# Check weighting key error
with pytest.raises(KeyError):
df.sample(n=3, weights='not_a_real_column_name')
# Check that re-normalizes weights that don't sum to one.
weights_less_than_1 = [0] * 10
weights_less_than_1[0] = 0.5
tm.assert_frame_equal(
df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
###
# Test axis argument
###
# Test axis argument
df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10})
second_column_weight = [0, 1]
assert_frame_equal(
df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']])
# Different axis arg types
assert_frame_equal(df.sample(n=1, axis='columns',
weights=second_column_weight),
df[['col2']])
weight = [0] * 10
weight[5] = 0.5
assert_frame_equal(df.sample(n=1, axis='rows', weights=weight),
df.iloc[5:6])
assert_frame_equal(df.sample(n=1, axis='index', weights=weight),
df.iloc[5:6])
# Check out of range axis values
with pytest.raises(ValueError):
df.sample(n=1, axis=2)
with pytest.raises(ValueError):
df.sample(n=1, axis='not_a_name')
with pytest.raises(ValueError):
s = pd.Series(range(10))
s.sample(n=1, axis=1)
# Test weight length compared to correct axis
with pytest.raises(ValueError):
df.sample(n=1, axis=1, weights=[0.5] * 10)
# Check weights with axis = 1
easy_weight_list = [0] * 3
easy_weight_list[2] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10})
sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
assert_frame_equal(sample1, df[['colString']])
# Test default axes
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
p = Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6],
minor_axis=[1, 3, 5])
assert_panel_equal(
p.sample(n=3, random_state=42), p.sample(n=3, axis=1,
random_state=42))
assert_frame_equal(
df.sample(n=3, random_state=42), df.sample(n=3, axis=0,
random_state=42))
# Test that function aligns weights with frame
df = DataFrame(
{'col1': [5, 6, 7],
'col2': ['a', 'b', 'c'], }, index=[9, 5, 3])
s = Series([1, 0, 0], index=[3, 5, 9])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s))
# Weights have index values to be dropped because not in
# sampled DataFrame
s2 = Series([0.001, 0, 10000], index=[3, 5, 10])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2))
# Weights have empty values to be filed with zeros
s3 = Series([0.01, 0], index=[3, 5])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3))
# No overlap in weight and sampled DataFrame indices
s4 = Series([1, 0], index=[1, 2])
with pytest.raises(ValueError):
df.sample(1, weights=s4)
def test_squeeze(self):
# noop
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries()]:
tm.assert_series_equal(s.squeeze(), s)
for df in [tm.makeTimeDataFrame()]:
tm.assert_frame_equal(df.squeeze(), df)
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
for p in [tm.makePanel()]:
tm.assert_panel_equal(p.squeeze(), p)
# squeezing
df = tm.makeTimeDataFrame().reindex(columns=['A'])
tm.assert_series_equal(df.squeeze(), df['A'])
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
p = tm.makePanel().reindex(items=['ItemA'])
tm.assert_frame_equal(p.squeeze(), p['ItemA'])
p = tm.makePanel().reindex(items=['ItemA'], minor_axis=['A'])
tm.assert_series_equal(p.squeeze(), p.loc['ItemA', :, 'A'])
# don't fail with 0 length dimensions GH11229 & GH8999
empty_series = Series([], name='five')
empty_frame = DataFrame([empty_series])
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
empty_panel = Panel({'six': empty_frame})
[tm.assert_series_equal(empty_series, higher_dim.squeeze())
for higher_dim in [empty_series, empty_frame, empty_panel]]
# axis argument
df = tm.makeTimeDataFrame(nper=1).iloc[:, :1]
assert df.shape == (1, 1)
tm.assert_series_equal(df.squeeze(axis=0), df.iloc[0])
tm.assert_series_equal(df.squeeze(axis='index'), df.iloc[0])
tm.assert_series_equal(df.squeeze(axis=1), df.iloc[:, 0])
tm.assert_series_equal(df.squeeze(axis='columns'), df.iloc[:, 0])
assert df.squeeze() == df.iloc[0, 0]
pytest.raises(ValueError, df.squeeze, axis=2)
pytest.raises(ValueError, df.squeeze, axis='x')
df = tm.makeTimeDataFrame(3)
tm.assert_frame_equal(df.squeeze(axis=0), df)
def test_numpy_squeeze(self):
s = tm.makeFloatSeries()
tm.assert_series_equal(np.squeeze(s), s)
df = tm.makeTimeDataFrame().reindex(columns=['A'])
tm.assert_series_equal(np.squeeze(df), df['A'])
    def test_transpose(self):
        """transpose() round-trips; Panel rejects duplicate axes kwargs."""
        msg = (r"transpose\(\) got multiple values for "
               r"keyword argument 'axes'")
        for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
                  tm.makeObjectSeries()]:
            # calls implementation in pandas/core/base.py
            tm.assert_series_equal(s.transpose(), s)
        for df in [tm.makeTimeDataFrame()]:
            # double transpose restores the original frame
            tm.assert_frame_equal(df.transpose().transpose(), df)
        with catch_warnings(record=True):
            simplefilter("ignore", FutureWarning)
            for p in [tm.makePanel()]:
                tm.assert_panel_equal(p.transpose(2, 0, 1)
                                      .transpose(1, 2, 0), p)
                # giving axes both positionally and as a keyword is an error
                tm.assert_raises_regex(TypeError, msg, p.transpose,
                                       2, 0, 1, axes=(2, 0, 1))
    def test_numpy_transpose(self):
        """np.transpose works on pandas objects; the `axes` keyword is
        rejected for Series/DataFrame (only Panel reorders axes)."""
        msg = "the 'axes' parameter is not supported"
        s = tm.makeFloatSeries()
        tm.assert_series_equal(
            np.transpose(s), s)
        tm.assert_raises_regex(ValueError, msg,
                               np.transpose, s, axes=1)
        df = tm.makeTimeDataFrame()
        # double transpose restores the original frame
        tm.assert_frame_equal(np.transpose(
            np.transpose(df)), df)
        tm.assert_raises_regex(ValueError, msg,
                               np.transpose, df, axes=1)
        with catch_warnings(record=True):
            simplefilter("ignore", FutureWarning)
            p = tm.makePanel()
            # inverse axis permutations round-trip the Panel
            tm.assert_panel_equal(np.transpose(
                np.transpose(p, axes=(2, 0, 1)),
                axes=(1, 2, 0)), p)
    def test_take(self):
        """take() selects rows/items positionally (negative indices count
        from the end), matching numpy take on values and index."""
        indices = [1, 5, -2, 6, 3, -1]
        for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
                  tm.makeObjectSeries()]:
            out = s.take(indices)
            expected = Series(data=s.values.take(indices),
                              index=s.index.take(indices), dtype=s.dtype)
            tm.assert_series_equal(out, expected)
        for df in [tm.makeTimeDataFrame()]:
            out = df.take(indices)
            expected = DataFrame(data=df.values.take(indices, axis=0),
                                 index=df.index.take(indices),
                                 columns=df.columns)
            tm.assert_frame_equal(out, expected)
        indices = [-3, 2, 0, 1]
        with catch_warnings(record=True):
            simplefilter("ignore", FutureWarning)
            for p in [tm.makePanel()]:
                out = p.take(indices)
                expected = Panel(data=p.values.take(indices, axis=0),
                                 items=p.items.take(indices),
                                 major_axis=p.major_axis,
                                 minor_axis=p.minor_axis)
                tm.assert_panel_equal(out, expected)
    def test_take_invalid_kwargs(self):
        """take() rejects unknown kwargs and the unsupported numpy-compat
        'out' and 'mode' parameters with clear error messages."""
        indices = [-3, 2, 0, 1]
        s = tm.makeFloatSeries()
        df = tm.makeTimeDataFrame()
        with catch_warnings(record=True):
            simplefilter("ignore", FutureWarning)
            p = tm.makePanel()
        for obj in (s, df, p):
            msg = r"take\(\) got an unexpected keyword argument 'foo'"
            tm.assert_raises_regex(TypeError, msg, obj.take,
                                   indices, foo=2)
            msg = "the 'out' parameter is not supported"
            tm.assert_raises_regex(ValueError, msg, obj.take,
                                   indices, out=indices)
            msg = "the 'mode' parameter is not supported"
            tm.assert_raises_regex(ValueError, msg, obj.take,
                                   indices, mode='clip')
def test_equals(self):
s1 = pd.Series([1, 2, 3], index=[0, 2, 1])
s2 = s1.copy()
assert s1.equals(s2)
s1[1] = 99
assert not s1.equals(s2)
# NaNs compare as equal
s1 = pd.Series([1, np.nan, 3, np.nan], index=[0, 2, 1, 3])
s2 = s1.copy()
assert s1.equals(s2)
s2[0] = 9.9
assert not s1.equals(s2)
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s1 = Series([1, 2, np.nan], index=idx)
s2 = s1.copy()
assert s1.equals(s2)
# Add object dtype column with nans
index = np.random.random(10)
df1 = DataFrame(
np.random.random(10, ), index=index, columns=['floats'])
df1['text'] = 'the sky is so blue. we could use more chocolate.'.split(
)
df1['start'] = date_range('2000-1-1', periods=10, freq='T')
df1['end'] = date_range('2000-1-1', periods=10, freq='D')
df1['diff'] = df1['end'] - df1['start']
df1['bool'] = (np.arange(10) % 3 == 0)
df1.loc[::2] = np.nan
df2 = df1.copy()
assert df1['text'].equals(df2['text'])
assert df1['start'].equals(df2['start'])
assert df1['end'].equals(df2['end'])
assert df1['diff'].equals(df2['diff'])
assert df1['bool'].equals(df2['bool'])
assert df1.equals(df2)
assert not df1.equals(object)
# different dtype
different = df1.copy()
different['floats'] = different['floats'].astype('float32')
assert not df1.equals(different)
# different index
different_index = -index
different = df2.set_index(different_index)
assert not df1.equals(different)
# different columns
different = df2.copy()
different.columns = df2.columns[::-1]
assert not df1.equals(different)
# DatetimeIndex
index = pd.date_range('2000-1-1', periods=10, freq='T')
df1 = df1.set_index(index)
df2 = df1.copy()
assert df1.equals(df2)
# MultiIndex
df3 = df1.set_index(['text'], append=True)
df2 = df1.set_index(['text'], append=True)
assert df3.equals(df2)
df2 = df1.set_index(['floats'], append=True)
assert not df3.equals(df2)
# NaN in index
df3 = df1.set_index(['floats'], append=True)
df2 = df1.set_index(['floats'], append=True)
assert df3.equals(df2)
# GH 8437
a = pd.Series([False, np.nan])
b = pd.Series([False, np.nan])
c = pd.Series(index=range(2))
d = pd.Series(index=range(2))
e = pd.Series(index=range(2))
f = pd.Series(index=range(2))
c[:-1] = d[:-1] = e[0] = f[0] = False
assert a.equals(a)
assert a.equals(b)
assert a.equals(c)
assert a.equals(d)
assert a.equals(e)
assert e.equals(f)
def test_describe_raises(self):
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
with pytest.raises(NotImplementedError):
tm.makePanel().describe()
def test_pipe(self):
df = DataFrame({'A': [1, 2, 3]})
f = lambda x, y: x ** y
result = df.pipe(f, 2)
expected = DataFrame({'A': [1, 4, 9]})
assert_frame_equal(result, expected)
result = df.A.pipe(f, 2)
assert_series_equal(result, expected.A)
def test_pipe_tuple(self):
df = DataFrame({'A': [1, 2, 3]})
f = lambda x, y: y
result = df.pipe((f, 'y'), 0)
assert_frame_equal(result, df)
result = df.A.pipe((f, 'y'), 0)
assert_series_equal(result, df.A)
def test_pipe_tuple_error(self):
df = DataFrame({"A": [1, 2, 3]})
f = lambda x, y: y
with pytest.raises(ValueError):
df.pipe((f, 'y'), x=1, y=0)
with pytest.raises(ValueError):
df.A.pipe((f, 'y'), x=1, y=0)
    def test_pipe_panel(self):
        """Panel.pipe supports the plain-callable and (callable, keyword)
        forms; giving the designated keyword explicitly raises ValueError."""
        with catch_warnings(record=True):
            simplefilter("ignore", FutureWarning)
            wp = Panel({'r1': DataFrame({"A": [1, 2, 3]})})
            f = lambda x, y: x + y
            result = wp.pipe(f, 2)
            expected = wp + 2
            assert_panel_equal(result, expected)
            # tuple form: the panel is bound to keyword 'y'
            result = wp.pipe((f, 'y'), x=1)
            expected = wp + 1
            assert_panel_equal(result, expected)
            # 'y' supplied both by pipe and by the caller -> error
            with pytest.raises(ValueError):
                result = wp.pipe((f, 'y'), x=1, y=1)
    @pytest.mark.parametrize('box', [pd.Series, pd.DataFrame])
    def test_axis_classmethods(self, box):
        """Axis-resolution helpers must give identical answers whether
        called on the class or on an instance."""
        obj = box()
        # every registered axis name, number, and alias
        values = (list(box._AXIS_NAMES.keys()) +
                  list(box._AXIS_NUMBERS.keys()) +
                  list(box._AXIS_ALIASES.keys()))
        for v in values:
            assert obj._get_axis_number(v) == box._get_axis_number(v)
            assert obj._get_axis_name(v) == box._get_axis_name(v)
            assert obj._get_block_manager_axis(v) == \
                box._get_block_manager_axis(v)
| {
"content_hash": "4b0c42a190b6733dd2be32972313d716",
"timestamp": "",
"source": "github",
"line_count": 1030,
"max_line_length": 79,
"avg_line_length": 35.074757281553396,
"alnum_prop": 0.5199158524095552,
"repo_name": "harisbal/pandas",
"id": "46bb6303d8908a04b61bbc887ca548602d346f5f",
"size": "36186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/tests/generic/test_generic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4907"
},
{
"name": "C",
"bytes": "404689"
},
{
"name": "C++",
"bytes": "17194"
},
{
"name": "HTML",
"bytes": "551714"
},
{
"name": "Makefile",
"bytes": "574"
},
{
"name": "Python",
"bytes": "14298777"
},
{
"name": "Shell",
"bytes": "28914"
},
{
"name": "Smarty",
"bytes": "2069"
}
],
"symlink_target": ""
} |
"""
Directives for table elements.
"""
__docformat__ = 'reStructuredText'
import sys
import os.path
import csv
from docutils import io, nodes, statemachine, utils
from docutils.utils.error_reporting import SafeString
from docutils.utils import SystemMessagePropagation
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
def align(argument):
    """Validate the "align" option against its three legal values."""
    legal_values = ('left', 'center', 'right')
    return directives.choice(argument, legal_values)
class Table(Directive):
    """
    Generic table base class.

    Provides option handling, title parsing, and dimension/width checks
    shared by the concrete table directives below.
    """
    optional_arguments = 1
    final_argument_whitespace = True
    option_spec = {'class': directives.class_option,
                   'name': directives.unchanged,
                   'align': align,
                   'widths': directives.value_or(('auto', 'grid'),
                                                 directives.positive_int_list)}
    has_content = True
    def make_title(self):
        """Parse the optional directive argument into a title node.

        Returns ``(title, messages)``; ``title`` is ``None`` when no
        argument was supplied.
        """
        if self.arguments:
            title_text = self.arguments[0]
            text_nodes, messages = self.state.inline_text(title_text,
                                                          self.lineno)
            title = nodes.title(title_text, '', *text_nodes)
            (title.source,
             title.line) = self.state_machine.get_source_and_line(self.lineno)
        else:
            title = None
            messages = []
        return title, messages
    def process_header_option(self):
        """Parse the "header" option (CSV text) into header rows.

        Returns ``(table_head, max_header_cols)``; empty list and 0 when
        the option is absent.
        """
        source = self.state_machine.get_source(self.lineno - 1)
        table_head = []
        max_header_cols = 0
        if 'header' in self.options: # separate table header in option
            rows, max_header_cols = self.parse_csv_data_into_rows(
                self.options['header'].split('\n'), self.HeaderDialect(),
                source)
            table_head.extend(rows)
        return table_head, max_header_cols
    def check_table_dimensions(self, rows, header_rows, stub_columns):
        """Raise SystemMessagePropagation when the data cannot satisfy the
        requested header-row and stub-column counts."""
        if len(rows) < header_rows:
            error = self.state_machine.reporter.error(
                '%s header row(s) specified but only %s row(s) of data '
                'supplied ("%s" directive).'
                % (header_rows, len(rows), self.name), nodes.literal_block(
                self.block_text, self.block_text), line=self.lineno)
            raise SystemMessagePropagation(error)
        if len(rows) == header_rows > 0:
            error = self.state_machine.reporter.error(
                'Insufficient data supplied (%s row(s)); no data remaining '
                'for table body, required by "%s" directive.'
                % (len(rows), self.name), nodes.literal_block(
                self.block_text, self.block_text), line=self.lineno)
            raise SystemMessagePropagation(error)
        for row in rows:
            if len(row) < stub_columns:
                error = self.state_machine.reporter.error(
                    '%s stub column(s) specified but only %s columns(s) of '
                    'data supplied ("%s" directive).' %
                    (stub_columns, len(row), self.name), nodes.literal_block(
                    self.block_text, self.block_text), line=self.lineno)
                raise SystemMessagePropagation(error)
            if len(row) == stub_columns > 0:
                error = self.state_machine.reporter.error(
                    'Insufficient data supplied (%s columns(s)); no data remaining '
                    'for table body, required by "%s" directive.'
                    % (len(row), self.name), nodes.literal_block(
                    self.block_text, self.block_text), line=self.lineno)
                raise SystemMessagePropagation(error)
    @property
    def widths(self):
        """The "widths" option: '' (unset), 'auto', 'grid', or an int list."""
        return self.options.get('widths', '')
    def get_column_widths(self, max_cols):
        """Return the per-column widths for ``max_cols`` columns.

        An explicit widths list must match the column count; otherwise
        columns share 100 units evenly.  Raises SystemMessagePropagation
        on mismatch or when no columns were found.
        """
        if type(self.widths) == list:
            if len(self.widths) != max_cols:
                error = self.state_machine.reporter.error(
                    '"%s" widths do not match the number of columns in table '
                    '(%s).' % (self.name, max_cols), nodes.literal_block(
                    self.block_text, self.block_text), line=self.lineno)
                raise SystemMessagePropagation(error)
            col_widths = self.widths
        elif max_cols:
            col_widths = [100 // max_cols] * max_cols
        else:
            error = self.state_machine.reporter.error(
                'No table data detected in CSV file.', nodes.literal_block(
                self.block_text, self.block_text), line=self.lineno)
            raise SystemMessagePropagation(error)
        return col_widths
    def extend_short_rows_with_empty_cells(self, columns, parts):
        """Pad every row in ``parts`` (head/body) with empty cells up to
        ``columns`` entries, mutating the rows in place."""
        for part in parts:
            for row in part:
                if len(row) < columns:
                    row.extend([(0, 0, 0, [])] * (columns - len(row)))
class RSTTable(Table):
    """The "table" directive: wrap a reST-syntax table parsed from the
    directive content, optionally adding title, classes, and widths."""
    def run(self):
        """Parse the content, expect exactly one table node, decorate it."""
        if not self.content:
            warning = self.state_machine.reporter.warning(
                'Content block expected for the "%s" directive; none found.'
                % self.name, nodes.literal_block(
                self.block_text, self.block_text), line=self.lineno)
            return [warning]
        title, messages = self.make_title()
        node = nodes.Element() # anonymous container for parsing
        self.state.nested_parse(self.content, self.content_offset, node)
        if len(node) != 1 or not isinstance(node[0], nodes.table):
            error = self.state_machine.reporter.error(
                'Error parsing content block for the "%s" directive: exactly '
                'one table expected.' % self.name, nodes.literal_block(
                self.block_text, self.block_text), line=self.lineno)
            return [error]
        table_node = node[0]
        table_node['classes'] += self.options.get('class', [])
        if 'align' in self.options:
            table_node['align'] = self.options.get('align')
        tgroup = table_node[0]
        if type(self.widths) == list:
            # override the parsed colspec widths with the explicit list
            colspecs = [child for child in tgroup.children
                        if child.tagname == 'colspec']
            for colspec, col_width in zip(colspecs, self.widths):
                colspec['colwidth'] = col_width
        # @@@ the colwidths argument for <tgroup> is not part of the
        # XML Exchange Table spec (https://www.oasis-open.org/specs/tm9901.htm)
        # and hence violates the docutils.dtd.
        if self.widths == 'auto':
            table_node['classes'] += ['colwidths-auto']
        elif self.widths: # "grid" or list of integers
            table_node['classes'] += ['colwidths-given']
        self.add_name(table_node)
        if title:
            table_node.insert(0, title)
        return [table_node] + messages
class CSVTable(Table):
    """The "csv-table" directive: build a table from CSV data taken from
    the directive content, a local file, or a URL (mutually exclusive)."""
    option_spec = {'header-rows': directives.nonnegative_int,
                   'stub-columns': directives.nonnegative_int,
                   'header': directives.unchanged,
                   'widths': directives.value_or(('auto', ),
                                                 directives.positive_int_list),
                   'file': directives.path,
                   'url': directives.uri,
                   'encoding': directives.encoding,
                   'class': directives.class_option,
                   'name': directives.unchanged,
                   'align': align,
                   # field delimiter char
                   'delim': directives.single_char_or_whitespace_or_unicode,
                   # treat whitespace after delimiter as significant
                   'keepspace': directives.flag,
                   # text field quote/unquote char:
                   'quote': directives.single_char_or_unicode,
                   # char used to escape delim & quote as-needed:
                   'escape': directives.single_char_or_unicode,}
    class DocutilsDialect(csv.Dialect):
        """CSV dialect for `csv_table` directive."""
        delimiter = ','
        quotechar = '"'
        doublequote = True
        skipinitialspace = True
        strict = True
        lineterminator = '\n'
        quoting = csv.QUOTE_MINIMAL
        def __init__(self, options):
            # apply the directive's delim/keepspace/quote/escape options
            if 'delim' in options:
                self.delimiter = CSVTable.encode_for_csv(options['delim'])
            if 'keepspace' in options:
                self.skipinitialspace = False
            if 'quote' in options:
                self.quotechar = CSVTable.encode_for_csv(options['quote'])
            if 'escape' in options:
                # an explicit escape char replaces doubled-quote escaping
                self.doublequote = False
                self.escapechar = CSVTable.encode_for_csv(options['escape'])
            csv.Dialect.__init__(self)
    class HeaderDialect(csv.Dialect):
        """CSV dialect to use for the "header" option data."""
        delimiter = ','
        quotechar = '"'
        escapechar = '\\'
        doublequote = False
        skipinitialspace = True
        strict = True
        lineterminator = '\n'
        quoting = csv.QUOTE_MINIMAL
    def check_requirements(self):
        """Hook for subclasses to verify preconditions; no-op here."""
        pass
    def run(self):
        """Fetch and parse the CSV data, then build the table node tree."""
        try:
            if (not self.state.document.settings.file_insertion_enabled
                and ('file' in self.options
                     or 'url' in self.options)):
                warning = self.state_machine.reporter.warning(
                    'File and URL access deactivated; ignoring "%s" '
                    'directive.' % self.name, nodes.literal_block(
                    self.block_text, self.block_text), line=self.lineno)
                return [warning]
            self.check_requirements()
            title, messages = self.make_title()
            csv_data, source = self.get_csv_data()
            table_head, max_header_cols = self.process_header_option()
            rows, max_cols = self.parse_csv_data_into_rows(
                csv_data, self.DocutilsDialect(self.options), source)
            max_cols = max(max_cols, max_header_cols)
            header_rows = self.options.get('header-rows', 0)
            stub_columns = self.options.get('stub-columns', 0)
            self.check_table_dimensions(rows, header_rows, stub_columns)
            table_head.extend(rows[:header_rows])
            table_body = rows[header_rows:]
            col_widths = self.get_column_widths(max_cols)
            self.extend_short_rows_with_empty_cells(max_cols,
                                                    (table_head, table_body))
        except SystemMessagePropagation as detail:
            return [detail.args[0]]
        except csv.Error as detail:
            message = str(detail)
            if sys.version_info < (3,) and '1-character string' in message:
                message += '\nwith Python 2.x this must be an ASCII character.'
            error = self.state_machine.reporter.error(
                'Error with CSV data in "%s" directive:\n%s'
                % (self.name, message), nodes.literal_block(
                self.block_text, self.block_text), line=self.lineno)
            return [error]
        table = (col_widths, table_head, table_body)
        table_node = self.state.build_table(table, self.content_offset,
                                            stub_columns, widths=self.widths)
        table_node['classes'] += self.options.get('class', [])
        if 'align' in self.options:
            table_node['align'] = self.options.get('align')
        self.add_name(table_node)
        if title:
            table_node.insert(0, title)
        return [table_node] + messages
    def get_csv_data(self):
        """
        Get CSV data from the directive content, from an external
        file, or from a URL reference.

        Returns ``(csv_data, source)`` where ``csv_data`` is a list of
        lines and ``source`` identifies its origin.  Raises
        SystemMessagePropagation on conflicting or missing sources.
        """
        encoding = self.options.get(
            'encoding', self.state.document.settings.input_encoding)
        error_handler = self.state.document.settings.input_encoding_error_handler
        if self.content:
            # CSV data is from directive content.
            if 'file' in self.options or 'url' in self.options:
                error = self.state_machine.reporter.error(
                    '"%s" directive may not both specify an external file and'
                    ' have content.' % self.name, nodes.literal_block(
                    self.block_text, self.block_text), line=self.lineno)
                raise SystemMessagePropagation(error)
            source = self.content.source(0)
            csv_data = self.content
        elif 'file' in self.options:
            # CSV data is from an external file.
            if 'url' in self.options:
                error = self.state_machine.reporter.error(
                    'The "file" and "url" options may not be simultaneously'
                    ' specified for the "%s" directive.' % self.name,
                    nodes.literal_block(self.block_text, self.block_text),
                    line=self.lineno)
                raise SystemMessagePropagation(error)
            # resolve the path relative to the current document
            source_dir = os.path.dirname(
                os.path.abspath(self.state.document.current_source))
            source = os.path.normpath(os.path.join(source_dir,
                                                   self.options['file']))
            source = utils.relative_path(None, source)
            try:
                self.state.document.settings.record_dependencies.add(source)
                csv_file = io.FileInput(source_path=source,
                                        encoding=encoding,
                                        error_handler=error_handler)
                csv_data = csv_file.read().splitlines()
            except IOError as error:
                severe = self.state_machine.reporter.severe(
                    'Problems with "%s" directive path:\n%s.'
                    % (self.name, SafeString(error)),
                    nodes.literal_block(self.block_text, self.block_text),
                    line=self.lineno)
                raise SystemMessagePropagation(severe)
        elif 'url' in self.options:
            # CSV data is from a URL.
            # Do not import urllib2 at the top of the module because
            # it may fail due to broken SSL dependencies, and it takes
            # about 0.15 seconds to load.
            import urllib.request, urllib.error, urllib.parse
            source = self.options['url']
            try:
                csv_text = urllib.request.urlopen(source).read()
            except (urllib.error.URLError, IOError, OSError, ValueError) as error:
                severe = self.state_machine.reporter.severe(
                    'Problems with "%s" directive URL "%s":\n%s.'
                    % (self.name, self.options['url'], SafeString(error)),
                    nodes.literal_block(self.block_text, self.block_text),
                    line=self.lineno)
                raise SystemMessagePropagation(severe)
            csv_file = io.StringInput(
                source=csv_text, source_path=source, encoding=encoding,
                error_handler=(self.state.document.settings.\
                               input_encoding_error_handler))
            csv_data = csv_file.read().splitlines()
        else:
            error = self.state_machine.reporter.warning(
                'The "%s" directive requires content; none supplied.'
                % self.name, nodes.literal_block(
                self.block_text, self.block_text), line=self.lineno)
            raise SystemMessagePropagation(error)
        return csv_data, source
    if sys.version_info < (3,):
        # 2.x csv module doesn't do Unicode
        def decode_from_csv(s):
            return s.decode('utf-8')
        def encode_for_csv(s):
            return s.encode('utf-8')
    else:
        # 3.x csv handles str natively; these are identity functions
        def decode_from_csv(s):
            return s
        def encode_for_csv(s):
            return s
    decode_from_csv = staticmethod(decode_from_csv)
    encode_for_csv = staticmethod(encode_for_csv)
    def parse_csv_data_into_rows(self, csv_data, dialect, source):
        """Parse CSV lines into ``(rows, max_cols)``; each cell becomes a
        ``(0, 0, 0, StringList)`` tuple as expected by build_table."""
        # csv.py doesn't do Unicode; encode temporarily as UTF-8
        csv_reader = csv.reader([self.encode_for_csv(line + '\n')
                                 for line in csv_data],
                                dialect=dialect)
        rows = []
        max_cols = 0
        for row in csv_reader:
            row_data = []
            for cell in row:
                # decode UTF-8 back to Unicode
                cell_text = self.decode_from_csv(cell)
                cell_data = (0, 0, 0, statemachine.StringList(
                    cell_text.splitlines(), source=source))
                row_data.append(cell_data)
            rows.append(row_data)
            max_cols = max(max_cols, len(row))
        return rows, max_cols
class ListTable(Table):
    """
    Implement tables whose data is encoded as a uniform two-level bullet list.
    For further ideas, see
    http://docutils.sf.net/docs/dev/rst/alternatives.html#list-driven-tables
    """
    option_spec = {'header-rows': directives.nonnegative_int,
                   'stub-columns': directives.nonnegative_int,
                   'widths': directives.value_or(('auto', ),
                                                 directives.positive_int_list),
                   'class': directives.class_option,
                   'name': directives.unchanged,
                   'align': align}
    def run(self):
        """Parse the bullet-list content and convert it into a table node."""
        if not self.content:
            error = self.state_machine.reporter.error(
                'The "%s" directive is empty; content required.' % self.name,
                nodes.literal_block(self.block_text, self.block_text),
                line=self.lineno)
            return [error]
        title, messages = self.make_title()
        node = nodes.Element() # anonymous container for parsing
        self.state.nested_parse(self.content, self.content_offset, node)
        try:
            num_cols, col_widths = self.check_list_content(node)
            # each top-level item is a row; its nested items are the cells
            table_data = [[item.children for item in row_list[0]]
                          for row_list in node[0]]
            header_rows = self.options.get('header-rows', 0)
            stub_columns = self.options.get('stub-columns', 0)
            self.check_table_dimensions(table_data, header_rows, stub_columns)
        except SystemMessagePropagation as detail:
            return [detail.args[0]]
        table_node = self.build_table_from_list(table_data, col_widths,
                                                header_rows, stub_columns)
        if 'align' in self.options:
            table_node['align'] = self.options.get('align')
        table_node['classes'] += self.options.get('class', [])
        self.add_name(table_node)
        if title:
            table_node.insert(0, title)
        return [table_node] + messages
    def check_list_content(self, node):
        """Verify the content is one uniform two-level bullet list.

        Returns ``(num_cols, col_widths)``; raises SystemMessagePropagation
        when the structure or the per-row item counts are inconsistent.
        """
        if len(node) != 1 or not isinstance(node[0], nodes.bullet_list):
            error = self.state_machine.reporter.error(
                'Error parsing content block for the "%s" directive: '
                'exactly one bullet list expected.' % self.name,
                nodes.literal_block(self.block_text, self.block_text),
                line=self.lineno)
            raise SystemMessagePropagation(error)
        list_node = node[0]
        # Check for a uniform two-level bullet list:
        for item_index in range(len(list_node)):
            item = list_node[item_index]
            if len(item) != 1 or not isinstance(item[0], nodes.bullet_list):
                error = self.state_machine.reporter.error(
                    'Error parsing content block for the "%s" directive: '
                    'two-level bullet list expected, but row %s does not '
                    'contain a second-level bullet list.'
                    % (self.name, item_index + 1), nodes.literal_block(
                    self.block_text, self.block_text), line=self.lineno)
                raise SystemMessagePropagation(error)
            elif item_index:
                # ATTN pychecker users: num_cols is guaranteed to be set in the
                # "else" clause below for item_index==0, before this branch is
                # triggered.
                if len(item[0]) != num_cols:
                    error = self.state_machine.reporter.error(
                        'Error parsing content block for the "%s" directive: '
                        'uniform two-level bullet list expected, but row %s '
                        'does not contain the same number of items as row 1 '
                        '(%s vs %s).'
                        % (self.name, item_index + 1, len(item[0]), num_cols),
                        nodes.literal_block(self.block_text, self.block_text),
                        line=self.lineno)
                    raise SystemMessagePropagation(error)
            else:
                num_cols = len(item[0])
        col_widths = self.get_column_widths(num_cols)
        return num_cols, col_widths
    def build_table_from_list(self, table_data, col_widths, header_rows, stub_columns):
        """Assemble the docutils table node tree (tgroup/colspec/thead/tbody)
        from already-validated row data."""
        table = nodes.table()
        if self.widths == 'auto':
            table['classes'] += ['colwidths-auto']
        elif self.widths: # "grid" or list of integers
            table['classes'] += ['colwidths-given']
        tgroup = nodes.tgroup(cols=len(col_widths))
        table += tgroup
        for col_width in col_widths:
            colspec = nodes.colspec()
            if col_width is not None:
                colspec.attributes['colwidth'] = col_width
            if stub_columns:
                # the first `stub_columns` colspecs are flagged as stubs
                colspec.attributes['stub'] = 1
                stub_columns -= 1
            tgroup += colspec
        rows = []
        for row in table_data:
            row_node = nodes.row()
            for cell in row:
                entry = nodes.entry()
                entry += cell
                row_node += entry
            rows.append(row_node)
        if header_rows:
            thead = nodes.thead()
            thead.extend(rows[:header_rows])
            tgroup += thead
        tbody = nodes.tbody()
        tbody.extend(rows[header_rows:])
        tgroup += tbody
        return table
| {
"content_hash": "6368824dbc76ca9dd3b40a10fa29b0d4",
"timestamp": "",
"source": "github",
"line_count": 496,
"max_line_length": 87,
"avg_line_length": 44.72782258064516,
"alnum_prop": 0.5449177372098265,
"repo_name": "LEXmono/q",
"id": "5457c1622fa71e3408270f48b062e7cf53160156",
"size": "22360",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "docutils/parsers/rst/directives/tables.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5936"
},
{
"name": "CSS",
"bytes": "67090"
},
{
"name": "JavaScript",
"bytes": "15800"
},
{
"name": "Python",
"bytes": "4498863"
},
{
"name": "TeX",
"bytes": "1628"
}
],
"symlink_target": ""
} |
# Global strict-mode switch.  Presumably read elsewhere in the package to
# choose between strict and lenient behaviour -- TODO(review): confirm
# against actual usages before relying on this description.
STRICT_MODE = True
| {
"content_hash": "4dd67357429abeef32fe0a05f3eb5cfc",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 18,
"avg_line_length": 19,
"alnum_prop": 0.7368421052631579,
"repo_name": "Typecraft/typecraft_python",
"id": "2ba1bd8f4015ff464d7afc3db6db5d8ad9c330cf",
"size": "20",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "typecraft_python/core/globals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2296"
},
{
"name": "Python",
"bytes": "131331"
}
],
"symlink_target": ""
} |
import sys
import os
# Locate the IncludeOS source tree: honour $INCLUDEOS_SRC when set,
# otherwise derive it from this file's location (everything before the
# first '/test' path segment).
includeos_src = os.environ.get('INCLUDEOS_SRC',
                               os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))).split('/test')[0])
sys.path.insert(0,includeos_src)
from vmrunner import vmrunner
# Build with CMake, boot the first VM (40 is presumably a timeout in
# seconds -- TODO confirm against vmrunner's API), then clean artifacts.
vmrunner.vms[0].cmake().boot(40).clean()
| {
"content_hash": "b58e42426ca3233fc8674f1809cf016a",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 120,
"avg_line_length": 32.888888888888886,
"alnum_prop": 0.6486486486486487,
"repo_name": "ingve/IncludeOS",
"id": "083ea025ae756d35dd00c859692fcff21b7d3b15",
"size": "319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/stl/integration/stl/test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "50627"
},
{
"name": "C",
"bytes": "201785"
},
{
"name": "C++",
"bytes": "2871717"
},
{
"name": "CMake",
"bytes": "124597"
},
{
"name": "Dockerfile",
"bytes": "665"
},
{
"name": "GDB",
"bytes": "189"
},
{
"name": "JavaScript",
"bytes": "813"
},
{
"name": "Makefile",
"bytes": "1719"
},
{
"name": "Python",
"bytes": "151122"
},
{
"name": "Shell",
"bytes": "77957"
}
],
"symlink_target": ""
} |
from .base import FunctionalTest
class CreateValidTutorialTest(FunctionalTest):
    """End-to-end flow: a visitor successfully creates a tutorial."""
    def test_creating_a_valid_tutorial(self):
        # Start at the site root and navigate to the tutorials home page.
        self.browser.get(self.live_server_url)
        self.browser.find_element_by_id("go_to_home").click()
        self.assertIn("Tutorials", self.browser.title)
        # Follow the "Add Tutorial" button to the creation form.
        self.browser.find_element_by_id("add_tutorial").click()
        form_url = self.live_server_url + "/add-tutorial/"
        self.assertEquals(form_url, self.browser.current_url)
        self.assertIn("Add a tutorial", self.browser.title)
        # Fill in both required fields: title and body.
        self.browser.find_element_by_id("id_title").send_keys("Test")
        self.browser.find_element_by_id("id_body").send_keys(
            "Tutorial goes here!")
        # Submitting redirects back to the home page.
        self.browser.find_element_by_id("id_submit").click()
        self.assertEquals(self.live_server_url + '/', self.browser.current_url)
        self.assertIn("Tutorials", self.browser.title)
        # The new tutorial appears in the "recently modified" list.
        recent_list = self.browser.find_element_by_id(
            "recently_modified")
        entries = recent_list.find_elements_by_tag_name("li")
        self.assertIn("Test", [entry.text for entry in entries])
class CreateInvalidTutorialTest(FunctionalTest):
    """End-to-end flow: submitting a tutorial without a body is rejected."""
    def test_creating_an_invalid_tutorial(self):
        # Start at the tutorials home page.
        self.browser.get(self.live_server_url)
        self.assertIn("Tutorials", self.browser.title)
        # Open the tutorial creation form.
        self.browser.find_element_by_id("add_tutorial").click()
        form_url = self.live_server_url + "/add-tutorial/"
        self.assertEquals(form_url, self.browser.current_url)
        self.assertIn("Add a tutorial", self.browser.title)
        # Fill in only the title, leaving the body empty, and submit.
        self.browser.find_element_by_id("id_title").send_keys("Test")
        self.browser.find_element_by_id("id_submit").click()
        # The form is redisplayed instead of redirecting home.
        self.assertEquals(form_url, self.browser.current_url)
        self.assertIn("Add a tutorial", self.browser.title)
        # A validation error for the missing field is shown.
        page_text = self.browser.find_element_by_tag_name('body').text
        self.assertIn('This field is required', page_text)
| {
"content_hash": "1278bf81bdddbe5fb579b5e4c98ac36d",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 79,
"avg_line_length": 39.309859154929576,
"alnum_prop": 0.6442135435327839,
"repo_name": "d0ntreadthis/tutorials_site",
"id": "20f16bf5621b0c0e25082efa481b5dae87ed54a4",
"size": "2791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "functional_tests/test_creating_a_tutorial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "34103"
},
{
"name": "Python",
"bytes": "27687"
}
],
"symlink_target": ""
} |
import numpy as np
from sklearn.datasets import make_sparse_coded_signal
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
# Shared fixture for all tests below: a synthetic sparse-coded signal with
# a fixed seed so results are reproducible across runs.
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
                                       n_nonzero_coefs, random_state=0)
# Precomputed Gram matrix (X^T X) and X^T y for the *_gram code paths.
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
    """Single- and multi-target OMP return coefficients of the right shape."""
    single_target = orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5)
    assert_equal(single_target.shape, (n_features,))
    multi_target = orthogonal_mp(X, y, n_nonzero_coefs=5)
    assert_equal(multi_target.shape, (n_features, 3))
def test_correct_shapes_gram():
    """The Gram-matrix solver returns coefficients of the right shape too."""
    single_target = orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5)
    assert_equal(single_target.shape, (n_features,))
    multi_target = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5)
    assert_equal(multi_target.shape, (n_features, 3))
def test_n_nonzero_coefs():
    """Solutions never use more atoms than the requested sparsity budget."""
    for use_gram in (False, True):
        coef = orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
                             precompute=use_gram)
        assert_true(np.count_nonzero(coef) <= 5)
def test_tol():
    """The squared residual of the solution respects the given tolerance."""
    tol = 0.5
    target = y[:, 0]
    for use_gram in (False, True):
        coef = orthogonal_mp(X, target, tol=tol, precompute=use_gram)
        assert_true(np.sum((target - np.dot(X, coef)) ** 2) <= tol)
def test_with_without_gram():
    """Plain and Gram-precomputed solvers agree for a fixed sparsity level."""
    plain = orthogonal_mp(X, y, n_nonzero_coefs=5)
    with_gram = orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True)
    assert_array_almost_equal(plain, with_gram)
def test_with_without_gram_tol():
    """Plain and Gram-precomputed solvers agree under a tolerance stop."""
    plain = orthogonal_mp(X, y, tol=1.)
    with_gram = orthogonal_mp(X, y, tol=1., precompute=True)
    assert_array_almost_equal(plain, with_gram)
def test_unreachable_accuracy():
    """With tol=0 the solver keeps adding atoms until none remain, matching
    an explicit all-features fit; the Gram path additionally emits a
    RuntimeWarning while doing so."""
    assert_array_almost_equal(
        orthogonal_mp(X, y, tol=0),
        orthogonal_mp(X, y, n_nonzero_coefs=n_features))
    assert_array_almost_equal(
        assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
                     precompute=True),
        orthogonal_mp(X, y, precompute=True,
                      n_nonzero_coefs=n_features))
def test_bad_input():
    """Negative tolerances and out-of-range sparsity levels raise ValueError
    in both the plain and the Gram-based solvers."""
    for solver, lhs, rhs in ((orthogonal_mp, X, y),
                             (orthogonal_mp_gram, G, Xy)):
        assert_raises(ValueError, solver, lhs, rhs, tol=-1)
        assert_raises(ValueError, solver, lhs, rhs, n_nonzero_coefs=-1)
        assert_raises(ValueError, solver, lhs, rhs,
                      n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
    """OMP recovers the true support and values of a sparse signal."""
    support, = gamma[:, 0].nonzero()
    recovered = orthogonal_mp(X, y[:, 0], 5)
    recovered_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
    assert_array_equal(support, np.flatnonzero(recovered))
    assert_array_equal(support, np.flatnonzero(recovered_gram))
    assert_array_almost_equal(gamma[:, 0], recovered, decimal=2)
    assert_array_almost_equal(gamma[:, 0], recovered_gram, decimal=2)
def test_estimator():
    """Check OrthogonalMatchingPursuit attribute shapes and sparsity budget.

    Fits single- and multi-target problems, with and without intercept
    fitting/normalization, and verifies coef_/intercept_ shapes plus the
    nonzero-coefficient cap for each configuration.
    """
    omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
    # Single target: 1-D coefficients, scalar intercept.
    omp.fit(X, y[:, 0])
    assert_equal(omp.coef_.shape, (n_features,))
    assert_equal(omp.intercept_.shape, ())
    assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
    # Multiple targets: one coefficient row and one intercept per target.
    omp.fit(X, y)
    assert_equal(omp.coef_.shape, (n_targets, n_features))
    assert_equal(omp.intercept_.shape, (n_targets,))
    assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
    # Without intercept fitting the intercept collapses to the scalar 0.
    omp.set_params(fit_intercept=False, normalize=False)
    omp.fit(X, y[:, 0])
    assert_equal(omp.coef_.shape, (n_features,))
    assert_equal(omp.intercept_, 0)
    assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
    omp.fit(X, y)
    assert_equal(omp.coef_.shape, (n_targets, n_features))
    assert_equal(omp.intercept_, 0)
    assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
    """Duplicated columns in the design matrix trigger a RuntimeWarning."""
    X_dup = X.copy()
    X_dup[:, 1] = X_dup[:, 0]
    coef = np.zeros(n_features)
    coef[0] = coef[1] = 1.
    target = np.dot(X_dup, coef)
    assert_warns(RuntimeWarning, orthogonal_mp, X_dup, target, 2)
def test_swapped_regressors():
    """Internal column swapping must not corrupt the recovered support."""
    # X[:, 21] should be selected first, then X[:, 0] selected second,
    # which will take X[:, 21]'s place in case the algorithm does
    # column swapping for optimization (which is the case at the moment)
    coef = np.zeros(n_features)
    coef[21] = 1.0
    coef[0] = 0.5
    target = np.dot(X, coef)
    target_Xy = np.dot(X.T, target)
    estimate = orthogonal_mp(X, target, 2)
    estimate_gram = orthogonal_mp_gram(G, target_Xy, 2)
    assert_array_equal(np.flatnonzero(estimate), [0, 21])
    assert_array_equal(np.flatnonzero(estimate_gram), [0, 21])
def test_no_atoms():
    """An all-zero target must yield an all-zero solution from both solvers."""
    y_empty = np.zeros_like(y)
    Xy_empty = np.dot(X.T, y_empty)
    gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
    # BUG FIX: the Gram-based path must go through orthogonal_mp_gram.
    # Passing G to orthogonal_mp only happened to run because G is square,
    # and it left the Gram code path untested.
    gamma_empty_gram = ignore_warnings(orthogonal_mp_gram)(G, Xy_empty, 1)
    assert_equal(np.all(gamma_empty == 0), True)
    assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
    """return_path=True records every step; the last step equals the plain fit."""
    for solver, lhs, rhs in ((orthogonal_mp, X, y),
                             (orthogonal_mp_gram, G, Xy)):
        path = solver(lhs, rhs, n_nonzero_coefs=5, return_path=True)
        last = solver(lhs, rhs, n_nonzero_coefs=5, return_path=False)
        assert_equal(path.shape, (n_features, n_targets, 5))
        assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
    """The path property also holds when the Gram matrix is precomputed."""
    full_path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
                              precompute=True)
    final = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
                          precompute=True)
    assert_equal(full_path.shape, (n_features, n_targets, 5))
    assert_array_almost_equal(full_path[:, :, -1], final)
def test_omp_cv():
    """Cross-validation picks the true sparsity and matches a direct fit."""
    target = y[:, 0]
    true_coef = gamma[:, 0]
    cv_estimator = OrthogonalMatchingPursuitCV(normalize=True,
                                               fit_intercept=False,
                                               max_iter=10, cv=5)
    cv_estimator.fit(X, target)
    assert_equal(cv_estimator.n_nonzero_coefs_, n_nonzero_coefs)
    assert_array_almost_equal(cv_estimator.coef_, true_coef)
    # Refit a plain OMP at the CV-selected sparsity; it must agree.
    direct = OrthogonalMatchingPursuit(
        normalize=True, fit_intercept=False,
        n_nonzero_coefs=cv_estimator.n_nonzero_coefs_)
    direct.fit(X, target)
    assert_array_almost_equal(cv_estimator.coef_, direct.coef_)
def test_omp_reaches_least_squares():
    """With as many atoms as features, OMP coincides with least squares."""
    # Small toy problem; this is a sanity check since OMP can stop early.
    rng = check_random_state(0)
    n_obs, n_feat = 10, 8
    design = rng.randn(n_obs, n_feat)
    targets = rng.randn(n_obs, 3)
    omp_full = OrthogonalMatchingPursuit(n_nonzero_coefs=n_feat)
    baseline = LinearRegression()
    omp_full.fit(design, targets)
    baseline.fit(design, targets)
    assert_array_almost_equal(omp_full.coef_, baseline.coef_)
| {
"content_hash": "ebe52752729cf9f04eb3d96c33a87836",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 77,
"avg_line_length": 38.01477832512315,
"alnum_prop": 0.6265388104185564,
"repo_name": "DailyActie/Surrogate-Model",
"id": "d5e86d4d1928cb150f83993fcd96989fb6bca678",
"size": "7765",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "01-codes/scikit-learn-master/sklearn/linear_model/tests/test_omp.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "345"
},
{
"name": "Batchfile",
"bytes": "18746"
},
{
"name": "C",
"bytes": "13004913"
},
{
"name": "C++",
"bytes": "14692003"
},
{
"name": "CMake",
"bytes": "72831"
},
{
"name": "CSS",
"bytes": "303488"
},
{
"name": "Fortran",
"bytes": "7339415"
},
{
"name": "HTML",
"bytes": "854774"
},
{
"name": "Java",
"bytes": "38854"
},
{
"name": "JavaScript",
"bytes": "2432846"
},
{
"name": "Jupyter Notebook",
"bytes": "829689"
},
{
"name": "M4",
"bytes": "1379"
},
{
"name": "Makefile",
"bytes": "48708"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "PHP",
"bytes": "93585"
},
{
"name": "Pascal",
"bytes": "1449"
},
{
"name": "Perl",
"bytes": "1152272"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "34668203"
},
{
"name": "Roff",
"bytes": "5925"
},
{
"name": "Ruby",
"bytes": "92498"
},
{
"name": "Shell",
"bytes": "94698"
},
{
"name": "TeX",
"bytes": "156540"
},
{
"name": "TypeScript",
"bytes": "41691"
}
],
"symlink_target": ""
} |
"""Test Konnected setup process."""
import pytest
from homeassistant.components import konnected
from homeassistant.components.konnected import config_flow
from homeassistant.config import async_process_ha_core_config
from homeassistant.const import HTTP_NOT_FOUND
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
from tests.common import MockConfigEntry
@pytest.fixture(name="mock_panel")
async def mock_panel_fixture():
    """Mock a Konnected Panel bridge (yields the patched konnected.Client)."""
    with patch("konnected.Client", autospec=True) as konn_client:
        def mock_constructor(host, port, websession):
            """Fake the panel constructor."""
            # Record the connection target on the mock itself so tests can
            # inspect which host/port the integration connected to.
            konn_client.host = host
            konn_client.port = port
            return konn_client
        konn_client.side_effect = mock_constructor
        konn_client.ClientError = config_flow.CannotConnect
        # Canned panel status payload returned by Client.get_status().
        konn_client.get_status.return_value = {
            "hwVersion": "2.3.0",
            "swVersion": "2.3.1",
            "heap": 10000,
            "uptime": 12222,
            "ip": "192.168.1.90",
            "port": 9123,
            "sensors": [],
            "actuators": [],
            "dht_sensors": [],
            "ds18b20_sensors": [],
            "mac": "11:22:33:44:55:66",
            "settings": {},
        }
        yield konn_client
async def test_config_schema(hass):
    """Test that CONFIG_SCHEMA normalizes devices and fills default options."""
    # Minimal device entry: every zone should default to "Disabled".
    config = {
        konnected.DOMAIN: {
            konnected.CONF_API_HOST: "http://1.1.1.1:8888",
            konnected.CONF_ACCESS_TOKEN: "abcdefgh",
            konnected.CONF_DEVICES: [{konnected.CONF_ID: "aabbccddeeff"}],
        }
    }
    assert konnected.CONFIG_SCHEMA(config) == {
        "konnected": {
            "access_token": "abcdefgh",
            "api_host": "http://1.1.1.1:8888",
            "devices": [
                {
                    "default_options": {
                        "blink": True,
                        "api_host": "http://1.1.1.1:8888",
                        "discovery": True,
                        "io": {
                            "1": "Disabled",
                            "10": "Disabled",
                            "11": "Disabled",
                            "12": "Disabled",
                            "2": "Disabled",
                            "3": "Disabled",
                            "4": "Disabled",
                            "5": "Disabled",
                            "6": "Disabled",
                            "7": "Disabled",
                            "8": "Disabled",
                            "9": "Disabled",
                            "alarm1": "Disabled",
                            "alarm2_out2": "Disabled",
                            "out": "Disabled",
                            "out1": "Disabled",
                        },
                    },
                    "id": "aabbccddeeff",
                }
            ],
        }
    }
    # check with host info
    config = {
        konnected.DOMAIN: {
            konnected.CONF_ACCESS_TOKEN: "abcdefgh",
            konnected.CONF_DEVICES: [
                {konnected.CONF_ID: "aabbccddeeff", "host": "192.168.1.1", "port": 1234}
            ],
        }
    }
    assert konnected.CONFIG_SCHEMA(config) == {
        "konnected": {
            "access_token": "abcdefgh",
            "devices": [
                {
                    "default_options": {
                        "blink": True,
                        "api_host": "",
                        "discovery": True,
                        "io": {
                            "1": "Disabled",
                            "10": "Disabled",
                            "11": "Disabled",
                            "12": "Disabled",
                            "2": "Disabled",
                            "3": "Disabled",
                            "4": "Disabled",
                            "5": "Disabled",
                            "6": "Disabled",
                            "7": "Disabled",
                            "8": "Disabled",
                            "9": "Disabled",
                            "alarm1": "Disabled",
                            "alarm2_out2": "Disabled",
                            "out": "Disabled",
                            "out1": "Disabled",
                        },
                    },
                    "id": "aabbccddeeff",
                    "host": "192.168.1.1",
                    "port": 1234,
                }
            ],
        }
    }
    # check pin to zone and multiple output
    # Legacy "pin" entries should be converted to zones, and two switches on
    # the same zone should both be kept.
    config = {
        konnected.DOMAIN: {
            konnected.CONF_ACCESS_TOKEN: "abcdefgh",
            konnected.CONF_DEVICES: [
                {
                    konnected.CONF_ID: "aabbccddeeff",
                    "binary_sensors": [
                        {"pin": 2, "type": "door"},
                        {"zone": 1, "type": "door"},
                    ],
                    "switches": [
                        {
                            "zone": 3,
                            "name": "Beep Beep",
                            "momentary": 65,
                            "pause": 55,
                            "repeat": 4,
                        },
                        {
                            "zone": 3,
                            "name": "Warning",
                            "momentary": 100,
                            "pause": 100,
                            "repeat": -1,
                        },
                    ],
                }
            ],
        }
    }
    assert konnected.CONFIG_SCHEMA(config) == {
        "konnected": {
            "access_token": "abcdefgh",
            "devices": [
                {
                    "default_options": {
                        "blink": True,
                        "api_host": "",
                        "discovery": True,
                        "io": {
                            "1": "Binary Sensor",
                            "10": "Disabled",
                            "11": "Disabled",
                            "12": "Disabled",
                            "2": "Binary Sensor",
                            "3": "Switchable Output",
                            "4": "Disabled",
                            "5": "Disabled",
                            "6": "Disabled",
                            "7": "Disabled",
                            "8": "Disabled",
                            "9": "Disabled",
                            "alarm1": "Disabled",
                            "alarm2_out2": "Disabled",
                            "out": "Disabled",
                            "out1": "Disabled",
                        },
                        "binary_sensors": [
                            {"inverse": False, "type": "door", "zone": "2"},
                            {"inverse": False, "type": "door", "zone": "1"},
                        ],
                        "switches": [
                            {
                                "zone": "3",
                                "activation": "high",
                                "name": "Beep Beep",
                                "momentary": 65,
                                "pause": 55,
                                "repeat": 4,
                            },
                            {
                                "zone": "3",
                                "activation": "high",
                                "name": "Warning",
                                "momentary": 100,
                                "pause": 100,
                                "repeat": -1,
                            },
                        ],
                    },
                    "id": "aabbccddeeff",
                }
            ],
        }
    }
async def test_setup_with_no_config(hass):
    """Test that we do not discover anything or try to set up a Konnected panel."""
    # Setup with an empty config dict must still succeed.
    assert await async_setup_component(hass, konnected.DOMAIN, {})
    # No flows started
    assert len(hass.config_entries.flow.async_progress()) == 0
    # Nothing saved from configuration.yaml
    assert hass.data[konnected.DOMAIN][konnected.CONF_ACCESS_TOKEN] is None
    assert hass.data[konnected.DOMAIN][konnected.CONF_API_HOST] is None
    assert konnected.YAML_CONFIGS not in hass.data[konnected.DOMAIN]
async def test_setup_defined_hosts_known_auth(hass, mock_panel):
    """Test we don't initiate a config entry if configured panel is known."""
    # Pre-register both panels as existing config entries.
    MockConfigEntry(
        domain="konnected",
        unique_id="112233445566",
        data={"host": "0.0.0.0", "id": "112233445566"},
    ).add_to_hass(hass)
    MockConfigEntry(
        domain="konnected",
        unique_id="aabbccddeeff",
        data={"host": "1.2.3.4", "id": "aabbccddeeff"},
    ).add_to_hass(hass)
    assert (
        await async_setup_component(
            hass,
            konnected.DOMAIN,
            {
                konnected.DOMAIN: {
                    konnected.CONF_ACCESS_TOKEN: "abcdefgh",
                    konnected.CONF_DEVICES: [
                        {
                            config_flow.CONF_ID: "aabbccddeeff",
                            config_flow.CONF_HOST: "0.0.0.0",
                            config_flow.CONF_PORT: 1234,
                        }
                    ],
                }
            },
        )
        is True
    )
    # The global access token from YAML is stored regardless.
    assert hass.data[konnected.DOMAIN][konnected.CONF_ACCESS_TOKEN] == "abcdefgh"
    assert konnected.YAML_CONFIGS not in hass.data[konnected.DOMAIN]
    # Flow aborted
    assert len(hass.config_entries.flow.async_progress()) == 0
async def test_setup_defined_hosts_no_known_auth(hass):
    """Test we initiate config entry if config panel is not known."""
    # No pre-existing config entry for this device id.
    assert (
        await async_setup_component(
            hass,
            konnected.DOMAIN,
            {
                konnected.DOMAIN: {
                    konnected.CONF_ACCESS_TOKEN: "abcdefgh",
                    konnected.CONF_DEVICES: [{konnected.CONF_ID: "aabbccddeeff"}],
                }
            },
        )
        is True
    )
    # Flow started for discovered bridge
    assert len(hass.config_entries.flow.async_progress()) == 1
async def test_setup_multiple(hass):
    """Test we initiate config entry for multiple panels."""
    # Two fully-specified devices in YAML, neither known as a config entry.
    assert (
        await async_setup_component(
            hass,
            konnected.DOMAIN,
            {
                konnected.DOMAIN: {
                    konnected.CONF_ACCESS_TOKEN: "arandomstringvalue",
                    konnected.CONF_API_HOST: "http://192.168.86.32:8123",
                    konnected.CONF_DEVICES: [
                        {
                            konnected.CONF_ID: "aabbccddeeff",
                            "binary_sensors": [
                                {"zone": 4, "type": "motion", "name": "Hallway Motion"},
                                {
                                    "zone": 5,
                                    "type": "window",
                                    "name": "Master Bedroom Window",
                                },
                                {
                                    "zone": 6,
                                    "type": "window",
                                    "name": "Downstairs Windows",
                                },
                            ],
                            "switches": [{"zone": "out", "name": "siren"}],
                        },
                        {
                            konnected.CONF_ID: "445566778899",
                            "binary_sensors": [
                                {"zone": 1, "type": "motion", "name": "Front"},
                                {"zone": 2, "type": "window", "name": "Back"},
                            ],
                            "switches": [
                                {
                                    "zone": "out",
                                    "name": "Buzzer",
                                    "momentary": 65,
                                    "pause": 55,
                                    "repeat": 4,
                                }
                            ],
                        },
                    ],
                }
            },
        )
        is True
    )
    # Flow started for discovered bridge
    assert len(hass.config_entries.flow.async_progress()) == 2
    # Globals saved
    assert (
        hass.data[konnected.DOMAIN][konnected.CONF_ACCESS_TOKEN] == "arandomstringvalue"
    )
    assert (
        hass.data[konnected.DOMAIN][konnected.CONF_API_HOST]
        == "http://192.168.86.32:8123"
    )
async def test_config_passed_to_config_entry(hass):
    """Test that configured options for a host are loaded via config entry."""
    entry = MockConfigEntry(
        domain=konnected.DOMAIN,
        data={config_flow.CONF_ID: "aabbccddeeff", config_flow.CONF_HOST: "0.0.0.0"},
    )
    entry.add_to_hass(hass)
    # Patch AlarmPanel so we can observe what setup passes to it.
    with patch.object(konnected, "AlarmPanel", autospec=True) as mock_int:
        assert (
            await async_setup_component(
                hass,
                konnected.DOMAIN,
                {
                    konnected.DOMAIN: {
                        konnected.CONF_ACCESS_TOKEN: "abcdefgh",
                        konnected.CONF_DEVICES: [{konnected.CONF_ID: "aabbccddeeff"}],
                    }
                },
            )
            is True
        )
    # Three calls were recorded on the mock; the first is the constructor.
    assert len(mock_int.mock_calls) == 3
    # Positional args of the constructor call: (hass, config_entry).
    p_hass, p_entry = mock_int.mock_calls[0][1]
    assert p_hass is hass
    assert p_entry is entry
async def test_unload_entry(hass, mock_panel):
    """Test being able to unload an entry."""
    await async_process_ha_core_config(
        hass, {"internal_url": "http://example.local:8123"},
    )
    entry = MockConfigEntry(
        domain=konnected.DOMAIN, data={konnected.CONF_ID: "aabbccddeeff"}
    )
    entry.add_to_hass(hass)
    assert await async_setup_component(hass, konnected.DOMAIN, {}) is True
    # Device registered on setup, removed again after unload.
    assert hass.data[konnected.DOMAIN]["devices"].get("aabbccddeeff") is not None
    assert await konnected.async_unload_entry(hass, entry)
    assert hass.data[konnected.DOMAIN]["devices"] == {}
async def test_api(hass, aiohttp_client, mock_panel):
    """Test the Konnected HTTP API view: GET switch state and POST sensor updates."""
    await async_setup_component(hass, "http", {"http": {}})
    # Device registered with a per-device access token "abcdefgh".
    device_config = config_flow.CONFIG_ENTRY_SCHEMA(
        {
            "host": "1.2.3.4",
            "port": 1234,
            "id": "112233445566",
            "model": "Konnected Pro",
            "access_token": "abcdefgh",
            "default_options": config_flow.OPTIONS_SCHEMA({config_flow.CONF_IO: {}}),
        }
    )
    device_options = config_flow.OPTIONS_SCHEMA(
        {
            "io": {
                "1": "Binary Sensor",
                "2": "Binary Sensor",
                "3": "Binary Sensor",
                "4": "Digital Sensor",
                "5": "Digital Sensor",
                "6": "Switchable Output",
                "out": "Switchable Output",
            },
            "binary_sensors": [
                {"zone": "1", "type": "door"},
                {"zone": "2", "type": "window", "name": "winder", "inverse": True},
                {"zone": "3", "type": "door"},
            ],
            "sensors": [
                {"zone": "4", "type": "dht"},
                {"zone": "5", "type": "ds18b20", "name": "temper"},
            ],
            "switches": [
                {
                    "zone": "out",
                    "name": "switcher",
                    "activation": "low",
                    "momentary": 50,
                    "pause": 100,
                    "repeat": 4,
                },
                {"zone": "6"},
            ],
        }
    )
    entry = MockConfigEntry(
        domain="konnected",
        title="Konnected Alarm Panel",
        data=device_config,
        options=device_options,
    )
    entry.add_to_hass(hass)
    # A global token is configured too; either token must authorize POSTs.
    assert (
        await async_setup_component(
            hass,
            konnected.DOMAIN,
            {konnected.DOMAIN: {konnected.CONF_ACCESS_TOKEN: "globaltoken"}},
        )
        is True
    )
    client = await aiohttp_client(hass.http.app)
    # Test the get endpoint for switch status polling
    resp = await client.get("/api/konnected")
    assert resp.status == HTTP_NOT_FOUND # no device provided
    resp = await client.get("/api/konnected/223344556677")
    assert resp.status == HTTP_NOT_FOUND # unknown device provided
    resp = await client.get("/api/konnected/device/112233445566")
    assert resp.status == HTTP_NOT_FOUND # no zone provided
    result = await resp.json()
    assert result == {"message": "Switch on zone or pin unknown not configured"}
    resp = await client.get("/api/konnected/device/112233445566?zone=8")
    assert resp.status == HTTP_NOT_FOUND # invalid zone
    result = await resp.json()
    assert result == {"message": "Switch on zone or pin 8 not configured"}
    resp = await client.get("/api/konnected/device/112233445566?pin=12")
    assert resp.status == HTTP_NOT_FOUND # invalid pin
    result = await resp.json()
    assert result == {"message": "Switch on zone or pin 12 not configured"}
    # Valid switch zone/pin queries return the current state.
    resp = await client.get("/api/konnected/device/112233445566?zone=out")
    assert resp.status == 200
    result = await resp.json()
    assert result == {"state": 1, "zone": "out"}
    resp = await client.get("/api/konnected/device/112233445566?pin=8")
    assert resp.status == 200
    result = await resp.json()
    assert result == {"state": 1, "pin": "8"}
    # Test the post endpoint for sensor updates
    resp = await client.post("/api/konnected/device", json={"zone": "1", "state": 1})
    assert resp.status == HTTP_NOT_FOUND
    # Missing auth header -> 401.
    resp = await client.post(
        "/api/konnected/device/112233445566", json={"zone": "1", "state": 1}
    )
    assert resp.status == 401
    result = await resp.json()
    assert result == {"message": "unauthorized"}
    # Unknown device id -> 400.
    resp = await client.post(
        "/api/konnected/device/223344556677",
        headers={"Authorization": "Bearer abcdefgh"},
        json={"zone": "1", "state": 1},
    )
    assert resp.status == 400
    # Zone not configured on the device -> 400.
    resp = await client.post(
        "/api/konnected/device/112233445566",
        headers={"Authorization": "Bearer abcdefgh"},
        json={"zone": "15", "state": 1},
    )
    assert resp.status == 400
    result = await resp.json()
    assert result == {"message": "unregistered sensor/actuator"}
    resp = await client.post(
        "/api/konnected/device/112233445566",
        headers={"Authorization": "Bearer abcdefgh"},
        json={"zone": "1", "state": 1},
    )
    assert resp.status == 200
    result = await resp.json()
    assert result == {"message": "ok"}
    # The global access token is also accepted.
    resp = await client.post(
        "/api/konnected/device/112233445566",
        headers={"Authorization": "Bearer globaltoken"},
        json={"zone": "1", "state": 1},
    )
    assert resp.status == 200
    result = await resp.json()
    assert result == {"message": "ok"}
    resp = await client.post(
        "/api/konnected/device/112233445566",
        headers={"Authorization": "Bearer abcdefgh"},
        json={"zone": "4", "temp": 22, "humi": 20},
    )
    assert resp.status == 200
    result = await resp.json()
    assert result == {"message": "ok"}
    # Repeat the POST to confirm a second identical update is still "ok".
    # (Original comment said "put endpoint" but this is a POST.)
    resp = await client.post(
        "/api/konnected/device/112233445566",
        headers={"Authorization": "Bearer abcdefgh"},
        json={"zone": "1", "state": 1},
    )
    assert resp.status == 200
    result = await resp.json()
    assert result == {"message": "ok"}
async def test_state_updates_zone(hass, aiohttp_client, mock_panel):
    """Test that zone-addressed device callbacks update HA entity states."""
    await async_process_ha_core_config(
        hass, {"internal_url": "http://example.local:8123"},
    )
    # "Konnected Pro" model: callbacks address inputs by zone.
    device_config = config_flow.CONFIG_ENTRY_SCHEMA(
        {
            "host": "1.2.3.4",
            "port": 1234,
            "id": "112233445566",
            "model": "Konnected Pro",
            "access_token": "abcdefgh",
            "default_options": config_flow.OPTIONS_SCHEMA({config_flow.CONF_IO: {}}),
        }
    )
    device_options = config_flow.OPTIONS_SCHEMA(
        {
            "io": {
                "1": "Binary Sensor",
                "2": "Binary Sensor",
                "3": "Binary Sensor",
                "4": "Digital Sensor",
                "5": "Digital Sensor",
                "6": "Switchable Output",
                "out": "Switchable Output",
            },
            "binary_sensors": [
                {"zone": "1", "type": "door"},
                {"zone": "2", "type": "window", "name": "winder", "inverse": True},
                {"zone": "3", "type": "door"},
            ],
            "sensors": [
                {"zone": "4", "type": "dht"},
                {"zone": "5", "type": "ds18b20", "name": "temper"},
            ],
            "switches": [
                {
                    "zone": "out",
                    "name": "switcher",
                    "activation": "low",
                    "momentary": 50,
                    "pause": 100,
                    "repeat": 4,
                },
                {"zone": "6"},
            ],
        }
    )
    entry = MockConfigEntry(
        domain="konnected",
        title="Konnected Alarm Panel",
        data=device_config,
        options=device_options,
    )
    entry.add_to_hass(hass)
    # Add empty data field to ensure we process it correctly (possible if entry is ignored)
    entry = MockConfigEntry(domain="konnected", title="Konnected Alarm Panel", data={})
    entry.add_to_hass(hass)
    assert (
        await async_setup_component(
            hass,
            konnected.DOMAIN,
            {konnected.DOMAIN: {konnected.CONF_ACCESS_TOKEN: "1122334455"}},
        )
        is True
    )
    client = await aiohttp_client(hass.http.app)
    # Test updating a binary sensor
    resp = await client.post(
        "/api/konnected/device/112233445566",
        headers={"Authorization": "Bearer abcdefgh"},
        json={"zone": "1", "state": 0},
    )
    assert resp.status == 200
    result = await resp.json()
    assert result == {"message": "ok"}
    await hass.async_block_till_done()
    assert hass.states.get("binary_sensor.konnected_445566_zone_1").state == "off"
    resp = await client.post(
        "/api/konnected/device/112233445566",
        headers={"Authorization": "Bearer abcdefgh"},
        json={"zone": "1", "state": 1},
    )
    assert resp.status == 200
    result = await resp.json()
    assert result == {"message": "ok"}
    await hass.async_block_till_done()
    assert hass.states.get("binary_sensor.konnected_445566_zone_1").state == "on"
    # Test updating sht sensor
    # (zone 4 is the "dht" sensor; payload carries temperature and humidity)
    resp = await client.post(
        "/api/konnected/device/112233445566",
        headers={"Authorization": "Bearer abcdefgh"},
        json={"zone": "4", "temp": 22, "humi": 20},
    )
    assert resp.status == 200
    result = await resp.json()
    assert result == {"message": "ok"}
    await hass.async_block_till_done()
    assert hass.states.get("sensor.konnected_445566_sensor_4_humidity").state == "20"
    assert (
        hass.states.get("sensor.konnected_445566_sensor_4_temperature").state == "22.0"
    )
    resp = await client.post(
        "/api/konnected/device/112233445566",
        headers={"Authorization": "Bearer abcdefgh"},
        json={"zone": "4", "temp": 25, "humi": 23},
    )
    assert resp.status == 200
    result = await resp.json()
    assert result == {"message": "ok"}
    await hass.async_block_till_done()
    assert hass.states.get("sensor.konnected_445566_sensor_4_humidity").state == "23"
    assert (
        hass.states.get("sensor.konnected_445566_sensor_4_temperature").state == "25.0"
    )
    # Test updating ds sensor
    # (zone 5 is the named ds18b20 probe; "addr" identifies the probe)
    resp = await client.post(
        "/api/konnected/device/112233445566",
        headers={"Authorization": "Bearer abcdefgh"},
        json={"zone": "5", "temp": 32, "addr": 1},
    )
    assert resp.status == 200
    result = await resp.json()
    assert result == {"message": "ok"}
    await hass.async_block_till_done()
    assert hass.states.get("sensor.temper_temperature").state == "32.0"
    resp = await client.post(
        "/api/konnected/device/112233445566",
        headers={"Authorization": "Bearer abcdefgh"},
        json={"zone": "5", "temp": 42, "addr": 1},
    )
    assert resp.status == 200
    result = await resp.json()
    assert result == {"message": "ok"}
    await hass.async_block_till_done()
    assert hass.states.get("sensor.temper_temperature").state == "42.0"
async def test_state_updates_pin(hass, aiohttp_client, mock_panel):
    """Test that pin-addressed device callbacks update HA entity states.

    Same scenario as the zone test, but the device model is plain
    "Konnected" and the callback payloads address inputs by "pin"
    (legacy addressing) rather than "zone".
    """
    await async_process_ha_core_config(
        hass, {"internal_url": "http://example.local:8123"},
    )
    device_config = config_flow.CONFIG_ENTRY_SCHEMA(
        {
            "host": "1.2.3.4",
            "port": 1234,
            "id": "112233445566",
            "model": "Konnected",
            "access_token": "abcdefgh",
            "default_options": config_flow.OPTIONS_SCHEMA({config_flow.CONF_IO: {}}),
        }
    )
    device_options = config_flow.OPTIONS_SCHEMA(
        {
            "io": {
                "1": "Binary Sensor",
                "2": "Binary Sensor",
                "3": "Binary Sensor",
                "4": "Digital Sensor",
                "5": "Digital Sensor",
                "6": "Switchable Output",
                "out": "Switchable Output",
            },
            "binary_sensors": [
                {"zone": "1", "type": "door"},
                {"zone": "2", "type": "window", "name": "winder", "inverse": True},
                {"zone": "3", "type": "door"},
            ],
            "sensors": [
                {"zone": "4", "type": "dht"},
                {"zone": "5", "type": "ds18b20", "name": "temper"},
            ],
            "switches": [
                {
                    "zone": "out",
                    "name": "switcher",
                    "activation": "low",
                    "momentary": 50,
                    "pause": 100,
                    "repeat": 4,
                },
                {"zone": "6"},
            ],
        }
    )
    entry = MockConfigEntry(
        domain="konnected",
        title="Konnected Alarm Panel",
        data=device_config,
        options=device_options,
    )
    entry.add_to_hass(hass)
    # Add empty data field to ensure we process it correctly (possible if entry is ignored)
    entry = MockConfigEntry(domain="konnected", title="Konnected Alarm Panel", data={},)
    entry.add_to_hass(hass)
    assert (
        await async_setup_component(
            hass,
            konnected.DOMAIN,
            {konnected.DOMAIN: {konnected.CONF_ACCESS_TOKEN: "1122334455"}},
        )
        is True
    )
    client = await aiohttp_client(hass.http.app)
    # Test updating a binary sensor
    resp = await client.post(
        "/api/konnected/device/112233445566",
        headers={"Authorization": "Bearer abcdefgh"},
        json={"pin": "1", "state": 0},
    )
    assert resp.status == 200
    result = await resp.json()
    assert result == {"message": "ok"}
    await hass.async_block_till_done()
    assert hass.states.get("binary_sensor.konnected_445566_zone_1").state == "off"
    resp = await client.post(
        "/api/konnected/device/112233445566",
        headers={"Authorization": "Bearer abcdefgh"},
        json={"pin": "1", "state": 1},
    )
    assert resp.status == 200
    result = await resp.json()
    assert result == {"message": "ok"}
    await hass.async_block_till_done()
    assert hass.states.get("binary_sensor.konnected_445566_zone_1").state == "on"
    # Test updating sht sensor
    # (pin "6" maps to the zone-4 dht sensor on this model — note the
    # entity ids below still use the zone numbering)
    resp = await client.post(
        "/api/konnected/device/112233445566",
        headers={"Authorization": "Bearer abcdefgh"},
        json={"pin": "6", "temp": 22, "humi": 20},
    )
    assert resp.status == 200
    result = await resp.json()
    assert result == {"message": "ok"}
    await hass.async_block_till_done()
    assert hass.states.get("sensor.konnected_445566_sensor_4_humidity").state == "20"
    assert (
        hass.states.get("sensor.konnected_445566_sensor_4_temperature").state == "22.0"
    )
    resp = await client.post(
        "/api/konnected/device/112233445566",
        headers={"Authorization": "Bearer abcdefgh"},
        json={"pin": "6", "temp": 25, "humi": 23},
    )
    assert resp.status == 200
    result = await resp.json()
    assert result == {"message": "ok"}
    await hass.async_block_till_done()
    assert hass.states.get("sensor.konnected_445566_sensor_4_humidity").state == "23"
    assert (
        hass.states.get("sensor.konnected_445566_sensor_4_temperature").state == "25.0"
    )
    # Test updating ds sensor
    resp = await client.post(
        "/api/konnected/device/112233445566",
        headers={"Authorization": "Bearer abcdefgh"},
        json={"pin": "7", "temp": 32, "addr": 1},
    )
    assert resp.status == 200
    result = await resp.json()
    assert result == {"message": "ok"}
    await hass.async_block_till_done()
    assert hass.states.get("sensor.temper_temperature").state == "32.0"
    resp = await client.post(
        "/api/konnected/device/112233445566",
        headers={"Authorization": "Bearer abcdefgh"},
        json={"pin": "7", "temp": 42, "addr": 1},
    )
    assert resp.status == 200
    result = await resp.json()
    assert result == {"message": "ok"}
    await hass.async_block_till_done()
    assert hass.states.get("sensor.temper_temperature").state == "42.0"
| {
"content_hash": "a9d2973a93024271fdcb780afe8c870a",
"timestamp": "",
"source": "github",
"line_count": 865,
"max_line_length": 91,
"avg_line_length": 34.52832369942197,
"alnum_prop": 0.46697023470720195,
"repo_name": "mKeRix/home-assistant",
"id": "74e3b931f61070c1f0af598509ba99f2f434e137",
"size": "29867",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/konnected/test_init.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1466026"
},
{
"name": "Python",
"bytes": "4770710"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "12407"
}
],
"symlink_target": ""
} |
from django.db import models
from pygments.lexers import get_all_lexers
from pygments.styles import get_all_styles
# Keep only lexers that declare at least one alias (item[1] is the alias list).
LEXERS = [item for item in get_all_lexers() if item[1]]
# Choice pairs built from each lexer tuple: item[1][0] (first alias, stored
# value) and item[0] (display name) — per the pygments get_all_lexers layout.
LANGUAGE_CHOICES = sorted([item[1][0], item[0]] for item in LEXERS)
# Style names double as both stored value and display name.
STYLE_CHOICES = sorted((item, item) for item in get_all_styles())
class Crawl(models.Model):
    """A stored code/text snippet with pygments rendering options."""

    created = models.DateTimeField(auto_now_add=True)  # set once on insert
    title = models.CharField(max_length=100, blank=True, default='')
    code = models.TextField()  # the raw snippet body
    linenos = models.BooleanField(default=False)  # render line numbers?
    # Highlighting language/style, constrained to what pygments supports.
    language = models.CharField(choices=LANGUAGE_CHOICES, default='python', max_length=100)
    style = models.CharField(choices=STYLE_CHOICES, default='friendly', max_length=100)
    class Meta:
        # Oldest records first.
        ordering = ('created',)
class PageReference(models.Model):
    """A crawled page reference: title, location, and provenance metadata."""

    title = models.CharField(max_length=1024, blank=False, default='')
    file = models.CharField(max_length=512, blank=True, default='')
    # MD5 digest of the URL — presumably used for dedup/fast lookup; confirm
    # against the code that populates this table.
    url_md5 = models.CharField(max_length=32, blank=True, default='')
    url = models.CharField(max_length=1024, blank=True, default='')
    icon = models.CharField(max_length=512, blank=True, default='')
    source = models.CharField(max_length=512, blank=True, default='')
    author = models.CharField(max_length=128, blank=True, default='')
    update_time = models.DateTimeField(auto_now_add=True)  # set once on insert
    class Meta:
        # Most recently updated first; explicit table name.
        ordering = ('-update_time',)
        db_table = 'page_reference'
| {
"content_hash": "05120689fc686812af14208f8b54a196",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 91,
"avg_line_length": 39.94444444444444,
"alnum_prop": 0.7016689847009736,
"repo_name": "ethan-lau/mars",
"id": "bd5c728bc2fff1a0cc34d49ad97b16d7ae344a51",
"size": "1531",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web_module/crawl/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "570"
},
{
"name": "HTML",
"bytes": "1277"
},
{
"name": "JavaScript",
"bytes": "6854"
},
{
"name": "Python",
"bytes": "20761"
},
{
"name": "Shell",
"bytes": "109"
}
],
"symlink_target": ""
} |
import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._assessments_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_list_request,
)
# typing.Literal exists from Python 3.8; fall back to typing_extensions before that.
if sys.version_info >= (3, 8):
    from typing import Literal  # pylint: disable=no-name-in-module, ungrouped-imports
else:
    from typing_extensions import Literal  # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
# Optional custom response hook: called with (pipeline response, deserialized
# body, response headers) and may return a replacement result.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AssessmentsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.security.v2020_01_01.aio.SecurityCenter`'s
:attr:`assessments` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace
    def list(self, scope: str, **kwargs: Any) -> AsyncIterable["_models.SecurityAssessment"]:
        """Get security assessments on all your scanned resources inside a scope.

        :param scope: Scope of the query, can be subscription
         (/subscriptions/0b06d9ea-afe6-4779-bd59-30e5c2d9d13f) or management group
         (/providers/Microsoft.Management/managementGroups/mgName). Required.
        :type scope: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either SecurityAssessment or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.security.v2020_01_01.models.SecurityAssessment]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-01-01"))  # type: Literal["2020-01-01"]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.SecurityAssessmentList]
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        def prepare_request(next_link=None):
            # First page: build the request from the operation's metadata URL.
            if not next_link:
                request = build_list_request(
                    scope=scope,
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (next_link, items) to the pager.
            deserialized = self._deserialize("SecurityAssessmentList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page; only HTTP 200 is accepted, everything else maps
            # to an error via error_map / ARMErrorFormat.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(get_next, extract_data)
    list.metadata = {"url": "/{scope}/providers/Microsoft.Security/assessments"}  # type: ignore
@distributed_trace_async
async def get(
self,
resource_id: str,
assessment_name: str,
expand: Optional[Union[str, _models.ExpandEnum]] = None,
**kwargs: Any
) -> _models.SecurityAssessment:
"""Get a security assessment on your scanned resource.
:param resource_id: The identifier of the resource. Required.
:type resource_id: str
:param assessment_name: The Assessment Key - Unique key for the assessment type. Required.
:type assessment_name: str
:param expand: OData expand. Optional. Known values are: "links" and "metadata". Default value
is None.
:type expand: str or ~azure.mgmt.security.v2020_01_01.models.ExpandEnum
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecurityAssessment or the result of cls(response)
:rtype: ~azure.mgmt.security.v2020_01_01.models.SecurityAssessment
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-01-01")) # type: Literal["2020-01-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SecurityAssessment]
request = build_get_request(
resource_id=resource_id,
assessment_name=assessment_name,
expand=expand,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("SecurityAssessment", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/{resourceId}/providers/Microsoft.Security/assessments/{assessmentName}"} # type: ignore
@overload
async def create_or_update(
self,
resource_id: str,
assessment_name: str,
assessment: _models.SecurityAssessment,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.SecurityAssessment:
"""Create a security assessment on your resource. An assessment metadata that describes this
assessment must be predefined with the same name before inserting the assessment result.
:param resource_id: The identifier of the resource. Required.
:type resource_id: str
:param assessment_name: The Assessment Key - Unique key for the assessment type. Required.
:type assessment_name: str
:param assessment: Calculated assessment on a pre-defined assessment metadata. Required.
:type assessment: ~azure.mgmt.security.v2020_01_01.models.SecurityAssessment
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecurityAssessment or the result of cls(response)
:rtype: ~azure.mgmt.security.v2020_01_01.models.SecurityAssessment
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def create_or_update(
self,
resource_id: str,
assessment_name: str,
assessment: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.SecurityAssessment:
"""Create a security assessment on your resource. An assessment metadata that describes this
assessment must be predefined with the same name before inserting the assessment result.
:param resource_id: The identifier of the resource. Required.
:type resource_id: str
:param assessment_name: The Assessment Key - Unique key for the assessment type. Required.
:type assessment_name: str
:param assessment: Calculated assessment on a pre-defined assessment metadata. Required.
:type assessment: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecurityAssessment or the result of cls(response)
:rtype: ~azure.mgmt.security.v2020_01_01.models.SecurityAssessment
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def create_or_update(
self, resource_id: str, assessment_name: str, assessment: Union[_models.SecurityAssessment, IO], **kwargs: Any
) -> _models.SecurityAssessment:
"""Create a security assessment on your resource. An assessment metadata that describes this
assessment must be predefined with the same name before inserting the assessment result.
:param resource_id: The identifier of the resource. Required.
:type resource_id: str
:param assessment_name: The Assessment Key - Unique key for the assessment type. Required.
:type assessment_name: str
:param assessment: Calculated assessment on a pre-defined assessment metadata. Is either a
model type or a IO type. Required.
:type assessment: ~azure.mgmt.security.v2020_01_01.models.SecurityAssessment or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecurityAssessment or the result of cls(response)
:rtype: ~azure.mgmt.security.v2020_01_01.models.SecurityAssessment
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-01-01")) # type: Literal["2020-01-01"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.SecurityAssessment]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(assessment, (IO, bytes)):
_content = assessment
else:
_json = self._serialize.body(assessment, "SecurityAssessment")
request = build_create_or_update_request(
resource_id=resource_id,
assessment_name=assessment_name,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("SecurityAssessment", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("SecurityAssessment", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {"url": "/{resourceId}/providers/Microsoft.Security/assessments/{assessmentName}"} # type: ignore
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self, resource_id: str, assessment_name: str, **kwargs: Any
) -> None:
"""Delete a security assessment on your resource. An assessment metadata that describes this
assessment must be predefined with the same name before inserting the assessment result.
:param resource_id: The identifier of the resource. Required.
:type resource_id: str
:param assessment_name: The Assessment Key - Unique key for the assessment type. Required.
:type assessment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2020-01-01")) # type: Literal["2020-01-01"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_id=resource_id,
assessment_name=assessment_name,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {"url": "/{resourceId}/providers/Microsoft.Security/assessments/{assessmentName}"} # type: ignore
| {
"content_hash": "f7bef22f2e0e6d3a7e3fa8bb36c85c6a",
"timestamp": "",
"source": "github",
"line_count": 401,
"max_line_length": 130,
"avg_line_length": 44.35910224438903,
"alnum_prop": 0.6435237238587812,
"repo_name": "Azure/azure-sdk-for-python",
"id": "1761db658ea4911b38cf690b5a3fc04242d42277",
"size": "18288",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/security/azure-mgmt-security/azure/mgmt/security/v2020_01_01/aio/operations/_assessments_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import pathlib
import re

# Locate aiomonitor/__init__.py relative to this docs/ directory and parse
# the package version out of its ``__version__`` string.
_docs_path = pathlib.Path(__file__).parent
_version_path = _docs_path / '../aiomonitor/__init__.py'

with _version_path.open() as fp:
    try:
        _version_info = re.search(r"^__version__ = '"
                                  r"(?P<major>\d+)"
                                  r"\.(?P<minor>\d+)"
                                  r"\.(?P<patch>\d+)"
                                  r"(?P<tag>.*)?'$",
                                  fp.read(), re.M).groupdict()
    except AttributeError:
        # BUGFIX: when the pattern is not found re.search() returns None and
        # .groupdict() raises AttributeError -- the previous ``except
        # IndexError`` could never trigger, so the friendly RuntimeError was
        # never raised.
        raise RuntimeError('Unable to determine version.')
# -- General configuration ------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.intersphinx',
              'sphinx.ext.todo',
              'sphinx.ext.coverage',
              'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'aiomonitor'
copyright = '2016, Nikolay Novik'
author = 'Nikolay Novik'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '{major}.{minor}'.format(**_version_info)
# The full version, including alpha/beta/rc tags.
# NOTE(review): if the regex 'tag' group is empty this produces a release
# string with a trailing '-' (e.g. "0.3.1-") -- confirm that is intended.
release = '{major}.{minor}.{patch}-{tag}'.format(**_version_info)

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): newer Sphinx releases (>= 5.0) warn when language is None;
# the modern equivalent is language = 'en'.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
aiomonitor_desc = ('module that adds monitor and cli capabilities'
                   'for asyncio application')

html_theme_options = {
    'description': aiomonitor_desc,
    'github_user': 'aio-libs',
    'github_repo': 'aiomonitor',
    'github_button': True,
    'github_type': 'star',
    'github_banner': True,
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']


# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'aiomonitordoc'


# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'aiomonitor.tex', 'aiomonitor Documentation',
     'Nikolay Novik', 'manual'),
]


# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'aiomonitor', 'aiomonitor Documentation',
     [author], 1)
]


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'aiomonitor', 'aiomonitor Documentation',
     author, 'aiomonitor', aiomonitor_desc,
     'Miscellaneous'),
]


# Example configuration for intersphinx: refer to the Python standard library.
# NOTE(review): this is the legacy {url: None} form; the current recommended
# format is {'python': ('https://docs.python.org/', None)}.
intersphinx_mapping = {'https://docs.python.org/': None}
| {
"content_hash": "eea72c905eb00207c686917426d00954",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 78,
"avg_line_length": 31.59119496855346,
"alnum_prop": 0.6386621540911805,
"repo_name": "aio-libs/aiomonitor",
"id": "c54c997456642bb2b348a44cc9fb8d77c8a94a90",
"size": "5953",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1291"
},
{
"name": "Python",
"bytes": "29059"
}
],
"symlink_target": ""
} |
"""starts a long-running process that whatches the file system and
automatically execute tasks when file dependencies change"""
import os
import time
import sys
from multiprocessing import Process
from .cmdparse import CmdParse
from .filewatch import FileModifyWatcher
from .cmd_base import tasks_and_deps_iter
from .cmd_base import DoitCmdBase, check_tasks_exist
from .cmd_run import opt_verbosity, Run
# Command-line option spec (doit option-dict format) selecting the output
# reporter; no short/long flag is exposed, only the default is used here.
opt_reporter = {'name':'reporter',
                'short': None,
                'long': None,
                'type':str,
                'default': 'executed-only',
               }
class Auto(DoitCmdBase):
    """doit command: execute tasks, then re-execute whenever a file
    dependency changes.

    The main process never loads tasks itself; it delegates execution to a
    forked process.  Python caches imported modules, but by using a fresh
    process for every run any python modules the tasks depend on are
    re-imported, making sure the newest module will be used.
    """
    doc_purpose = "automatically execute tasks when a dependency changes"
    doc_usage = "[TASK ...]"
    doc_description = None

    cmd_options = (opt_verbosity, opt_reporter)

    @staticmethod
    def _find_file_deps(tasks, sel_tasks):
        """find all file deps

        @param tasks (dict): task-name -> Task
        @param sel_tasks(list - str): names of the selected tasks
        @return (set - str): paths from ``file_dep`` and ``watch`` of the
            selected tasks and all their task-dependencies
        """
        deps = set()
        for task in tasks_and_deps_iter(tasks, sel_tasks):
            deps.update(task.file_dep)
            deps.update(task.watch)
        return deps

    @staticmethod
    def _dep_changed(watch_files, started, targets):
        """check if watched files were modified since execution started"""
        for watched in watch_files:
            # assume that changes to targets were done by doit itself
            if watched in targets:
                continue
            if os.stat(watched).st_mtime > started:
                return True
        return False

    def run_watch(self, params, args):
        """Run tasks and wait for file system event

        This method is executed in a forked process.
        The process is terminated after a single event.
        """
        started = time.time()

        # execute tasks using Run Command
        ar = Run(task_loader=self._loader)
        params.add_defaults(CmdParse(ar.options).parse([])[0])
        result = ar.execute(params, args)

        # get list of files to watch on file system
        watch_files = self._find_file_deps(ar.control.tasks,
                                           ar.control.selected_tasks)

        # Check for timestamp changes since run started,
        # if change, restart straight away
        if not self._dep_changed(watch_files, started, ar.control.targets):
            # set event handler. just terminate process.
            class DoitAutoRun(FileModifyWatcher):
                def handle_event(self, event):
                    # the child's exit code carries the task-run result back
                    # to the parent loop in execute()
                    sys.exit(result)
            file_watcher = DoitAutoRun(watch_files)
            # kick start watching process
            file_watcher.loop()

    def execute(self, params, args):
        """loop executing tasks until process is interrupted"""
        # check provided task names
        if args:
            task_list = self._loader.load_tasks(self, params, args)[0]
            tasks = dict([(t.name, t) for t in task_list])
            check_tasks_exist(tasks, args)

        while True:
            try:
                # each child process runs the tasks once, then blocks until a
                # watched file changes (it exits on the first event)
                p = Process(target=self.run_watch, args=(params, args))
                p.start()
                p.join()
            except KeyboardInterrupt:
                return 0
| {
"content_hash": "e2f2366281f5be654849dd9605df25ab",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 75,
"avg_line_length": 33.06542056074766,
"alnum_prop": 0.5955342001130582,
"repo_name": "lelit/doit",
"id": "b18eaad8f1d45306d1e4b90f9189857f6cd5202b",
"size": "3538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doit/cmd_auto.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "207"
},
{
"name": "C++",
"bytes": "21"
},
{
"name": "Python",
"bytes": "641976"
},
{
"name": "Shell",
"bytes": "2955"
}
],
"symlink_target": ""
} |
from datetime import timedelta, datetime
# Base Event type
class Event(object):
    """
    Base class for all meeting events.

    Times and dates are naive datetimes in the meeting locale; the UTC
    accessors apply ``settings.timeZoneOffset`` (hours ahead of UTC).
    """
    def __init__(self, settings, startDateTime, endDateTime, summary, location, inIMAT, group):
        """
        :param settings: provides ``sessionDateTime`` (first day of the
            session) and ``timeZoneOffset`` (hours ahead of UTC)
        :param startDateTime: event start, in the local meeting timezone
        :param endDateTime: event end, in the local meeting timezone
        :param summary: short description of the event
        :param location: room name, or None (normalised to '')
        :param inIMAT: True if the event is present in IMAT
        :param group: name of the group hosting the breakout
        """
        self.sessionDateTime = settings.sessionDateTime
        self.startDateTime = startDateTime  # In the local meeting timezone
        self.endDateTime = endDateTime      # In the local meeting timezone
        self.summary = summary
        self.deleted = False
        self.timeZoneOffset = settings.timeZoneOffset
        # Normalise a missing location to '' so later code can test len().
        self.location = location if location is not None else ''
        self.inIMAT = inIMAT                # Event is present in IMAT
        self.group = group                  # name of group hosting breakout

    def endsBefore(self, endDateTime):
        """True if this event finishes before the given local time."""
        return self.endDateTime < endDateTime

    def startDateTimeUTC(self):
        """Event start converted to UTC."""
        return self.startDateTime - timedelta(hours=self.timeZoneOffset)

    def endDateTimeUTC(self):
        """Event end converted to UTC."""
        return self.endDateTime - timedelta(hours=self.timeZoneOffset)

    def __repr__(self):
        s = "Event %s-%s '%s' " % (self.startDateTime, self.endDateTime, self.summary)
        if len(self.location) > 0:
            s += "in %s " % (self.location,)
        return s

    def shortStr(self):
        """
        Return a short string to identify the meeting uniquely
        """
        return "%s %s '%s'" % (self.startDateTime.strftime("%Y-%m-%d %a"), self.startDateTime.time(), self.summary)

    # Equality is determined by a matching start, summary and deletion state
    def __eq__(self, obj):
        return (self.startDateTime == obj.startDateTime) and (self.summary == obj.summary) and (self.deleted == obj.deleted)

    def __ne__(self, obj):
        return not self == obj

    def changed(self, obj):
        """
        Determine if any tracked attribute differs between self and obj.

        Location only counts as changed when both sides have a non-empty
        location; an empty location is treated as "unknown".
        """
        if self.startDateTime != obj.startDateTime:
            return True
        if self.endDateTime != obj.endDateTime:
            return True
        if self.summary != obj.summary:
            return True
        if (len(self.location) > 0) and (len(obj.location) > 0) and (self.location != obj.location):
            return True
        if self.deleted != obj.deleted:
            return True
        return False

    def diff(self, obj):
        """
        Returns a string describing differences between two objects;
        empty if none, otherwise newline-terminated.
        """
        s = ''
        if self.startDateTime != obj.startDateTime:
            s += '%s start changed: to %s from %s. ' % (self.shortStr(), self.startDateTime, obj.startDateTime)
        if self.endDateTime != obj.endDateTime:
            s += '%s end changed: to %s from %s. ' % (self.shortStr(), self.endDateTime, obj.endDateTime)
        if self.summary != obj.summary:
            s += '%s summary changed: to %s from %s. ' % (self.shortStr(), self.summary, obj.summary)
        if (len(self.location) > 0) and (len(obj.location) > 0) and (self.location != obj.location):
            s += '%s location changed: to %s from %s. ' % (self.shortStr(), self.location, obj.location)
        if self.deleted != obj.deleted:
            s += '%s deleted changed: %s. ' % (self.shortStr(), 'now marked deleted' if self.deleted else 'deletion marker removed')
        if len(s) > 0:
            s += '\n'
        return s

    def dayIndex(self):
        """Zero-based day of the session on which this event starts."""
        td = self.startDateTime - self.sessionDateTime
        return td.days
class SlottedEvent(Event):
    """
    Event that has been assigned start and end slots in the session grid.
    """
    def __init__(self, settings, startDateTime, endDateTime, summary, location, startSlot, endSlot, inIMAT, group):
        """
        :param startSlot: slot in which the event starts
        :param endSlot: slot in which the event ends; None means the event
            occupies only its start slot
        (remaining parameters as for Event)
        """
        super(SlottedEvent, self).__init__(settings, startDateTime, endDateTime, summary, location, inIMAT, group)
        self.startSlot = startSlot
        # A missing end slot means a single-slot event.
        self.endSlot = endSlot if endSlot is not None else startSlot

    def __repr__(self):
        return super(SlottedEvent, self).__repr__() + " %s-%s" % (self.startSlot, self.endSlot)

    # Equality is determined by a matching start date and slot and summary
    # (note: unlike Event, the deletion flag is not considered here).
    def __eq__(self, obj):
        return (self.dayIndex() == obj.dayIndex()) and \
               (self.startSlot == obj.startSlot) and \
               (self.summary == obj.summary)
class ImatEvent(SlottedEvent):
    """
    A slotted event as represented on IMAT.  Adds the IMAT accounting
    fields (hosting group, attendance credit and numerator/denominator).
    """
    def __init__(self, settings, startDateTime, endDateTime, summary, location, startSlot, endSlot, group, credit, edit):
        # IMAT events are by definition present in IMAT; the base-class
        # group field is left blank and replaced by the IMAT group below.
        super(ImatEvent, self).__init__(settings, startDateTime, endDateTime, summary, location, startSlot, endSlot, True, '')
        self.group = group
        self.credit = credit
        self.edit = edit
        self.numerator = '0'
        self.denominator = '0'

    def __repr__(self):
        accounting = " %s %s (%s/%s)" % (self.group, self.credit, self.numerator, self.denominator)
        return super(ImatEvent, self).__repr__() + accounting

    def creditChanged(self, obj):
        """
        Indicate if a significant change has been made to credit.

        We generally don't enforce our automated view of credit: if it has
        been changed on IMAT, that is presumably for a good reason.  The one
        exception is the closing plenary, which must be progressed from
        "Normal" to "Other" automatically because the dialog used to create
        the meeting does not support "Other".
        """
        return self.credit == 'Other' and obj.credit == 'Normal'

    def changed(self, obj):
        """
        Determine if self and obj have changed.
        """
        return self.creditChanged(obj) or super(ImatEvent, self).changed(obj)

    def diff(self, obj):
        """
        Returns a string describing differences between two objects.
        """
        report = super(ImatEvent, self).diff(obj)
        if self.creditChanged(obj):
            report += '%s credit changed: to %s from %s.\n' % (self.shortStr(), self.credit, obj.credit)
        return report
def compareEventLists(l1, n1, l2, n2, isImat, updatePast):
    """
    Compare two event lists l1 and l2 (named n1 and n2 for reporting),
    conditionally ignoring events that ended in the past.  When one of the
    lists is on IMAT, it is l2.

    isImat indicates whether only IMAT events should be considered.
    updatePast indicates whether past events should be updated.

    Returns a 4-tuple:
        - events only in l1
        - (l1-event, l2-event) tuples for matched events that changed
        - events only in l2
        - a string describing the changes needed to bring the "old" state
          (l2) in line with the "new" state (l1)
    """
    # Current time in UTC; event UTC accessors also return naive datetimes.
    now = datetime.utcnow()

    def considered(ev):
        """Common filter: skip past (unless updatePast), non-IMAT when
        isImat, and deleted events."""
        if (ev.endDateTimeUTC() <= now) and not updatePast:
            return False
        if isImat and not ev.inIMAT:
            return False
        return not ev.deleted

    onlyInL1 = []
    onlyInL2 = []
    changed = []
    s = ''

    for e1 in l1:
        if not considered(e1):
            continue
        # First l2 event equal to e1, if any (Event equality is by
        # start/summary, so "equal" does not mean "identical").
        match = next((e2 for e2 in l2 if e1 == e2), None)
        if match is None:
            onlyInL1.append(e1)
            s += "%s: New in %s\n" % (e1.shortStr(), n1)
        elif e1.changed(match):
            changed.append((e1, match))
            s += e1.diff(match)

    for e2 in l2:
        if not considered(e2):
            continue
        if not any(e1 == e2 for e1 in l1):
            onlyInL2.append(e2)
            s += "%s: Deleting item only in %s\n" % (e2.shortStr(), n2)

    return (onlyInL1, changed, onlyInL2, s)
| {
"content_hash": "9fb2efeb2fbac0ab5b51b9a878860525",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 132,
"avg_line_length": 34.8962962962963,
"alnum_prop": 0.547866694969221,
"repo_name": "adrian-stephens/schedule",
"id": "7940335539e067bab6066f686a64ed8e547342ce",
"size": "9464",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "events.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "82842"
}
],
"symlink_target": ""
} |
from sofi.ui import BasicBlock
def test_basic():
    """An empty BasicBlock renders as a bare <pre> element."""
    assert str(BasicBlock()) == "<pre></pre>"
def test_text():
    """Text content is rendered inside the <pre> element."""
    rendered = str(BasicBlock("text"))
    assert rendered == "<pre>text</pre>"
def test_custom_class_ident_style_and_attrs():
    """All optional attributes appear, in order, in the rendered tag."""
    rendered = str(BasicBlock("text", cl='abclass', ident='123',
                              style="font-size:0.9em;",
                              attrs={"data-test": 'abc'}))
    expected = ('<pre id="123" class="abclass" style="font-size:0.9em;" '
                'data-test="abc">text</pre>')
    assert rendered == expected
| {
"content_hash": "192084752098c3d32a19f723f89b000f",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 115,
"avg_line_length": 40.27272727272727,
"alnum_prop": 0.6117381489841986,
"repo_name": "jigarmistry/sofi",
"id": "0f1955ace4c68e2113687f9acb2d770f833d4e83",
"size": "443",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/basicblock_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "370"
},
{
"name": "JavaScript",
"bytes": "4083"
},
{
"name": "Python",
"bytes": "139248"
}
],
"symlink_target": ""
} |
"""A Topology can be defined as an arrangement of
fundamental nodes, in various levels. Each topology
has a default level "all" which has each node represented
as a group. For example:
level groups
"system" [[0, 1, 2, 3, 4]]
Only one group that has all the nodes
"cluster" [[0, 1], [2, 3, 4]]
Two groups that represent a cluster
"all" [[0], [1], [2], [3], [4], [5]]
"""
class Topology(object):
    """Topology object allows grouping of
    pivot values (called nodes) at multiple levels.
    The implementation is targeted towards CPU topologies
    but can be used generically as well.
    """

    def __init__(self, clusters=None):
        """
        :param clusters: optional list of groups (lists of nodes) that
            populate the "cluster" level.  A "cpu" level with one singleton
            group per node is always created.
        """
        # BUGFIX: the default used to be the mutable literal [] which is
        # shared across calls; use None as the sentinel instead.
        clusters = [] if clusters is None else clusters
        self._levels = {}
        self._nodes = set()
        if clusters:
            self.add_to_level("cluster", clusters)
        # Every node also forms its own singleton group at the "cpu" level.
        self.add_to_level("cpu", [[node] for node in self.flatten()])

    def add_to_level(self, level_name, level_vals):
        """Append a group of nodes to a level, creating the level if it
        does not exist yet.

        Args:
            level_name (hashable): The name of the level
            level_vals (list of lists): groups containing nodes
        """
        self._levels.setdefault(level_name, [])
        self._levels[level_name] += level_vals
        for group in level_vals:
            self._nodes.update(group)

    def get_level(self, level_name):
        """Return the groups of nodes associated with a level.

        The implicit "all" level is a single group with every node.
        """
        if level_name == "all":
            return [self.flatten()]
        return self._levels[level_name]

    def get_index(self, level, node):
        """Return the index of the given group within the level's list of
        groups (raises ValueError if the group is not present)."""
        return self.get_level(level).index(node)

    def get_node(self, level, index):
        """Return the group at position ``index`` within the level."""
        return self.get_level(level)[index]

    def __iter__(self):
        # Iterating a Topology yields its level names.
        return iter(self._levels)

    def flatten(self):
        """Return a flattened list of nodes in the topology."""
        return list(self._nodes)

    def level_span(self, level):
        """Return the number of groups in a level.

        Note: for the implicit "all" level this historically returns the
        number of *nodes* (not 1 group); that behaviour is preserved.
        """
        if level == "all":
            return len(self._nodes)
        return len(self._levels[level])

    def has_level(self, level):
        """Return True if the named level is present."""
        return level in self._levels
| {
"content_hash": "5dfb98798c22d403a53fca392026022a",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 60,
"avg_line_length": 27.63917525773196,
"alnum_prop": 0.5550167847817978,
"repo_name": "derkling/trappy",
"id": "5b046e6b7170f0d05c2836d196ee4ea5de2b4b35",
"size": "3267",
"binary": false,
"copies": "1",
"ref": "refs/heads/fix-traces-generation",
"path": "trappy/stats/Topology.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1395"
},
{
"name": "Emacs Lisp",
"bytes": "217"
},
{
"name": "JavaScript",
"bytes": "22235"
},
{
"name": "Python",
"bytes": "255570"
},
{
"name": "Shell",
"bytes": "924"
}
],
"symlink_target": ""
} |
"""
Cart-pole balancing with continuous / Kernelized iFDD
"""
from rlpy.Domains.FiniteTrackCartPole import FiniteCartPoleBalanceOriginal, FiniteCartPoleBalanceModern
from rlpy.Agents import SARSA, Q_LEARNING
from rlpy.Representations import *
from rlpy.Policies import eGreedy
from rlpy.Experiments import Experiment
import numpy as np
from hyperopt import hp
# Hyperparameter search space for hyperopt: log-uniform priors over the
# tunable keyword arguments of make_experiment (keys match its parameter
# names so sampled values can be passed straight through).
param_space = {
    'kernel_resolution':
    hp.loguniform("kernel_resolution", np.log(5), np.log(50)),
    'discover_threshold':
    hp.loguniform(
        "discover_threshold",
        np.log(1e-1),
        np.log(1e2)),
    'boyan_N0': hp.loguniform("boyan_N0", np.log(1e1), np.log(1e5)),
    'initial_learn_rate': hp.loguniform("initial_learn_rate", np.log(5e-2), np.log(1))}
def make_experiment(
        exp_id=1, path="./Results/Temp/{domain}/{agent}/{representation}/",
        discover_threshold=.21,
        boyan_N0=200.,
        initial_learn_rate=.1,
        kernel_resolution=13.14):
    """Build a cart-pole balancing Experiment with Kernelized iFDD.

    Q-learning with an eGreedy policy on FiniteCartPoleBalanceModern;
    the keyword arguments are the hyperparameters tuned via param_space.
    Returns the configured rlpy Experiment (not yet run).
    """
    domain = FiniteCartPoleBalanceModern()
    opt = {
        "path": path,
        "exp_id": exp_id,
        "max_steps": 100000,
        "num_policy_checks": 20,
        "checks_per_policy": 1,
        "domain": domain,
    }
    # Kernel widths: state-space extent of each dimension divided by the
    # requested resolution.
    state_range = domain.statespace_limits[:, 1] - domain.statespace_limits[:, 0]
    kernel_width = state_range / kernel_resolution
    representation = KernelizediFDD(domain, sparsify=1,
                                    kernel=gaussian_kernel,
                                    kernel_args=[kernel_width],
                                    active_threshold=0.01,
                                    discover_threshold=discover_threshold,
                                    normalization=True,
                                    max_active_base_feat=10,
                                    max_base_feat_sim=0.5)
    policy = eGreedy(representation, epsilon=0.)
    # A SARSA agent with boyan learn-rate decay would be a drop-in
    # alternative here.
    opt["agent"] = Q_LEARNING(
        policy, representation, discount_factor=domain.discount_factor,
        lambda_=0.9, initial_learn_rate=initial_learn_rate,
        learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
    return Experiment(**opt)
if __name__ == '__main__':
    # Run a single trial (exp_id=1) with live learning visualization,
    # then plot the results.
    experiment = make_experiment(1)
    experiment.run(visualize_learning=True, visualize_performance=False)
    experiment.plot()
    # experiment.save()
| {
"content_hash": "ded3c3af8ba9c339b9cb2f88ff2ab42e",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 103,
"avg_line_length": 37.97014925373134,
"alnum_prop": 0.622248427672956,
"repo_name": "MDPvis/rlpy",
"id": "31c8979f9f2aa6bbbbfeb7f039cf83a67063a2d8",
"size": "2544",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/cartpole_modern/kifdd.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "117712"
},
{
"name": "C++",
"bytes": "1575"
},
{
"name": "Python",
"bytes": "1175890"
}
],
"symlink_target": ""
} |
__author__ = 'api.roman.public@gmail.com (Roman Nurik)'
import logging
import os
from google.appengine.ext import webapp
from google.appengine.api import users
register = webapp.template.create_template_register()
# Google JSAPI keys for each deployed instance of this app, keyed by
# "<SERVER_NAME>:<SERVER_PORT>" (see jsapi_key below).
JSAPI_KEYS = {
    'gongo-dev.appspot.com:80': 'ABQIAAAAsc0UQXoo2BnAJLhtpWCJFBS5gYxShnUObVA4VEJAsXhjFwcoxhRau7hEEpIuasKe44kdgfsOyEFcUQ',
    'localhost:8080': 'ABQIAAAAa-nOaft0HwDB8qjrdQrFuhTwM0brOpm-All5BF6PoaKBxRWWERRwoUXW--ZXndf0j4fjnyMTJW65GQ',
}
@register.simple_tag
def jsapi_key():
  """Return the JSAPI key for the current host, or '' if none is configured.

  os.environ here reflects the current web request's CGI environment.
  """
  server_key = '%s:%s' % (os.environ['SERVER_NAME'], os.environ['SERVER_PORT'])
  logging.debug("server_key: %s", server_key)
  return JSAPI_KEYS.get(server_key, '')
def _default_dest_url():
  """Reconstruct the current request's URL path, plus query string if any."""
  path = os.environ['PATH_INFO']
  query = os.environ['QUERY_STRING']
  if query:
    return path + '?' + query
  return path
@register.simple_tag
def login_url(dest_url=''):
  """Template tag: a login URL redirecting back to dest_url.

  Falls back to the URL of the current request when dest_url is empty.
  """
  if not dest_url:
    dest_url = _default_dest_url()
  return users.create_login_url(dest_url)
@register.simple_tag
def logout_url(dest_url=''):
  """Template tag: a logout URL redirecting back to dest_url.

  Falls back to the URL of the current request when dest_url is empty.
  """
  if not dest_url:
    dest_url = _default_dest_url()
  return users.create_logout_url(dest_url)
@register.simple_tag
def test_curpath():
  """Debug template tag: render the whole request environment as text."""
  environ_dump = str(os.environ)
  return environ_dump
| {
"content_hash": "8906c2586018a742606d44c36f2a3e4e",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 119,
"avg_line_length": 32.9,
"alnum_prop": 0.7196048632218845,
"repo_name": "mhfrantz/alertfeed",
"id": "0ad535d207e0d9943eeba95244d9ba1054a2a9f0",
"size": "1915",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ui/templatelib.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1523"
},
{
"name": "HTML",
"bytes": "16653"
},
{
"name": "JavaScript",
"bytes": "15566"
},
{
"name": "Python",
"bytes": "280386"
},
{
"name": "Shell",
"bytes": "173"
}
],
"symlink_target": ""
} |
# Copyright (c) 2013-2014, Clemson University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name Clemson University nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import difflib
import collections
import fnmatch
import hashlib
import re
import subprocess
import sys
import errno
from .common import *
# Names of the external Subversion binaries invoked via subprocess.
DIFF = 'diff'
SVN = 'svn'
SVNADMIN = 'svnadmin'
SVNLOOK = 'svnlook'
# Template matching GNU diff's message for binary files; emitted when a
# unified text diff cannot be produced.
BINARY_DIFF = """\
Binary files {fromfile} and {tofile} differ
"""
#
# Using the ASCII codec here should be OK as long as Subversion never includes
# unicode characters in their numbering scheme.
#
# Version of the installed svn client as a tuple of digit strings, e.g.
# ('1', '8', '10'); compared lexicographically against tuples like ('1', '8').
SVN_VERSION = tuple(
    command([SVN, '--version', '--quiet'])
    .decode()
    .strip()
    .split('.')
)
# Revision specs of the form "<head>", "<rev>" or "<head>:<rev>".
head_rev_rx = re.compile(r'^(?=.)(?P<head>\D[^:]*)?:?(?P<rev>\d+)?$')
# One line of an svn:mergeinfo property value: "<path>:<min>[-<max>]".
mergeinfo_rx = re.compile(r'^(?P<head>.+):(?P<minrev>\d+)(?:-(?P<maxrev>\d+))$')
# The "    (from <src>)" copy-info line of `svnlook changed --copy-info`.
changed_copy_info_rx = re.compile(r'^[ ]{4}\(from (?P<src>.+)\)$')
# A (revision number, repo path) pair describing one point in history.
HistoryEntry = collections.namedtuple('HistoryEntry', 'rev path')
def _add_diff_prefix(diff, a='a', b='b'):
    """Prefix the file names in a unified diff with a/ and b/ (git-style)."""
    lines = []
    for line in diff.splitlines(True):
        if line.startswith('--- '):
            line = '--- %s/%s' % (a, line[4:])
        elif line.startswith('+++ '):
            line = '+++ %s/%s' % (b, line[4:])
        lines.append(line)
    return ''.join(lines)
def _join(*args):
    """Join path components with '/', skipping empty components."""
    parts = [arg for arg in args if arg]
    return '/'.join(parts)
class SvnRepo(VCSRepo):
    """A Subversion repository

    Unless otherwise specified, valid revisions are:
    - an integer (ex: 194)
    - an integer as a string (ex: "194")
    - a branch or tag name (ex: "HEAD", "trunk", "branches/branch1")
    - a branch or tag name at a specific revision (ex: "trunk:194")

    Revisions have the following meanings:
    - HEAD always maps to the root of the repository (/)
    - Anything else (ex: "trunk", "branches/branch1") maps to the corresponding
      path in the repository
    - The youngest revision is assumed unless a revision is specified

    For example, the following code will list the contents of the directory
    branches/branch1/src from revision 194:

    >>> repo = SvnRepo(path)
    >>> repo.ls('branches/branch1:194', 'src')

    Branches and tags are detected in branches() and tags() by looking at the
    paths specified in repo.branch_glob and repo.tag_glob. The default values
    for these variables will detect the following repository layout:

    - /trunk - the main development branch
    - /branches/* - branches
    - /tags/* - tags

    If a repository does not fit this layout, everything other than branch and
    tag detection will work as expected.
    """
    @classmethod
    def clone(cls, srcpath, destpath):
        """Copy a main repository to a new location."""
        try:
            os.makedirs(destpath)
        except OSError as e:
            # An already-existing destination directory is fine.
            if not e.errno == errno.EEXIST:
                raise
        # Stream a full `svnadmin dump` of the source repository into a
        # freshly created repository at the destination.
        cmd = [SVNADMIN, 'dump', '--quiet', '.']
        dump = subprocess.Popen(
            cmd, cwd=srcpath, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        repo = cls.create(destpath)
        repo.load(dump.stdout)
        stderr = dump.stderr.read()
        dump.stdout.close()
        dump.stderr.close()
        dump.wait()
        if dump.returncode != 0:
            raise subprocess.CalledProcessError(dump.returncode, cmd, stderr)
        return repo
    @classmethod
    def create(cls, path):
        """Create a new repository at ``path`` and return an SvnRepo for it."""
        try:
            os.makedirs(path)
        except OSError as e:
            # An already-existing directory is fine.
            if not e.errno == errno.EEXIST:
                raise
        cmd = [SVNADMIN, 'create', path]
        subprocess.check_call(cmd)
        return cls(path)
    @classmethod
    def cleanPath(cls, path):
        """Normalize a repo path: collapse slash runs, force a leading '/'."""
        path = multislash_rx.sub('/', path)
        if not path.startswith('/'):
            path = '/' + path
        return path
    def __init__(self, path):
        super(SvnRepo, self).__init__(path)
        # Glob patterns (relative to the repository root) used by
        # branches()/tags() to detect heads in the conventional layout.
        self.branch_glob = ['/trunk/', '/branches/*/']
        self.tag_glob = ['/tags/*/']
    @property
    def private_path(self):
        """Get the path to a directory which can be used to store arbitrary data

        This directory should not conflict with any of the repository
        internals.  The directory is created on first access if it does not
        already exist.
        """
        import os
        path = os.path.join(self.path, '.private')
        try:
            os.mkdir(path)
        except OSError as e:
            # Already existing is fine; re-raise anything else.
            if e.errno != errno.EEXIST:
                raise
        return path
    def _proplist(self, rev, path):
        # Low-level `svnlook proplist`; ``rev`` is a string.  With
        # path=None the revision properties (--revprop) are listed instead.
        cmd = [SVNLOOK, 'proplist', '-r', rev, '.', path or '--revprop']
        output = self._command(cmd).decode(self.encoding)
        props = [x.strip() for x in output.splitlines()]
        #
        # Subversion 1.8 adds extra user output when given a path argument.
        #
        if not path is None and SVN_VERSION >= ('1', '8'):
            return props[1:]
        else:
            return props
    def proplist(self, rev, path=None):
        """List Subversion properties of the path (revprops if path is None)"""
        rev, prefix = self._maprev(rev)
        if path is None:
            return self._proplist(str(rev), None)
        else:
            path = type(self).cleanPath(_join(prefix, path))
            return self._proplist(str(rev), path)
    def _propget(self, prop, rev, path):
        # Low-level `svnlook propget`; ``rev`` is a string.
        cmd = [SVNLOOK, 'propget', '-r', rev, '.', prop, path or '--revprop']
        return self._command(cmd).decode()
    def propget(self, prop, rev, path=None):
        """Get Subversion property value of the path (a revprop if path is None)"""
        rev, prefix = self._maprev(rev)
        if path is None:
            return self._propget(prop, str(rev), None)
        else:
            path = type(self).cleanPath(_join(prefix, path))
            return self._propget(prop, str(rev), path)
    def _mergeinfo(self, rev, path):
        """Parse svn:mergeinfo at (rev, path) into (head, minrev, maxrev) tuples."""
        revstr = str(rev)
        if 'svn:mergeinfo' not in self._proplist(revstr, path):
            return []
        results = []
        mergeinfo = self._propget('svn:mergeinfo', revstr, path)
        for line in mergeinfo.splitlines():
            m = mergeinfo_rx.match(line)
            assert m
            head, minrev, maxrev = m.group('head', 'minrev', 'maxrev')
            minrev = int(minrev)
            # A single-revision range "path:N" has no max component.
            maxrev = int(maxrev or minrev)
            results.append((head, minrev, maxrev))
        return results
    def _maprev(self, rev):
        """Map a revision spec to an (int revision, path prefix) pair.

        Accepts ints, "N", "head", "head:N" and "HEAD"; when no numeric
        revision is given the youngest revision is assumed.
        """
        if isinstance(rev, int):
            return (rev, '/')
        m = head_rev_rx.match(rev)
        assert m, 'invalid rev'
        head, rev = m.group('head', 'rev')
        if rev:
            rev = int(rev)
        else:
            rev = self.youngest()
        if head is None:
            return (rev, '/')
        elif head == 'HEAD':
            # HEAD always maps to the repository root.
            return (rev, '/')
        else:
            return (rev, '/' + head)
    def canonical_rev(self, rev):
        """Return the integer revision number for any accepted revision spec."""
        try:
            # Python 2: accept both str and unicode revision specs.
            types = (str, unicode)
        except NameError:
            # Python 3: `unicode` does not exist.
            types = str
        if isinstance(rev, int):
            return rev
        elif isinstance(rev, types) and rev.isdigit():
            return int(rev)
        else:
            rev, prefix = self._maprev(rev)
            return rev
    def compose_rev(self, branch, rev):
        """Build a "branch:N" revision spec from a branch name and a revision."""
        return '%s:%d' % (branch, self.canonical_rev(rev))
    def ls(
        self, rev, path, recursive=False, recursive_dirs=False,
        directory=False, report=()
    ):
        """List a directory (or a single entry) at the given revision.

        Returns a list of attrdicts with at least 'path' and 'type'
        ('f' file, 'd' directory, 'l' symlink).  Extra fields named in
        ``report`` ('commit', 'size', 'executable', 'target') are added
        where applicable.  With directory=True the entry for ``path``
        itself is returned instead of its contents.  Raises
        PathDoesNotExist if the path is absent at that revision.
        """
        rev, prefix = self._maprev(rev)
        revstr = str(rev)
        path = type(self).cleanPath(_join(prefix, path))
        # A trailing slash demands that the path be a directory.
        forcedir = False
        if path.endswith('/'):
            forcedir = True
            if path != '/':
                path = path.rstrip('/')
        if path == '/':
            if directory:
                # The root is always a directory; no need to ask svnlook.
                entry = attrdict(path='/', type='d')
                if 'commit' in report:
                    entry.commit = self._history(revstr, '/', 1)[0].rev
                return [entry]
            ltrim = 1
            prefix = '/'
        else:
            # ltrim strips "<path>/" from the full paths svnlook prints.
            ltrim = len(path) + 1
            prefix = path + '/'
        cmd = [SVNLOOK, 'tree', '-r', revstr, '--full-paths']
        if not recursive:
            cmd.append('--non-recursive')
        cmd.extend(['.', path])
        p = subprocess.Popen(
            cmd, cwd=self.path, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        output, stderr = p.communicate()
        if p.returncode != 0:
            stderr = stderr.decode()
            # svnlook exits 1 with "File not found" for a missing path.
            if p.returncode == 1 and 'File not found' in stderr:
                raise PathDoesNotExist(rev, path)
            raise subprocess.CalledProcessError(p.returncode, cmd, stderr)
        results = []
        lines = output.decode(self.encoding, 'replace').splitlines()
        if forcedir and not lines[0].endswith('/'):
            # Caller required a directory but the path is a file/link.
            raise PathDoesNotExist(rev, path)
        if lines[0].endswith('/'):
            # First line is the directory itself: keep only it for
            # directory=True, otherwise drop it and keep the contents.
            if directory:
                lines = lines[:1]
            else:
                lines = lines[1:]
        for name in lines:
            entry_name = name[ltrim:]
            entry = attrdict(path=name.strip('/'))
            if name.endswith('/'):
                if recursive and not recursive_dirs:
                    continue
                entry.type = 'd'
                entry_name = entry_name.rstrip('/')
            else:
                proplist = self._proplist(revstr, name)
                # svn:special marks symlinks; contents look like "link <target>".
                if 'svn:special' in proplist:
                    link = self._cat(revstr, name).decode(self.encoding, 'replace')
                    link = link.split(None, 1)
                    if len(link) == 2 and link[0] == 'link':
                        entry.type = 'l'
                        if 'target' in report:
                            entry.target = link[1]
                if 'type' not in entry:
                    entry.type = 'f'
                    if 'executable' in report:
                        entry.executable = 'svn:executable' in proplist
                    if 'size' in report:
                        entry.size = len(self._cat(revstr, name))
            if entry_name:
                entry.name = entry_name
            if 'commit' in report:
                entry.commit = self._history(revstr, name, 1)[0].rev
            results.append(entry)
        return results
    def _cat(self, rev, path):
        # Raw file contents (bytes) via `svnlook cat`; ``rev`` is a string.
        # The path is encoded so non-ASCII names survive the exec boundary.
        cmd = [SVNLOOK, 'cat', '-r', rev, '.', path.encode(self.encoding)]
        return self._command(cmd)
    def cat(self, rev, path):
        """Return the contents (bytes) of a regular file.

        Raises BadFileType if the path is not a regular file.
        """
        rev, prefix = self._maprev(rev)
        path = type(self).cleanPath(_join(prefix, path))
        ls = self.ls(rev, path, directory=True)
        assert len(ls) == 1
        if ls[0].get('type') != 'f':
            raise BadFileType(rev, path)
        return self._cat(str(rev), path)
    def _readlink(self, rev, path):
        # A symlink's contents are stored as "link <target>".
        output = self._cat(rev, path)
        link = output.decode(self.encoding, 'replace').split(None, 1)
        assert len(link) == 2 and link[0] == 'link'
        return link[1]
    def readlink(self, rev, path):
        """Return the target of a symlink.

        Raises BadFileType if the path is not a symlink.
        """
        rev, prefix = self._maprev(rev)
        path = type(self).cleanPath(_join(prefix, path))
        ls = self.ls(rev, path, directory=True)
        assert len(ls) == 1
        if ls[0].get('type') != 'l':
            raise BadFileType(rev, path)
        return self._readlink(str(rev), path)
    def youngest(self):
        """Return the youngest (most recent) revision number."""
        cmd = [SVNLOOK, 'youngest', '.']
        return int(self._command(cmd))
    def _heads(self, globs):
        """Find repository directories matching the given glob patterns.

        The globs are merged into a trie of path components, then the tree
        at the youngest revision is walked level by level, fnmatch-ing each
        directory name against the components at that depth.
        """
        root = {}
        for glob in globs:
            n = root
            for p in glob.strip('/').split('/'):
                n = n.setdefault(p, {})
        youngest = self.youngest()
        results = []
        def match(n, path):
            for d in self.ls(youngest, path):
                if d.get('type') == 'd':
                    for k, v in n.items():
                        if fnmatch.fnmatchcase(d.name, k):
                            if path:
                                p = path + '/' + d.name
                            else:
                                p = d.name
                            if v:
                                # More pattern components remain; descend.
                                match(v, p)
                            else:
                                results.append(p)
        match(root, '')
        return results
    def branches(self):
        """Return branch names: HEAD plus directories matching branch_glob."""
        return ['HEAD'] + self._heads(self.branch_glob)
    def tags(self):
        """Return tag names: directories matching tag_glob."""
        return self._heads(self.tag_glob)
    def heads(self):
        """Return all heads: HEAD, branches and tags."""
        return ['HEAD'] + self._heads(self.branch_glob + self.tag_glob)
    def empty(self):
        """Return True if the repository has no commits.

        `svnlook history -l2` prints a 2-line header; fewer than 4 lines
        of output means only revision 0 exists.
        """
        cmd = [SVNLOOK, 'history', '.', '-l2']
        output = self._command(cmd)
        return len(output.splitlines()) < 4
    def __contains__(self, rev):
        """Return True if the given revision spec exists in the repository."""
        rev, prefix = self._maprev(rev)
        cmd = [SVNLOOK, 'history', '.', prefix, '-l1', '-r', str(rev)]
        p = subprocess.Popen(
            cmd, cwd=self.path, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        stdout, stderr = p.communicate()
        # A nonzero exit means svnlook could not find the revision/path.
        return p.returncode == 0
    def __len__(self):
        """Return the number of commits in the repository."""
        cmd = [SVNLOOK, 'history', '.']
        output = self._command(cmd)
        # Discount the non-commit lines printed by `svnlook history`.
        return len(output.splitlines()) - 3
    def log(
        self, revrange=None, limit=None, firstparent=False, merges=None,
        path=None, follow=False
    ):
        """Get commit log entries.

        ``revrange`` may be a single revision spec (returns one
        CommitLogEntry) or a (from, to) pair (returns the entries reachable
        from `to` but not from `from`, newest first; either end may be
        None).  ``firstparent`` restricts traversal to the direct history,
        ignoring merge sources; ``merges`` True/False keeps only merge /
        non-merge commits.  ``follow`` is accepted for interface
        compatibility but is not used by this backend.
        """
        if not (revrange is None or isinstance(revrange, (tuple, list))):
            # a single revision was given
            rev, prefix = self._maprev(revrange)
            h = self._history(rev, prefix, 1)
            rev = h[0].rev
            return self._logentry(rev, prefix)
        if revrange is None:
            results = self._history(self.youngest(), path or '/', limit)
        else:
            if revrange[1] is None:
                # No upper bound: collect history from every head.
                include = set()
                rev1 = self.youngest()
                for head in self.heads():
                    if head == 'HEAD':
                        continue
                    if path:
                        p = head + '/' + path.lstrip('/')
                    else:
                        p = type(self).cleanPath(head)
                    include.update(self._mergehistory(rev1, p, limit))
            else:
                rev1, prefix1 = self._maprev(revrange[1])
                if path:
                    p = type(self).cleanPath(prefix1 + '/' + path)
                else:
                    p = prefix1
                if firstparent:
                    include = self._history(rev1, p)
                else:
                    include = self._mergehistory(rev1, p, limit)
            if revrange[0] is None:
                results = include
            else:
                # Subtract everything reachable from the lower bound.
                rev0, prefix0 = self._maprev(revrange[0])
                exclude = self._mergehistory(rev0, prefix0)
                results = include - exclude
        results = sorted(results, key=lambda x: x.rev, reverse=True)
        results = map(lambda x: self._logentry(x.rev, x.path), results)
        if merges is not None:
            if merges:
                results = filter(lambda x: len(x.parents) > 1, results)
            else:
                results = filter(lambda x: len(x.parents) <= 1, results)
        return list(results)
    def _logentry(self, rev, path, history=None):
        """Build (and cache) a CommitLogEntry for (rev, path).

        ``history`` may be supplied by the caller to avoid recomputing the
        last two history entries.
        """
        import hashlib
        revstr = str(rev)
        cmd = [SVNLOOK, 'info', '.', '-r', revstr]
        # The commit cache is keyed by a digest of the revision string.
        cachekey = hashlib.sha1(revstr.encode()).hexdigest()
        entry = self._commit_cache.get(cachekey)
        if entry:
            entry._cached = True
            return entry
        output = self._command(cmd).decode(self.encoding, 'replace')
        # `svnlook info` prints author, date, log size, then the message.
        author, date, logsize, message = output.split('\n', 3)
        date = parse_isodate(date)
        if history is None:
            history = self._history(rev, path, 2)
        parents = []
        if len(history) > 1:
            # First parent: the previous revision on this path.
            prev = history[1].rev
            if path == '/':
                parents.append(prev)
            else:
                parents.append('%s:%d' % (path, prev))
            # Additional parents come from merged-in heads whose merge
            # range extends beyond the first parent.
            for head, minrev, maxrev in self._mergeinfo(rev, path):
                if prev < maxrev:
                    h = self._history(maxrev, head, 1)
                    if head == '/':
                        parents.append(h[0].rev)
                    else:
                        parents.append('%s:%d' % (head, h[0].rev))
        entry = CommitLogEntry(rev, parents, date, author, message)
        if cachekey not in self._commit_cache:
            self._commit_cache[cachekey] = entry
        return entry
    def pdiff(self, rev):
        """Return the diff of ``rev`` against its previous revision."""
        rev, prefix = self._maprev(rev)
        if rev == 0:
            # Revision 0 has no parent to diff against.
            return ''
        cmd = [SVNLOOK, 'diff', '.', '-r', str(rev)]
        output = self._command(cmd)
        return _add_diff_prefix(output.decode(self.encoding))
    def _compose_url(self, rev=None, path=None, proto='file'):
        """Build a Subversion URL: <proto>://<repo>/<prefix>/<path>@<rev>.

        NOTE(review): rev=None would fail inside _maprev; existing callers
        always pass a revision.
        """
        url = '%s://%s' % (proto, self.path)
        rev, prefix = self._maprev(rev)
        path = path or ''
        path = path.lstrip('/')
        prefix = prefix.lstrip('/')
        if prefix:
            url = '%s/%s' % (url, prefix)
        if path:
            url = '%s/%s' % (url, path)
        if not rev is None:
            url = '%s@%d' % (url, rev)
        return url
    def _exists(self, rev, path):
        """Return the ls() entry for path at rev, or False if it is absent."""
        try:
            return self.ls(rev, path, directory=True)[0]
        except PathDoesNotExist:
            return False
def _diff_read(self, rev, path):
try:
entry = self.ls(rev, path, directory=True)[0]
if entry.type == 'f':
contents = self.cat(rev, path)
h = hashlib.sha1(contents).hexdigest
# Catch the common base class of encoding errors which is
# unfortunately ValueError.
try:
return contents.decode(self.encoding), h
except ValueError:
return None, h
elif entry.type == 'l':
return 'link %s\n' % self.readlink(rev, path), None
else:
assert entry.type == 'd'
return 'directory\n', None
except PathDoesNotExist:
return '', None
    def _diff(self, rev_a, rev_b, path, diff_a='a', diff_b='b'):
        """Recursively diff ``path`` between two revisions.

        When the path exists on both sides, `svn diff` does the work;
        when it exists on only one side, the diff is synthesized with
        difflib (or a binary-files message), recursing into directories.
        """
        entry_a = not path or self._exists(rev_a, path)
        entry_b = not path or self._exists(rev_b, path)
        if not entry_a and not entry_b:
            return ''
        elif not entry_a or not entry_b:
            # The path exists on only one side.
            if (
                entry_a and entry_a.type != 'd' or
                entry_b and entry_b.type != 'd'
            ):
                # Non-directory: synthesize an add/remove diff against
                # /dev/null.
                _, prefix_a = self._maprev(rev_a)
                _, prefix_b = self._maprev(rev_b)
                prefix_a, prefix_b = prefix_a.strip('/'), prefix_b.strip('/')
                fromfile = _join(diff_a, prefix_a, path.lstrip('/')) \
                    if entry_a else os.devnull
                tofile = _join(diff_b, prefix_b, path.lstrip('/')) \
                    if entry_b else os.devnull
                a, hasha = self._diff_read(rev_a, path)
                b, hashb = self._diff_read(rev_b, path)
                if a is None or b is None:
                    # At least one side is binary; compare digests only.
                    if hasha == hashb:
                        return ''
                    else:
                        return BINARY_DIFF.format(fromfile=fromfile,
                                                  tofile=tofile)
                a, b = a.splitlines(True), b.splitlines(True)
                diff = difflib.unified_diff(a, b,
                                            fromfile=fromfile,
                                            tofile=tofile)
                return ''.join(diff)
            elif entry_a:
                contents = self.ls(rev_a, path)
            else: # entry_b
                assert entry_b
                contents = self.ls(rev_b, path)
            # Directory on one side only: recurse into its entries.
            return ''.join(
                self._diff(rev_a, rev_b, entry.path, diff_a, diff_b)
                for entry in contents
            )
        else:
            # Present on both sides: let `svn diff` compare the URLs.
            url_a = self._compose_url(rev=rev_a, path=path)
            url_b = self._compose_url(rev=rev_b, path=path)
            cmd = [SVN, 'diff', url_a, url_b]
            output = self._command(cmd).decode(self.encoding)
            return _add_diff_prefix(output)
    def diff(self, rev_a, rev_b, path=None):
        """Return the diff of ``path`` (or the whole tree) between two revisions."""
        return self._diff(rev_a, rev_b, path)
    def changed(self, rev):
        """Return FileChangeInfo entries for the paths changed in ``rev``."""
        rev, prefix = self._maprev(rev)
        if rev == 0:
            # Revision 0 changes nothing.
            return []
        cmd = [SVNLOOK, 'changed', '.', '-r', str(rev), '--copy-info']
        output = self._command(cmd).decode(self.encoding, 'replace')
        lines = output.splitlines()
        # Reverse so pop() consumes lines in original order.
        lines.reverse()
        results = []
        while lines:
            line = lines.pop()
            # Format: 3-char status, a space, then the path.
            status = line[:3]
            path = line[4:].lstrip('/')
            copy = None
            if status.endswith('+'):
                # A '+' status is followed by a "(from <src>)" line.
                line = lines.pop()
                m = changed_copy_info_rx.match(line)
                assert m
                copy = m.group('src')
            entry = FileChangeInfo(path, str(status), copy)
            results.append(entry)
        return results
    def _history(self, rev, path, limit=None):
        """Return HistoryEntry items for ``path`` up to ``rev`` (newest first)."""
        cmd = [SVNLOOK, 'history', '.', '-r', str(rev), path]
        if limit is not None:
            cmd.extend(['-l', str(limit)])
        output = self._command(cmd).decode(self.encoding, 'replace')
        results = []
        # Skip the 2-line header printed by `svnlook history`.
        for line in output.splitlines()[2:]:
            r, p = line.split(None, 1)
            results.append(HistoryEntry(int(r), p))
        return results
    def _mergehistory(self, rev, path, limit=None):
        """Return the set of HistoryEntry items reachable from (rev, path),
        including revisions pulled in through svn:mergeinfo ranges."""
        results = set(self._history(rev, path, limit))
        for head, minrev, maxrev in self._mergeinfo(rev, path):
            # Never request more entries than the merge range can contain.
            l = maxrev - minrev + 1
            if limit is not None:
                l = min(l, limit)
            h = self._history(maxrev, head, l)
            for r, p in h:
                if r < minrev:
                    break
                results.add(HistoryEntry(r, p))
        return results
    def ancestor(self, rev1, rev2):
        """Find the youngest common ancestor of two revisions.

        Returns a "path:rev" spec (path stripped of its leading slash),
        or None if the two revisions share no history.
        """
        rev1, prefix1 = self._maprev(rev1)
        rev2, prefix2 = self._maprev(rev2)
        prefix1 = type(self).cleanPath(prefix1)
        if prefix1 != '/':
            prefix1 = prefix1.rstrip('/')
        prefix2 = type(self).cleanPath(prefix2)
        if prefix2 != '/':
            prefix2 = prefix2.rstrip('/')
        # Validate that both paths exist (raises PathDoesNotExist).
        self.ls(rev1, prefix1, directory=True)
        self.ls(rev2, prefix2, directory=True)
        minrev = min(rev1, rev2)
        if prefix1 == prefix2:
            # Same head: the ancestor is simply the older revision.
            return '%s:%d' % (prefix1.lstrip('/'), minrev)
        history1 = self._history(minrev, prefix1)
        history2 = self._history(minrev, prefix2)
        # First, look for a merge point recorded in svn:mergeinfo on
        # either side; histories are newest-first, so we can stop early.
        youngest = HistoryEntry(0, '/')
        for head, minrev, maxrev in self._mergeinfo(rev1, prefix1):
            for h in history2:
                if h.rev < minrev or h.rev < youngest.rev:
                    break
                if h.path == head and minrev <= h.rev <= maxrev:
                    youngest = h
        for head, minrev, maxrev in self._mergeinfo(rev2, prefix2):
            for h in history1:
                if h.rev < minrev or h.rev < youngest.rev:
                    break
                if h.path == head and minrev <= h.rev <= maxrev:
                    youngest = h
        if youngest.rev > 0:
            return '%s:%d' % (youngest.path.lstrip('/'), youngest.rev)
        # No merge point: walk both (rev-sorted) histories in lockstep
        # looking for a revision where both were the same path (e.g. a
        # common branch point).
        i1 = 0
        i2 = 0
        len1 = len(history1)
        len2 = len(history2)
        while i1 < len1 and i2 < len2:
            if history1[i1].rev < history2[i2].rev:
                i2 += 1
            elif history1[i1].rev > history2[i2].rev:
                i1 += 1
            else:
                if history1[i1].path == history2[i2].path:
                    return '%s:%d' % (history1[i1].path.lstrip('/'), history1[i1].rev)
                else:
                    i1 += 1
                    i2 += 1
        return None
    def _blame(self, rev, path):
        """Run `svn blame --xml` and pair each line with its commit info."""
        import os
        import xml.etree.ElementTree as ET
        url = 'file://' + os.path.abspath(self.path) + path
        cmd = [SVN, 'blame', '--xml', '-r', rev, url]
        output = self._command(cmd)
        tree = ET.fromstring(output)
        results = []
        # The XML carries per-line commit info but not the text itself;
        # fetch the file contents separately and zip them together.
        cat = self._cat(rev, path)
        target = tree.find('target')
        try:
            iter = target.iter('entry')
        except AttributeError: # added in python 2.7
            iter = target.getiterator('entry')
        for entry, text in zip(iter, cat.splitlines()):
            commit = entry.find('commit')
            rev = int(commit.attrib.get('revision'))
            author = commit.find('author').text
            date = commit.find('date').text
            date = parse_isodate(date)
            results.append(BlameInfo(rev, author, date, text))
        return results
    def blame(self, rev, path):
        """Return per-line BlameInfo for a regular file.

        Raises BadFileType if the path is not a regular file.
        """
        rev, prefix = self._maprev(rev)
        path = type(self).cleanPath(_join(prefix, path))
        ls = self.ls(rev, path, directory=True)
        assert len(ls) == 1
        if ls[0].get('type') != 'f':
            raise BadFileType(rev, path)
        return self._blame(str(rev), path)
    def dump(
        self, stream, progress=None, lower=None, upper=None,
        incremental=False, deltas=False
    ):
        """Dump the repository to a dumpfile stream.

        :param stream: A file stream to which the dumpfile is written
        :param progress: A file stream to which progress is written
        :param lower: Must be a numeric version number
        :param upper: Must be a numeric version number

        See ``svnadmin help dump`` for details on the other arguments.
        """
        cmd = [SVNADMIN, 'dump', '.']
        if progress is None:
            cmd.append('-q')
        if lower is not None:
            cmd.append('-r')
            if upper is None:
                cmd.append(str(int(lower)))
            else:
                cmd.append('%d:%d' % (int(lower), int(upper)))
        if incremental:
            cmd.append('--incremental')
        if deltas:
            cmd.append('--deltas')
        p = subprocess.Popen(cmd, cwd=self.path, stdout=stream, stderr=progress)
        p.wait()
        if p.returncode != 0:
            raise subprocess.CalledProcessError(p.returncode, cmd)
    def load(
        self, stream, progress=None, ignore_uuid=False, force_uuid=False,
        use_pre_commit_hook=False, use_post_commit_hook=False, parent_dir=None
    ):
        """Load a dumpfile stream into the repository.

        :param stream: A file stream from which the dumpfile is read
        :param progress: A file stream to which progress is written

        See ``svnadmin help load`` for details on the other arguments.
        """
        cmd = [SVNADMIN, 'load', '.']
        if progress is None:
            cmd.append('-q')
        if ignore_uuid:
            cmd.append('--ignore-uuid')
        if force_uuid:
            cmd.append('--force-uuid')
        if use_pre_commit_hook:
            cmd.append('--use-pre-commit-hook')
        if use_post_commit_hook:
            cmd.append('--use-post-commit-hook')
        if parent_dir:
            cmd.extend(['--parent-dir', parent_dir])
        p = subprocess.Popen(
            cmd, cwd=self.path, stdin=stream, stdout=progress,
            stderr=subprocess.PIPE
        )
        stderr = p.stderr.read()
        p.stderr.close()
        p.wait()
        if p.returncode != 0:
            raise subprocess.CalledProcessError(p.returncode, cmd, stderr)
    def tip(self, head):
        """Return the most recent revision spec on the given head."""
        if head == 'HEAD':
            return self.youngest()
        rev = self.log(limit=1, path=head)[0].rev
        return '{head}:{rev}'.format(head=head, rev=rev)
# vi:set tabstop=4 softtabstop=4 shiftwidth=4 expandtab:
| {
"content_hash": "f8800d00d3e20a422b2cbbb86ee4dfad",
"timestamp": "",
"source": "github",
"line_count": 819,
"max_line_length": 86,
"avg_line_length": 35.66178266178266,
"alnum_prop": 0.5213818605128907,
"repo_name": "ScottDuckworth/python-anyvcs",
"id": "86991dd66337698664cf3398d9e080f1280671d1",
"size": "29207",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "anyvcs/svn.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "199795"
},
{
"name": "Shell",
"bytes": "5217"
}
],
"symlink_target": ""
} |
from django.conf import settings
# Options passed to the wysihtml5 editor constructor.  Values are emitted
# verbatim into JavaScript, hence the string-encoded 'true'/'null' etc.
WYSIHTML5_EDITOR = {
    # Give the editor a name, the name will also be set as class
    # name on the iframe and on the iframe's body
    'name': 'null',
    # Whether the editor should look like the textarea (by adopting styles)
    'style': 'true',
    # Id of the toolbar element, pass false if you don't want
    # any toolbar logic
    'toolbar': 'null',
    # Whether urls, entered by the user should automatically become
    # clickable-links
    'autoLink': 'true',
    # Object which includes parser rules (set this to
    # examples/rules/spec.json or your own spec, otherwise only span
    # tags are allowed!)
    'parserRules': 'wysihtml5ParserRules',
    # Parser method to use when the user inserts content via copy & paste
    'parser': 'wysihtml5.dom.parse || Prototype.K',
    # Class name which should be set on the contentEditable element in
    # the created sandbox iframe, can be styled via the 'stylesheets' option
    'composerClassName': '"wysihtml5-editor"',
    # Class name to add to the body when the wysihtml5 editor is supported
    'bodyClassName': '"wysihtml5-supported"',
    # By default wysihtml5 will insert <br> for line breaks, set this to
    # false to use <p>
    'useLineBreaks': 'true',
    # Array (or single string) of stylesheet urls to be loaded in the
    # editor's iframe
    'stylesheets': '["%s"]' % (settings.STATIC_URL +
                               "wysihtml5/css/stylesheet.css"),
    # Placeholder text to use, defaults to the placeholder attribute
    # on the textarea element
    'placeholderText': 'null',
    # Whether the composer should allow the user to manually resize
    # images, tables etc.
    'allowObjectResizing': 'true',
    # Whether the rich text editor should be rendered on touch devices
    # (wysihtml5 >= 0.3.0 comes with basic support for iOS 5)
    'supportTouchDevices': 'true'
}
# Toolbar widget configuration, one entry per toolbar button.  Each entry
# names the wysihtml5 command to run ("command_name", optionally with a
# fixed "command_value"), whether the button is enabled ("active"), and
# the JavaScript functions that render its icon and, where applicable,
# its dialog.
WYSIHTML5_TOOLBAR = {
    "formatBlockHeader": {
        "active": True,
        "command_name": "formatBlock",
        "render_icon": "wysihtml5.widgets.render_formatBlockHeader_icon"
    },
    "formatBlockParagraph": {
        "active": True,
        "command_name": "formatBlock",
        "render_icon": "wysihtml5.widgets.render_formatBlockParagraph_icon"
    },
    "bold": {
        "active": True,
        "command_name": "bold",
        "render_icon": "wysihtml5.widgets.render_bold_icon"
    },
    "italic": {
        "active": True,
        "command_name": "italic",
        "render_icon": "wysihtml5.widgets.render_italic_icon"
    },
    "underline": {
        "active": True,
        "command_name": "underline",
        "render_icon": "wysihtml5.widgets.render_underline_icon"
    },
    "justifyLeft": {
        "active": True,
        "command_name": "justifyLeft",
        "render_icon": "wysihtml5.widgets.render_justifyLeft_icon"
    },
    "justifyCenter": {
        "active": True,
        "command_name": "justifyCenter",
        "render_icon": "wysihtml5.widgets.render_justifyCenter_icon"
    },
    "justifyRight": {
        "active": True,
        "command_name": "justifyRight",
        "render_icon": "wysihtml5.widgets.render_justifyRight_icon"
    },
    "insertOrderedList": {
        "active": True,
        "command_name": "insertOrderedList",
        "render_icon": "wysihtml5.widgets.render_insertOrderedList_icon"
    },
    "insertUnorderedList": {
        "active": True,
        "command_name": "insertUnorderedList",
        "render_icon": "wysihtml5.widgets.render_insertUnorderedList_icon"
    },
    "insertImage": {
        "active": True,
        "command_name": "insertImage",
        "render_icon": "wysihtml5.widgets.render_insertImage_icon",
        "render_dialog": "wysihtml5.widgets.render_insertImage_dialog"
    },
    "createLink": {
        "active": True,
        "command_name": "createLink",
        "render_icon": "wysihtml5.widgets.render_createLink_icon",
        "render_dialog": "wysihtml5.widgets.render_createLink_dialog"
    },
    "insertHTML": {
        "active": True,
        "command_name": "insertHTML",
        "command_value": "<blockquote>quote</blockquote>",
        "render_icon": "wysihtml5.widgets.render_insertHTML_icon"
    },
    "foreColor": {
        "active": True,
        "command_name": "foreColor",
        "render_icon": "wysihtml5.widgets.render_foreColor_icon"
    },
    "changeView": {
        "active": True,
        "command_name": "change_view",
        "render_icon": "wysihtml5.widgets.render_changeView_icon"
    },
}
# Server-side whitelist of allowed tags.  This is necessary to protect the
# content field in cases where the user disables JavaScript in the browser,
# so that Wysihtml5 cannot do the filtering job client-side.
WYSIHTML5_ALLOWED_TAGS = ('h1 h2 h3 h4 h5 h6 div p b i u'
                          ' ul ol li span img a blockquote')
| {
"content_hash": "098f4923b7f910d119b6165fcee5fb07",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 76,
"avg_line_length": 37.457364341085274,
"alnum_prop": 0.6218956953642384,
"repo_name": "danirus/django-wysihtml5",
"id": "3957f752a49e9fed33ab81dcc197e7f08498b9c3",
"size": "4856",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wysihtml5/conf/defaults.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "7465"
},
{
"name": "JavaScript",
"bytes": "14624"
},
{
"name": "Python",
"bytes": "49217"
},
{
"name": "Shell",
"bytes": "117"
}
],
"symlink_target": ""
} |
# SWIG bootstrap: locate and load the generated C extension module
# ``_param_EtherDevice`` that backs the proxy classes defined below.
from sys import version_info
if version_info >= (2,6,0):
    def swig_import_helper():
        # Prefer the extension that sits next to this wrapper so the
        # matching build is loaded ahead of anything else on sys.path.
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_param_EtherDevice', [dirname(__file__)])
        except ImportError:
            # Not alongside the wrapper -- fall back to a normal import.
            import _param_EtherDevice
            return _param_EtherDevice
        if fp is not None:
            try:
                _mod = imp.load_module('_param_EtherDevice', fp, pathname, description)
            finally:
                # Always close the file handle returned by find_module.
                fp.close()
            return _mod
    _param_EtherDevice = swig_import_helper()
    del swig_import_helper
else:
    import _param_EtherDevice
del version_info
# ``property`` exists on all modern interpreters; the guard is kept from the
# SWIG template for ancient Pythons (< 2.2) where the builtin is missing.
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    """Assign *value* to attribute *name* on a SWIG proxy instance.

    'thisown' is forwarded to the underlying SWIG pointer object, 'this' is
    only stored when the value is a SwigPyObject, and registered setters in
    ``__swig_setmethods__`` take precedence.  With *static* true, creating a
    brand-new attribute is rejected.
    """
    if name == "thisown":
        return self.this.own(value)
    if name == "this":
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    setter = class_type.__swig_setmethods__.get(name, None)
    if setter:
        return setter(self, value)
    # Equivalent of: allow when non-static or the attribute already exists.
    if static and not hasattr(self, name):
        raise AttributeError("You cannot add attributes to %s" % self)
    self.__dict__[name] = value
def _swig_setattr(self,class_type,name,value):
    """Non-static variant of ``_swig_setattr_nondynamic`` (new attributes
    may be created on the proxy instance)."""
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self, class_type, name):
    """Look up *name* through the SWIG getter table of *class_type*.

    'thisown' is answered by the underlying SWIG pointer object; anything
    without a registered getter raises AttributeError.
    """
    if name == "thisown":
        return self.this.own()
    getter = class_type.__swig_getmethods__.get(name, None)
    if not getter:
        raise AttributeError(name)
    return getter(self)
def _swig_repr(self):
    """repr() helper shared by all SWIG proxy classes in this module."""
    try:
        inner = "proxy of " + self.this.__repr__()
    except:  # any failure on the SWIG pointer simply means "no proxy info"
        inner = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, inner)
# Compatibility shim: on very old Pythons without new-style classes the
# proxies fall back to classic classes (_newclass == 0).
try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0
def _swig_setattr_nondynamic_method(set):
    """Wrap setter *set* so only existing attributes (or 'thisown'/'this')
    can be assigned; creating new attributes raises AttributeError."""
    def set_attr(self, name, value):
        if name == "thisown":
            return self.this.own(value)
        if name == "this" or hasattr(self, name):
            set(self, name, value)
        else:
            raise AttributeError("You cannot add attributes to %s" % self)
    return set_attr
import param_PciDevice
import param_Platform
import param_IntrControl
import param_System
import enum_MemoryMode
import AbstractMemory_vector
import param_AbstractMemory
import range
import param_MemObject
import param_SimObject
import param_DmaDevice
import param_PioDevice
class EtherDevice(param_PciDevice.PciDevice):
    """SWIG proxy for the generated EtherDevice parameter class.

    Abstract on the Python side: instantiation is blocked below.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
# Register the proxy with the C extension so wrapped pointers come back
# as EtherDevice instances.
EtherDevice_swigregister = _param_EtherDevice.EtherDevice_swigregister
EtherDevice_swigregister(EtherDevice)
class EtherDeviceParams(param_PciDevice.PciDeviceParams):
    """SWIG proxy for the generated EtherDeviceParams parameter struct."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # Exposes the C++ port_interface_connection_count member via the
    # generated getter/setter pair.
    port_interface_connection_count = _swig_property(_param_EtherDevice.EtherDeviceParams_port_interface_connection_count_get, _param_EtherDevice.EtherDeviceParams_port_interface_connection_count_set)
    def __init__(self):
        # Allocate the underlying C++ object and attach it to this proxy.
        this = _param_EtherDevice.new_EtherDeviceParams()
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _param_EtherDevice.delete_EtherDeviceParams
    __del__ = lambda self : None;
EtherDeviceParams_swigregister = _param_EtherDevice.EtherDeviceParams_swigregister
EtherDeviceParams_swigregister(EtherDeviceParams)
| {
"content_hash": "439ab92d5d9fb3b5062ea9755378db9b",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 200,
"avg_line_length": 35.53333333333333,
"alnum_prop": 0.666309300455642,
"repo_name": "silkyar/570_Big_Little",
"id": "7940f2277ace3bbedafe77c16c351a7918859a65",
"size": "3935",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/ARM/python/m5/internal/param_EtherDevice.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "232078"
},
{
"name": "C",
"bytes": "887097"
},
{
"name": "C++",
"bytes": "52497889"
},
{
"name": "D",
"bytes": "13736198"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "Java",
"bytes": "3096"
},
{
"name": "JavaScript",
"bytes": "78818"
},
{
"name": "Perl",
"bytes": "13199821"
},
{
"name": "Prolog",
"bytes": "977139"
},
{
"name": "Python",
"bytes": "3831426"
},
{
"name": "Ruby",
"bytes": "19404"
},
{
"name": "Scilab",
"bytes": "14370"
},
{
"name": "Shell",
"bytes": "16704"
},
{
"name": "Visual Basic",
"bytes": "2884"
},
{
"name": "XML",
"bytes": "16048"
}
],
"symlink_target": ""
} |
from nanpy.arduinotree import ArduinoTree
from nanpy.i2c import I2C_Master
from nanpy.serialmanager import SerialManager
import logging
log = logging.getLogger(__name__)
def main():
    """Open the serial link to the Arduino and print the address of every
    device that answers on the I2C bus."""
    serial_link = SerialManager(sleep_after_connect=2)
    serial_link.open()
    print (serial_link.device)

    tree = ArduinoTree(connection=serial_link)
    bus = I2C_Master(tree.wire)
    print (['0x%02x' % address for address in bus.scan()])


if __name__ == '__main__':
    main()
| {
"content_hash": "aedaaa2ad8a25b2dc47b3d7ba76f67ca",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 53,
"avg_line_length": 25.27777777777778,
"alnum_prop": 0.6967032967032967,
"repo_name": "joppi/nanpy",
"id": "3d2f2365fbd772b9a2bb4fc55be4ae4a3fd426ed",
"size": "494",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nanpy/examples/i2c_scanner.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "108146"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
import time
from contextlib import contextmanager
from traceback import format_exc
import logging
import argparse
import smokesignal
from gpiozero import Button as Button
from twisted.internet import reactor, task
from twisted.internet import stdio
from pixel_table.c_break_tty import Cbreaktty
from pixel_table.key_handler import KeyHandler
from pixel_table.modes.rain import Rain
from pixel_table.modes.pong import Pong
from pixel_table.modes.message import Message
from pixel_table.modes.game_of_life import GameOfLife
from pixel_table.modes.off import Off
from pixel_table.modes.invaders import Invaders
from pixel_table.modes.tetris import Tetris
from pixel_table.modes.noise import Noise
from pixel_table.modes.title_page import TitlePage
from pixel_table.pixel_grid import PixelGrid
from pixel_table.external.arduino import Arduino
from pixel_table.external.touch_buttons import TouchButtons
from pixel_table.output import Output
# Log to a file: stdout/stderr are taken over by the live status display.
logging.basicConfig(filename='pixel-table.log',
                    filemode='w',
                    format='%(asctime)s %(name)s %(levelname)s: %(message)s',
                    level=logging.DEBUG)
_logger = logging.getLogger("pixel_table")
class PixelTable(object):
    """Top-level controller: owns the pixel grid and the active mode, and
    drives both from a twisted reactor loop at ``FPS`` frames per second.

    Constructing an instance blocks in ``reactor.run()`` until shutdown.
    """

    GPIO_MODE = 23   # BCM pin of the physical "mode" push button
    GPIO_STATE = 24  # BCM pin of the physical "state" push button
    FPS = 30

    def __init__(self, output):
        self._pixel_grid = PixelGrid(output)
        self._buttons = {}  # NOTE(review): unused within this class -- confirm before removing
        self._modes = [Off, Rain, Pong, Tetris, Invaders, GameOfLife, Noise, Message]
        self._now = time.time()
        self._mode = None
        self._event_queue = []
        self._init_panel_buttons()
        # Hardware peripherals only exist when driving the real LED table.
        if output == Output.NEO_PIXELS:
            self._touch_buttons = TouchButtons()
            self._arduino = Arduino()
        else:
            self._touch_buttons = self._arduino = None
        keyboard = KeyHandler(self)
        stdio.StandardIO(keyboard, sys.stdin.fileno())
        self.set_mode(Invaders)
        # Schedule update() every frame, then block in the reactor.
        task.LoopingCall(self.update).start(1 / self.FPS)
        with self.setup_terminal():
            reactor.run()

    @contextmanager
    def setup_terminal(self):
        """Put the controlling tty into cbreak mode with the cursor hidden
        for the lifetime of the reactor, restoring it afterwards."""
        os.system("clear")  # Clear terminal
        os.system('setterm -cursor off')
        # os.system("xset r rate 100 30")
        try:
            term_state = Cbreaktty(sys.stdin.fileno())
        except IOError:
            sys.stderr.write("Error: " + sys.argv[0] + " only for use on interactive ttys\n")
            sys.exit(1)
        try:
            yield
        finally:
            os.system("clear")
            os.system('setterm -cursor on')
            # os.system("xset r rate 500 33")
            term_state.return_to_original_state()

    def _init_panel_buttons(self):
        """Wire the two GPIO push buttons.  Presses are queued and handled
        once per frame because GPIO callbacks fire on another thread."""
        self._mode_button = Button(self.GPIO_MODE)
        self._mode_button.when_pressed = lambda: self.add_to_event_queue("panel_button_press", "mode")
        self._state_button = Button(self.GPIO_STATE)
        self._state_button.when_pressed = lambda: self.add_to_event_queue("panel_button_press", "state")

    def on_mode_button_press(self):
        """Cycle to the next mode in ``self._modes`` (wraps around)."""
        index = (self._mode.index + 1) % len(self._modes)
        self.set_mode(self._modes[index])

    def on_state_button_press(self):
        """Advance the current mode to its next internal state."""
        self.set_mode(self._mode.mode, self._mode.state_index + 1)

    def set_mode(self, mode, state_index=None, transition=True):
        """Activate *mode*, optionally via a title-page transition.

        NOTE(review): a state_index of 0 falls back to the mode default
        because of the ``or`` -- confirm 0 is never a valid explicit index.
        """
        self._pixel_grid.clear()
        index = self._modes.index(mode)
        smokesignal.clear_all()  # Clear all events in ephemeral objects.
        if transition:
            self._mode = TitlePage(self, mode, index, state_index or mode.DEFAULT_STATE_INDEX)
        else:
            self._mode = mode(index, state_index or mode.DEFAULT_STATE_INDEX)

    def add_to_event_queue(self, event, *args):
        """Store real-time events and pass them out once per frame"""
        self._event_queue.append((event, args))

    def update(self):
        """Per-frame tick: dispatch queued events, advance and render the
        current mode, then push the frame to the grid."""
        try:
            # BUG FIX: was time.clock() (CPU time, removed in Python 3.8),
            # which is inconsistent with the time.time() seed in __init__
            # and made dt useless for wall-clock frame pacing.
            now = time.time()
            dt = now - self._now
            self._now = now
            self._emit_pending_events(dt)
            if self._touch_buttons is not None:
                self._touch_buttons.emit_events(dt)
            self._mode.update(self._pixel_grid, dt)
            self._mode.render(self._pixel_grid)
            self._pixel_grid.write()
            if self._arduino is not None:
                _logger.debug(self._arduino.get_fft_buckets())
            # Guard against a zero-length frame on coarse clocks.
            self._dump(1 / dt if dt else 0.0)
        except:
            _logger.error(format_exc())
            raise

    def _emit_pending_events(self, dt):
        """Flush the queue filled by GPIO/input callbacks since last frame."""
        for event, args in self._event_queue:
            if event == "panel_button_press":
                getattr(self, "on_%s_button_press" % args[0])()
            elif event.endswith("held"):
                # "held" events need the frame delta to scale their effect.
                smokesignal.emit(event, *args, dt=dt)
            else:
                smokesignal.emit(event, *args)
        self._event_queue = []

    def _dump(self, fps):
        """Print the mode's status lines plus an FPS counter to stderr."""
        lines = []
        self._mode.dump(lines)
        lines.append("%34s" % ("FPS: %2.1f " % fps))
        print("\n".join(lines), file=sys.stderr)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Pixel table controller')
    # NOTE(review): with unicode_literals, bytes(s) normalizes the choices to
    # native str on Python 2; under Python 3 bytes(str) would raise -- this
    # entry point appears to target Python 2 only.
    parser.add_argument('output',
                        metavar='OUTPUT',
                        type=str,
                        choices=[bytes(s) for s in Output.ALL],
                        help='output pixel data to %s/%s/%s' % Output.ALL)
    args = parser.parse_args()
    # Constructing PixelTable starts the reactor and blocks until exit.
    PixelTable(args.output)
| {
"content_hash": "4780c93676065c58b71f2172dd02cd16",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 104,
"avg_line_length": 33.09580838323353,
"alnum_prop": 0.6059345033472047,
"repo_name": "Spooner/pixel-table",
"id": "cf4a4aa81a3abdca3cab3a31787f933327568606",
"size": "5550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pixel-table.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3849"
},
{
"name": "C++",
"bytes": "8690"
},
{
"name": "Makefile",
"bytes": "169"
},
{
"name": "Python",
"bytes": "51762"
}
],
"symlink_target": ""
} |
import sys # For OS Platform Determination
import os # For Implementation of System Clear Terminal Screen Command,
# Command Line Argument Processing and Exit To OS Prompt Function
# ================================
# ==> Constant Initializations <==
# ================================
dashln = '-' * 78
hexfmtlen = 6          # width of the zero-padded hex file-offset column
# ==========================
# ==> Main Program Logic <==
# ==========================
#
# >>> Platform Testing <<<
#
# >>> Test Whether Program Is Being Run Under A Flavor Of Linux or Unix? <<<
# >>> Known Linux Implementations Tested Were Red Hat V6.1 and SuSE V7.0 <<<
if sys.platform == 'linux-i386' or sys.platform == 'linux2':
    SysCls = 'clear'
# >>> Test Whether Program Is Being Run Under Windows 32-bit Implementation <<<
elif sys.platform == 'win32' or sys.platform == 'dos' or sys.platform[0:5] == 'ms-dos':
    SysCls = 'cls'
# >>> Otherwise the Clear Screen Func is 'unknown' to prevent erroneous command execution <<<
else:
    SysCls = 'unknown'
def headings(argFile, argDashes, argSysCls): # Arguments are Filename, Line, Clr Scrn Fn Command
    """Clear the screen (when a clear command is known) and print the
    dump header; returns 0 so the caller can reuse it as a fresh line
    counter."""
    if argSysCls != 'unknown': # Is A Valid Clear Screen Command Known?
        os.system(argSysCls) # Invoke System Clear Screen Display OS Command
    print 'File: ', argFile
    print argDashes
    return 0
def GetDisplayChar(argCharVal, argInChar): # Arguments are ASCII Char Value & Character Read
    """Return argInChar when it is displayable, otherwise a '.' placeholder.

    Control characters (< 32) and the DEL/C1 range (128-160) count as
    non-displayable.  BUG FIX: the original tested the *global* ``charval``
    instead of the ``argCharVal`` parameter, silently coupling the function
    to module state.
    """
    if argCharVal < 32 or (argCharVal > 127 and argCharVal < 161):
        return '.' # Use '.' to denote Non-displayable character
    else:
        return argInChar
if SysCls != 'unknown': # Is A Valid Clear Screen Command Known?
    os.system(SysCls) # Invoke System Clear Screen Display OS Command
# =================================
# ==> Print Program Info Banner <==
# =================================
print 'READBIN.PY -- Python Hex File Format Dump Command Line Utility For Binary Files'
print 'Version 1.0'
print 'By Tony Dycks'
print ' '
print 'Date Written: June 13, 2001'
print 'Date Last Revised: June 20, 2001'
print
print 'Submitted For Inclusion In The Python Cookbook'
print 'Open Source Contribution Under The GNU Public License'
print ' '
print ' '
print 'Running on System Platform:', sys.platform
print ' '
print 'Press <Enter> Key To Continue ... ',
response = sys.stdin.readline()
# Per-dump working state: the printable-text column, the hex column, the
# running byte offset and the count of bytes in the current 16-byte row.
charln = ''
hexln = ''
offset = 0
offsetlen = 0
hexoffset = ''
charval = 0
charcnt = 0
# ################################################################################
#  The following code will require mods if run from within the Python Interpreter
# ##############################################################################
# =========================================================================
# ==> No Command Line Argument Specified; Display Program Usage Message <==
# =========================================================================
if len(sys.argv) < 2:
    print chr(7) # ==> Beep Once <==
    print 'Incorrect Command Line Syntax ...'
    print ' '
    print 'Usage: Python readbin.py </Filepath/Filename.Ext> <Enter>'
    print ' '
    # NOTE(review): sys.exit() raises SystemExit before print evaluates it,
    # so the 'print' here never produces output.
    print sys.exit()
# =========================================================
# ==> Open File Specified On The Command Line For Input <==
# =========================================================
try:
    infile = open(sys.argv[1], 'r')
    lncnt = headings(sys.argv[1], dashln, SysCls) # Successful Return From Function Inits Line Counter
    while 1: # ==> Read Until End Of File One Character At A Time <==
        inchar = infile.read(1)
        if not inchar:
            break
        charcnt = charcnt + 1
        charval = ord(inchar)
        # Build a 2-digit hex string by hand: hex() yields '0x9' or '0x7f',
        # so keep the last two characters, zero-padding single digits.
        hexval = hex(charval)
        hexstrlen = len(hexval)
        startpos = hexstrlen - 2
        if hexval[startpos] == 'x':
            startpos = startpos + 1
            hexval = '0'+hexval[startpos]
        else:
            hexval = hexval[startpos:hexstrlen]
        hexln = hexln+' '+hexval
        charln = charln + GetDisplayChar(charval, inchar)
        # ===================================================================
        # ==> 16 Characters Appended To String; Time To Print A Dump Line <==
        # ===================================================================
        if charcnt == 16:
            # Zero-pad the row's byte offset to hexfmtlen digits.
            hexoffset = hex(offset)
            offsetlen = len(hexoffset)
            hexoffset = hexoffset[2:offsetlen]
            while len(hexoffset) < hexfmtlen:
                hexoffset = '0'+hexoffset
            print hexoffset+': '+hexln, ' | ', charln
            charln = ''
            hexln = ''
            charcnt = 0
            offset = offset + 16
            lncnt = lncnt + 1
            # ========================================================================
            # ==> A Screen-full of Info Displayed; Prompt for Continuation or Exit <==
            # ========================================================================
            if lncnt > 19:
                print dashln
                print 'Press "X" <Enter> To Exit; <Enter> To Continue ... ',
                response = sys.stdin.readline()
                # =============================================================
                # ==> Take The First Character Slice of String, "response"; <==
                # ==> Test for Uppercase or Lowercase E<x>it response       <==
                # =============================================================
                if response[0] == "X" or response[0] == 'x':
                    break # >>> Back to OS Console Prompt <<<
                else:
                    # ====================================================
                    # ==> Display Headings; Reset Screen Lines Counter <==
                    # ====================================================
                    lncnt = headings(sys.argv[1], dashln, SysCls)
except:
    print chr(7) # >>> Beep Once <<<
    print 'Error Processing File:', sys.argv[1]
    print 'Terminating Program -- readbin.py'
    print ' '
    sys.exit()
# ============================
# ==> End Of Program Logic <==
# ============================
# Flush any final partial row (fewer than 16 bytes), padding both columns.
if len(charln) > 0:
    while len(charln) < 16:
        hexln = hexln + ' '
        charln = charln + ' '
    hexoffset = hex(offset)
    offsetlen = len(hexoffset)
    hexoffset = hexoffset[2:offsetlen]
    while len(hexoffset) < hexfmtlen:
        hexoffset = '0'+hexoffset
    print hexoffset+': '+hexln, ' | ', charln
# ===============================================
# ==> Successfully Processed The File Then    <==
# ==> Show Message and Exit Back To OS Prompt <==
# ===============================================
print ' '
print '>>> End Of Program -- readbin.py <<<'
print ' '
sys.exit()
| {
"content_hash": "33ac0d050629d773eb39ebdc1041edbc",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 100,
"avg_line_length": 38.425,
"alnum_prop": 0.5213077423552375,
"repo_name": "ActiveState/code",
"id": "5e9c18ea1739f03e35fab4b0b5c46a3f7296aa65",
"size": "9834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/65257_READBINPY__CommLine_Utility_Reading_Printing/recipe-65257.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
""" Helper functions for visualizing """
import ee
def stretch_std(image, region, bands=None, std=1, scale=None):
    """ Get mins and maxs values for stretching a visualization using standard
    deviation

    Returns ``dict(bands=..., min=..., max=...)`` where min/max are scalars
    for a single band or 3-element lists for an RGB visualization.
    """
    if not bands:
        # Default to the first three bands (or all of them when fewer).
        names = image.bandNames()
        bands = ee.List(ee.Algorithms.If(
            names.size().gte(3), names.slice(0,3), names.slice(0)))
        bands = bands.getInfo()
    image = image.select(bands)
    geom = region or image.geometry()  # fall back to the image footprint
    params = dict(geometry=geom, bestEffort=True)
    if scale:
        params['scale'] = scale
    params['reducer'] = ee.Reducer.mean()
    mean = image.reduceRegion(**params)
    params['reducer'] = ee.Reducer.stdDev()
    stdDev = image.reduceRegion(**params)
    def minmax(band, val):
        # Bounds are mean(band) -/+ std * stdDev(band).
        minv = ee.Number(val).subtract(ee.Number(stdDev.get(band)).multiply(std))
        maxv = ee.Number(val).add(ee.Number(stdDev.get(band)).multiply(std))
        return ee.List([minv, maxv])
    if len(bands) == 1:
        band = bands[0]
        values = minmax(band, mean.get(band)).getInfo()
        minv = values[0]
        maxv = values[1]
    else:
        # ee.Dictionary.map invokes minmax(key, value) for every band entry.
        values = mean.map(minmax).select(bands).getInfo()
        minv = [values[bands[0]][0], values[bands[1]][0], values[bands[2]][0]]
        maxv = [values[bands[0]][1], values[bands[1]][1], values[bands[2]][1]]
    return dict(bands=bands, min=minv, max=maxv)
def stretch_percentile(image, region, bands=None, percentile=90, scale=None):
    """Compute min/max visualization bounds for *image* by keeping the
    central *percentile* of the data over *region*.

    Returns ``dict(bands=..., min=..., max=...)``; min/max are scalars for a
    single band, 3-element lists otherwise.
    """
    # Central window, e.g. percentile=90 -> p5 .. p95.
    low_p = 50-(percentile/2)
    high_p = 50+(percentile/2)
    if not bands:
        names = image.bandNames()
        bands = ee.List(ee.Algorithms.If(
            names.size().gte(3), names.slice(0,3), names.slice(0)))
        bands = bands.getInfo()
    image = image.select(bands)
    geom = region or image.geometry()
    params = dict(geometry=geom, bestEffort=True)
    if scale:
        params['scale'] = scale
    params['reducer'] = ee.Reducer.percentile([low_p, high_p])
    percentiles = image.reduceRegion(**params)
    def band_bounds(band):
        # Reducer output keys look like "<band>_p<percentile>".
        low_key = ee.String(band).cat('_p').cat(ee.Number(low_p).format())
        high_key = ee.String(band).cat('_p').cat(ee.Number(high_p).format())
        return ee.List([ee.Number(percentiles.get(low_key)),
                        ee.Number(percentiles.get(high_key))])
    if len(bands) == 1:
        bounds = band_bounds(bands[0]).getInfo()
        minv, maxv = bounds[0], bounds[1]
    else:
        bounds = ee.List(bands).map(band_bounds).getInfo()
        minv = [bounds[0][0], bounds[1][0], bounds[2][0]]
        maxv = [bounds[0][1], bounds[1][1], bounds[2][1]]
    return dict(bands=bands, min=minv, max=maxv)
| {
"content_hash": "395c9eb4df497af17690fccb0ab5af68",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 81,
"avg_line_length": 33.2,
"alnum_prop": 0.6059532246633593,
"repo_name": "gee-community/gee_tools",
"id": "c0b8ea355d3d9dfe7220e8a01ddd79744b34501d",
"size": "2837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geetools/visualization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "254578"
},
{
"name": "Python",
"bytes": "290230"
}
],
"symlink_target": ""
} |
import simuvex
from simuvex.s_type import SimTypeInt
######################################
# getchar
######################################
class getchar(simuvex.SimProcedure):
def run(self):
self.return_type = SimTypeInt(32, True)
data = self.inline_call(
simuvex.SimProcedures['libc.so.6']['_IO_getc'], 0).ret_expr # stdin
return data
| {
"content_hash": "e6d85b1210ab3d3f3860321a5c980d90",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 80,
"avg_line_length": 25.533333333333335,
"alnum_prop": 0.5143603133159269,
"repo_name": "chubbymaggie/simuvex",
"id": "afa760d06cb2bb5571d314fc466ffe4cdf32d4b7",
"size": "383",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simuvex/procedures/libc___so___6/getchar.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6276"
},
{
"name": "C++",
"bytes": "34210"
},
{
"name": "Makefile",
"bytes": "599"
},
{
"name": "Python",
"bytes": "854125"
}
],
"symlink_target": ""
} |
import os, re, string, sys, commands
import logging
import urllib,urllib2
import warnings
import json
import boto3
import MySQLdb
import ConfigParser
from binascii import hexlify, unhexlify
from simplecrypt import encrypt, decrypt
from boto3.s3.transfer import S3Transfer
from botocore.client import Config
from sys import argv, exit, stderr
from optparse import OptionParser
sys.path.insert(0, sys.path[0]+"/../../src")
from config import *
from funcs import *
class botoDownload:
    """Downloads a file from S3 with per-user AWS credentials fetched from
    the Dolphin web API (credentials are stored encrypted on the server)."""
    # Class-level defaults; overwritten per instance in __init__.
    f=""
    config = ConfigParser.ConfigParser()
    url = ""

    def __init__(self, url, f):
        # f is a ``funcs`` helper instance, url the Dolphin API endpoint.
        self.f = f
        self.url = url

    def getAmazonCredentials(self, username):
        """Query the API for *username*'s AWS credentials.

        Returns the first record as a dict, or '' when nothing was found.
        """
        data = urllib.urlencode({'func':'getAmazonCredentials', 'username':username})
        ret = self.f.queryAPI(self.url, data, "getAmazonCredentials:"+str(username))
        if (len(ret)>2):
            ret=json.loads(ret)[0]
        else:
            ret=''
        return ret

    def downloadFile(self, amazon, s3_filename, outfile):
        """Decrypt the stored AWS keys and download *s3_filename*
        (``s3://bucket/path/to/file``) to local *outfile*.

        Errors are printed and swallowed (best-effort download).
        """
        try:
            saltconfig = ConfigParser.ConfigParser()
            saltconfig.readfp(open(sys.path[0]+'/../default_params/.salt'))
            saltpass = saltconfig.get('Dolphin','AMAZON')
            # NOTE(review): region is hard-coded to us-east-1 -- confirm all
            # buckets live there.
            s3 = boto3.client('s3', 'us-east-1',
                aws_access_key_id=decrypt(saltpass, unhexlify(amazon['aws_access_key_id'])),
                aws_secret_access_key=decrypt(saltpass, unhexlify(amazon['aws_secret_access_key'])))
            print 'Download started[%s]=>[%s]'%(s3_filename, outfile)
            inputarr = s3_filename.split("/")
            amazon_bucket = inputarr[2] # "s3://<bucket>/..." -> element 2 after split
            inputfile = "/".join(inputarr[3:])
            print 'Bucket[%s]=>[%s]'%(amazon_bucket, inputfile)
            s3.download_file(amazon_bucket, inputfile, outfile)
        except Exception, ex:
            print ex
def main():
    """CLI entry point: parse options, fetch the user's AWS credentials
    from the Dolphin API and download the requested S3 object."""
    f = funcs()
    #define options
    parser = OptionParser()
    parser.add_option("-i", "--inputfile", dest="filename")
    parser.add_option("-o", "--outputfile", dest="output")
    parser.add_option("-u", "--username", dest="username")
    parser.add_option("-c", "--config", dest="config")
    # parse
    options, args = parser.parse_args()
    # retrieve options
    FILE = options.filename
    OUTPUT = options.output
    USERNAME = options.username
    CONFIG = options.config

    config = getConfig(CONFIG)
    boto = botoDownload(config['url'], f)
    amazon = boto.getAmazonCredentials(USERNAME)
    boto.downloadFile(amazon, FILE, OUTPUT)
    sys.exit(0)

if __name__ == "__main__":
    main()
| {
"content_hash": "17007d23d801f657527fe8a6c19f937d",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 96,
"avg_line_length": 31.650602409638555,
"alnum_prop": 0.6113437381043015,
"repo_name": "nephantes/dolphin-tools",
"id": "5453dd8b431635324ae12d331e3af1a0129021ab",
"size": "2627",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "tools/Dolphinv1.3/downloadS3.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "272964"
},
{
"name": "Python",
"bytes": "93507"
},
{
"name": "Shell",
"bytes": "591"
}
],
"symlink_target": ""
} |
import luigi
import os
import sys
import itertools
from ratatosk import backend
from ratatosk.log import setup_logging
from ratatosk.config import setup_config
from ratatosk.handler import setup_global_handlers
import ratatosk.lib.align.bwa
import ratatosk.lib.tools.gatk
import ratatosk.lib.tools.samtools
import ratatosk.lib.tools.picard
from ratatosk.pipeline.haloplex import HaloPlex, HaloPlexSummary
from ratatosk.pipeline.seqcap import SeqCap, SeqCapSummary
from ratatosk.pipeline.align import Align, AlignSummary
from ratatosk.pipeline import config_dict
from ratatosk.report.sphinx import SphinxReport
if __name__ == "__main__":
    task_cls = None
    if len(sys.argv) > 1:
        task = sys.argv[1]
        task_args = sys.argv[2:]
        # Known pipeline names get their bundled config file forced onto the
        # command line (overriding any user-supplied --config-file value).
        if task in config_dict.keys():
            # Reset config-file if present
            if "--config-file" in task_args:
                i = task_args.index("--config-file")
                task_args[i+1] = config_dict[task]['config']
            else:
                task_args.append("--config-file")
                task_args.append(config_dict[task]['config'])
            task_cls = config_dict[task]['cls']
    elif len(sys.argv) == 1:
        # NOTE(review): relies on luigi.run(['-h']) exiting the process; if
        # it returned, the task_args lookups below would raise NameError.
        luigi.run(['-h'])
    else:
        # NOTE(review): unreachable -- len(sys.argv) is always >= 1.
        task = None
    config_file = None
    custom_config_file = None
    if "--config-file" in task_args:
        config_file = task_args[task_args.index("--config-file") + 1]
    if "--custom-config" in task_args:
        custom_config_file = task_args[task_args.index("--custom-config") + 1]
    setup_logging()
    setup_config(config_file=config_file, custom_config_file=custom_config_file)
    setup_global_handlers()
    if task_cls:
        luigi.run(task_args, main_task_cls=task_cls)
    else:
        # Whatever other task/config the user wants to run
        luigi.run()
| {
"content_hash": "5d90fe30daebc1af1398ca732703f663",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 80,
"avg_line_length": 33.74074074074074,
"alnum_prop": 0.6531284302963776,
"repo_name": "percyfal/ratatosk",
"id": "a5f743b0ed3ae31bf5ff7b336229e1970bdb3736",
"size": "1822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/ratatosk_run.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "DOT",
"bytes": "11114"
},
{
"name": "JavaScript",
"bytes": "7718"
},
{
"name": "Python",
"bytes": "280738"
}
],
"symlink_target": ""
} |
from setuptools import setup
# Hard runtime dependencies; pins mark the oldest releases known to work.
requirements = [
    'click>=3.2',
    'icalendar',
    'urwid',
    'pyxdg',
    'pytz',
    'vdirsyncer',
    'pkginfo',
    'python-dateutil',
    'configobj',
    'tzlocal>=1.0',
]

# Optional extras: ``pip install khal[proctitle]`` sets the process title.
extra_requirements = {
    'proctitle': ['setproctitle'],
}

setup(
    name='khal',
    description='A CalDAV based calendar',
    long_description=open('README.rst').read(),
    author='Christian Geier',
    author_email='khal@lostpackets.de',
    url='http://lostpackets.de/khal/',
    license='Expat/MIT',
    packages=['khal', 'khal/ui', 'khal/khalendar', 'khal/settings'],
    # Ship the default config and its validation spec inside the package.
    package_data={'khal': [
        'settings/default.khal',
        'settings/khal.spec',
    ]},
    entry_points={
        'console_scripts': [
            'khal = khal.cli:main_khal',
            'ikhal = khal.cli:main_ikhal'
        ]
    },
    install_requires=requirements,
    extras_require=extra_requirements,
    setup_requires=['setuptools_scm'],
    # Derive the version from git tags and write it into khal/version.py.
    use_scm_version={'write_to': 'khal/version.py'},
    classifiers=[
        "Development Status :: 4 - Beta",
        "License :: OSI Approved :: MIT License",
        "Environment :: Console :: Curses",
        "Intended Audience :: End Users/Desktop",
        "Operating System :: POSIX",
        "Programming Language :: Python :: 2 :: Only",
        "Topic :: Utilities",
        "Topic :: Communications",
    ],
)
| {
"content_hash": "a595b726a1b6afbd07dc6846f9ad8146",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 68,
"avg_line_length": 26,
"alnum_prop": 0.5732946298984035,
"repo_name": "sheeprine/khal",
"id": "80ecba29fb39827325c46e7e486847008443a05b",
"size": "1459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "295828"
},
{
"name": "Shell",
"bytes": "165"
}
],
"symlink_target": ""
} |
"""
@brief test log(time=21s)
"""
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder, add_missing_development_version
import ensae_teaching_cs
class TestNotebook123CoverageClustering(unittest.TestCase):
    """Executes the td2a clustering notebooks as a coverage/regression test."""

    def setUp(self):
        # Fail early (and clearly) when sibling teaching modules are missing.
        add_missing_development_version(["pymyinstall", "pyensae", "pymmails", "jyquickhelper"],
                                        __file__, hide=True)

    def a_test_notebook_runner_1a(self, name, folder):
        """Run every notebook from *folder* whose filename contains *name*.

        Prefixed ``a_test`` so unittest does not collect it directly.
        """
        from ensae_teaching_cs.automation.notebook_test_helper import ls_notebooks, execute_notebooks, clean_function_1a
        from ensae_teaching_cs.helpers.size_helper import total_size
        self.assertTrue(total_size)
        temp = get_temp_folder(__file__, f"temp_notebook_123_{name}")
        keepnote = ls_notebooks(folder)
        self.assertTrue(len(keepnote) > 0)
        # Replace interactive input() calls so notebooks run unattended.
        replacements = {'input("Entrez un nombre")': 'random.randint(0, 100)',
                        'input(message)': 'random.randint(0, 100)'}
        execute_notebooks(temp, keepnote,
                          lambda i, n: name in n,
                          fLOG=fLOG, replacements=replacements,
                          clean_function=clean_function_1a,
                          dump=ensae_teaching_cs)

    def test_notebook_runner_clustering(self):
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        self.a_test_notebook_runner_1a("td2a_clustering", "td2a_ml")


if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "52478e59fbc26e1f9e1975257af303bf",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 120,
"avg_line_length": 36.74418604651163,
"alnum_prop": 0.6012658227848101,
"repo_name": "sdpython/ensae_teaching_cs",
"id": "ca4e83eba8acfdfd118fd88d2a0a6fe88c77578c",
"size": "1604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_unittests/ut_dnotebooks/test_1_2_3_coverage_notebook_clustering.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "382"
},
{
"name": "C#",
"bytes": "26850"
},
{
"name": "CSS",
"bytes": "220769"
},
{
"name": "HTML",
"bytes": "44390"
},
{
"name": "JavaScript",
"bytes": "31077"
},
{
"name": "Jupyter Notebook",
"bytes": "45255629"
},
{
"name": "PostScript",
"bytes": "169142"
},
{
"name": "Python",
"bytes": "1770141"
},
{
"name": "R",
"bytes": "339"
},
{
"name": "Shell",
"bytes": "3675"
},
{
"name": "TeX",
"bytes": "593824"
}
],
"symlink_target": ""
} |
"""
MoinMoin - CAS authentication
Jasig CAS (see http://www.jasig.org/cas) authentication module.
@copyright: 2009 MoinMoin:RichardLiao
@license: GNU GPL, see COPYING for details.
"""
import time, re
import urlparse
import urllib, urllib2
from MoinMoin import log
logging = log.getLogger(__name__)
from MoinMoin.auth import BaseAuth
from MoinMoin import user, wikiutil
class PyCAS(object):
    """Minimal client for a Jasig CAS server (CAS protocol 1.0)."""

    def __init__(self, server_url, renew=False, login_path='/login', logout_path='/logout',
                 validate_path='/validate', coding='utf-8'):
        self.server_url = server_url
        self.renew = renew
        self.login_path = login_path
        self.logout_path = logout_path
        self.validate_path = validate_path
        self.coding = coding

    def login_url(self, service):
        """Return the CAS login URL for the given service."""
        result = self.server_url + self.login_path + '?service=' + urllib.quote_plus(service)
        if self.renew:
            result += "&renew=true"
        return result

    def logout_url(self, redirect_url=None):
        """Return the CAS logout URL, optionally redirecting afterwards."""
        result = self.server_url + self.logout_path
        if redirect_url:
            result += '?url=' + urllib.quote_plus(redirect_url)
        return result

    def validate_url(self, service, ticket):
        """Return the CAS 1.0 validation URL for the service/ticket pair."""
        result = (self.server_url + self.validate_path +
                  '?service=' + urllib.quote_plus(service) +
                  '&ticket=' + urllib.quote_plus(ticket))
        if self.renew:
            result += "&renew=true"
        return result

    def validate_ticket(self, service, ticket):
        """Ask the CAS server whether *ticket* is valid for *service*.

        Returns ``(valid, username)``; the username line is decoded with
        ``self.coding``.
        """
        response = urllib2.urlopen(self.validate_url(service, ticket))
        valid = response.readline().strip() == 'yes'
        username = response.readline().strip().decode(self.coding)
        return valid, username
class CASAuth(BaseAuth):
    """ handle login from CAS """
    # Auth-method identifier stored on the user object (checked in logout()).
    name = 'CAS'
    login_inputs = ['username', 'password']
    logout_possible = True

    def __init__(self, auth_server, login_path="/login", logout_path="/logout", validate_path="/validate"):
        """
        @param auth_server: base URL of the CAS server
        @param login_path: CAS login endpoint path
        @param logout_path: CAS logout endpoint path
        @param validate_path: CAS 1.0 validation endpoint path
        """
        BaseAuth.__init__(self)
        self.cas = PyCAS(auth_server, login_path=login_path,
                         validate_path=validate_path, logout_path=logout_path)

    def request(self, request, user_obj, **kw):
        """Handle one request: validate a CAS ticket if present, otherwise
        redirect to the CAS login page when a login was requested.

        @return: (user_obj, continue_flag) per the MoinMoin auth protocol
        """
        # NOTE(review): 'ticket' is tested for truthiness here but indexed as
        # ticket[0] below; whether request.args.get returns a string or a list
        # depends on the request wrapper (MultiDict semantics) — confirm.
        ticket = request.args.get('ticket')
        action = request.args.get("action", [])
        logoutRequest = request.args.get('logoutRequest', [])
        # Service URL CAS redirects back to: current page, path quoted.
        url = request.url_root + urllib.quote_plus(request.path.encode('utf-8'))

        # # handle logout request from CAS
        # if logoutRequest:
        #     logoutRequestMatch = re.search("<samlp:SessionIndex>(.*)</samlp:SessionIndex>", logoutRequest[0])
        #     service_ticket = logoutRequestMatch.group(1)
        #     if service_ticket:
        #         # TODO: logout
        #         return self.logout(request, user_obj)

        # authenticated user
        if user_obj and user_obj.valid:
            return user_obj, True

        # anonymous
        if not ticket and not "login" in action:
            return user_obj, True

        # valid ticket on CAS
        if ticket:
            valid, username = self.cas.validate_ticket(url, ticket[0])
            if valid:
                u = user.User(request, auth_username=username, auth_method=self.name)
                u.valid = valid
                # auto create user
                u.create_or_update(True)
                return u, True

        # login
        request.http_redirect(self.cas.login_url(url))
        return user_obj, True

    def logout(self, request, user_obj, **kw):
        """Invalidate the session and redirect to the CAS logout page,
        but only for users that were authenticated via this method."""
        if self.name and user_obj and user_obj.auth_method == self.name:
            url = request.url_root + urllib.quote_plus(request.path.encode('utf-8'))
            request.http_redirect(self.cas.logout_url(url))
            user_obj.valid = False

        return user_obj, True
| {
"content_hash": "cae824c67ddfc64566bd6902c9ba715e",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 134,
"avg_line_length": 35.166666666666664,
"alnum_prop": 0.578436018957346,
"repo_name": "Glottotopia/aagd",
"id": "68a3e768e9f2417ee1a5fb227ca24b21397a6647",
"size": "4250",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "moin/local/moin/MoinMoin/auth/cas.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "152885"
},
{
"name": "CSS",
"bytes": "454208"
},
{
"name": "ColdFusion",
"bytes": "438820"
},
{
"name": "HTML",
"bytes": "1998354"
},
{
"name": "Java",
"bytes": "510468"
},
{
"name": "JavaScript",
"bytes": "6505329"
},
{
"name": "Lasso",
"bytes": "72399"
},
{
"name": "Makefile",
"bytes": "10216"
},
{
"name": "PHP",
"bytes": "259528"
},
{
"name": "Perl",
"bytes": "137186"
},
{
"name": "Python",
"bytes": "13713475"
},
{
"name": "Shell",
"bytes": "346"
},
{
"name": "XSLT",
"bytes": "15970"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.